/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/un.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <errno.h>

#include <sys/sysinfo.h>
#include <sys/types.h>

#include <rte_log.h>
#include <rte_power.h>
#include <rte_spinlock.h>

#include "channel_manager.h"
#include "power_manager.h"
#include "oob_monitor.h"

/*
 * Apply a frequency change in the given DIRECTION (up, down, min, max,
 * enable_turbo, disable_turbo) to core_num, holding the per-core spinlock
 * around the librte_power call. Returns -1 from the calling function if the
 * core is out of range or not enabled.
 */
#define POWER_SCALE_CORE(DIRECTION, core_num, ret) do { \
	if (core_num >= ci.core_count) \
		return -1; \
	if (!(ci.cd[core_num].global_enabled_cpus)) \
		return -1; \
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl); \
	ret = rte_power_freq_##DIRECTION(core_num); \
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)

/* Frequency table for one core and the lock serializing its librte_power calls. */
struct freq_info {
	rte_spinlock_t power_sl;
	uint32_t freqs[RTE_MAX_LCORE_FREQS];
	unsigned num_freqs;
} __rte_cache_aligned;

static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];

struct core_info ci;

#define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"

struct core_info *
get_core_info(void)
{
	return &ci;
}

/* Discover the number of configured CPUs and allocate per-core state. */
int
core_info_init(void)
{
	struct core_info *ci;
	int i;

	ci = get_core_info();

	ci->core_count = get_nprocs_conf();
	ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
	ci->cd = malloc(ci->core_count * sizeof(struct core_details));
	if (!ci->cd) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to allocate memory for core info.\n");
		return -1;
	}
	for (i = 0; i < ci->core_count; i++) {
		ci->cd[i].global_enabled_cpus = 1;
		ci->cd[i].oob_enabled = 0;
		ci->cd[i].msr_fd = 0;
	}
	printf("%d cores in system\n", ci->core_count);
	return 0;
}

/*
 * Initialize librte_power and fetch the frequency list for every enabled
 * core, and register cores that requested out-of-band monitoring.
 */
int
power_manager_init(void)
{
	unsigned int i, num_cpus = 0, num_freqs = 0;
	int ret = 0;
	struct core_info *ci;
	unsigned int max_core_num;

	rte_power_set_env(PM_ENV_NOT_SET);

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	if (ci->core_count > POWER_MGR_MAX_CPUS)
		max_core_num = POWER_MGR_MAX_CPUS;
	else
		max_core_num = ci->core_count;

	for (i = 0; i < max_core_num; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			if (rte_power_init(i) < 0)
				RTE_LOG(ERR, POWER_MANAGER,
						"Unable to initialize power manager "
						"for core %u\n", i);
			num_cpus++;
			num_freqs = rte_power_freqs(i,
					global_core_freq_info[i].freqs,
					RTE_MAX_LCORE_FREQS);
			if (num_freqs == 0) {
				RTE_LOG(ERR, POWER_MANAGER,
					"Unable to get frequency list for core %u\n",
					i);
				ci->cd[i].oob_enabled = 0;
				ret = -1;
			}
			global_core_freq_info[i].num_freqs = num_freqs;

			rte_spinlock_init(&global_core_freq_info[i].power_sl);
		}
		if (ci->cd[i].oob_enabled)
			add_core_to_monitor(i);
	}
	RTE_LOG(INFO, POWER_MANAGER, "Managing %u cores out of %u available host cores\n",
			num_cpus, ci->core_count);
	return ret;
}

/* Return the frequency currently set on core_num, or 0 if it cannot be read. */
uint32_t
power_manager_get_current_frequency(unsigned core_num)
{
	uint32_t freq, index;

	if (core_num >= POWER_MGR_MAX_CPUS) {
		RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
				core_num, POWER_MGR_MAX_CPUS-1);
		return -1;
	}
	if (!(ci.cd[core_num].global_enabled_cpus))
		return 0;

	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
	index = rte_power_get_freq(core_num);
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
	if (index >= POWER_MGR_MAX_CPUS)
		freq = 0;
	else
		freq = global_core_freq_info[core_num].freqs[index];

	return freq;
}

/* Shut down librte_power on every managed core and stop out-of-band monitoring. */
int
power_manager_exit(void)
{
	unsigned int i;
	int ret = 0;
	struct core_info *ci;
	unsigned int max_core_num;

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	if (ci->core_count > POWER_MGR_MAX_CPUS)
		max_core_num = POWER_MGR_MAX_CPUS;
	else
		max_core_num = ci->core_count;

	for (i = 0; i < max_core_num; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			if (rte_power_exit(i) < 0) {
				RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
						"for core %u\n", i);
				ret = -1;
			}
			ci->cd[i].global_enabled_cpus = 0;
		}
		remove_core_from_monitor(i);
	}
	return ret;
}

int
power_manager_scale_core_up(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(up, core_num, ret);
	return ret;
}

int
power_manager_scale_core_down(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(down, core_num, ret);
	return ret;
}

int
power_manager_scale_core_min(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(min, core_num, ret);
	return ret;
}

int
power_manager_scale_core_max(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(max, core_num, ret);
	return ret;
}

int
power_manager_enable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(enable_turbo, core_num, ret);
	return ret;
}

int
power_manager_disable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(disable_turbo, core_num, ret);
	return ret;
}

/* Set core_num to the middle entry of its available frequency list. */
int
power_manager_scale_core_med(unsigned int core_num)
{
	int ret = 0;
	struct core_info *ci;

	ci = get_core_info();
	if (core_num >= POWER_MGR_MAX_CPUS)
		return -1;
	if (!(ci->cd[core_num].global_enabled_cpus))
		return -1;
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
	ret = rte_power_set_freq(core_num,
			global_core_freq_info[core_num].num_freqs / 2);
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
	return ret;
}
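/*
 * Illustrative usage sketch: the guard macro POWER_MANAGER_USAGE_EXAMPLE and
 * the function below are hypothetical names that are not part of the real
 * application, and the block is compiled out by default. It only shows the
 * calling sequence a front end (such as the vm_power_manager CLI) is assumed
 * to follow for the entry points in this file: discover cores, initialise
 * librte_power, scale a core, read the resulting frequency back, then tear
 * everything down. Error handling is reduced to early returns.
 */
#ifdef POWER_MANAGER_USAGE_EXAMPLE
static int
power_manager_usage_example(unsigned int core_num)
{
	uint32_t freq;

	/* Allocate per-core state and bring up librte_power for enabled cores. */
	if (core_info_init() < 0 || power_manager_init() < 0)
		return -1;

	/* Scale the chosen core to its highest available frequency. */
	if (power_manager_scale_core_max(core_num) < 0)
		return -1;

	/* Read back the value now reported by the power library. */
	freq = power_manager_get_current_frequency(core_num);
	printf("core %u frequency: %"PRIu32"\n", core_num, freq);

	/* Release librte_power state and stop out-of-band monitoring. */
	return power_manager_exit();
}
#endif /* POWER_MANAGER_USAGE_EXAMPLE */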