/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/un.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <errno.h>

#include <sys/sysinfo.h>
#include <sys/types.h>

#include <rte_log.h>
#include <rte_power.h>
#include <rte_spinlock.h>

#include "channel_manager.h"
#include "power_manager.h"
#include "oob_monitor.h"

/*
 * Scale the frequency of a core in the given DIRECTION (up, down, min, max,
 * enable_turbo, disable_turbo), holding the per-core spinlock around the
 * rte_power call.
 */
#define POWER_SCALE_CORE(DIRECTION, core_num, ret) do { \
	if (core_num >= ci.core_count) \
		return -1; \
	if (!(ci.cd[core_num].global_enabled_cpus)) \
		return -1; \
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl); \
	ret = rte_power_freq_##DIRECTION(core_num); \
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)

/* Per-core frequency table and the lock protecting rte_power calls on it. */
struct freq_info {
	rte_spinlock_t power_sl;
	uint32_t freqs[RTE_MAX_LCORE_FREQS];
	unsigned int num_freqs;
} __rte_cache_aligned;

static struct freq_info global_core_freq_info[RTE_MAX_LCORE];

struct core_info ci;

#define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"

struct core_info *
get_core_info(void)
{
	return &ci;
}

int
core_info_init(void)
{
	struct core_info *ci;
	int i;

	ci = get_core_info();

	ci->core_count = get_nprocs_conf();
	ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
	ci->cd = malloc(ci->core_count * sizeof(struct core_details));
	if (!ci->cd) {
		RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.\n");
		return -1;
	}
	memset(ci->cd, 0, ci->core_count * sizeof(struct core_details));
	for (i = 0; i < ci->core_count; i++)
		ci->cd[i].global_enabled_cpus = 1;
	printf("%d cores in system\n", ci->core_count);
	return 0;
}

int
power_manager_init(void)
{
	unsigned int i, num_cpus = 0, num_freqs = 0;
	int ret = 0;
	struct core_info *ci;
	unsigned int max_core_num;

	rte_power_set_env(PM_ENV_NOT_SET);

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	if (ci->core_count > RTE_MAX_LCORE)
		max_core_num = RTE_MAX_LCORE;
	else
		max_core_num = ci->core_count;

	for (i = 0; i < max_core_num; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			if (rte_power_init(i) < 0)
				RTE_LOG(ERR, POWER_MANAGER,
						"Unable to initialize power manager "
						"for core %u\n", i);
			num_cpus++;
			num_freqs = rte_power_freqs(i,
					global_core_freq_info[i].freqs,
					RTE_MAX_LCORE_FREQS);
			if (num_freqs == 0) {
				RTE_LOG(ERR, POWER_MANAGER,
					"Unable to get frequency list for core %u\n",
					i);
				ci->cd[i].oob_enabled = 0;
				ret = -1;
			}
			global_core_freq_info[i].num_freqs = num_freqs;

			rte_spinlock_init(&global_core_freq_info[i].power_sl);
		}
		if (ci->cd[i].oob_enabled)
			add_core_to_monitor(i);
	}
	RTE_LOG(INFO, POWER_MANAGER, "Managing %u cores out of %u available host cores\n",
			num_cpus, ci->core_count);
	return ret;
}

uint32_t
power_manager_get_current_frequency(unsigned int core_num)
{
	uint32_t freq, index;

	if (core_num >= RTE_MAX_LCORE) {
		RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
				core_num, RTE_MAX_LCORE-1);
		return -1;
	}
	if (!(ci.cd[core_num].global_enabled_cpus))
		return 0;

	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
	index = rte_power_get_freq(core_num);
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
	if (index >= RTE_MAX_LCORE_FREQS)
		freq = 0;
	else
		freq = global_core_freq_info[core_num].freqs[index];

	return freq;
}

int
power_manager_exit(void)
{
	unsigned int i;
	int ret = 0;
	struct core_info *ci;
	unsigned int max_core_num;

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	if (ci->core_count > RTE_MAX_LCORE)
		max_core_num = RTE_MAX_LCORE;
	else
		max_core_num = ci->core_count;

	for (i = 0; i < max_core_num; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			if (rte_power_exit(i) < 0) {
				RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
						"for core %u\n", i);
				ret = -1;
			}
			ci->cd[i].global_enabled_cpus = 0;
		}
		remove_core_from_monitor(i);
	}
	return ret;
}

int
power_manager_scale_core_up(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(up, core_num, ret);
	return ret;
}

int
power_manager_scale_core_down(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(down, core_num, ret);
	return ret;
}

int
power_manager_scale_core_min(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(min, core_num, ret);
	return ret;
}

int
power_manager_scale_core_max(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(max, core_num, ret);
	return ret;
}

int
power_manager_enable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(enable_turbo, core_num, ret);
	return ret;
}

int
power_manager_disable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(disable_turbo, core_num, ret);
	return ret;
}

int
power_manager_scale_core_med(unsigned int core_num)
{
	int ret = 0;
	struct core_info *ci;

	ci = get_core_info();
	if (core_num >= RTE_MAX_LCORE)
		return -1;
	if (!(ci->cd[core_num].global_enabled_cpus))
		return -1;
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
	ret = rte_power_set_freq(core_num,
			global_core_freq_info[core_num].num_freqs / 2);
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
	return ret;
}