xref: /dpdk/examples/vm_power_manager/power_manager.c (revision ceccf8dc7c3d7797e380f12b45cd3ea1d7396b58)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/un.h>
10 #include <fcntl.h>
11 #include <unistd.h>
12 #include <dirent.h>
13 #include <errno.h>
14 
15 #include <sys/sysinfo.h>
16 #include <sys/types.h>
17 
18 #include <rte_log.h>
19 #include <rte_power.h>
20 #include <rte_spinlock.h>
21 
22 #include "channel_manager.h"
23 #include "power_manager.h"
24 #include "oob_monitor.h"
25 
/*
 * Bounds-check core_num, then run rte_power_freq_<DIRECTION>() on it under
 * the per-core spinlock, storing the result in ret.  Causes the enclosing
 * function to return -1 when core_num is out of range or the core is not
 * enabled.  core_num may be evaluated more than once (function-like macro).
 */
#define POWER_SCALE_CORE(DIRECTION, core_num , ret) do { \
	/* global_core_freq_info[] only has POWER_MGR_MAX_CPUS entries, \
	 * while ci.core_count may be larger on big machines — check both \
	 * bounds before indexing either array. */ \
	if (core_num >= POWER_MGR_MAX_CPUS) \
		return -1; \
	if (core_num >= ci.core_count) \
		return -1; \
	if (!(ci.cd[core_num].global_enabled_cpus)) \
		return -1; \
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl); \
	ret = rte_power_freq_##DIRECTION(core_num); \
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)
35 
/*
 * For every set bit in the 64-bit core_mask, run
 * rte_power_freq_<DIRECTION>() on that core under its spinlock.
 * ret is set to -1 if the operation fails for any enabled core;
 * disabled or out-of-range cores are skipped silently.
 */
#define POWER_SCALE_MASK(DIRECTION, core_mask, ret) do { \
	int i; \
	/* The loop update clears bit i each pass.  1ULL keeps the shift \
	 * defined for bits 31..63 of the 64-bit mask; with the original \
	 * (1 << i) those bits were never cleared (32-bit int shift, UB \
	 * past bit 31) and the loop could spin forever. */ \
	for (i = 0; core_mask; core_mask &= ~(1ULL << i++)) { \
		if ((core_mask >> i) & 1) { \
			/* Don't index past ci.cd[] (core_count entries) or \
			 * global_core_freq_info[] (POWER_MGR_MAX_CPUS). */ \
			if (i >= ci.core_count || i >= POWER_MGR_MAX_CPUS) \
				continue; \
			if (!(ci.cd[i].global_enabled_cpus)) \
				continue; \
			rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
			if (rte_power_freq_##DIRECTION(i) != 1) \
				ret = -1; \
			rte_spinlock_unlock(&global_core_freq_info[i].power_sl); \
		} \
	} \
} while (0)
49 
/*
 * Per-core frequency table plus the spinlock that serializes all
 * rte_power calls made for that core.
 */
struct freq_info {
	rte_spinlock_t power_sl;	/* guards rte_power_* calls on this core */
	uint32_t freqs[RTE_MAX_LCORE_FREQS];	/* filled by rte_power_freqs() */
	unsigned num_freqs;	/* number of valid entries in freqs[] */
} __rte_cache_aligned;

/* One slot per possible CPU, indexed by physical core number. */
static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];

/* Process-wide core bookkeeping, populated by core_info_init(). */
struct core_info ci;

/* NOTE(review): not referenced anywhere in this file — presumably kept
 * for topology lookups elsewhere or historical reasons; confirm before
 * removing. */
#define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"
61 
/*
 * Return a pointer to the process-wide core_info singleton.
 * Callable at any time; the fields are only meaningful once
 * core_info_init() has succeeded.
 */
struct core_info *
get_core_info(void)
{
	return &ci;
}
67 
68 int
69 core_info_init(void)
70 {
71 	struct core_info *ci;
72 	int i;
73 
74 	ci = get_core_info();
75 
76 	ci->core_count = get_nprocs_conf();
77 	ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
78 	ci->cd = malloc(ci->core_count * sizeof(struct core_details));
79 	if (!ci->cd) {
80 		RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.");
81 		return -1;
82 	}
83 	for (i = 0; i < ci->core_count; i++) {
84 		ci->cd[i].global_enabled_cpus = 1;
85 		ci->cd[i].oob_enabled = 0;
86 		ci->cd[i].msr_fd = 0;
87 	}
88 	printf("%d cores in system\n", ci->core_count);
89 	return 0;
90 }
91 
/*
 * Initialise the rte_power library for every enabled core and cache each
 * core's available-frequency table in global_core_freq_info[].
 *
 * @return 0 on success; -1 if core info is unavailable or any enabled
 *         core reported an empty frequency list (initialisation of the
 *         remaining cores still completes — best effort).
 */
int
power_manager_init(void)
{
	unsigned int i, num_cpus = 0, num_freqs = 0;
	int ret = 0;
	struct core_info *ci;	/* local; shadows the file-scope global */
	unsigned int max_core_num;

	/* Force the userspace ACPI cpufreq backend. */
	rte_power_set_env(PM_ENV_ACPI_CPUFREQ);

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	/* Clamp to the size of the static global_core_freq_info[] table. */
	if (ci->core_count > POWER_MGR_MAX_CPUS)
		max_core_num = POWER_MGR_MAX_CPUS;
	else
		max_core_num = ci->core_count;

	for (i = 0; i < max_core_num; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			/* NOTE(review): an rte_power_init() failure is only
			 * logged — the core is still counted and its
			 * frequency list still queried below.  Confirm this
			 * best-effort behaviour is intentional. */
			if (rte_power_init(i) < 0)
				RTE_LOG(ERR, POWER_MANAGER,
						"Unable to initialize power manager "
						"for core %u\n", i);
			num_cpus++;
			num_freqs = rte_power_freqs(i,
					global_core_freq_info[i].freqs,
					RTE_MAX_LCORE_FREQS);
			if (num_freqs == 0) {
				RTE_LOG(ERR, POWER_MANAGER,
					"Unable to get frequency list for core %u\n",
					i);
				/* No usable frequency data: make sure this
				 * core is not handed to the OOB monitor. */
				ci->cd[i].oob_enabled = 0;
				ret = -1;
			}
			global_core_freq_info[i].num_freqs = num_freqs;

			rte_spinlock_init(&global_core_freq_info[i].power_sl);
		}
		if (ci->cd[i].oob_enabled)
			add_core_to_monitor(i);
	}
	RTE_LOG(INFO, POWER_MANAGER, "Managing %u cores out of %u available host cores\n",
			num_cpus, ci->core_count);
	return ret;

}
143 
144 uint32_t
145 power_manager_get_current_frequency(unsigned core_num)
146 {
147 	uint32_t freq, index;
148 
149 	if (core_num >= POWER_MGR_MAX_CPUS) {
150 		RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
151 				core_num, POWER_MGR_MAX_CPUS-1);
152 		return -1;
153 	}
154 	if (!(ci.cd[core_num].global_enabled_cpus))
155 		return 0;
156 
157 	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
158 	index = rte_power_get_freq(core_num);
159 	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
160 	if (index >= POWER_MGR_MAX_CPUS)
161 		freq = 0;
162 	else
163 		freq = global_core_freq_info[core_num].freqs[index];
164 
165 	return freq;
166 }
167 
168 int
169 power_manager_exit(void)
170 {
171 	unsigned int i;
172 	int ret = 0;
173 	struct core_info *ci;
174 	unsigned int max_core_num;
175 
176 	ci = get_core_info();
177 	if (!ci) {
178 		RTE_LOG(ERR, POWER_MANAGER,
179 				"Failed to get core info!\n");
180 		return -1;
181 	}
182 
183 	if (ci->core_count > POWER_MGR_MAX_CPUS)
184 		max_core_num = POWER_MGR_MAX_CPUS;
185 	else
186 		max_core_num = ci->core_count;
187 
188 	for (i = 0; i < max_core_num; i++) {
189 		if (ci->cd[i].global_enabled_cpus) {
190 			if (rte_power_exit(i) < 0) {
191 				RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
192 						"for core %u\n", i);
193 				ret = -1;
194 			}
195 			ci->cd[i].global_enabled_cpus = 0;
196 		}
197 		remove_core_from_monitor(i);
198 	}
199 	return ret;
200 }
201 
/*
 * Step up the frequency of every core whose bit is set in core_mask.
 *
 * @param core_mask  bitmask of core numbers (bit N == core N)
 * @return 0 on success, -1 if scaling failed for any enabled core
 */
int
power_manager_scale_mask_up(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(up, core_mask, ret);
	return ret;
}
210 
/*
 * Step down the frequency of every core whose bit is set in core_mask.
 *
 * @param core_mask  bitmask of core numbers (bit N == core N)
 * @return 0 on success, -1 if scaling failed for any enabled core
 */
int
power_manager_scale_mask_down(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(down, core_mask, ret);
	return ret;
}
219 
/*
 * Set every core whose bit is set in core_mask to its minimum frequency.
 *
 * @param core_mask  bitmask of core numbers (bit N == core N)
 * @return 0 on success, -1 if scaling failed for any enabled core
 */
int
power_manager_scale_mask_min(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(min, core_mask, ret);
	return ret;
}
228 
/*
 * Set every core whose bit is set in core_mask to its maximum frequency.
 *
 * @param core_mask  bitmask of core numbers (bit N == core N)
 * @return 0 on success, -1 if scaling failed for any enabled core
 */
int
power_manager_scale_mask_max(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(max, core_mask, ret);
	return ret;
}
237 
/*
 * Enable turbo boost on every core whose bit is set in core_mask.
 *
 * @param core_mask  bitmask of core numbers (bit N == core N)
 * @return 0 on success, -1 if the operation failed for any enabled core
 */
int
power_manager_enable_turbo_mask(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(enable_turbo, core_mask, ret);
	return ret;
}
246 
/*
 * Disable turbo boost on every core whose bit is set in core_mask.
 *
 * @param core_mask  bitmask of core numbers (bit N == core N)
 * @return 0 on success, -1 if the operation failed for any enabled core
 */
int
power_manager_disable_turbo_mask(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(disable_turbo, core_mask, ret);
	return ret;
}
255 
/*
 * Step up the frequency of a single core.
 *
 * @param core_num  zero-based physical core index
 * @return result of rte_power_freq_up() via POWER_SCALE_CORE, or -1 if
 *         core_num is out of range or the core is disabled
 */
int
power_manager_scale_core_up(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(up, core_num, ret);
	return ret;
}
264 
/*
 * Step down the frequency of a single core.
 *
 * @param core_num  zero-based physical core index
 * @return result of rte_power_freq_down() via POWER_SCALE_CORE, or -1 if
 *         core_num is out of range or the core is disabled
 */
int
power_manager_scale_core_down(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(down, core_num, ret);
	return ret;
}
273 
/*
 * Set a single core to its minimum frequency.
 *
 * @param core_num  zero-based physical core index
 * @return result of rte_power_freq_min() via POWER_SCALE_CORE, or -1 if
 *         core_num is out of range or the core is disabled
 */
int
power_manager_scale_core_min(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(min, core_num, ret);
	return ret;
}
282 
/*
 * Set a single core to its maximum frequency.
 *
 * @param core_num  zero-based physical core index
 * @return result of rte_power_freq_max() via POWER_SCALE_CORE, or -1 if
 *         core_num is out of range or the core is disabled
 */
int
power_manager_scale_core_max(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(max, core_num, ret);
	return ret;
}
291 
/*
 * Enable turbo boost on a single core.
 *
 * @param core_num  zero-based physical core index
 * @return result of rte_power_freq_enable_turbo() via POWER_SCALE_CORE,
 *         or -1 if core_num is out of range or the core is disabled
 */
int
power_manager_enable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(enable_turbo, core_num, ret);
	return ret;
}
300 
/*
 * Disable turbo boost on a single core.
 *
 * @param core_num  zero-based physical core index
 * @return result of rte_power_freq_disable_turbo() via POWER_SCALE_CORE,
 *         or -1 if core_num is out of range or the core is disabled
 */
int
power_manager_disable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(disable_turbo, core_num, ret);
	return ret;
}
309 
310 int
311 power_manager_scale_core_med(unsigned int core_num)
312 {
313 	int ret = 0;
314 	struct core_info *ci;
315 
316 	ci = get_core_info();
317 	if (core_num >= POWER_MGR_MAX_CPUS)
318 		return -1;
319 	if (!(ci->cd[core_num].global_enabled_cpus))
320 		return -1;
321 	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
322 	ret = rte_power_set_freq(core_num,
323 				global_core_freq_info[core_num].num_freqs / 2);
324 	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
325 	return ret;
326 }
327