xref: /dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c (revision b462f2737eb08b07b84da4204fbd1c9b9ba00b2d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
10 
11 #include <cmdline_rdline.h>
12 #include <cmdline_parse.h>
13 #include <cmdline_parse_string.h>
14 #include <cmdline_parse_num.h>
15 #include <cmdline_socket.h>
16 #include <cmdline.h>
17 #include <rte_log.h>
18 #include <rte_lcore.h>
19 #include <rte_ethdev.h>
20 
21 #include <rte_power_cpufreq.h>
22 #include <rte_power_guest_channel.h>
23 
24 #include "vm_power_cli_guest.h"
25 
26 
27 #define CHANNEL_PATH "/dev/virtio-ports/virtio.serial.port.poweragent"
28 
29 
30 #define RTE_LOGTYPE_GUEST_CLI RTE_LOGTYPE_USER1
31 
/* Parse result for the "quit" command (a single keyword token). */
struct cmd_quit_result {
	cmdline_fixed_string_t quit;	/* literal "quit" */
};
35 
/*
 * Overlays a port MAC address onto a 64-bit value so the MAC can be
 * used directly as a policy vfid (see set_policy_mac()).
 */
union PFID {
	struct rte_ether_addr addr;	/* 6-byte Ethernet address */
	uint64_t pfid;			/* same bytes viewed as a vfid */
};
40 
41 static struct rte_power_channel_packet policy;
42 
43 struct rte_power_channel_packet *
44 get_policy(void)
45 {
46 	return &policy;
47 }
48 
49 int
50 set_policy_mac(int port, int idx)
51 {
52 	struct rte_power_channel_packet *policy;
53 	union PFID pfid;
54 	int ret;
55 
56 	/* Use port MAC address as the vfid */
57 	ret = rte_eth_macaddr_get(port, &pfid.addr);
58 	if (ret != 0) {
59 		printf("Failed to get device (port %u) MAC address: %s\n",
60 				port, rte_strerror(-ret));
61 		return ret;
62 	}
63 
64 	printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
65 			"%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
66 			port, RTE_ETHER_ADDR_BYTES(&pfid.addr));
67 	policy = get_policy();
68 	policy->vfid[idx] = pfid.pfid;
69 	return 0;
70 }
71 
72 int
73 set_policy_defaults(struct rte_power_channel_packet *pkt)
74 {
75 	int ret;
76 
77 	ret = set_policy_mac(0, 0);
78 	if (ret != 0)
79 		pkt->nb_mac_to_monitor = 0;
80 	else
81 		pkt->nb_mac_to_monitor = 1;
82 
83 	pkt->t_boost_status.tbEnabled = false;
84 
85 	pkt->vcpu_to_control[0] = 0;
86 	pkt->vcpu_to_control[1] = 1;
87 	pkt->num_vcpu = 2;
88 	/* Dummy Population. */
89 	pkt->traffic_policy.min_packet_thresh = 96000;
90 	pkt->traffic_policy.avg_max_packet_thresh = 1800000;
91 	pkt->traffic_policy.max_max_packet_thresh = 2000000;
92 
93 	pkt->timer_policy.busy_hours[0] = 3;
94 	pkt->timer_policy.busy_hours[1] = 4;
95 	pkt->timer_policy.busy_hours[2] = 5;
96 	pkt->timer_policy.quiet_hours[0] = 11;
97 	pkt->timer_policy.quiet_hours[1] = 12;
98 	pkt->timer_policy.quiet_hours[2] = 13;
99 
100 	pkt->timer_policy.hours_to_use_traffic_profile[0] = 8;
101 	pkt->timer_policy.hours_to_use_traffic_profile[1] = 10;
102 
103 	pkt->core_type = RTE_POWER_CORE_TYPE_VIRTUAL;
104 	pkt->workload = RTE_POWER_WL_LOW;
105 	pkt->policy_to_use = RTE_POWER_POLICY_TIME;
106 	pkt->command = RTE_POWER_PKT_POLICY;
107 	strlcpy(pkt->vm_name, "ubuntu2", sizeof(pkt->vm_name));
108 
109 	return 0;
110 }
111 
112 static void cmd_quit_parsed(__rte_unused void *parsed_result,
113 		__rte_unused struct cmdline *cl,
114 		__rte_unused void *data)
115 {
116 	unsigned lcore_id;
117 
118 	RTE_LCORE_FOREACH(lcore_id) {
119 		rte_power_exit(lcore_id);
120 	}
121 	cmdline_quit(cl);
122 }
123 
/* Token and command table entry for the "quit" command. */
cmdline_parse_token_string_t cmd_quit_quit =
	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

cmdline_parse_inst_t cmd_quit = {
	.f = cmd_quit_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "close the application",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_quit_quit,
		NULL,
	},
};
136 
137 /* *** VM operations *** */
138 
/* Parse result for "query_cpu_freq <core_num>|all". */
struct cmd_freq_list_result {
	cmdline_fixed_string_t query_freq;	/* literal "query_cpu_freq" */
	cmdline_fixed_string_t cpu_num;		/* vcore number or "all" */
};
143 
144 static int
145 query_data(struct rte_power_channel_packet *pkt, unsigned int lcore_id)
146 {
147 	int ret;
148 	ret = rte_power_guest_channel_send_msg(pkt, lcore_id);
149 	if (ret < 0) {
150 		RTE_LOG(ERR, GUEST_CLI, "Error sending message.\n");
151 		return -1;
152 	}
153 	return 0;
154 }
155 
156 static int
157 receive_freq_list(struct rte_power_channel_packet_freq_list *pkt_freq_list,
158 		unsigned int lcore_id)
159 {
160 	int ret;
161 
162 	ret = rte_power_guest_channel_receive_msg(pkt_freq_list,
163 			sizeof(*pkt_freq_list),
164 			lcore_id);
165 	if (ret < 0) {
166 		RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n");
167 		return -1;
168 	}
169 	if (pkt_freq_list->command != RTE_POWER_FREQ_LIST) {
170 		RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n");
171 		return -1;
172 	}
173 	return 0;
174 }
175 
176 static void
177 cmd_query_freq_list_parsed(void *parsed_result,
178 		__rte_unused struct cmdline *cl,
179 		__rte_unused void *data)
180 {
181 	struct cmd_freq_list_result *res = parsed_result;
182 	unsigned int lcore_id;
183 	struct rte_power_channel_packet_freq_list pkt_freq_list;
184 	struct rte_power_channel_packet pkt;
185 	bool query_list = false;
186 	int ret;
187 	char *ep;
188 
189 	memset(&pkt, 0, sizeof(pkt));
190 	memset(&pkt_freq_list, 0, sizeof(pkt_freq_list));
191 
192 	if (!strcmp(res->cpu_num, "all")) {
193 
194 		/* Get first enabled lcore. */
195 		lcore_id = rte_get_next_lcore(-1,
196 				0,
197 				0);
198 		if (lcore_id == RTE_MAX_LCORE) {
199 			cmdline_printf(cl, "Enabled core not found.\n");
200 			return;
201 		}
202 
203 		pkt.command = RTE_POWER_QUERY_FREQ_LIST;
204 		strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
205 		query_list = true;
206 	} else {
207 		errno = 0;
208 		lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10);
209 		if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM ||
210 			ep == res->cpu_num) {
211 			cmdline_printf(cl, "Invalid parameter provided.\n");
212 			return;
213 		}
214 		pkt.command = RTE_POWER_QUERY_FREQ;
215 		strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
216 		pkt.resource_id = lcore_id;
217 	}
218 
219 	ret = query_data(&pkt, lcore_id);
220 	if (ret < 0) {
221 		cmdline_printf(cl, "Error during sending frequency list query.\n");
222 		return;
223 	}
224 
225 	ret = receive_freq_list(&pkt_freq_list, lcore_id);
226 	if (ret < 0) {
227 		cmdline_printf(cl, "Error during frequency list reception.\n");
228 		return;
229 	}
230 	if (query_list) {
231 		unsigned int i;
232 		for (i = 0; i < pkt_freq_list.num_vcpu; ++i)
233 			cmdline_printf(cl, "Frequency of [%d] vcore is %d.\n",
234 					i,
235 					pkt_freq_list.freq_list[i]);
236 	} else {
237 		cmdline_printf(cl, "Frequency of [%d] vcore is %d.\n",
238 				lcore_id,
239 				pkt_freq_list.freq_list[lcore_id]);
240 	}
241 }
242 
/* Tokens and command table entry for "query_cpu_freq <core_num>|all". */
cmdline_parse_token_string_t cmd_query_freq_token =
	TOKEN_STRING_INITIALIZER(struct cmd_freq_list_result, query_freq, "query_cpu_freq");
/* NULL pattern: accepts any string (core number or "all"). */
cmdline_parse_token_string_t cmd_query_freq_cpu_num_token =
	TOKEN_STRING_INITIALIZER(struct cmd_freq_list_result, cpu_num, NULL);

cmdline_parse_inst_t cmd_query_freq_list = {
	.f = cmd_query_freq_list_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "query_cpu_freq <core_num>|all, request"
				" information regarding virtual core frequencies."
				" The keyword 'all' will query list of all vcores for the VM",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_query_freq_token,
		(void *)&cmd_query_freq_cpu_num_token,
		NULL,
	},
};
260 
/* Parse result for "query_cpu_caps <core_num>|all". */
struct cmd_query_caps_result {
	cmdline_fixed_string_t query_caps;	/* literal "query_cpu_caps" */
	cmdline_fixed_string_t cpu_num;		/* vcore number or "all" */
};
265 
266 static int
267 receive_capabilities(struct rte_power_channel_packet_caps_list *pkt_caps_list,
268 		unsigned int lcore_id)
269 {
270 	int ret;
271 
272 	ret = rte_power_guest_channel_receive_msg(pkt_caps_list,
273 		sizeof(*pkt_caps_list),
274 		lcore_id);
275 	if (ret < 0) {
276 		RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n");
277 		return -1;
278 	}
279 	if (pkt_caps_list->command != RTE_POWER_CAPS_LIST) {
280 		RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n");
281 		return -1;
282 	}
283 	return 0;
284 }
285 
286 static void
287 cmd_query_caps_list_parsed(void *parsed_result,
288 		__rte_unused struct cmdline *cl,
289 		__rte_unused void *data)
290 {
291 	struct cmd_query_caps_result *res = parsed_result;
292 	unsigned int lcore_id;
293 	struct rte_power_channel_packet_caps_list pkt_caps_list;
294 	struct rte_power_channel_packet pkt;
295 	bool query_list = false;
296 	int ret;
297 	char *ep;
298 
299 	memset(&pkt, 0, sizeof(pkt));
300 	memset(&pkt_caps_list, 0, sizeof(pkt_caps_list));
301 
302 	if (!strcmp(res->cpu_num, "all")) {
303 
304 		/* Get first enabled lcore. */
305 		lcore_id = rte_get_next_lcore(-1,
306 				0,
307 				0);
308 		if (lcore_id == RTE_MAX_LCORE) {
309 			cmdline_printf(cl, "Enabled core not found.\n");
310 			return;
311 		}
312 
313 		pkt.command = RTE_POWER_QUERY_CAPS_LIST;
314 		strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
315 		query_list = true;
316 	} else {
317 		errno = 0;
318 		lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10);
319 		if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM ||
320 			ep == res->cpu_num) {
321 			cmdline_printf(cl, "Invalid parameter provided.\n");
322 			return;
323 		}
324 		pkt.command = RTE_POWER_QUERY_CAPS;
325 		strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
326 		pkt.resource_id = lcore_id;
327 	}
328 
329 	ret = query_data(&pkt, lcore_id);
330 	if (ret < 0) {
331 		cmdline_printf(cl, "Error during sending capabilities query.\n");
332 		return;
333 	}
334 
335 	ret = receive_capabilities(&pkt_caps_list, lcore_id);
336 	if (ret < 0) {
337 		cmdline_printf(cl, "Error during capabilities reception.\n");
338 		return;
339 	}
340 	if (query_list) {
341 		unsigned int i;
342 		for (i = 0; i < pkt_caps_list.num_vcpu; ++i)
343 			cmdline_printf(cl, "Capabilities of [%d] vcore are:"
344 					" turbo possibility: %" PRId64 ", "
345 					"is priority core: %" PRId64 ".\n",
346 					i,
347 					pkt_caps_list.turbo[i],
348 					pkt_caps_list.priority[i]);
349 	} else {
350 		cmdline_printf(cl, "Capabilities of [%d] vcore are:"
351 				" turbo possibility: %" PRId64 ", "
352 				"is priority core: %" PRId64 ".\n",
353 				lcore_id,
354 				pkt_caps_list.turbo[lcore_id],
355 				pkt_caps_list.priority[lcore_id]);
356 	}
357 }
358 
/* Tokens and command table entry for "query_cpu_caps <core_num>|all". */
cmdline_parse_token_string_t cmd_query_caps_token =
	TOKEN_STRING_INITIALIZER(struct cmd_query_caps_result, query_caps, "query_cpu_caps");
/* NULL pattern: accepts any string (core number or "all"). */
cmdline_parse_token_string_t cmd_query_caps_cpu_num_token =
	TOKEN_STRING_INITIALIZER(struct cmd_query_caps_result, cpu_num, NULL);

cmdline_parse_inst_t cmd_query_caps_list = {
	.f = cmd_query_caps_list_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "query_cpu_caps <core_num>|all, request"
				" information regarding virtual core capabilities."
				" The keyword 'all' will query list of all vcores for the VM",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_query_caps_token,
		(void *)&cmd_query_caps_cpu_num_token,
		NULL,
	},
};
376 
377 static int
378 check_response_cmd(unsigned int lcore_id, int *result)
379 {
380 	struct rte_power_channel_packet pkt;
381 	int ret;
382 
383 	ret = rte_power_guest_channel_receive_msg(&pkt, sizeof pkt, lcore_id);
384 	if (ret < 0)
385 		return -1;
386 
387 	switch (pkt.command) {
388 	case(RTE_POWER_CMD_ACK):
389 		*result = 1;
390 		break;
391 	case(RTE_POWER_CMD_NACK):
392 		*result = 0;
393 		break;
394 	default:
395 		RTE_LOG(ERR, GUEST_CLI,
396 				"Received invalid response from host, expecting ACK/NACK.\n");
397 		return -1;
398 	}
399 
400 	return 0;
401 }
402 
/* Parse result for "set_cpu_freq <core_num> <cmd>". */
struct cmd_set_cpu_freq_result {
	cmdline_fixed_string_t set_cpu_freq;	/* literal "set_cpu_freq" */
	uint32_t lcore_id;	/* target virtual core id */
	cmdline_fixed_string_t cmd;	/* up|down|min|max|enable_turbo|disable_turbo */
};
408 
409 static void
410 cmd_set_cpu_freq_parsed(void *parsed_result, struct cmdline *cl,
411 	       __rte_unused void *data)
412 {
413 	int ret = -1;
414 	struct cmd_set_cpu_freq_result *res = parsed_result;
415 
416 	if (!strcmp(res->cmd, "up"))
417 		ret = rte_power_freq_up(res->lcore_id);
418 	else if (!strcmp(res->cmd, "down"))
419 		ret = rte_power_freq_down(res->lcore_id);
420 	else if (!strcmp(res->cmd, "min"))
421 		ret = rte_power_freq_min(res->lcore_id);
422 	else if (!strcmp(res->cmd, "max"))
423 		ret = rte_power_freq_max(res->lcore_id);
424 	else if (!strcmp(res->cmd, "enable_turbo"))
425 		ret = rte_power_freq_enable_turbo(res->lcore_id);
426 	else if (!strcmp(res->cmd, "disable_turbo"))
427 		ret = rte_power_freq_disable_turbo(res->lcore_id);
428 
429 	if (ret != 1) {
430 		cmdline_printf(cl, "Error sending message: %s\n", strerror(ret));
431 		return;
432 	}
433 	int result;
434 	ret = check_response_cmd(res->lcore_id, &result);
435 	if (ret < 0) {
436 		RTE_LOG(ERR, GUEST_CLI, "No confirmation for sent message received\n");
437 	} else {
438 		cmdline_printf(cl, "%s received for message sent to host.\n",
439 				result == 1 ? "ACK" : "NACK");
440 	}
441 }
442 
/* Tokens and command table entry for "set_cpu_freq <core_num> <cmd>". */
cmdline_parse_token_string_t cmd_set_cpu_freq =
	TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
			set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
	TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
			lcore_id, RTE_UINT32);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
	TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
			cmd, "up#down#min#max#enable_turbo#disable_turbo");

cmdline_parse_inst_t cmd_set_cpu_freq_set = {
	.f = cmd_set_cpu_freq_parsed,
	.data = NULL,
	.help_str = "set_cpu_freq <core_num> "
			"<up|down|min|max|enable_turbo|disable_turbo>, "
			"adjust the frequency for the specified core.",
	.tokens = {
		(void *)&cmd_set_cpu_freq,
		(void *)&cmd_set_cpu_freq_core_num,
		(void *)&cmd_set_cpu_freq_cmd_cmd,
		NULL,
	},
};
466 
/* Parse result for "send_policy now". */
struct cmd_send_policy_result {
	cmdline_fixed_string_t send_policy;	/* literal "send_policy" */
	cmdline_fixed_string_t cmd;		/* literal "now" */
};
471 
472 static inline int
473 send_policy(struct rte_power_channel_packet *pkt, struct cmdline *cl)
474 {
475 	int ret;
476 
477 	ret = rte_power_guest_channel_send_msg(pkt, 1);
478 	if (ret < 0) {
479 		RTE_LOG(ERR, GUEST_CLI, "Error sending message: %s\n",
480 				ret > 0 ? strerror(ret) : "channel not connected");
481 		return -1;
482 	}
483 
484 	int result;
485 	ret = check_response_cmd(1, &result);
486 	if (ret < 0) {
487 		RTE_LOG(ERR, GUEST_CLI, "No confirmation for sent policy received\n");
488 	} else {
489 		cmdline_printf(cl, "%s for sent policy received.\n",
490 				result == 1 ? "ACK" : "NACK");
491 	}
492 	return 1;
493 }
494 
495 static void
496 cmd_send_policy_parsed(void *parsed_result, struct cmdline *cl,
497 		__rte_unused void *data)
498 {
499 	int ret = -1;
500 	struct cmd_send_policy_result *res = parsed_result;
501 
502 	if (!strcmp(res->cmd, "now")) {
503 		printf("Sending Policy down now!\n");
504 		ret = send_policy(&policy, cl);
505 	}
506 	if (ret != 1)
507 		cmdline_printf(cl, "Error sending message: %s\n",
508 				strerror(ret));
509 }
510 
/* Tokens and command table entry for "send_policy now". */
cmdline_parse_token_string_t cmd_send_policy =
	TOKEN_STRING_INITIALIZER(struct cmd_send_policy_result,
			send_policy, "send_policy");
cmdline_parse_token_string_t cmd_send_policy_cmd_cmd =
	TOKEN_STRING_INITIALIZER(struct cmd_send_policy_result,
			cmd, "now");

cmdline_parse_inst_t cmd_send_policy_set = {
	.f = cmd_send_policy_parsed,
	.data = NULL,
	.help_str = "send_policy now",
	.tokens = {
		(void *)&cmd_send_policy,
		(void *)&cmd_send_policy_cmd_cmd,
		NULL,
	},
};
528 
/* All commands available at the guest CLI prompt (NULL-terminated). */
cmdline_parse_ctx_t main_ctx[] = {
		(cmdline_parse_inst_t *)&cmd_quit,
		(cmdline_parse_inst_t *)&cmd_send_policy_set,
		(cmdline_parse_inst_t *)&cmd_set_cpu_freq_set,
		(cmdline_parse_inst_t *)&cmd_query_freq_list,
		(cmdline_parse_inst_t *)&cmd_query_caps_list,
		NULL,
};
537 
538 void
539 run_cli(__rte_unused void *arg)
540 {
541 	struct cmdline *cl;
542 
543 	cl = cmdline_stdin_new(main_ctx, "vmpower(guest)> ");
544 	if (cl == NULL)
545 		return;
546 
547 	cmdline_interact(cl);
548 	cmdline_stdin_exit(cl);
549 }
550