xref: /dpdk/examples/vm_power_manager/channel_monitor.c (revision a63504a90f6aa55ae7aea204a8944cd9af9342bd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <unistd.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
#include <inttypes.h>
9 #include <signal.h>
10 #include <errno.h>
11 #include <string.h>
12 #include <fcntl.h>
13 #include <sys/types.h>
14 #include <sys/epoll.h>
15 #include <sys/queue.h>
16 #include <sys/time.h>
17 #include <sys/socket.h>
18 #include <sys/select.h>
19 #ifdef USE_JANSSON
20 #include <jansson.h>
21 #else
22 #pragma message "Jansson dev libs unavailable, not including JSON parsing"
23 #endif
24 #include <rte_log.h>
25 #include <rte_memory.h>
26 #include <rte_malloc.h>
27 #include <rte_atomic.h>
28 #include <rte_cycles.h>
29 #include <rte_ethdev.h>
30 #include <rte_pmd_i40e.h>
31 
32 #include <libvirt/libvirt.h>
33 #include "channel_monitor.h"
34 #include "channel_commands.h"
35 #include "channel_manager.h"
36 #include "power_manager.h"
37 #include "oob_monitor.h"
38 
39 #define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1
40 
41 #define MAX_EVENTS 256
42 
43 uint64_t vsi_pkt_count_prev[384];
44 uint64_t rdtsc_prev[384];
45 #define MAX_JSON_STRING_LEN 1024
46 char json_data[MAX_JSON_STRING_LEN];
47 
48 double time_period_ms = 1;
49 static volatile unsigned run_loop = 1;
50 static int global_event_fd;
51 static unsigned int policy_is_set;
52 static struct epoll_event *global_events_list;
53 static struct policy policies[MAX_CLIENTS];
54 
55 #ifdef USE_JANSSON
56 
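/*
 * The VF id sent to the PMD and the MAC address used to look it up share
 * storage: set_policy_mac() fills in .addr and then copies .pfid into the
 * policy packet's vfid[] entry.
 */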
57 union PFID {
58 	struct ether_addr addr;
59 	uint64_t pfid;
60 };
61 
62 static int
63 str_to_ether_addr(const char *a, struct ether_addr *ether_addr)
64 {
65 	int i;
66 	char *end;
67 	unsigned long o[ETHER_ADDR_LEN];
68 
69 	i = 0;
70 	do {
71 		errno = 0;
72 		o[i] = strtoul(a, &end, 16);
73 		if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
74 			return -1;
75 		a = end + 1;
76 	} while (++i != RTE_DIM(o) && end[0] != 0);
77 
78 	/* Junk at the end of line */
79 	if (end[0] != 0)
80 		return -1;
81 
82 	/* Support the format XX:XX:XX:XX:XX:XX */
83 	if (i == ETHER_ADDR_LEN) {
84 		while (i-- != 0) {
85 			if (o[i] > UINT8_MAX)
86 				return -1;
87 			ether_addr->addr_bytes[i] = (uint8_t)o[i];
88 		}
89 	/* Support the format XXXX:XXXX:XXXX */
90 	} else if (i == ETHER_ADDR_LEN / 2) {
91 		while (i-- != 0) {
92 			if (o[i] > UINT16_MAX)
93 				return -1;
94 			ether_addr->addr_bytes[i * 2] =
95 					(uint8_t)(o[i] >> 8);
96 			ether_addr->addr_bytes[i * 2 + 1] =
97 					(uint8_t)(o[i] & 0xff);
98 		}
99 	/* unknown format */
100 	} else
101 		return -1;
102 
103 	return 0;
104 }
105 
106 static int
107 set_policy_mac(struct channel_packet *pkt, int idx, char *mac)
108 {
109 	union PFID pfid;
110 	int ret;
111 
112 	/* Use port MAC address as the vfid */
113 	ret = str_to_ether_addr(mac, &pfid.addr);
114 
115 	if (ret != 0) {
116 		RTE_LOG(ERR, CHANNEL_MONITOR,
117 			"Invalid mac address received in JSON\n");
118 		pkt->vfid[idx] = 0;
119 		return -1;
120 	}
121 
122 	printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
123 			"%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
124 			pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
125 			pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
126 			pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
127 
128 	pkt->vfid[idx] = pfid.pfid;
129 	return 0;
130 }
131 
132 
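/*
 * Parse a JSON request into a channel_packet. The top-level object is
 * expected to hold either a "policy" or an "instruction" object, whose
 * members are walked recursively. An illustrative policy (the values are
 * examples only) looks like:
 *
 * { "policy": {
 *	"name": "ubuntu_vm",
 *	"command": "create",
 *	"policy_type": "TIME",
 *	"busy_hours": [ 17, 18, 19, 20, 21, 22, 23 ],
 *	"quiet_hours": [ 2, 3, 4, 5, 6 ],
 *	"core_list": [ 11, 12 ]
 * } }
 */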
133 static int
134 parse_json_to_pkt(json_t *element, struct channel_packet *pkt)
135 {
136 	const char *key;
137 	json_t *value;
138 	int ret;
139 
140 	memset(pkt, 0, sizeof(struct channel_packet));
141 
142 	pkt->nb_mac_to_monitor = 0;
143 	pkt->t_boost_status.tbEnabled = false;
144 	pkt->workload = LOW;
145 	pkt->policy_to_use = TIME;
146 	pkt->command = PKT_POLICY;
147 	pkt->core_type = CORE_TYPE_PHYSICAL;
148 
149 	json_object_foreach(element, key, value) {
150 		if (!strcmp(key, "policy")) {
151 			/* Recurse in to get the contents of profile */
152 			ret = parse_json_to_pkt(value, pkt);
153 			if (ret)
154 				return ret;
155 		} else if (!strcmp(key, "instruction")) {
156 			/* Recurse in to get the contents of instruction */
157 			ret = parse_json_to_pkt(value, pkt);
158 			if (ret)
159 				return ret;
160 		} else if (!strcmp(key, "name")) {
161 			snprintf(pkt->vm_name, sizeof(pkt->vm_name), "%s",
					json_string_value(value));
162 		} else if (!strcmp(key, "command")) {
163 			char command[32];
164 			snprintf(command, 32, "%s", json_string_value(value));
165 			if (!strcmp(command, "power")) {
166 				pkt->command = CPU_POWER;
167 			} else if (!strcmp(command, "create")) {
168 				pkt->command = PKT_POLICY;
169 			} else if (!strcmp(command, "destroy")) {
170 				pkt->command = PKT_POLICY_REMOVE;
171 			} else {
172 				RTE_LOG(ERR, CHANNEL_MONITOR,
173 					"Invalid command received in JSON\n");
174 				return -1;
175 			}
176 		} else if (!strcmp(key, "policy_type")) {
177 			char command[32];
178 			snprintf(command, 32, "%s", json_string_value(value));
179 			if (!strcmp(command, "TIME")) {
180 				pkt->policy_to_use = TIME;
181 			} else if (!strcmp(command, "TRAFFIC")) {
182 				pkt->policy_to_use = TRAFFIC;
183 			} else if (!strcmp(command, "WORKLOAD")) {
184 				pkt->policy_to_use = WORKLOAD;
185 			} else if (!strcmp(command, "BRANCH_RATIO")) {
186 				pkt->policy_to_use = BRANCH_RATIO;
187 			} else {
188 				RTE_LOG(ERR, CHANNEL_MONITOR,
189 					"Wrong policy_type received in JSON\n");
190 				return -1;
191 			}
192 		} else if (!strcmp(key, "workload")) {
193 			char command[32];
194 			snprintf(command, 32, "%s", json_string_value(value));
195 			if (!strcmp(command, "HIGH")) {
196 				pkt->workload = HIGH;
197 			} else if (!strcmp(command, "MEDIUM")) {
198 				pkt->workload = MEDIUM;
199 			} else if (!strcmp(command, "LOW")) {
200 				pkt->workload = LOW;
201 			} else {
202 				RTE_LOG(ERR, CHANNEL_MONITOR,
203 					"Wrong workload received in JSON\n");
204 				return -1;
205 			}
206 		} else if (!strcmp(key, "busy_hours")) {
207 			unsigned int i;
208 			size_t size = json_array_size(value);
209 
210 			for (i = 0; i < size; i++) {
211 				int hour = (int)json_integer_value(
212 						json_array_get(value, i));
213 				pkt->timer_policy.busy_hours[i] = hour;
214 			}
215 		} else if (!strcmp(key, "quiet_hours")) {
216 			unsigned int i;
217 			size_t size = json_array_size(value);
218 
219 			for (i = 0; i < size; i++) {
220 				int hour = (int)json_integer_value(
221 						json_array_get(value, i));
222 				pkt->timer_policy.quiet_hours[i] = hour;
223 			}
224 		} else if (!strcmp(key, "core_list")) {
225 			unsigned int i;
226 			size_t size = json_array_size(value);
227 
228 			for (i = 0; i < size; i++) {
229 				int core = (int)json_integer_value(
230 						json_array_get(value, i));
231 				pkt->vcpu_to_control[i] = core;
232 			}
233 			pkt->num_vcpu = size;
234 		} else if (!strcmp(key, "mac_list")) {
235 			unsigned int i;
236 			size_t size = json_array_size(value);
237 
238 			for (i = 0; i < size; i++) {
239 				char mac[32];
240 				snprintf(mac, 32, "%s", json_string_value(
241 						json_array_get(value, i)));
242 				set_policy_mac(pkt, i, mac);
243 			}
244 			pkt->nb_mac_to_monitor = size;
245 		} else if (!strcmp(key, "avg_packet_thresh")) {
246 			pkt->traffic_policy.avg_max_packet_thresh =
247 					(uint32_t)json_integer_value(value);
248 		} else if (!strcmp(key, "max_packet_thresh")) {
249 			pkt->traffic_policy.max_max_packet_thresh =
250 					(uint32_t)json_integer_value(value);
251 		} else if (!strcmp(key, "unit")) {
252 			char unit[32];
253 			snprintf(unit, 32, "%s", json_string_value(value));
254 			if (!strcmp(unit, "SCALE_UP")) {
255 				pkt->unit = CPU_POWER_SCALE_UP;
256 			} else if (!strcmp(unit, "SCALE_DOWN")) {
257 				pkt->unit = CPU_POWER_SCALE_DOWN;
258 			} else if (!strcmp(unit, "SCALE_MAX")) {
259 				pkt->unit = CPU_POWER_SCALE_MAX;
260 			} else if (!strcmp(unit, "SCALE_MIN")) {
261 				pkt->unit = CPU_POWER_SCALE_MIN;
262 			} else if (!strcmp(unit, "ENABLE_TURBO")) {
263 				pkt->unit = CPU_POWER_ENABLE_TURBO;
264 			} else if (!strcmp(unit, "DISABLE_TURBO")) {
265 				pkt->unit = CPU_POWER_DISABLE_TURBO;
266 			} else {
267 				RTE_LOG(ERR, CHANNEL_MONITOR,
268 					"Invalid command received in JSON\n");
269 				return -1;
270 			}
271 		} else if (!strcmp(key, "resource_id")) {
272 			pkt->resource_id = (uint32_t)json_integer_value(value);
273 		} else {
274 			RTE_LOG(ERR, CHANNEL_MONITOR,
275 				"Unknown key received in JSON string: %s\n",
276 				key);
277 		}
278 	}
279 	return 0;
280 }
281 #endif
282 
283 void channel_monitor_exit(void)
284 {
285 	run_loop = 0;
286 	rte_free(global_events_list);
287 }
288 
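/*
 * If the pcpu recorded in core_share entry z of policy pNo is also used by
 * another VM (lvm_info[x], vcpu t), mark it as shared and scale it to the
 * maximum frequency; shared cores are then skipped by the per-policy
 * scaling loops below.
 */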
289 static void
290 core_share(int pNo, int z, int x, int t)
291 {
292 	if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
293 		if (strcmp(policies[pNo].pkt.vm_name,
294 				lvm_info[x].vm_name) != 0) {
295 			policies[pNo].core_share[z].status = 1;
296 			power_manager_scale_core_max(
297 					policies[pNo].core_share[z].pcpu);
298 		}
299 	}
300 }
301 
302 static void
303 core_share_status(int pNo)
304 {
305 
306 	int noVms = 0, noVcpus = 0, z, x, t;
307 
308 	get_all_vm(&noVms, &noVcpus);
309 
310 	/* Reset Core Share Status. */
311 	for (z = 0; z < noVcpus; z++)
312 		policies[pNo].core_share[z].status = 0;
313 
314 	/* Foreach vcpu in a policy. */
315 	for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
316 		/* Foreach VM on the platform. */
317 		for (x = 0; x < noVms; x++) {
318 			/* Foreach vcpu of VMs on platform. */
319 			for (t = 0; t < lvm_info[x].num_cpus; t++)
320 				core_share(pNo, z, x, t);
321 		}
322 	}
323 }
324 
325 
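/*
 * Register one physical cpu with the policy: for BRANCH_RATIO policies the
 * core is handed to the out-of-band monitor, otherwise it is recorded in
 * the policy's core_share table for in-band frequency scaling.
 */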
326 static int
327 pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)
328 {
329 	int ret = 0;
330 
331 	if (pol->pkt.policy_to_use == BRANCH_RATIO) {
332 		ci->cd[pcpu].oob_enabled = 1;
333 		ret = add_core_to_monitor(pcpu);
334 		if (ret == 0)
335 			RTE_LOG(INFO, CHANNEL_MONITOR,
336 					"Monitoring pcpu %d OOB for %s\n",
337 					pcpu, pol->pkt.vm_name);
338 		else
339 			RTE_LOG(ERR, CHANNEL_MONITOR,
340 					"Error monitoring pcpu %d OOB for %s\n",
341 					pcpu, pol->pkt.vm_name);
342 
343 	} else {
344 		pol->core_share[count].pcpu = pcpu;
345 		RTE_LOG(INFO, CHANNEL_MONITOR,
346 				"Monitoring pcpu %d for %s\n",
347 				pcpu, pol->pkt.vm_name);
348 	}
349 	return ret;
350 }
351 
352 static void
353 get_pcpu_to_control(struct policy *pol)
354 {
355 
356 	/* Convert vcpu to pcpu. */
357 	struct vm_info info;
358 	int pcpu, count;
359 	uint64_t mask_u64b;
360 	struct core_info *ci;
361 
362 	ci = get_core_info();
363 
364 	RTE_LOG(INFO, CHANNEL_MONITOR,
365 			"Looking for pcpu for %s\n", pol->pkt.vm_name);
366 
367 	/*
368 	 * Now that we handle both virtual and physical cores, we need to
369 	 * differentiate between them when adding them to the branch monitor.
370 	 * Virtual cores need to be converted to physical cores.
371 	 */
372 	if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) {
373 		/*
374 		 * If the cores in the policy are virtual, we need to map them
375 		 * to physical core. We look up the vm info and use that for
376 		 * the mapping.
377 		 */
378 		get_info_vm(pol->pkt.vm_name, &info);
379 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
380 			mask_u64b =
381 				info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
382 			for (pcpu = 0; mask_u64b;
383 					mask_u64b &= ~(1ULL << pcpu++)) {
384 				if ((mask_u64b >> pcpu) & 1)
385 					pcpu_monitor(pol, ci, pcpu, count);
386 			}
387 		}
388 	} else {
389 		/*
390 		 * If the cores in the policy are physical, we just use
391 		 * those core ids directly.
392 		 */
393 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
394 			pcpu = pol->pkt.vcpu_to_control[count];
395 			pcpu_monitor(pol, ci, pcpu, count);
396 		}
397 	}
398 }
399 
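/*
 * Resolve each monitored MAC address to an i40e VF id by querying every
 * attached port, and remember the port that answered so VF statistics can
 * be read later.
 */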
400 static int
401 get_pfid(struct policy *pol)
402 {
403 
404 	int i, x, ret = 0;
405 
406 	for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
407 
408 		RTE_ETH_FOREACH_DEV(x) {
409 			ret = rte_pmd_i40e_query_vfid_by_mac(x,
410 				(struct ether_addr *)&(pol->pkt.vfid[i]));
411 			if (ret != -EINVAL) {
412 				pol->port[i] = x;
413 				break;
414 			}
415 		}
416 		if (ret == -EINVAL || ret == -ENOTSUP || ret == -ENODEV) {
417 			RTE_LOG(INFO, CHANNEL_MONITOR,
418 				"Error with Policy. MAC not found on "
419 				"attached ports\n");
420 			pol->enabled = 0;
421 			return ret;
422 		}
423 		pol->pfid[i] = ret;
424 	}
425 	return 1;
426 }
427 
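/*
 * Install or refresh the policy for pkt->vm_name: reuse the slot already
 * holding that VM's policy if there is one, otherwise claim the first
 * disabled slot.
 */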
428 static int
429 update_policy(struct channel_packet *pkt)
430 {
431 
432 	unsigned int updated = 0;
433 	int i;
434 
435 
436 	RTE_LOG(INFO, CHANNEL_MONITOR,
437 			"Applying policy for %s\n", pkt->vm_name);
438 
439 	for (i = 0; i < MAX_CLIENTS; i++) {
440 		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
441 			/* Copy the contents of *pkt into the policy.pkt */
442 			policies[i].pkt = *pkt;
443 			get_pcpu_to_control(&policies[i]);
444 			if (get_pfid(&policies[i]) < 0) {
445 				updated = 1;
446 				break;
447 			}
448 			core_share_status(i);
449 			policies[i].enabled = 1;
450 			updated = 1;
451 		}
452 	}
453 	if (!updated) {
454 		for (i = 0; i < MAX_CLIENTS; i++) {
455 			if (policies[i].enabled == 0) {
456 				policies[i].pkt = *pkt;
457 				get_pcpu_to_control(&policies[i]);
458 				if (get_pfid(&policies[i]) < 0)
459 					break;
460 				core_share_status(i);
461 				policies[i].enabled = 1;
462 				break;
463 			}
464 		}
465 	}
466 	return 0;
467 }
468 
469 static int
470 remove_policy(struct channel_packet *pkt)
471 {
472 	int i;
473 
474 	/*
475 	 * Disabling the policy is simply a case of setting
476 	 * enabled to 0
477 	 */
478 	for (i = 0; i < MAX_CLIENTS; i++) {
479 		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
480 			policies[i].enabled = 0;
481 			return 0;
482 		}
483 	}
484 	return -1;
485 }
486 
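/*
 * Estimate the aggregate packet rate for the policy: sum the VF rx packet
 * counters, diff them against the previous sample and scale by the elapsed
 * TSC cycles, giving an approximate packets-per-second figure.
 */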
487 static uint64_t
488 get_pkt_diff(struct policy *pol)
489 {
490 
491 	uint64_t vsi_pkt_count,
492 		vsi_pkt_total = 0,
493 		vsi_pkt_count_prev_total = 0;
494 	double rdtsc_curr, rdtsc_diff, diff;
495 	int x;
496 	struct rte_eth_stats vf_stats;
497 
498 	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {
499 
500 		/* Read VSI stats */
501 		if (rte_pmd_i40e_get_vf_stats(pol->port[x],
				pol->pfid[x], &vf_stats) == 0)
502 			vsi_pkt_count = vf_stats.ipackets;
503 		else
504 			vsi_pkt_count = -1;
505 
506 		vsi_pkt_total += vsi_pkt_count;
507 
508 		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
509 		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
510 	}
511 
512 	rdtsc_curr = rte_rdtsc_precise();
513 	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
514 	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;
515 
516 	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
517 			((double)rte_get_tsc_hz() / rdtsc_diff);
518 
519 	return diff;
520 }
521 
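/*
 * Traffic profile: scale non-shared cores to maximum frequency above the
 * maximum packet threshold, to medium above the average threshold, and to
 * minimum otherwise.
 */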
522 static void
523 apply_traffic_profile(struct policy *pol)
524 {
525 
526 	int count;
527 	uint64_t diff = 0;
528 
529 	diff = get_pkt_diff(pol);
530 
531 	RTE_LOG(INFO, CHANNEL_MONITOR, "Applying traffic profile\n");
532 
533 	if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
534 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
535 			if (pol->core_share[count].status != 1)
536 				power_manager_scale_core_max(
537 						pol->core_share[count].pcpu);
538 		}
539 	} else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
540 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
541 			if (pol->core_share[count].status != 1)
542 				power_manager_scale_core_med(
543 						pol->core_share[count].pcpu);
544 		}
545 	} else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
546 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
547 			if (pol->core_share[count].status != 1)
548 				power_manager_scale_core_min(
549 						pol->core_share[count].pcpu);
550 		}
551 	}
552 }
553 
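/*
 * Time profile: scale cores up during configured busy hours, down during
 * quiet hours, and fall back to the traffic profile in hours marked for it.
 */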
554 static void
555 apply_time_profile(struct policy *pol)
556 {
557 
558 	int count, x;
559 	struct timeval tv;
560 	struct tm *ptm;
561 	char time_string[40];
562 
563 	/* Obtain the time of day, and convert it to a tm struct. */
564 	gettimeofday(&tv, NULL);
565 	ptm = localtime(&tv.tv_sec);
566 	/* Format the date and time, down to a single second. */
567 	strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);
568 
569 	for (x = 0; x < HOURS; x++) {
570 
571 		if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
572 			for (count = 0; count < pol->pkt.num_vcpu; count++) {
573 				if (pol->core_share[count].status != 1) {
574 					power_manager_scale_core_max(
575 						pol->core_share[count].pcpu);
576 					RTE_LOG(INFO, CHANNEL_MONITOR,
577 						"Scaling up core %d to max\n",
578 						pol->core_share[count].pcpu);
579 				}
580 			}
581 			break;
582 		} else if (ptm->tm_hour ==
583 				pol->pkt.timer_policy.quiet_hours[x]) {
584 			for (count = 0; count < pol->pkt.num_vcpu; count++) {
585 				if (pol->core_share[count].status != 1) {
586 					power_manager_scale_core_min(
587 						pol->core_share[count].pcpu);
588 					RTE_LOG(INFO, CHANNEL_MONITOR,
589 						"Scaling down core %d to min\n",
590 						pol->core_share[count].pcpu);
591 				}
592 			}
593 			break;
594 		} else if (ptm->tm_hour ==
595 			pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
596 			apply_traffic_profile(pol);
597 			break;
598 		}
599 	}
600 }
601 
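/* Workload profile: map HIGH/MEDIUM/LOW directly to max/med/min frequency. */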
602 static void
603 apply_workload_profile(struct policy *pol)
604 {
605 
606 	int count;
607 
608 	if (pol->pkt.workload == HIGH) {
609 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
610 			if (pol->core_share[count].status != 1)
611 				power_manager_scale_core_max(
612 						pol->core_share[count].pcpu);
613 		}
614 	} else if (pol->pkt.workload == MEDIUM) {
615 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
616 			if (pol->core_share[count].status != 1)
617 				power_manager_scale_core_med(
618 						pol->core_share[count].pcpu);
619 		}
620 	} else if (pol->pkt.workload == LOW) {
621 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
622 			if (pol->core_share[count].status != 1)
623 				power_manager_scale_core_min(
624 						pol->core_share[count].pcpu);
625 		}
626 	}
627 }
628 
629 static void
630 apply_policy(struct policy *pol)
631 {
632 
633 	struct channel_packet *pkt = &pol->pkt;
634 
635 	/* Check which policy to use */
636 	if (pkt->policy_to_use == TRAFFIC)
637 		apply_traffic_profile(pol);
638 	else if (pkt->policy_to_use == TIME)
639 		apply_time_profile(pol);
640 	else if (pkt->policy_to_use == WORKLOAD)
641 		apply_workload_profile(pol);
642 }
643 
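/*
 * Handle one decoded request: claim the channel (CONNECTED -> PROCESSING),
 * execute a direct power command or create/remove a policy, then hand the
 * channel back to the manager.
 */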
644 static int
645 process_request(struct channel_packet *pkt, struct channel_info *chan_info)
646 {
647 	uint64_t core_mask;
648 
649 	if (chan_info == NULL)
650 		return -1;
651 
652 	RTE_LOG(INFO, CHANNEL_MONITOR, "Processing Request %s\n", pkt->vm_name);
653 
654 	if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED,
655 			CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
656 		return -1;
657 
658 	if (pkt->command == CPU_POWER) {
659 		core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
660 		if (core_mask == 0) {
661 			/*
662 			 * Core mask will be 0 when no hypervisor is
663 			 * available and we are running directly on the
664 			 * host, so use the core id itself as the mask.
665 			 */
666 			core_mask = 1ULL << pkt->resource_id;
667 		}
668 		if (__builtin_popcountll(core_mask) == 1) {
669 
670 			unsigned core_num = __builtin_ffsll(core_mask) - 1;
671 
672 			switch (pkt->unit) {
673 			case(CPU_POWER_SCALE_MIN):
674 					power_manager_scale_core_min(core_num);
675 			break;
676 			case(CPU_POWER_SCALE_MAX):
677 					power_manager_scale_core_max(core_num);
678 			break;
679 			case(CPU_POWER_SCALE_DOWN):
680 					power_manager_scale_core_down(core_num);
681 			break;
682 			case(CPU_POWER_SCALE_UP):
683 					power_manager_scale_core_up(core_num);
684 			break;
685 			case(CPU_POWER_ENABLE_TURBO):
686 				power_manager_enable_turbo_core(core_num);
687 			break;
688 			case(CPU_POWER_DISABLE_TURBO):
689 				power_manager_disable_turbo_core(core_num);
690 			break;
691 			default:
692 				break;
693 			}
694 		} else {
695 			switch (pkt->unit) {
696 			case(CPU_POWER_SCALE_MIN):
697 					power_manager_scale_mask_min(core_mask);
698 			break;
699 			case(CPU_POWER_SCALE_MAX):
700 					power_manager_scale_mask_max(core_mask);
701 			break;
702 			case(CPU_POWER_SCALE_DOWN):
703 					power_manager_scale_mask_down(core_mask);
704 			break;
705 			case(CPU_POWER_SCALE_UP):
706 					power_manager_scale_mask_up(core_mask);
707 			break;
708 			case(CPU_POWER_ENABLE_TURBO):
709 				power_manager_enable_turbo_mask(core_mask);
710 			break;
711 			case(CPU_POWER_DISABLE_TURBO):
712 				power_manager_disable_turbo_mask(core_mask);
713 			break;
714 			default:
715 				break;
716 			}
717 
718 		}
719 	}
720 
721 	if (pkt->command == PKT_POLICY) {
722 		RTE_LOG(INFO, CHANNEL_MONITOR,
723 				"\nProcessing Policy request\n");
724 		update_policy(pkt);
725 		policy_is_set = 1;
726 	}
727 
728 	if (pkt->command == PKT_POLICY_REMOVE) {
729 		RTE_LOG(INFO, CHANNEL_MONITOR,
730 				 "Removing policy %s\n", pkt->vm_name);
731 		remove_policy(pkt);
732 	}
733 
734 	/*
735 	 * Return is not checked as channel status may have been set to DISABLED
736 	 * from management thread
737 	 */
738 	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
739 			CHANNEL_MGR_CHANNEL_CONNECTED);
740 	return 0;
741 
742 }
743 
744 int
745 add_channel_to_monitor(struct channel_info **chan_info)
746 {
747 	struct channel_info *info = *chan_info;
748 	struct epoll_event event;
749 
750 	event.events = EPOLLIN;
751 	event.data.ptr = info;
752 	if (epoll_ctl(global_event_fd, EPOLL_CTL_ADD, info->fd, &event) < 0) {
753 		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to add channel '%s' "
754 				"to epoll\n", info->channel_path);
755 		return -1;
756 	}
757 	RTE_LOG(INFO, CHANNEL_MONITOR, "Added channel '%s' "
758 			"to monitor\n", info->channel_path);
759 	return 0;
760 }
761 
762 int
763 remove_channel_from_monitor(struct channel_info *chan_info)
764 {
765 	if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
766 			chan_info->fd, NULL) < 0) {
767 		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
768 				"from epoll\n", chan_info->channel_path);
769 		return -1;
770 	}
771 	return 0;
772 }
773 
774 int
775 channel_monitor_init(void)
776 {
777 	global_event_fd = epoll_create1(0);
778 	if (global_event_fd < 0) {
779 		RTE_LOG(ERR, CHANNEL_MONITOR,
780 				"Error creating epoll context with error %s\n",
781 				strerror(errno));
782 		return -1;
783 	}
784 	global_events_list = rte_malloc("epoll_events",
785 			sizeof(*global_events_list)
786 			* MAX_EVENTS, RTE_CACHE_LINE_SIZE);
787 	if (global_events_list == NULL) {
788 		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
789 				"epoll events\n");
790 		return -1;
791 	}
792 	return 0;
793 }
794 
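/*
 * Read a fixed-size binary channel_packet from the channel fd, coping with
 * short reads, and pass it on to process_request().
 */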
795 static void
796 read_binary_packet(struct channel_info *chan_info)
797 {
798 	struct channel_packet pkt;
799 	void *buffer = &pkt;
800 	int buffer_len = sizeof(pkt);
801 	int n_bytes, err = 0;
802 
803 	while (buffer_len > 0) {
804 		n_bytes = read(chan_info->fd,
805 				buffer, buffer_len);
806 		if (n_bytes == buffer_len)
807 			break;
808 		if (n_bytes == -1) {
809 			err = errno;
810 			RTE_LOG(DEBUG, CHANNEL_MONITOR,
811 				"Received error on "
812 				"channel '%s' read: %s\n",
813 				chan_info->channel_path,
814 				strerror(err));
815 			remove_channel(&chan_info);
816 			break;
817 		}
818 		buffer = (char *)buffer + n_bytes;
819 		buffer_len -= n_bytes;
820 	}
821 	if (!err)
822 		process_request(&pkt, chan_info);
823 }
824 
825 #ifdef USE_JANSSON
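/*
 * Read characters one at a time, tracking '{'/'}' nesting, until a complete
 * JSON object has been collected (or the buffer limit is hit), then parse
 * it and process the resulting request.
 */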
826 static void
827 read_json_packet(struct channel_info *chan_info)
828 {
829 	struct channel_packet pkt;
830 	int n_bytes, ret;
831 	json_t *root;
832 	json_error_t error;
833 
834 	/* read opening brace to closing brace */
835 	do {
836 		int idx = 0;
837 		int indent = 0;
838 		do {
839 			n_bytes = read(chan_info->fd, &json_data[idx], 1);
840 			if (n_bytes <= 0)
841 				break;
842 			if (json_data[idx] == '{')
843 				indent++;
844 			if (json_data[idx] == '}')
845 				indent--;
846 			if ((indent > 0) || (idx > 0))
847 				idx++;
848 			if (indent == 0)
849 				json_data[idx] = 0;
850 			if (idx >= MAX_JSON_STRING_LEN-1)
851 				break;
852 		} while (indent > 0);
853 
854 		if (indent > 0)
855 			/*
856 			 * We've broken out of the read loop without getting
857 			 * a closing brace, so throw away the data
858 			 */
859 			json_data[idx] = 0;
860 
861 		if (strlen(json_data) == 0)
862 			continue;
863 
864 		printf("got [%s]\n", json_data);
865 
866 		root = json_loads(json_data, 0, &error);
867 
868 		if (root) {
869 			/*
870 			 * Because our data is now in the json
871 			 * object, we can overwrite the pkt
872 			 * with a channel_packet struct, using
873 			 * parse_json_to_pkt()
874 			 */
875 			ret = parse_json_to_pkt(root, &pkt);
876 			json_decref(root);
877 			if (ret) {
878 				RTE_LOG(ERR, CHANNEL_MONITOR,
879 					"Error validating JSON profile data\n");
880 				break;
881 			}
882 			process_request(&pkt, chan_info);
883 		} else {
884 			RTE_LOG(ERR, CHANNEL_MONITOR,
885 					"JSON error on line %d: %s\n",
886 					error.line, error.text);
887 		}
888 	} while (n_bytes > 0);
889 }
890 #endif
891 
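/*
 * Main monitor loop: wait up to 1ms for channel events, dispatch them to
 * the binary or JSON reader, then re-apply every enabled policy roughly
 * each time_period_ms milliseconds.
 */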
892 void
893 run_channel_monitor(void)
894 {
895 	while (run_loop) {
896 		int n_events, i;
897 
898 		n_events = epoll_wait(global_event_fd, global_events_list,
899 				MAX_EVENTS, 1);
900 		if (!run_loop)
901 			break;
902 		for (i = 0; i < n_events; i++) {
903 			struct channel_info *chan_info = (struct channel_info *)
904 					global_events_list[i].data.ptr;
905 			if ((global_events_list[i].events & EPOLLERR) ||
906 				(global_events_list[i].events & EPOLLHUP)) {
907 				RTE_LOG(DEBUG, CHANNEL_MONITOR, "Remote closed connection for "
908 						"channel '%s'\n",
909 						chan_info->channel_path);
910 				remove_channel(&chan_info);
911 				continue;
912 			}
913 			if (global_events_list[i].events & EPOLLIN) {
914 
915 				switch (chan_info->type) {
916 				case CHANNEL_TYPE_BINARY:
917 					read_binary_packet(chan_info);
918 					break;
919 #ifdef USE_JANSSON
920 				case CHANNEL_TYPE_JSON:
921 					read_json_packet(chan_info);
922 					break;
923 #endif
924 				default:
925 					break;
926 				}
927 			}
928 		}
929 		rte_delay_us(time_period_ms*1000);
930 		if (policy_is_set) {
931 			int j;
932 
933 			for (j = 0; j < MAX_CLIENTS; j++) {
934 				if (policies[j].enabled == 1)
935 					apply_policy(&policies[j]);
936 			}
937 		}
938 	}
939 }
940