/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/un.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <dirent.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/select.h>

#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>

#include <libvirt/libvirt.h>

#include "channel_manager.h"
#include "channel_commands.h"
#include "channel_monitor.h"
#include "power_manager.h"


#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1

/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;

static unsigned char *global_cpumaps;
static virVcpuInfo *global_vircpuinfo;
static size_t global_maplen;

static unsigned int global_n_host_cpus;
static bool global_hypervisor_available;

/*
 * Represents a single Virtual Machine
 */
struct virtual_machine_info {
	char name[CHANNEL_MGR_MAX_NAME_LEN];
	uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
	struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
	char channel_mask[POWER_MGR_MAX_CPUS];
	uint8_t num_channels;
	enum vm_status status;
	virDomainPtr domainPtr;
	virDomainInfo info;
	rte_spinlock_t config_spinlock;
	LIST_ENTRY(virtual_machine_info) vms_info;
};

LIST_HEAD(, virtual_machine_info) vm_list_head;

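/* Look up a VM entry in the global list by its libvirt domain name. */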
static struct virtual_machine_info *
find_domain_by_name(const char *name)
{
	struct virtual_machine_info *info;
	LIST_FOREACH(info, &vm_list_head, vms_info) {
		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
			return info;
	}
	return NULL;
}

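/*
 * Refresh the vCPU to pCPU mapping of a VM by querying libvirt. For an
 * inactive VM only the persistent pinning configuration is read.
 */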
static int
update_pcpus_mask(struct virtual_machine_info *vm_info)
{
	virVcpuInfoPtr cpuinfo;
	unsigned i, j;
	int n_vcpus;

	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);

	if (!virDomainIsActive(vm_info->domainPtr)) {
		n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
				vm_info->info.nrVirtCpu, global_cpumaps, global_maplen,
				VIR_DOMAIN_AFFECT_CONFIG);
		if (n_vcpus < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
					"inactive VM '%s'\n", vm_info->name);
			return -1;
		}
		goto update_pcpus;
	}

	memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
			CHANNEL_CMDS_MAX_CPUS);

	cpuinfo = global_vircpuinfo;

	n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
			CHANNEL_CMDS_MAX_CPUS, global_cpumaps, global_maplen);
	if (n_vcpus < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
				"active VM '%s'\n", vm_info->name);
		return -1;
	}
update_pcpus:
	if (n_vcpus >= CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUs(%d) is out of range "
				"0...%d\n", n_vcpus, CHANNEL_CMDS_MAX_CPUS-1);
		return -1;
	}
	if (n_vcpus != vm_info->info.nrVirtCpu) {
		RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s'"
				" from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu,
				n_vcpus);
		vm_info->info.nrVirtCpu = n_vcpus;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
		for (j = 0; j < global_n_host_cpus; j++) {
			if (VIR_CPU_USABLE(global_cpumaps,
					global_maplen, i, j) <= 0)
				continue;
			vm_info->pcpu_map[i] = j;
		}
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

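/* Pin a vCPU of the named VM to a single physical CPU and record the mapping. */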
int
set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
	int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
	struct virtual_machine_info *vm_info;

	if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
				vcpu, CHANNEL_CMDS_MAX_CPUS-1);
		return -1;
	}

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"for VM '%s', VM is not active\n",
				vcpu, vm_info->name);
		return -1;
	}

	if (vcpu >= vm_info->info.nrVirtCpu) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of "
				"vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
		return -1;
	}

	if (pcpu >= global_n_host_cpus) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
				"number of CPUs(%u)\n",
				pcpu, global_n_host_cpus);
		return -1;
	}

	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS * global_maplen);

	VIR_USE_CPU(global_cpumaps, pcpu);

	if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
			global_maplen, flags) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"for VM '%s'\n", vcpu,
				vm_info->name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->pcpu_map[vcpu] = pcpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

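/* Return the physical CPU currently mapped to the given vCPU of a channel's VM. */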
uint16_t
get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
	struct virtual_machine_info *vm_info =
			(struct virtual_machine_info *)chan_info->priv_info;

	if (global_hypervisor_available && (vm_info != NULL)) {
		uint16_t pcpu;
		rte_spinlock_lock(&(vm_info->config_spinlock));
		pcpu = vm_info->pcpu_map[vcpu];
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return pcpu;
	} else
		return 0;
}

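/* Check, under the VM's config lock, whether a channel number is already registered. */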
static inline int
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
	rte_spinlock_lock(&(vm_info->config_spinlock));
	if (vm_info->channel_mask[channel_num] == 1) {
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return 1;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

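/*
 * Connect to a VM's virtio-serial channel socket and switch the fd to
 * non-blocking mode. Returns 0 on success, -1 on failure.
 */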
static int
open_non_blocking_channel(struct channel_info *info)
{
	int ret, flags;
	struct sockaddr_un sock_addr;
	fd_set soc_fd_set;
	struct timeval tv;

	info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
				strerror(errno),
				info->channel_path);
		return -1;
	}
	sock_addr.sun_family = AF_UNIX;
	memcpy(&sock_addr.sun_path, info->channel_path,
			strlen(info->channel_path)+1);

	/* Get current flags */
	flags = fcntl(info->fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
				"'%s'\n", strerror(errno), info->channel_path);
		return -1;
	}
	/* Set to Non Blocking */
	flags |= O_NONBLOCK;
	if (fcntl(info->fd, F_SETFL, flags) < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking "
				"socket for '%s'\n", strerror(errno), info->channel_path);
		return -1;
	}
	ret = connect(info->fd, (struct sockaddr *)&sock_addr,
			sizeof(sock_addr));
	if (ret < 0) {
		/* ECONNREFUSED error is given when VM is not active */
		if (errno == ECONNREFUSED) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not "
					"activated its endpoint to channel %s\n",
					info->channel_path);
			return -1;
		}
		/* Wait for tv_sec if connection is still in progress */
		else if (errno == EINPROGRESS) {
			tv.tv_sec = 2;
			tv.tv_usec = 0;
			FD_ZERO(&soc_fd_set);
			FD_SET(info->fd, &soc_fd_set);
			if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) <= 0) {
				RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel "
						"'%s'\n", info->channel_path);
				return -1;
			}
		} else {
			/* Any other error */
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket"
					" for '%s'\n", strerror(errno), info->channel_path);
			return -1;
		}
	}
	return 0;
}

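/* Open the host FIFO endpoint and switch the fd to non-blocking mode. */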
static int
open_host_channel(struct channel_info *info)
{
	int flags;

	info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
	if (info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
				strerror(errno),
				info->channel_path);
		return -1;
	}

	/* Get current flags */
	flags = fcntl(info->fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
				"'%s'\n", strerror(errno), info->channel_path);
		return -1;
	}
	/* Set to Non Blocking */
	flags |= O_NONBLOCK;
	if (fcntl(info->fd, F_SETFL, flags) < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER,
				"Error(%s) setting non-blocking "
				"socket for '%s'\n",
				strerror(errno), info->channel_path);
		return -1;
	}
	return 0;
}

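/*
 * Initialise a per-VM channel, connect its socket, register it with the
 * channel monitor and mark it as connected in the VM's channel mask.
 */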
static int
setup_channel_info(struct virtual_machine_info **vm_info_dptr,
		struct channel_info **chan_info_dptr, unsigned channel_num)
{
	struct channel_info *chan_info = *chan_info_dptr;
	struct virtual_machine_info *vm_info = *vm_info_dptr;

	chan_info->channel_num = channel_num;
	chan_info->priv_info = (void *)vm_info;
	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
	chan_info->type = CHANNEL_TYPE_BINARY;
	if (open_non_blocking_channel(chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
				"'%s' for VM '%s'\n",
				chan_info->channel_path, vm_info->name);
		return -1;
	}
	if (add_channel_to_monitor(&chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
				"'%s' to epoll ctl for VM '%s'\n",
				chan_info->channel_path, vm_info->name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->num_channels++;
	vm_info->channel_mask[channel_num] = 1;
	vm_info->channels[channel_num] = chan_info;
	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

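/* Build the path of the host-endpoint FIFO under CHANNEL_MGR_SOCKET_PATH. */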
static void
fifo_path(char *dst, unsigned int len)
{
	snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH);
}

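/*
 * Initialise the JSON host channel backed by the FIFO and register it with
 * the channel monitor.
 */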
static int
setup_host_channel_info(struct channel_info **chan_info_dptr,
		unsigned int channel_num)
{
	struct channel_info *chan_info = *chan_info_dptr;

	chan_info->channel_num = channel_num;
	chan_info->priv_info = (void *)NULL;
	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
	chan_info->type = CHANNEL_TYPE_JSON;

	fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));

	if (open_host_channel(chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
				"'%s'\n",
				chan_info->channel_path);
		return -1;
	}
	if (add_channel_to_monitor(&chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
				"'%s' to epoll ctl\n",
				chan_info->channel_path);
		return -1;
	}
	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
	return 0;
}

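/*
 * Scan CHANNEL_MGR_SOCKET_PATH for sockets named '<vm_name>.<channel_num>'
 * and add every channel found for the given VM. Returns the number of
 * channels enabled.
 */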
int
add_all_channels(const char *vm_name)
{
	DIR *d;
	struct dirent *dir;
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char *token, *remaining, *tail_ptr;
	char socket_name[PATH_MAX];
	unsigned channel_num;
	int num_channels_enabled = 0;

	/* verify VM exists */
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found"
				" during channel discovery\n", vm_name);
		return 0;
	}
	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}
	d = opendir(CHANNEL_MGR_SOCKET_PATH);
	if (d == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n",
				CHANNEL_MGR_SOCKET_PATH, strerror(errno));
		return -1;
	}
	while ((dir = readdir(d)) != NULL) {
		if (!strncmp(dir->d_name, ".", 1) ||
				!strncmp(dir->d_name, "..", 2))
			continue;

		strlcpy(socket_name, dir->d_name, sizeof(socket_name));
		remaining = socket_name;
		/* Extract vm_name from "<vm_name>.<channel_num>" */
		token = strsep(&remaining, ".");
		if (remaining == NULL)
			continue;
		if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN))
			continue;

		/* remaining should contain only <channel_num> */
		errno = 0;
		channel_num = (unsigned)strtol(remaining, &tail_ptr, 0);
		if ((errno != 0) || (remaining[0] == '\0') ||
				tail_ptr == NULL || (*tail_ptr != '\0')) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name "
					"'%s' found, it should be in the form of "
					"'<guest_name>.<channel_num>(decimal)'\n",
					dir->d_name);
			continue;
		}
		if (channel_num >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
					"greater than max allowable: %d, skipping '%s%s'\n",
					channel_num, CHANNEL_CMDS_MAX_VM_CHANNELS-1,
					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}
		/* if channel has not been added previously */
		if (channel_exists(vm_info, channel_num))
			continue;

		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
				"channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}

		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s",
				CHANNEL_MGR_SOCKET_PATH, dir->d_name);

		if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
			rte_free(chan_info);
			continue;
		}

		num_channels_enabled++;
	}
	closedir(d);
	return num_channels_enabled;
}

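/*
 * Add the explicitly listed channel numbers for a VM, skipping channels that
 * are out of range, already registered, or whose socket does not exist.
 * Returns the number of channels enabled.
 */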
int
add_channels(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	unsigned i;
	int num_channels_enabled = 0;

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}

	for (i = 0; i < len_channel_list; i++) {

		if (channel_list[i] >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
							"0...%d\n", channel_list[i],
							CHANNEL_CMDS_MAX_VM_CHANNELS-1);
			continue;
		}
		if (channel_exists(vm_info, channel_list[i])) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping "
					"'%s.%u'\n", vm_name, channel_list[i]);
			continue;
		}

		snprintf(socket_path, sizeof(socket_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		errno = 0;
		if (access(socket_path, F_OK) < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
					"%s\n", socket_path, strerror(errno));
			continue;
		}
		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s'\n", socket_path);
			continue;
		}
		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) {
			rte_free(chan_info);
			continue;
		}
		num_channels_enabled++;
	}
	return num_channels_enabled;
}

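/*
 * Create (if needed) and open the host FIFO channel used for JSON commands
 * from the host itself. Returns 1 when the channel is enabled, 0 otherwise.
 */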
int
add_host_channel(void)
{
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	int num_channels_enabled = 0;
	int ret;

	fifo_path(socket_path, sizeof(socket_path));

	ret = mkfifo(socket_path, 0660);
	if ((ret < 0) && (errno != EEXIST)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
				"%s\n", socket_path, strerror(errno));
		return 0;
	}

	if (access(socket_path, F_OK) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
				"%s\n", socket_path, strerror(errno));
		return 0;
	}
	chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
	if (chan_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
				"channel '%s'\n", socket_path);
		return 0;
	}
	strlcpy(chan_info->channel_path, socket_path,
		sizeof(chan_info->channel_path));
	if (setup_host_channel_info(&chan_info, 0) < 0) {
		rte_free(chan_info);
		return 0;
	}
	num_channels_enabled++;

	return num_channels_enabled;
}

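/*
 * Close a channel's fd, clear it from the owning VM's channel mask and free
 * the channel structure.
 */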
int
remove_channel(struct channel_info **chan_info_dptr)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info = *chan_info_dptr;

	close(chan_info->fd);

	vm_info = (struct virtual_machine_info *)chan_info->priv_info;

	/* The host (JSON) channel has no VM attached to it */
	if (vm_info != NULL) {
		rte_spinlock_lock(&(vm_info->config_spinlock));
		vm_info->channel_mask[chan_info->channel_num] = 0;
		vm_info->num_channels--;
		rte_spinlock_unlock(&(vm_info->config_spinlock));
	}

	rte_free(chan_info);
	return 0;
}

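/*
 * Set every registered channel of a VM to the given status (connected or
 * disabled). Returns the number of channels whose status was changed.
 */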
int
set_channel_status_all(const char *vm_name, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	char mask[POWER_MGR_MAX_CPUS];
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
		return 0;
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to disable channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	rte_spinlock_lock(&(vm_info->config_spinlock));
	memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
	for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
		if (mask[i] != 1)
			continue;
		vm_info->channels[i]->status = status;
		num_channels_changed++;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return num_channels_changed;
}

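/*
 * Set the status of the listed channel numbers for a VM. Returns the number
 * of channels whose status was changed.
 */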
int
set_channel_status(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
		return 0;
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}
	for (i = 0; i < len_channel_list; i++) {
		if (channel_exists(vm_info, channel_list[i])) {
			rte_spinlock_lock(&(vm_info->config_spinlock));
			vm_info->channels[channel_list[i]]->status = status;
			rte_spinlock_unlock(&(vm_info->config_spinlock));
			num_channels_changed++;
		}
	}
	return num_channels_changed;
}

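/*
 * Query libvirt for all running, persistent domains and record each domain's
 * name, vCPU count and vCPU-to-pCPU pinning in the global lvm_info array.
 */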
void
get_all_vm(int *num_vm, int *num_vcpu)
{

	virNodeInfo node_info;
	virDomainPtr *domptr;
	int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
	unsigned int jj;
	const char *vm_name;
	unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
				VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
	unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;

	if (!global_hypervisor_available)
		return;

	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
	if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
		return;
	}

	/* Returns number of pcpus */
	global_n_host_cpus = (unsigned int)node_info.cpus;

	/* Returns number of active domains */
	*num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
					domain_flags);
	if (*num_vm <= 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
		return;
	}

	for (i = 0; i < *num_vm; i++) {

		/* Get Domain Names */
		vm_name = virDomainGetName(domptr[i]);
		lvm_info[i].vm_name = vm_name;

		/* Get Number of Vcpus */
		numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);

		/* Get Number of VCpus & VcpuPinInfo */
		n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
				numVcpus[i], global_cpumaps,
				global_maplen, domain_flag);

		if (n_vcpus > 0) {
			*num_vcpu = n_vcpus;
			lvm_info[i].num_cpus = n_vcpus;
		}

		/* Save pcpu in use by libvirt VMs */
		for (ii = 0; ii < n_vcpus; ii++) {
			for (jj = 0; jj < global_n_host_cpus; jj++) {
				if (VIR_CPU_USABLE(global_cpumaps,
						global_maplen, ii, jj) > 0) {
					lvm_info[i].pcpus[ii] = jj;
				}
			}
		}
	}
}

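/*
 * Fill a vm_info structure with the current state, channels and vCPU-to-pCPU
 * mapping of the named VM. Returns 0 on success, -1 if the VM is unknown.
 */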
int
get_info_vm(const char *vm_name, struct vm_info *info)
{
	struct virtual_machine_info *vm_info;
	unsigned i, channel_num = 0;
	char mask[POWER_MGR_MAX_CPUS];

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}
	info->status = CHANNEL_MGR_VM_ACTIVE;
	if (!virDomainIsActive(vm_info->domainPtr))
		info->status = CHANNEL_MGR_VM_INACTIVE;

	rte_spinlock_lock(&(vm_info->config_spinlock));

	memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
	for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
		if (mask[i] != 1)
			continue;
		info->channels[channel_num].channel_num = i;
		memcpy(info->channels[channel_num].channel_path,
				vm_info->channels[i]->channel_path,
				UNIX_PATH_MAX);
		info->channels[channel_num].status =
				vm_info->channels[i]->status;
		info->channels[channel_num].fd =
				vm_info->channels[i]->fd;
		channel_num++;
	}

	info->num_channels = channel_num;
	info->num_vcpus = vm_info->info.nrVirtCpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	memcpy(info->name, vm_info->name, sizeof(vm_info->name));
	rte_spinlock_lock(&(vm_info->config_spinlock));
	for (i = 0; i < info->num_vcpus; i++) {
		info->pcpu_map[i] = vm_info->pcpu_map[i];
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

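/*
 * Look up a domain in libvirt by name, allocate a tracking structure for it
 * and insert it at the head of the VM list.
 */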
int
add_vm(const char *vm_name)
{
	struct virtual_machine_info *new_domain;
	virDomainPtr dom_ptr;
	int i;

	if (find_domain_by_name(vm_name) != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' "
				"already exists\n", vm_name);
		return -1;
	}

	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n");
		return -1;
	}
	dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name);
	if (dom_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: "
				"VM '%s' not found\n", vm_name);
		return -1;
	}

	new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
			RTE_CACHE_LINE_SIZE);
	if (new_domain == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
				"info\n");
		return -1;
	}
	new_domain->domainPtr = dom_ptr;
	if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n");
		rte_free(new_domain);
		return -1;
	}
	if (new_domain->info.nrVirtCpu > CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error the number of virtual CPUs(%u) is "
				"greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
				CHANNEL_CMDS_MAX_CPUS);
		rte_free(new_domain);
		return -1;
	}

	for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
		new_domain->pcpu_map[i] = 0;
	}
	if (update_pcpus_mask(new_domain) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
		rte_free(new_domain);
		return -1;
	}
	strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
	new_domain->name[sizeof(new_domain->name) - 1] = '\0';
	memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
	new_domain->num_channels = 0;

	if (!virDomainIsActive(dom_ptr))
		new_domain->status = CHANNEL_MGR_VM_INACTIVE;
	else
		new_domain->status = CHANNEL_MGR_VM_ACTIVE;

	rte_spinlock_init(&(new_domain->config_spinlock));
	LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
	return 0;
}

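/*
 * Remove a VM from the list and free it; fails if any of its channels are
 * still registered.
 */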
int
remove_vm(const char *vm_name)
{
	struct virtual_machine_info *vm_info = find_domain_by_name(vm_name);

	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' "
				"not found\n", vm_name);
		return -1;
	}
	rte_spinlock_lock(&vm_info->config_spinlock);
	if (vm_info->num_channels != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are "
				"%"PRIu8" channels still active\n",
				vm_name, vm_info->num_channels);
		rte_spinlock_unlock(&vm_info->config_spinlock);
		return -1;
	}
	LIST_REMOVE(vm_info, vms_info);
	rte_spinlock_unlock(&vm_info->config_spinlock);
	rte_free(vm_info);
	return 0;
}

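/* Close the libvirt connection, if one is open. */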
static void
disconnect_hypervisor(void)
{
	if (global_vir_conn_ptr != NULL) {
		virConnectClose(global_vir_conn_ptr);
		global_vir_conn_ptr = NULL;
	}
}

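/* Open a libvirt connection to the hypervisor at the given URI. */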
static int
connect_hypervisor(const char *path)
{
	if (global_vir_conn_ptr != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection "
				"already established\n", path);
		return -1;
	}
	global_vir_conn_ptr = virConnectOpen(path);
	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error failed to open connection to "
				"Hypervisor '%s'\n", path);
		return -1;
	}
	return 0;
}

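/*
 * Initialise the channel manager: connect to the hypervisor (if possible),
 * allocate the global CPU map buffers and determine the host CPU count.
 */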
int
channel_manager_init(const char *path)
{
	virNodeInfo info;

	LIST_INIT(&vm_list_head);
	if (connect_hypervisor(path) < 0) {
		global_n_host_cpus = 64;
		global_hypervisor_available = 0;
		RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
	} else {
		global_hypervisor_available = 1;

		global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);

		global_vircpuinfo = rte_zmalloc(NULL,
				sizeof(*global_vircpuinfo) *
				CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
		if (global_vircpuinfo == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
			goto error;
		}
		global_cpumaps = rte_zmalloc(NULL,
				CHANNEL_CMDS_MAX_CPUS * global_maplen,
				RTE_CACHE_LINE_SIZE);
		if (global_cpumaps == NULL)
			goto error;

		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
			goto error;
		}
		global_n_host_cpus = (unsigned int)info.cpus;
	}

	if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
				"maximum of %u. No cores over %u should be used.\n",
				global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS,
				CHANNEL_CMDS_MAX_CPUS - 1);
		global_n_host_cpus = CHANNEL_CMDS_MAX_CPUS;
	}

	return 0;
error:
	if (global_hypervisor_available)
		disconnect_hypervisor();
	return -1;
}

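/*
 * Tear down the channel manager: remove and free every VM and channel, then
 * release the global buffers and the hypervisor connection.
 */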
void
channel_manager_exit(void)
{
	unsigned i;
	char mask[POWER_MGR_MAX_CPUS];
	struct virtual_machine_info *vm_info;

	/* Take entries from the head one at a time, as LIST_FOREACH would
	 * dereference vm_info after it has been freed. */
	while (!LIST_EMPTY(&vm_list_head)) {
		vm_info = LIST_FIRST(&vm_list_head);

		rte_spinlock_lock(&(vm_info->config_spinlock));

		memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
		for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
			if (mask[i] != 1)
				continue;
			remove_channel_from_monitor(
					vm_info->channels[i]);
			close(vm_info->channels[i]->fd);
			rte_free(vm_info->channels[i]);
		}
		rte_spinlock_unlock(&(vm_info->config_spinlock));

		LIST_REMOVE(vm_info, vms_info);
		rte_free(vm_info);
	}

	if (global_hypervisor_available) {
		/* Only needed if hypervisor available */
		rte_free(global_cpumaps);
		rte_free(global_vircpuinfo);
		disconnect_hypervisor();
	}
}
971