xref: /dpdk/examples/vm_power_manager/channel_manager.c (revision 9c20d0fdc536df2a320cb1ae6cce49c2c7a02ebb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <inttypes.h>
10 #include <dirent.h>
11 #include <errno.h>
12 
13 #include <sys/queue.h>
14 #include <sys/types.h>
15 #include <sys/stat.h>
16 #include <sys/socket.h>
17 #include <sys/select.h>
18 
19 #include <rte_string_fns.h>
20 #include <rte_malloc.h>
21 #include <rte_memory.h>
22 #include <rte_mempool.h>
23 #include <rte_log.h>
24 #include <rte_spinlock.h>
25 #include <rte_tailq.h>
26 
27 #include <libvirt/libvirt.h>
28 
29 #include "channel_manager.h"
30 #include "channel_monitor.h"
31 #include "power_manager.h"
32 
33 
34 #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
35 
36 struct libvirt_vm_info lvm_info[MAX_CLIENTS];
37 
38 /* Global pointer to libvirt connection */
39 static virConnectPtr global_vir_conn_ptr;
40 
41 static unsigned char *global_cpumaps;
42 static virVcpuInfo *global_vircpuinfo;
43 static size_t global_maplen;
44 
45 static unsigned int global_n_host_cpus;
46 static bool global_hypervisor_available;
47 
48 /*
49  * Represents a single Virtual Machine
50  */
51 struct virtual_machine_info {
52 	char name[CHANNEL_MGR_MAX_NAME_LEN];
53 	uint16_t pcpu_map[RTE_MAX_LCORE];
54 	struct channel_info *channels[RTE_MAX_LCORE];
55 	char channel_mask[RTE_MAX_LCORE];
56 	uint8_t num_channels;
57 	enum vm_status status;
58 	virDomainPtr domainPtr;
59 	virDomainInfo info;
60 	rte_spinlock_t config_spinlock;
61 	int allow_query;
62 	RTE_TAILQ_ENTRY(virtual_machine_info) vms_info;
63 };
64 
65 RTE_TAILQ_HEAD(, virtual_machine_info) vm_list_head;
66 
67 static struct virtual_machine_info *
68 find_domain_by_name(const char *name)
69 {
70 	struct virtual_machine_info *info;
71 	RTE_TAILQ_FOREACH(info, &vm_list_head, vms_info) {
72 		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
73 			return info;
74 	}
75 	return NULL;
76 }
77 
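/*
 * Refresh a VM's vCPU to pCPU mapping from libvirt. For an inactive domain
 * only the persistent pin configuration is read; for an active domain the
 * live vCPU placement is used. For each vCPU the highest-numbered usable
 * pCPU found in the returned CPU map is recorded in pcpu_map[].
 */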
78 static int
79 update_pcpus_mask(struct virtual_machine_info *vm_info)
80 {
81 	virVcpuInfoPtr cpuinfo;
82 	unsigned i, j;
83 	int n_vcpus;
84 
85 	memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
86 
87 	if (!virDomainIsActive(vm_info->domainPtr)) {
88 		n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
89 				vm_info->info.nrVirtCpu, global_cpumaps, global_maplen,
90 				VIR_DOMAIN_AFFECT_CONFIG);
91 		if (n_vcpus < 0) {
92 			RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
93 					"inactive VM '%s'\n", vm_info->name);
94 			return -1;
95 		}
96 		goto update_pcpus;
97 	}
98 
99 	memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
100 			RTE_MAX_LCORE);
101 
102 	cpuinfo = global_vircpuinfo;
103 
104 	n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
105 			RTE_MAX_LCORE, global_cpumaps, global_maplen);
106 	if (n_vcpus < 0) {
107 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
108 				"active VM '%s'\n", vm_info->name);
109 		return -1;
110 	}
111 update_pcpus:
112 	if (n_vcpus >= RTE_MAX_LCORE) {
113 		RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range "
114 				"0...%d\n", n_vcpus, RTE_MAX_LCORE-1);
115 		return -1;
116 	}
117 	if (n_vcpus != vm_info->info.nrVirtCpu) {
118 		RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s'"
119 				" from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu,
120 				n_vcpus);
121 		vm_info->info.nrVirtCpu = n_vcpus;
122 	}
123 	rte_spinlock_lock(&(vm_info->config_spinlock));
124 	for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
125 		for (j = 0; j < global_n_host_cpus; j++) {
126 			if (VIR_CPU_USABLE(global_cpumaps,
127 					global_maplen, i, j) <= 0)
128 				continue;
129 			vm_info->pcpu_map[i] = j;
130 		}
131 	}
132 	rte_spinlock_unlock(&(vm_info->config_spinlock));
133 	return 0;
134 }
135 
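/*
 * Pin a vCPU of the named VM to a single pCPU via libvirt (both the live
 * domain and its persistent config) and record the new mapping locally.
 */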
136 int
137 set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
138 {
139 	int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
140 	struct virtual_machine_info *vm_info;
141 
142 	if (vcpu >= RTE_MAX_LCORE) {
143 		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
144 				vcpu, RTE_MAX_LCORE-1);
145 		return -1;
146 	}
147 
148 	vm_info = find_domain_by_name(vm_name);
149 	if (vm_info == NULL) {
150 		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
151 		return -1;
152 	}
153 
154 	if (!virDomainIsActive(vm_info->domainPtr)) {
155 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
156 				"for VM '%s', VM is not active\n",
157 				vcpu, vm_info->name);
158 		return -1;
159 	}
160 
161 	if (vcpu >= vm_info->info.nrVirtCpu) {
162 		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of "
163 				"vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
164 		return -1;
165 	}
166 	memset(global_cpumaps, 0, RTE_MAX_LCORE * global_maplen);
167 
168 	VIR_USE_CPU(global_cpumaps, pcpu);
169 
170 	if (pcpu >= global_n_host_cpus) {
171 		RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
172 				"number of CPUs(%u)\n",
173 				pcpu, global_n_host_cpus);
174 		return -1;
175 	}
176 
177 	if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
178 			global_maplen, flags) < 0) {
179 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
180 				"for VM '%s'\n", vcpu,
181 				vm_info->name);
182 		return -1;
183 	}
184 	rte_spinlock_lock(&(vm_info->config_spinlock));
185 	vm_info->pcpu_map[vcpu] = pcpu;
186 	rte_spinlock_unlock(&(vm_info->config_spinlock));
187 	return 0;
188 }
189 
190 uint16_t
191 get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
192 {
193 	struct virtual_machine_info *vm_info =
194 			(struct virtual_machine_info *)chan_info->priv_info;
195 
196 	if (global_hypervisor_available && (vm_info != NULL)) {
197 		uint16_t pcpu;
198 		rte_spinlock_lock(&(vm_info->config_spinlock));
199 		pcpu = vm_info->pcpu_map[vcpu];
200 		rte_spinlock_unlock(&(vm_info->config_spinlock));
201 		return pcpu;
202 	} else
203 		return 0;
204 }
205 
206 static inline int
207 channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
208 {
209 	rte_spinlock_lock(&(vm_info->config_spinlock));
210 	if (vm_info->channel_mask[channel_num] == 1) {
211 		rte_spinlock_unlock(&(vm_info->config_spinlock));
212 		return 1;
213 	}
214 	rte_spinlock_unlock(&(vm_info->config_spinlock));
215 	return 0;
216 }
217 
218 
219 
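/*
 * Connect to a guest channel endpoint, an AF_UNIX stream socket (typically
 * the virtio-serial back-end exposed by the hypervisor), and switch the
 * descriptor to non-blocking mode. ECONNREFUSED usually means the guest
 * has not opened its end of the channel yet.
 */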
220 static int
221 open_non_blocking_channel(struct channel_info *info)
222 {
223 	int ret, flags;
224 	struct sockaddr_un sock_addr;
225 	fd_set soc_fd_set;
226 	struct timeval tv;
227 
228 	info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
229 	if (info->fd < 0) {
230 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
231 				strerror(errno),
232 				info->channel_path);
233 		return -1;
234 	}
235 	sock_addr.sun_family = AF_UNIX;
236 	memcpy(&sock_addr.sun_path, info->channel_path,
237 			strlen(info->channel_path)+1);
238 
239 	/* Get current flags */
240 	flags = fcntl(info->fd, F_GETFL, 0);
241 	if (flags < 0) {
242 		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
243 				"'%s'\n", strerror(errno), info->channel_path);
244 		return 1;
245 	}
246 	/* Set to Non Blocking */
247 	flags |= O_NONBLOCK;
248 	if (fcntl(info->fd, F_SETFL, flags) < 0) {
249 		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking "
250 				"socket for '%s'\n", strerror(errno), info->channel_path);
251 		return -1;
252 	}
253 	ret = connect(info->fd, (struct sockaddr *)&sock_addr,
254 			sizeof(sock_addr));
255 	if (ret < 0) {
256 		/* ECONNREFUSED error is given when VM is not active */
257 		if (errno == ECONNREFUSED) {
258 			RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not "
259 					"activated its endpoint to channel %s\n",
260 					info->channel_path);
261 			return -1;
262 		}
263 		/* Wait for tv_sec if in progress */
264 		else if (errno == EINPROGRESS) {
265 			tv.tv_sec = 2;
266 			tv.tv_usec = 0;
267 			FD_ZERO(&soc_fd_set);
268 			FD_SET(info->fd, &soc_fd_set);
269 			if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) <= 0) {
270 				RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel "
271 						"'%s'\n", info->channel_path);
272 				return -1;
273 			}
274 		} else {
275 			/* Any other error */
276 			RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket"
277 					" for '%s'\n", strerror(errno), info->channel_path);
278 			return -1;
279 		}
280 	}
281 	return 0;
282 }
283 
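/* Open a host-side FIFO channel for read/write and make it non-blocking. */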
284 static int
285 open_host_channel(struct channel_info *info)
286 {
287 	int flags;
288 
289 	info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
290 	if (info->fd < 0) {
291 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
292 				strerror(errno),
293 				info->channel_path);
294 		return -1;
295 	}
296 
297 	/* Get current flags */
298 	flags = fcntl(info->fd, F_GETFL, 0);
299 	if (flags < 0) {
300 		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
301 				"'%s'\n", strerror(errno), info->channel_path);
302 		return 1;
303 	}
304 	/* Set to Non Blocking */
305 	flags |= O_NONBLOCK;
306 	if (fcntl(info->fd, F_SETFL, flags) < 0) {
307 		RTE_LOG(WARNING, CHANNEL_MANAGER,
308 				"Error(%s) setting non-blocking "
309 				"socket for '%s'\n",
310 				strerror(errno), info->channel_path);
311 		return -1;
312 	}
313 	return 0;
314 }
315 
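/*
 * Initialise a guest channel: open its socket, register it with the
 * channel monitor (epoll) and record it in the owning VM's channel mask.
 */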
316 static int
317 setup_channel_info(struct virtual_machine_info **vm_info_dptr,
318 		struct channel_info **chan_info_dptr, unsigned channel_num)
319 {
320 	struct channel_info *chan_info = *chan_info_dptr;
321 	struct virtual_machine_info *vm_info = *vm_info_dptr;
322 
323 	chan_info->channel_num = channel_num;
324 	chan_info->priv_info = (void *)vm_info;
325 	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
326 	chan_info->type = CHANNEL_TYPE_BINARY;
327 	if (open_non_blocking_channel(chan_info) < 0) {
328 		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
329 				"'%s' for VM '%s'\n",
330 				chan_info->channel_path, vm_info->name);
331 		return -1;
332 	}
333 	if (add_channel_to_monitor(&chan_info) < 0) {
334 		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
335 				"'%s' to epoll ctl for VM '%s'\n",
336 				chan_info->channel_path, vm_info->name);
337 		return -1;
338 
339 	}
340 	rte_spinlock_lock(&(vm_info->config_spinlock));
341 	vm_info->num_channels++;
342 	vm_info->channel_mask[channel_num] = 1;
343 	vm_info->channels[channel_num] = chan_info;
344 	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
345 	rte_spinlock_unlock(&(vm_info->config_spinlock));
346 	return 0;
347 }
348 
349 static int
350 fifo_path(char *dst, unsigned int len, unsigned int id)
351 {
352 	int cnt;
353 
354 	cnt = snprintf(dst, len, "%s%s%d", CHANNEL_MGR_SOCKET_PATH,
355 			CHANNEL_MGR_FIFO_PATTERN_NAME, id);
356 
357 	if ((cnt < 0) || (cnt > (int)len - 1)) {
358 		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not create proper "
359 			"string for fifo path\n");
360 
361 		return -1;
362 	}
363 
364 	return 0;
365 }
366 
367 static int
368 setup_host_channel_info(struct channel_info **chan_info_dptr,
369 		unsigned int channel_num)
370 {
371 	struct channel_info *chan_info = *chan_info_dptr;
372 
373 	chan_info->channel_num = channel_num;
374 	chan_info->priv_info = (void *)NULL;
375 	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
376 	chan_info->type = CHANNEL_TYPE_JSON;
377 
378 	if (open_host_channel(chan_info) < 0) {
379 		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
380 				"'%s'\n",
381 				chan_info->channel_path);
382 		return -1;
383 	}
384 	if (add_channel_to_monitor(&chan_info) < 0) {
385 		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
386 				"'%s' to epoll ctl\n",
387 				chan_info->channel_path);
388 		return -1;
389 
390 	}
391 	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
392 	return 0;
393 }
394 
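/*
 * Scan CHANNEL_MGR_SOCKET_PATH for endpoints named "<vm_name>.<channel_num>"
 * and set up every channel that has not been added already. Returns the
 * number of channels enabled, or -1 if the directory cannot be opened.
 */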
395 int
396 add_all_channels(const char *vm_name)
397 {
398 	DIR *d;
399 	struct dirent *dir;
400 	struct virtual_machine_info *vm_info;
401 	struct channel_info *chan_info;
402 	char *token, *remaining, *tail_ptr;
403 	char socket_name[PATH_MAX];
404 	unsigned channel_num;
405 	int num_channels_enabled = 0;
406 
407 	/* verify VM exists */
408 	vm_info = find_domain_by_name(vm_name);
409 	if (vm_info == NULL) {
410 		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found"
411 				" during channel discovery\n", vm_name);
412 		return 0;
413 	}
414 	if (!virDomainIsActive(vm_info->domainPtr)) {
415 		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
416 		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
417 		return 0;
418 	}
419 	d = opendir(CHANNEL_MGR_SOCKET_PATH);
420 	if (d == NULL) {
421 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n",
422 				CHANNEL_MGR_SOCKET_PATH, strerror(errno));
423 		return -1;
424 	}
425 	while ((dir = readdir(d)) != NULL) {
426 		if (!strncmp(dir->d_name, ".", 1) ||
427 				!strncmp(dir->d_name, "..", 2))
428 			continue;
429 
430 		strlcpy(socket_name, dir->d_name, sizeof(socket_name));
431 		remaining = socket_name;
432 		/* Extract vm_name from "<vm_name>.<channel_num>" */
433 		token = strsep(&remaining, ".");
434 		if (remaining == NULL)
435 			continue;
436 		if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN))
437 			continue;
438 
439 		/* remaining should contain only <channel_num> */
440 		errno = 0;
441 		channel_num = (unsigned)strtol(remaining, &tail_ptr, 0);
442 		if ((errno != 0) || (remaining[0] == '\0') ||
443 				tail_ptr == NULL || (*tail_ptr != '\0')) {
444 			RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name "
445 					"'%s' found, it should be in the form of "
446 					"'<guest_name>.<channel_num>(decimal)'\n",
447 					dir->d_name);
448 			continue;
449 		}
450 		if (channel_num >= RTE_MAX_LCORE) {
451 			RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
452 					"greater than max allowable: %d, skipping '%s%s'\n",
453 					channel_num, RTE_MAX_LCORE-1,
454 					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
455 			continue;
456 		}
457 		/* if channel has not been added previously */
458 		if (channel_exists(vm_info, channel_num))
459 			continue;
460 
461 		chan_info = rte_malloc(NULL, sizeof(*chan_info),
462 				RTE_CACHE_LINE_SIZE);
463 		if (chan_info == NULL) {
464 			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
465 				"channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);
466 			continue;
467 		}
468 
469 		if ((size_t)snprintf(chan_info->channel_path,
470 				sizeof(chan_info->channel_path), "%s%s",
471 				CHANNEL_MGR_SOCKET_PATH, dir->d_name)
472 					>= sizeof(chan_info->channel_path)) {
473 			RTE_LOG(ERR, CHANNEL_MANAGER, "Pathname too long for channel '%s%s'\n",
474 					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
475 			rte_free(chan_info);
476 			continue;
477 		}
478 
479 		if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
480 			rte_free(chan_info);
481 			continue;
482 		}
483 
484 		num_channels_enabled++;
485 	}
486 	closedir(d);
487 	return num_channels_enabled;
488 }
489 
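/*
 * Set up channels for an explicit list of channel numbers belonging to the
 * named VM. Each requested endpoint must already exist on disk. Returns the
 * number of channels enabled.
 */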
490 int
491 add_channels(const char *vm_name, unsigned *channel_list,
492 		unsigned len_channel_list)
493 {
494 	struct virtual_machine_info *vm_info;
495 	struct channel_info *chan_info;
496 	char socket_path[PATH_MAX];
497 	unsigned i;
498 	int num_channels_enabled = 0;
499 
500 	vm_info = find_domain_by_name(vm_name);
501 	if (vm_info == NULL) {
502 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
503 				"not found\n", vm_name);
504 		return 0;
505 	}
506 
507 	if (!virDomainIsActive(vm_info->domainPtr)) {
508 		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
509 		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
510 		return 0;
511 	}
512 
513 	for (i = 0; i < len_channel_list; i++) {
514 		if (channel_list[i] >= RTE_MAX_LCORE) {
515 			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
516 							"0...%d\n", channel_list[i],
517 							RTE_MAX_LCORE-1);
518 			continue;
519 		}
520 		if (channel_exists(vm_info, channel_list[i])) {
521 			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping "
522 					"'%s.%u'\n", vm_name, channel_list[i]);
523 			continue;
524 		}
525 
526 		snprintf(socket_path, sizeof(socket_path), "%s%s.%u",
527 				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
528 		errno = 0;
529 		if (access(socket_path, F_OK) < 0) {
530 			RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
531 					"%s\n", socket_path, strerror(errno));
532 			continue;
533 		}
534 		chan_info = rte_malloc(NULL, sizeof(*chan_info),
535 				RTE_CACHE_LINE_SIZE);
536 		if (chan_info == NULL) {
537 			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
538 					"channel '%s'\n", socket_path);
539 			continue;
540 		}
541 		snprintf(chan_info->channel_path,
542 				sizeof(chan_info->channel_path), "%s%s.%u",
543 				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
544 		if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) {
545 			rte_free(chan_info);
546 			continue;
547 		}
548 		num_channels_enabled++;
549 
550 	}
551 	return num_channels_enabled;
552 }
553 
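/*
 * Create one FIFO per enabled lcore and register it as a host (JSON)
 * channel. On any failure the channels opened so far are torn down and 0
 * is returned.
 */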
554 int
555 add_host_channels(void)
556 {
557 	struct channel_info *chan_info;
558 	char socket_path[PATH_MAX];
559 	int num_channels_enabled = 0;
560 	int ret;
561 	struct core_info *ci;
562 	struct channel_info *chan_infos[RTE_MAX_LCORE];
563 	int i;
564 
565 	for (i = 0; i < RTE_MAX_LCORE; i++)
566 		chan_infos[i] = NULL;
567 
568 	ci = get_core_info();
569 	if (ci == NULL) {
570 		RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot allocate memory for core_info\n");
571 		return 0;
572 	}
573 
574 	for (i = 0; i < ci->core_count; i++) {
575 		if (rte_lcore_index(i) == -1)
576 			continue;
577 
578 		if (ci->cd[i].global_enabled_cpus == 0)
579 			continue;
580 
581 		ret = fifo_path(socket_path, sizeof(socket_path), i);
582 		if (ret < 0)
583 			goto error;
584 
585 		ret = mkfifo(socket_path, 0660);
586 		RTE_LOG(DEBUG, CHANNEL_MANAGER, "TRY CREATE fifo '%s'\n",
587 			socket_path);
588 		if ((errno != EEXIST) && (ret < 0)) {
589 			RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
590 					"%s\n", socket_path, strerror(errno));
591 			goto error;
592 		}
593 		chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
594 		if (chan_info == NULL) {
595 			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
596 					"channel '%s'\n", socket_path);
597 			goto error;
598 		}
599 		chan_infos[i] = chan_info;
600 		strlcpy(chan_info->channel_path, socket_path,
601 				sizeof(chan_info->channel_path));
602 
603 		if (setup_host_channel_info(&chan_info, i) < 0) {
604 			rte_free(chan_info);
605 			chan_infos[i] = NULL;
606 			goto error;
607 		}
608 		num_channels_enabled++;
609 	}
610 
611 	return num_channels_enabled;
612 error:
613 	/* Clean up the channels opened before we hit an error. */
614 	for (i = 0; i < ci->core_count; i++) {
615 		if (chan_infos[i] != NULL) {
616 			remove_channel_from_monitor(chan_infos[i]);
617 			close(chan_infos[i]->fd);
618 			rte_free(chan_infos[i]);
619 		}
620 	}
621 	return 0;
622 }
623 
624 int
625 remove_channel(struct channel_info **chan_info_dptr)
626 {
627 	struct virtual_machine_info *vm_info;
628 	struct channel_info *chan_info = *chan_info_dptr;
629 
630 	close(chan_info->fd);
631 
632 	vm_info = (struct virtual_machine_info *)chan_info->priv_info;
633 
634 	rte_spinlock_lock(&(vm_info->config_spinlock));
635 	vm_info->channel_mask[chan_info->channel_num] = 0;
636 	vm_info->num_channels--;
637 	rte_spinlock_unlock(&(vm_info->config_spinlock));
638 
639 	rte_free(chan_info);
640 	return 0;
641 }
642 
643 int
644 set_channel_status_all(const char *vm_name, enum channel_status status)
645 {
646 	struct virtual_machine_info *vm_info;
647 	unsigned i;
648 	char mask[RTE_MAX_LCORE];
649 	int num_channels_changed = 0;
650 
651 	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
652 			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
653 		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
654 				"disabled: Unable to change status for VM '%s'\n", vm_name);
655 	}
656 	vm_info = find_domain_by_name(vm_name);
657 	if (vm_info == NULL) {
658 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to disable channels: VM '%s' "
659 				"not found\n", vm_name);
660 		return 0;
661 	}
662 
663 	rte_spinlock_lock(&(vm_info->config_spinlock));
664 	memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
665 	for (i = 0; i < RTE_MAX_LCORE; i++) {
666 		if (mask[i] != 1)
667 			continue;
668 		vm_info->channels[i]->status = status;
669 		num_channels_changed++;
670 	}
671 	rte_spinlock_unlock(&(vm_info->config_spinlock));
672 	return num_channels_changed;
673 
674 }
675 
676 int
677 set_channel_status(const char *vm_name, unsigned *channel_list,
678 		unsigned len_channel_list, enum channel_status status)
679 {
680 	struct virtual_machine_info *vm_info;
681 	unsigned i;
682 	int num_channels_changed = 0;
683 
684 	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
685 			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
686 		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
687 				"disabled: Unable to change status for VM '%s'\n", vm_name);
688 	}
689 	vm_info = find_domain_by_name(vm_name);
690 	if (vm_info == NULL) {
691 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
692 				"not found\n", vm_name);
693 		return 0;
694 	}
695 	for (i = 0; i < len_channel_list; i++) {
696 		if (channel_exists(vm_info, channel_list[i])) {
697 			rte_spinlock_lock(&(vm_info->config_spinlock));
698 			vm_info->channels[channel_list[i]]->status = status;
699 			rte_spinlock_unlock(&(vm_info->config_spinlock));
700 			num_channels_changed++;
701 		}
702 	}
703 	return num_channels_changed;
704 }
705 
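/*
 * Populate lvm_info[] with the name, vCPU count and vCPU->pCPU pinning of
 * every running persistent libvirt domain. Does nothing when no hypervisor
 * connection is available.
 */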
706 void
707 get_all_vm(int *num_vm, int *num_vcpu)
708 {
709 
710 	virNodeInfo node_info;
711 	virDomainPtr *domptr;
712 	int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
713 	unsigned int jj;
714 	const char *vm_name;
715 	unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
716 				VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
717 	unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
718 
719 	if (!global_hypervisor_available)
720 		return;
721 
722 	memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
723 	if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
724 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
725 		return;
726 	}
727 
728 	/* Returns number of pcpus */
729 	global_n_host_cpus = (unsigned int)node_info.cpus;
730 
731 	/* Returns number of active domains */
732 	*num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
733 					domain_flags);
734 	if (*num_vm <= 0) {
735 		RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
736 		return;
737 	}
738 
739 	for (i = 0; i < *num_vm; i++) {
740 
741 		/* Get Domain Names */
742 		vm_name = virDomainGetName(domptr[i]);
743 		lvm_info[i].vm_name = vm_name;
744 
745 		/* Get Number of Vcpus */
746 		numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);
747 
748 		/* Get Number of VCpus & VcpuPinInfo */
749 		n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
750 				numVcpus[i], global_cpumaps,
751 				global_maplen, domain_flag);
752 
753 		if ((int)n_vcpus > 0) {
754 			*num_vcpu = n_vcpus;
755 			lvm_info[i].num_cpus = n_vcpus;
756 		}
757 
758 		/* Save pcpu in use by libvirt VMs */
759 		for (ii = 0; ii < n_vcpus; ii++) {
760 			for (jj = 0; jj < global_n_host_cpus; jj++) {
761 				if (VIR_CPU_USABLE(global_cpumaps,
762 						global_maplen, ii, jj) > 0) {
763 					lvm_info[i].pcpus[ii] = jj;
764 				}
765 			}
766 		}
767 	}
768 }
769 
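/*
 * Copy a snapshot of the named VM's status, channels and vCPU->pCPU
 * mapping into the caller-supplied vm_info structure.
 */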
770 int
771 get_info_vm(const char *vm_name, struct vm_info *info)
772 {
773 	struct virtual_machine_info *vm_info;
774 	unsigned i, channel_num = 0;
775 	char mask[RTE_MAX_LCORE];
776 
777 	vm_info = find_domain_by_name(vm_name);
778 	if (vm_info == NULL) {
779 		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
780 		return -1;
781 	}
782 	info->status = CHANNEL_MGR_VM_ACTIVE;
783 	if (!virDomainIsActive(vm_info->domainPtr))
784 		info->status = CHANNEL_MGR_VM_INACTIVE;
785 
786 	rte_spinlock_lock(&(vm_info->config_spinlock));
787 
788 	memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
789 	for (i = 0; i < RTE_MAX_LCORE; i++) {
790 		if (mask[i] != 1)
791 			continue;
792 		info->channels[channel_num].channel_num = i;
793 		memcpy(info->channels[channel_num].channel_path,
794 				vm_info->channels[i]->channel_path,
795 				UNIX_PATH_MAX);
796 		info->channels[channel_num].status =
797 				vm_info->channels[i]->status;
798 		info->channels[channel_num].fd =
799 				vm_info->channels[i]->fd;
800 		channel_num++;
801 	}
802 
803 	info->allow_query = vm_info->allow_query;
804 	info->num_channels = channel_num;
805 	info->num_vcpus = vm_info->info.nrVirtCpu;
806 	rte_spinlock_unlock(&(vm_info->config_spinlock));
807 
808 	memcpy(info->name, vm_info->name, sizeof(vm_info->name));
809 	rte_spinlock_lock(&(vm_info->config_spinlock));
810 	for (i = 0; i < info->num_vcpus; i++) {
811 		info->pcpu_map[i] = vm_info->pcpu_map[i];
812 	}
813 	rte_spinlock_unlock(&(vm_info->config_spinlock));
814 	return 0;
815 }
816 
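/*
 * Register a libvirt domain with the channel manager: look it up by name,
 * cache its vCPU count and pinning, and add it to the VM list.
 */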
817 int
818 add_vm(const char *vm_name)
819 {
820 	struct virtual_machine_info *new_domain;
821 	virDomainPtr dom_ptr;
822 	int i;
823 
824 	if (find_domain_by_name(vm_name) != NULL) {
825 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' "
826 				"already exists\n", vm_name);
827 		return -1;
828 	}
829 
830 	if (global_vir_conn_ptr == NULL) {
831 		RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n");
832 		return -1;
833 	}
834 	dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name);
835 	if (dom_ptr == NULL) {
836 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: "
837 				"VM '%s' not found\n", vm_name);
838 		return -1;
839 	}
840 
841 	new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
842 			RTE_CACHE_LINE_SIZE);
843 	if (new_domain == NULL) {
844 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
845 				"info\n");
846 		return -1;
847 	}
848 	new_domain->domainPtr = dom_ptr;
849 	if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) {
850 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n");
851 		rte_free(new_domain);
852 		return -1;
853 	}
854 	if (new_domain->info.nrVirtCpu > RTE_MAX_LCORE) {
855 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error: the number of virtual CPUs(%u) is "
856 				"greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
857 				RTE_MAX_LCORE);
858 		rte_free(new_domain);
859 		return -1;
860 	}
861 
862 	for (i = 0; i < RTE_MAX_LCORE; i++)
863 		new_domain->pcpu_map[i] = 0;
864 
865 	if (update_pcpus_mask(new_domain) < 0) {
866 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
867 		rte_free(new_domain);
868 		return -1;
869 	}
870 	strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
871 	new_domain->name[sizeof(new_domain->name) - 1] = '\0';
872 	memset(new_domain->channel_mask, 0, RTE_MAX_LCORE);
873 	new_domain->num_channels = 0;
874 
875 	if (!virDomainIsActive(dom_ptr))
876 		new_domain->status = CHANNEL_MGR_VM_INACTIVE;
877 	else
878 		new_domain->status = CHANNEL_MGR_VM_ACTIVE;
879 
880 	new_domain->allow_query = 0;
881 	rte_spinlock_init(&(new_domain->config_spinlock));
882 	TAILQ_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
883 	return 0;
884 }
885 
886 int
887 remove_vm(const char *vm_name)
888 {
889 	struct virtual_machine_info *vm_info = find_domain_by_name(vm_name);
890 
891 	if (vm_info == NULL) {
892 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' "
893 				"not found\n", vm_name);
894 		return -1;
895 	}
896 	rte_spinlock_lock(&vm_info->config_spinlock);
897 	if (vm_info->num_channels != 0) {
898 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are "
899 				"%"PRId8" channels still active\n",
900 				vm_name, vm_info->num_channels);
901 		rte_spinlock_unlock(&vm_info->config_spinlock);
902 		return -1;
903 	}
904 	TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
905 	rte_spinlock_unlock(&vm_info->config_spinlock);
906 	rte_free(vm_info);
907 	return 0;
908 }
909 
910 int
911 set_query_status(char *vm_name,
912 		bool allow_query)
913 {
914 	struct virtual_machine_info *vm_info;
915 
916 	vm_info = find_domain_by_name(vm_name);
917 	if (vm_info == NULL) {
918 		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
919 		return -1;
920 	}
921 	rte_spinlock_lock(&(vm_info->config_spinlock));
922 	vm_info->allow_query = allow_query ? 1 : 0;
923 	rte_spinlock_unlock(&(vm_info->config_spinlock));
924 	return 0;
925 }
926 
927 static void
928 disconnect_hypervisor(void)
929 {
930 	if (global_vir_conn_ptr != NULL) {
931 		virConnectClose(global_vir_conn_ptr);
932 		global_vir_conn_ptr = NULL;
933 	}
934 }
935 
936 static int
937 connect_hypervisor(const char *path)
938 {
939 	if (global_vir_conn_ptr != NULL) {
940 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection "
941 				"already established\n", path);
942 		return -1;
943 	}
944 	global_vir_conn_ptr = virConnectOpen(path);
945 	if (global_vir_conn_ptr == NULL) {
946 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error failed to open connection to "
947 				"Hypervisor '%s'\n", path);
948 		return -1;
949 	}
950 	return 0;
951 }
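
/*
 * Initialise the channel manager: connect to the hypervisor at 'path' and
 * allocate the global CPU map buffers. If no hypervisor is reachable the
 * manager continues in host-only mode and assumes a default of 64 host CPUs.
 */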
952 int
953 channel_manager_init(const char *path __rte_unused)
954 {
955 	virNodeInfo info;
956 
957 	TAILQ_INIT(&vm_list_head);
958 	if (connect_hypervisor(path) < 0) {
959 		global_n_host_cpus = 64;
960 		global_hypervisor_available = 0;
961 		RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to connect to hypervisor, continuing in host-only mode\n");
962 	} else {
963 		global_hypervisor_available = 1;
964 
965 		global_maplen = VIR_CPU_MAPLEN(RTE_MAX_LCORE);
966 
967 		global_vircpuinfo = rte_zmalloc(NULL,
968 				sizeof(*global_vircpuinfo) *
969 				RTE_MAX_LCORE, RTE_CACHE_LINE_SIZE);
970 		if (global_vircpuinfo == NULL) {
971 			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
972 			goto error;
973 		}
974 		global_cpumaps = rte_zmalloc(NULL,
975 				RTE_MAX_LCORE * global_maplen,
976 				RTE_CACHE_LINE_SIZE);
977 		if (global_cpumaps == NULL)
978 			goto error;
979 
980 		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
981 			RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
982 			goto error;
983 		}
984 		global_n_host_cpus = (unsigned int)info.cpus;
985 	}
986 
987 
988 
989 	if (global_n_host_cpus > RTE_MAX_LCORE) {
990 		RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
991 				"maximum of %u. No cores over %u should be used.\n",
992 				global_n_host_cpus, RTE_MAX_LCORE,
993 				RTE_MAX_LCORE - 1);
994 		global_n_host_cpus = RTE_MAX_LCORE;
995 	}
996 
997 	return 0;
998 error:
999 	if (global_hypervisor_available)
1000 		disconnect_hypervisor();
1001 	return -1;
1002 }
1003 
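/*
 * Tear down all VM channels, free every VM entry and, if a hypervisor
 * connection was established, release the global buffers and disconnect.
 */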
1004 void
1005 channel_manager_exit(void)
1006 {
1007 	unsigned i;
1008 	char mask[RTE_MAX_LCORE];
1009 	struct virtual_machine_info *vm_info, *tmp;
1010 
1011 	RTE_TAILQ_FOREACH_SAFE(vm_info, &vm_list_head, vms_info, tmp) {
1012 
1013 		rte_spinlock_lock(&(vm_info->config_spinlock));
1014 
1015 		memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
1016 		for (i = 0; i < RTE_MAX_LCORE; i++) {
1017 			if (mask[i] != 1)
1018 				continue;
1019 			remove_channel_from_monitor(
1020 					vm_info->channels[i]);
1021 			close(vm_info->channels[i]->fd);
1022 			rte_free(vm_info->channels[i]);
1023 		}
1024 		rte_spinlock_unlock(&(vm_info->config_spinlock));
1025 
1026 		TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
1027 		rte_free(vm_info);
1028 	}
1029 
1030 	if (global_hypervisor_available) {
1031 		/* Only needed if hypervisor available */
1032 		rte_free(global_cpumaps);
1033 		rte_free(global_vircpuinfo);
1034 		disconnect_hypervisor();
1035 	}
1036 }
1037