1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <fcntl.h> 8 #include <unistd.h> 9 #include <inttypes.h> 10 #include <dirent.h> 11 #include <errno.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <sys/socket.h> 17 #include <sys/select.h> 18 19 #include <rte_string_fns.h> 20 #include <rte_malloc.h> 21 #include <rte_memory.h> 22 #include <rte_mempool.h> 23 #include <rte_log.h> 24 #include <rte_atomic.h> 25 #include <rte_spinlock.h> 26 27 #include <libvirt/libvirt.h> 28 29 #include "channel_manager.h" 30 #include "channel_commands.h" 31 #include "channel_monitor.h" 32 #include "power_manager.h" 33 34 35 #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1 36 37 struct libvirt_vm_info lvm_info[MAX_CLIENTS]; 38 39 /* Global pointer to libvirt connection */ 40 static virConnectPtr global_vir_conn_ptr; 41 42 static unsigned char *global_cpumaps; 43 static virVcpuInfo *global_vircpuinfo; 44 static size_t global_maplen; 45 46 static unsigned int global_n_host_cpus; 47 static bool global_hypervisor_available; 48 49 /* 50 * Represents a single Virtual Machine 51 */ 52 struct virtual_machine_info { 53 char name[CHANNEL_MGR_MAX_NAME_LEN]; 54 uint16_t pcpu_map[RTE_MAX_LCORE]; 55 struct channel_info *channels[RTE_MAX_LCORE]; 56 char channel_mask[RTE_MAX_LCORE]; 57 uint8_t num_channels; 58 enum vm_status status; 59 virDomainPtr domainPtr; 60 virDomainInfo info; 61 rte_spinlock_t config_spinlock; 62 int allow_query; 63 LIST_ENTRY(virtual_machine_info) vms_info; 64 }; 65 66 LIST_HEAD(, virtual_machine_info) vm_list_head; 67 68 static struct virtual_machine_info * 69 find_domain_by_name(const char *name) 70 { 71 struct virtual_machine_info *info; 72 LIST_FOREACH(info, &vm_list_head, vms_info) { 73 if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1)) 74 return info; 75 } 76 return NULL; 77 } 78 79 static int 80 update_pcpus_mask(struct virtual_machine_info *vm_info) 81 { 82 virVcpuInfoPtr cpuinfo; 83 unsigned i, j; 84 int n_vcpus; 85 86 memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen); 87 88 if (!virDomainIsActive(vm_info->domainPtr)) { 89 n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr, 90 vm_info->info.nrVirtCpu, global_cpumaps, global_maplen, 91 VIR_DOMAIN_AFFECT_CONFIG); 92 if (n_vcpus < 0) { 93 RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for " 94 "in-active VM '%s'\n", vm_info->name); 95 return -1; 96 } 97 goto update_pcpus; 98 } 99 100 memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)* 101 RTE_MAX_LCORE); 102 103 cpuinfo = global_vircpuinfo; 104 105 n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo, 106 RTE_MAX_LCORE, global_cpumaps, global_maplen); 107 if (n_vcpus < 0) { 108 RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for " 109 "active VM '%s'\n", vm_info->name); 110 return -1; 111 } 112 update_pcpus: 113 if (n_vcpus >= RTE_MAX_LCORE) { 114 RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range " 115 "0...%d\n", n_vcpus, RTE_MAX_LCORE-1); 116 return -1; 117 } 118 if (n_vcpus != vm_info->info.nrVirtCpu) { 119 RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s" 120 " from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu, 121 n_vcpus); 122 vm_info->info.nrVirtCpu = n_vcpus; 123 } 124 rte_spinlock_lock(&(vm_info->config_spinlock)); 125 for (i = 0; i < vm_info->info.nrVirtCpu; i++) { 126 for (j = 0; j < global_n_host_cpus; j++) { 127 if (VIR_CPU_USABLE(global_cpumaps, 128 global_maplen, i, j) <= 0) 129 continue; 130 vm_info->pcpu_map[i] = j; 131 } 132 } 133 rte_spinlock_unlock(&(vm_info->config_spinlock)); 134 return 0; 135 } 136 137 int 138 set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu) 139 { 140 int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG; 141 struct virtual_machine_info *vm_info; 142 143 if (vcpu >= RTE_MAX_LCORE) { 144 RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n", 145 vcpu, RTE_MAX_LCORE-1); 146 return -1; 147 } 148 149 vm_info = find_domain_by_name(vm_name); 150 if (vm_info == NULL) { 151 RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name); 152 return -1; 153 } 154 155 if (!virDomainIsActive(vm_info->domainPtr)) { 156 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU " 157 " for VM '%s', VM is not active\n", 158 vcpu, vm_info->name); 159 return -1; 160 } 161 162 if (vcpu >= vm_info->info.nrVirtCpu) { 163 RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of " 164 "vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu); 165 return -1; 166 } 167 memset(global_cpumaps, 0, RTE_MAX_LCORE * global_maplen); 168 169 VIR_USE_CPU(global_cpumaps, pcpu); 170 171 if (pcpu >= global_n_host_cpus) { 172 RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available " 173 "number of CPUs(%u)\n", 174 pcpu, global_n_host_cpus); 175 return -1; 176 } 177 178 if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps, 179 global_maplen, flags) < 0) { 180 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU " 181 " for VM '%s'\n", vcpu, 182 vm_info->name); 183 return -1; 184 } 185 rte_spinlock_lock(&(vm_info->config_spinlock)); 186 vm_info->pcpu_map[vcpu] = pcpu; 187 rte_spinlock_unlock(&(vm_info->config_spinlock)); 188 return 0; 189 } 190 191 uint16_t 192 get_pcpu(struct channel_info *chan_info, unsigned int vcpu) 193 { 194 struct virtual_machine_info *vm_info = 195 (struct virtual_machine_info *)chan_info->priv_info; 196 197 if (global_hypervisor_available && (vm_info != NULL)) { 198 uint16_t pcpu; 199 rte_spinlock_lock(&(vm_info->config_spinlock)); 200 pcpu = vm_info->pcpu_map[vcpu]; 201 rte_spinlock_unlock(&(vm_info->config_spinlock)); 202 return pcpu; 203 } else 204 return 0; 205 } 206 207 static inline int 208 channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num) 209 { 210 rte_spinlock_lock(&(vm_info->config_spinlock)); 211 if (vm_info->channel_mask[channel_num] == 1) { 212 rte_spinlock_unlock(&(vm_info->config_spinlock)); 213 return 1; 214 } 215 rte_spinlock_unlock(&(vm_info->config_spinlock)); 216 return 0; 217 } 218 219 220 221 static int 222 open_non_blocking_channel(struct channel_info *info) 223 { 224 int ret, flags; 225 struct sockaddr_un sock_addr; 226 fd_set soc_fd_set; 227 struct timeval tv; 228 229 info->fd = socket(AF_UNIX, SOCK_STREAM, 0); 230 if (info->fd < 0) { 231 RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n", 232 strerror(errno), 233 info->channel_path); 234 return -1; 235 } 236 sock_addr.sun_family = AF_UNIX; 237 memcpy(&sock_addr.sun_path, info->channel_path, 238 strlen(info->channel_path)+1); 239 240 /* Get current flags */ 241 flags = fcntl(info->fd, F_GETFL, 0); 242 if (flags < 0) { 243 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for" 244 "'%s'\n", strerror(errno), info->channel_path); 245 return 1; 246 } 247 /* Set to Non Blocking */ 248 flags |= O_NONBLOCK; 249 if (fcntl(info->fd, F_SETFL, flags) < 0) { 250 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking " 251 "socket for '%s'\n", strerror(errno), info->channel_path); 252 return -1; 253 } 254 ret = connect(info->fd, (struct sockaddr *)&sock_addr, 255 sizeof(sock_addr)); 256 if (ret < 0) { 257 /* ECONNREFUSED error is given when VM is not active */ 258 if (errno == ECONNREFUSED) { 259 RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not " 260 "activated its endpoint to channel %s\n", 261 info->channel_path); 262 return -1; 263 } 264 /* Wait for tv_sec if in progress */ 265 else if (errno == EINPROGRESS) { 266 tv.tv_sec = 2; 267 tv.tv_usec = 0; 268 FD_ZERO(&soc_fd_set); 269 FD_SET(info->fd, &soc_fd_set); 270 if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) > 0) { 271 RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel " 272 "'%s'\n", info->channel_path); 273 return -1; 274 } 275 } else { 276 /* Any other error */ 277 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket" 278 " for '%s'\n", strerror(errno), info->channel_path); 279 return -1; 280 } 281 } 282 return 0; 283 } 284 285 static int 286 open_host_channel(struct channel_info *info) 287 { 288 int flags; 289 290 info->fd = open(info->channel_path, O_RDWR | O_RSYNC); 291 if (info->fd < 0) { 292 RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n", 293 strerror(errno), 294 info->channel_path); 295 return -1; 296 } 297 298 /* Get current flags */ 299 flags = fcntl(info->fd, F_GETFL, 0); 300 if (flags < 0) { 301 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for" 302 "'%s'\n", strerror(errno), info->channel_path); 303 return 1; 304 } 305 /* Set to Non Blocking */ 306 flags |= O_NONBLOCK; 307 if (fcntl(info->fd, F_SETFL, flags) < 0) { 308 RTE_LOG(WARNING, CHANNEL_MANAGER, 309 "Error(%s) setting non-blocking " 310 "socket for '%s'\n", 311 strerror(errno), info->channel_path); 312 return -1; 313 } 314 return 0; 315 } 316 317 static int 318 setup_channel_info(struct virtual_machine_info **vm_info_dptr, 319 struct channel_info **chan_info_dptr, unsigned channel_num) 320 { 321 struct channel_info *chan_info = *chan_info_dptr; 322 struct virtual_machine_info *vm_info = *vm_info_dptr; 323 324 chan_info->channel_num = channel_num; 325 chan_info->priv_info = (void *)vm_info; 326 chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED; 327 chan_info->type = CHANNEL_TYPE_BINARY; 328 if (open_non_blocking_channel(chan_info) < 0) { 329 RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: " 330 "'%s' for VM '%s'\n", 331 chan_info->channel_path, vm_info->name); 332 return -1; 333 } 334 if (add_channel_to_monitor(&chan_info) < 0) { 335 RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: " 336 "'%s' to epoll ctl for VM '%s'\n", 337 chan_info->channel_path, vm_info->name); 338 return -1; 339 340 } 341 rte_spinlock_lock(&(vm_info->config_spinlock)); 342 vm_info->num_channels++; 343 vm_info->channel_mask[channel_num] = 1; 344 vm_info->channels[channel_num] = chan_info; 345 chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED; 346 rte_spinlock_unlock(&(vm_info->config_spinlock)); 347 return 0; 348 } 349 350 static int 351 fifo_path(char *dst, unsigned int len, unsigned int id) 352 { 353 int cnt; 354 355 cnt = snprintf(dst, len, "%s%s%d", CHANNEL_MGR_SOCKET_PATH, 356 CHANNEL_MGR_FIFO_PATTERN_NAME, id); 357 358 if ((cnt < 0) || (cnt > (int)len - 1)) { 359 RTE_LOG(ERR, CHANNEL_MANAGER, "Could not create proper " 360 "string for fifo path\n"); 361 362 return -1; 363 } 364 365 return 0; 366 } 367 368 static int 369 setup_host_channel_info(struct channel_info **chan_info_dptr, 370 unsigned int channel_num) 371 { 372 struct channel_info *chan_info = *chan_info_dptr; 373 374 chan_info->channel_num = channel_num; 375 chan_info->priv_info = (void *)NULL; 376 chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED; 377 chan_info->type = CHANNEL_TYPE_JSON; 378 379 if (open_host_channel(chan_info) < 0) { 380 RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: " 381 "'%s'\n", 382 chan_info->channel_path); 383 return -1; 384 } 385 if (add_channel_to_monitor(&chan_info) < 0) { 386 RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: " 387 "'%s' to epoll ctl\n", 388 chan_info->channel_path); 389 return -1; 390 391 } 392 chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED; 393 return 0; 394 } 395 396 int 397 add_all_channels(const char *vm_name) 398 { 399 DIR *d; 400 struct dirent *dir; 401 struct virtual_machine_info *vm_info; 402 struct channel_info *chan_info; 403 char *token, *remaining, *tail_ptr; 404 char socket_name[PATH_MAX]; 405 unsigned channel_num; 406 int num_channels_enabled = 0; 407 408 /* verify VM exists */ 409 vm_info = find_domain_by_name(vm_name); 410 if (vm_info == NULL) { 411 RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found" 412 " during channel discovery\n", vm_name); 413 return 0; 414 } 415 if (!virDomainIsActive(vm_info->domainPtr)) { 416 RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name); 417 vm_info->status = CHANNEL_MGR_VM_INACTIVE; 418 return 0; 419 } 420 d = opendir(CHANNEL_MGR_SOCKET_PATH); 421 if (d == NULL) { 422 RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n", 423 CHANNEL_MGR_SOCKET_PATH, strerror(errno)); 424 return -1; 425 } 426 while ((dir = readdir(d)) != NULL) { 427 if (!strncmp(dir->d_name, ".", 1) || 428 !strncmp(dir->d_name, "..", 2)) 429 continue; 430 431 strlcpy(socket_name, dir->d_name, sizeof(socket_name)); 432 remaining = socket_name; 433 /* Extract vm_name from "<vm_name>.<channel_num>" */ 434 token = strsep(&remaining, "."); 435 if (remaining == NULL) 436 continue; 437 if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN)) 438 continue; 439 440 /* remaining should contain only <channel_num> */ 441 errno = 0; 442 channel_num = (unsigned)strtol(remaining, &tail_ptr, 0); 443 if ((errno != 0) || (remaining[0] == '\0') || 444 tail_ptr == NULL || (*tail_ptr != '\0')) { 445 RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name" 446 "'%s' found it should be in the form of " 447 "'<guest_name>.<channel_num>(decimal)'\n", 448 dir->d_name); 449 continue; 450 } 451 if (channel_num >= RTE_MAX_LCORE) { 452 RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is " 453 "greater than max allowable: %d, skipping '%s%s'\n", 454 channel_num, RTE_MAX_LCORE-1, 455 CHANNEL_MGR_SOCKET_PATH, dir->d_name); 456 continue; 457 } 458 /* if channel has not been added previously */ 459 if (channel_exists(vm_info, channel_num)) 460 continue; 461 462 chan_info = rte_malloc(NULL, sizeof(*chan_info), 463 RTE_CACHE_LINE_SIZE); 464 if (chan_info == NULL) { 465 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for " 466 "channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name); 467 continue; 468 } 469 470 if ((size_t)snprintf(chan_info->channel_path, 471 sizeof(chan_info->channel_path), "%s%s", 472 CHANNEL_MGR_SOCKET_PATH, dir->d_name) 473 >= sizeof(chan_info->channel_path)) { 474 RTE_LOG(ERR, CHANNEL_MANAGER, "Pathname too long for channel '%s%s'\n", 475 CHANNEL_MGR_SOCKET_PATH, dir->d_name); 476 rte_free(chan_info); 477 continue; 478 } 479 480 if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) { 481 rte_free(chan_info); 482 continue; 483 } 484 485 num_channels_enabled++; 486 } 487 closedir(d); 488 return num_channels_enabled; 489 } 490 491 int 492 add_channels(const char *vm_name, unsigned *channel_list, 493 unsigned len_channel_list) 494 { 495 struct virtual_machine_info *vm_info; 496 struct channel_info *chan_info; 497 char socket_path[PATH_MAX]; 498 unsigned i; 499 int num_channels_enabled = 0; 500 501 vm_info = find_domain_by_name(vm_name); 502 if (vm_info == NULL) { 503 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' " 504 "not found\n", vm_name); 505 return 0; 506 } 507 508 if (!virDomainIsActive(vm_info->domainPtr)) { 509 RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name); 510 vm_info->status = CHANNEL_MGR_VM_INACTIVE; 511 return 0; 512 } 513 514 for (i = 0; i < len_channel_list; i++) { 515 516 if (channel_list[i] >= RTE_MAX_LCORE) { 517 RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range " 518 "0...%d\n", channel_list[i], 519 RTE_MAX_LCORE-1); 520 continue; 521 } 522 if (channel_exists(vm_info, channel_list[i])) { 523 RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping " 524 "'%s.%u'\n", vm_name, i); 525 continue; 526 } 527 528 snprintf(socket_path, sizeof(socket_path), "%s%s.%u", 529 CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]); 530 errno = 0; 531 if (access(socket_path, F_OK) < 0) { 532 RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: " 533 "%s\n", socket_path, strerror(errno)); 534 continue; 535 } 536 chan_info = rte_malloc(NULL, sizeof(*chan_info), 537 RTE_CACHE_LINE_SIZE); 538 if (chan_info == NULL) { 539 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for " 540 "channel '%s'\n", socket_path); 541 continue; 542 } 543 snprintf(chan_info->channel_path, 544 sizeof(chan_info->channel_path), "%s%s.%u", 545 CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]); 546 if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) { 547 rte_free(chan_info); 548 continue; 549 } 550 num_channels_enabled++; 551 552 } 553 return num_channels_enabled; 554 } 555 556 int 557 add_host_channels(void) 558 { 559 struct channel_info *chan_info; 560 char socket_path[PATH_MAX]; 561 int num_channels_enabled = 0; 562 int ret; 563 struct core_info *ci; 564 struct channel_info *chan_infos[RTE_MAX_LCORE]; 565 int i; 566 567 for (i = 0; i < RTE_MAX_LCORE; i++) 568 chan_infos[i] = NULL; 569 570 ci = get_core_info(); 571 if (ci == NULL) { 572 RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot allocate memory for core_info\n"); 573 return 0; 574 } 575 576 for (i = 0; i < ci->core_count; i++) { 577 if (ci->cd[i].global_enabled_cpus == 0) 578 continue; 579 580 ret = fifo_path(socket_path, sizeof(socket_path), i); 581 if (ret < 0) 582 goto error; 583 584 ret = mkfifo(socket_path, 0660); 585 RTE_LOG(DEBUG, CHANNEL_MANAGER, "TRY CREATE fifo '%s'\n", 586 socket_path); 587 if ((errno != EEXIST) && (ret < 0)) { 588 RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: " 589 "%s\n", socket_path, strerror(errno)); 590 goto error; 591 } 592 chan_info = rte_malloc(NULL, sizeof(*chan_info), 0); 593 if (chan_info == NULL) { 594 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for " 595 "channel '%s'\n", socket_path); 596 goto error; 597 } 598 chan_infos[i] = chan_info; 599 strlcpy(chan_info->channel_path, socket_path, 600 sizeof(chan_info->channel_path)); 601 602 if (setup_host_channel_info(&chan_info, i) < 0) { 603 rte_free(chan_info); 604 chan_infos[i] = NULL; 605 goto error; 606 } 607 num_channels_enabled++; 608 } 609 610 return num_channels_enabled; 611 error: 612 /* Clean up the channels opened before we hit an error. */ 613 for (i = 0; i < ci->core_count; i++) { 614 if (chan_infos[i] != NULL) { 615 remove_channel_from_monitor(chan_infos[i]); 616 close(chan_infos[i]->fd); 617 rte_free(chan_infos[i]); 618 } 619 } 620 return 0; 621 } 622 623 int 624 remove_channel(struct channel_info **chan_info_dptr) 625 { 626 struct virtual_machine_info *vm_info; 627 struct channel_info *chan_info = *chan_info_dptr; 628 629 close(chan_info->fd); 630 631 vm_info = (struct virtual_machine_info *)chan_info->priv_info; 632 633 rte_spinlock_lock(&(vm_info->config_spinlock)); 634 vm_info->channel_mask[chan_info->channel_num] = 0; 635 vm_info->num_channels--; 636 rte_spinlock_unlock(&(vm_info->config_spinlock)); 637 638 rte_free(chan_info); 639 return 0; 640 } 641 642 int 643 set_channel_status_all(const char *vm_name, enum channel_status status) 644 { 645 struct virtual_machine_info *vm_info; 646 unsigned i; 647 char mask[RTE_MAX_LCORE]; 648 int num_channels_changed = 0; 649 650 if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED || 651 status == CHANNEL_MGR_CHANNEL_DISABLED)) { 652 RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or " 653 "disabled: Unable to change status for VM '%s'\n", vm_name); 654 } 655 vm_info = find_domain_by_name(vm_name); 656 if (vm_info == NULL) { 657 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to disable channels: VM '%s' " 658 "not found\n", vm_name); 659 return 0; 660 } 661 662 rte_spinlock_lock(&(vm_info->config_spinlock)); 663 memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE); 664 for (i = 0; i < RTE_MAX_LCORE; i++) { 665 if (mask[i] != 1) 666 continue; 667 vm_info->channels[i]->status = status; 668 num_channels_changed++; 669 } 670 rte_spinlock_unlock(&(vm_info->config_spinlock)); 671 return num_channels_changed; 672 673 } 674 675 int 676 set_channel_status(const char *vm_name, unsigned *channel_list, 677 unsigned len_channel_list, enum channel_status status) 678 { 679 struct virtual_machine_info *vm_info; 680 unsigned i; 681 int num_channels_changed = 0; 682 683 if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED || 684 status == CHANNEL_MGR_CHANNEL_DISABLED)) { 685 RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or " 686 "disabled: Unable to change status for VM '%s'\n", vm_name); 687 } 688 vm_info = find_domain_by_name(vm_name); 689 if (vm_info == NULL) { 690 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' " 691 "not found\n", vm_name); 692 return 0; 693 } 694 for (i = 0; i < len_channel_list; i++) { 695 if (channel_exists(vm_info, channel_list[i])) { 696 rte_spinlock_lock(&(vm_info->config_spinlock)); 697 vm_info->channels[channel_list[i]]->status = status; 698 rte_spinlock_unlock(&(vm_info->config_spinlock)); 699 num_channels_changed++; 700 } 701 } 702 return num_channels_changed; 703 } 704 705 void 706 get_all_vm(int *num_vm, int *num_vcpu) 707 { 708 709 virNodeInfo node_info; 710 virDomainPtr *domptr; 711 int i, ii, numVcpus[MAX_VCPUS], n_vcpus; 712 unsigned int jj; 713 const char *vm_name; 714 unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING | 715 VIR_CONNECT_LIST_DOMAINS_PERSISTENT; 716 unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG; 717 718 if (!global_hypervisor_available) 719 return; 720 721 memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen); 722 if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) { 723 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n"); 724 return; 725 } 726 727 /* Returns number of pcpus */ 728 global_n_host_cpus = (unsigned int)node_info.cpus; 729 730 /* Returns number of active domains */ 731 *num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr, 732 domain_flags); 733 if (*num_vm <= 0) { 734 RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n"); 735 return; 736 } 737 738 for (i = 0; i < *num_vm; i++) { 739 740 /* Get Domain Names */ 741 vm_name = virDomainGetName(domptr[i]); 742 lvm_info[i].vm_name = vm_name; 743 744 /* Get Number of Vcpus */ 745 numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag); 746 747 /* Get Number of VCpus & VcpuPinInfo */ 748 n_vcpus = virDomainGetVcpuPinInfo(domptr[i], 749 numVcpus[i], global_cpumaps, 750 global_maplen, domain_flag); 751 752 if ((int)n_vcpus > 0) { 753 *num_vcpu = n_vcpus; 754 lvm_info[i].num_cpus = n_vcpus; 755 } 756 757 /* Save pcpu in use by libvirt VMs */ 758 for (ii = 0; ii < n_vcpus; ii++) { 759 for (jj = 0; jj < global_n_host_cpus; jj++) { 760 if (VIR_CPU_USABLE(global_cpumaps, 761 global_maplen, ii, jj) > 0) { 762 lvm_info[i].pcpus[ii] = jj; 763 } 764 } 765 } 766 } 767 } 768 769 int 770 get_info_vm(const char *vm_name, struct vm_info *info) 771 { 772 struct virtual_machine_info *vm_info; 773 unsigned i, channel_num = 0; 774 char mask[RTE_MAX_LCORE]; 775 776 vm_info = find_domain_by_name(vm_name); 777 if (vm_info == NULL) { 778 RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name); 779 return -1; 780 } 781 info->status = CHANNEL_MGR_VM_ACTIVE; 782 if (!virDomainIsActive(vm_info->domainPtr)) 783 info->status = CHANNEL_MGR_VM_INACTIVE; 784 785 rte_spinlock_lock(&(vm_info->config_spinlock)); 786 787 memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE); 788 for (i = 0; i < RTE_MAX_LCORE; i++) { 789 if (mask[i] != 1) 790 continue; 791 info->channels[channel_num].channel_num = i; 792 memcpy(info->channels[channel_num].channel_path, 793 vm_info->channels[i]->channel_path, 794 UNIX_PATH_MAX); 795 info->channels[channel_num].status = 796 vm_info->channels[i]->status; 797 info->channels[channel_num].fd = 798 vm_info->channels[i]->fd; 799 channel_num++; 800 } 801 802 info->allow_query = vm_info->allow_query; 803 info->num_channels = channel_num; 804 info->num_vcpus = vm_info->info.nrVirtCpu; 805 rte_spinlock_unlock(&(vm_info->config_spinlock)); 806 807 memcpy(info->name, vm_info->name, sizeof(vm_info->name)); 808 rte_spinlock_lock(&(vm_info->config_spinlock)); 809 for (i = 0; i < info->num_vcpus; i++) { 810 info->pcpu_map[i] = vm_info->pcpu_map[i]; 811 } 812 rte_spinlock_unlock(&(vm_info->config_spinlock)); 813 return 0; 814 } 815 816 int 817 add_vm(const char *vm_name) 818 { 819 struct virtual_machine_info *new_domain; 820 virDomainPtr dom_ptr; 821 int i; 822 823 if (find_domain_by_name(vm_name) != NULL) { 824 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' " 825 "already exists\n", vm_name); 826 return -1; 827 } 828 829 if (global_vir_conn_ptr == NULL) { 830 RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n"); 831 return -1; 832 } 833 dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name); 834 if (dom_ptr == NULL) { 835 RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: " 836 "VM '%s' not found\n", vm_name); 837 return -1; 838 } 839 840 new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain), 841 RTE_CACHE_LINE_SIZE); 842 if (new_domain == NULL) { 843 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM " 844 "info\n"); 845 return -1; 846 } 847 new_domain->domainPtr = dom_ptr; 848 if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) { 849 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n"); 850 rte_free(new_domain); 851 return -1; 852 } 853 if (new_domain->info.nrVirtCpu > RTE_MAX_LCORE) { 854 RTE_LOG(ERR, CHANNEL_MANAGER, "Error the number of virtual CPUs(%u) is " 855 "greater than allowable(%d)\n", new_domain->info.nrVirtCpu, 856 RTE_MAX_LCORE); 857 rte_free(new_domain); 858 return -1; 859 } 860 861 for (i = 0; i < RTE_MAX_LCORE; i++) 862 new_domain->pcpu_map[i] = 0; 863 864 if (update_pcpus_mask(new_domain) < 0) { 865 RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n"); 866 rte_free(new_domain); 867 return -1; 868 } 869 strncpy(new_domain->name, vm_name, sizeof(new_domain->name)); 870 new_domain->name[sizeof(new_domain->name) - 1] = '\0'; 871 memset(new_domain->channel_mask, 0, RTE_MAX_LCORE); 872 new_domain->num_channels = 0; 873 874 if (!virDomainIsActive(dom_ptr)) 875 new_domain->status = CHANNEL_MGR_VM_INACTIVE; 876 else 877 new_domain->status = CHANNEL_MGR_VM_ACTIVE; 878 879 new_domain->allow_query = 0; 880 rte_spinlock_init(&(new_domain->config_spinlock)); 881 LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info); 882 return 0; 883 } 884 885 int 886 remove_vm(const char *vm_name) 887 { 888 struct virtual_machine_info *vm_info = find_domain_by_name(vm_name); 889 890 if (vm_info == NULL) { 891 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' " 892 "not found\n", vm_name); 893 return -1; 894 } 895 rte_spinlock_lock(&vm_info->config_spinlock); 896 if (vm_info->num_channels != 0) { 897 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are " 898 "%"PRId8" channels still active\n", 899 vm_name, vm_info->num_channels); 900 rte_spinlock_unlock(&vm_info->config_spinlock); 901 return -1; 902 } 903 LIST_REMOVE(vm_info, vms_info); 904 rte_spinlock_unlock(&vm_info->config_spinlock); 905 rte_free(vm_info); 906 return 0; 907 } 908 909 int 910 set_query_status(char *vm_name, 911 bool allow_query) 912 { 913 struct virtual_machine_info *vm_info; 914 915 vm_info = find_domain_by_name(vm_name); 916 if (vm_info == NULL) { 917 RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name); 918 return -1; 919 } 920 rte_spinlock_lock(&(vm_info->config_spinlock)); 921 vm_info->allow_query = allow_query ? 1 : 0; 922 rte_spinlock_unlock(&(vm_info->config_spinlock)); 923 return 0; 924 } 925 926 static void 927 disconnect_hypervisor(void) 928 { 929 if (global_vir_conn_ptr != NULL) { 930 virConnectClose(global_vir_conn_ptr); 931 global_vir_conn_ptr = NULL; 932 } 933 } 934 935 static int 936 connect_hypervisor(const char *path) 937 { 938 if (global_vir_conn_ptr != NULL) { 939 RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection " 940 "already established\n", path); 941 return -1; 942 } 943 global_vir_conn_ptr = virConnectOpen(path); 944 if (global_vir_conn_ptr == NULL) { 945 RTE_LOG(ERR, CHANNEL_MANAGER, "Error failed to open connection to " 946 "Hypervisor '%s'\n", path); 947 return -1; 948 } 949 return 0; 950 } 951 int 952 channel_manager_init(const char *path __rte_unused) 953 { 954 virNodeInfo info; 955 956 LIST_INIT(&vm_list_head); 957 if (connect_hypervisor(path) < 0) { 958 global_n_host_cpus = 64; 959 global_hypervisor_available = 0; 960 RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n"); 961 } else { 962 global_hypervisor_available = 1; 963 964 global_maplen = VIR_CPU_MAPLEN(RTE_MAX_LCORE); 965 966 global_vircpuinfo = rte_zmalloc(NULL, 967 sizeof(*global_vircpuinfo) * 968 RTE_MAX_LCORE, RTE_CACHE_LINE_SIZE); 969 if (global_vircpuinfo == NULL) { 970 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n"); 971 goto error; 972 } 973 global_cpumaps = rte_zmalloc(NULL, 974 RTE_MAX_LCORE * global_maplen, 975 RTE_CACHE_LINE_SIZE); 976 if (global_cpumaps == NULL) 977 goto error; 978 979 if (virNodeGetInfo(global_vir_conn_ptr, &info)) { 980 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n"); 981 goto error; 982 } 983 global_n_host_cpus = (unsigned int)info.cpus; 984 } 985 986 987 988 if (global_n_host_cpus > RTE_MAX_LCORE) { 989 RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the " 990 "maximum of %u. No cores over %u should be used.\n", 991 global_n_host_cpus, RTE_MAX_LCORE, 992 RTE_MAX_LCORE - 1); 993 global_n_host_cpus = RTE_MAX_LCORE; 994 } 995 996 return 0; 997 error: 998 if (global_hypervisor_available) 999 disconnect_hypervisor(); 1000 return -1; 1001 } 1002 1003 void 1004 channel_manager_exit(void) 1005 { 1006 unsigned i; 1007 char mask[RTE_MAX_LCORE]; 1008 struct virtual_machine_info *vm_info; 1009 1010 LIST_FOREACH(vm_info, &vm_list_head, vms_info) { 1011 1012 rte_spinlock_lock(&(vm_info->config_spinlock)); 1013 1014 memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE); 1015 for (i = 0; i < RTE_MAX_LCORE; i++) { 1016 if (mask[i] != 1) 1017 continue; 1018 remove_channel_from_monitor( 1019 vm_info->channels[i]); 1020 close(vm_info->channels[i]->fd); 1021 rte_free(vm_info->channels[i]); 1022 } 1023 rte_spinlock_unlock(&(vm_info->config_spinlock)); 1024 1025 LIST_REMOVE(vm_info, vms_info); 1026 rte_free(vm_info); 1027 } 1028 1029 if (global_hypervisor_available) { 1030 /* Only needed if hypervisor available */ 1031 rte_free(global_cpumaps); 1032 rte_free(global_vircpuinfo); 1033 disconnect_hypervisor(); 1034 } 1035 } 1036