/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <dirent.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/select.h>

#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>

#include <libvirt/libvirt.h>

#include "channel_manager.h"
#include "channel_monitor.h"
#include "power_manager.h"


#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1

struct libvirt_vm_info lvm_info[MAX_CLIENTS];

/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;

static unsigned char *global_cpumaps;
static virVcpuInfo *global_vircpuinfo;
static size_t global_maplen;

static unsigned int global_n_host_cpus;
static bool global_hypervisor_available;

/*
 * Represents a single Virtual Machine
 */
struct virtual_machine_info {
	char name[CHANNEL_MGR_MAX_NAME_LEN];
	uint16_t pcpu_map[RTE_MAX_LCORE];
	struct channel_info *channels[RTE_MAX_LCORE];
	char channel_mask[RTE_MAX_LCORE];
	uint8_t num_channels;
	enum vm_status status;
	virDomainPtr domainPtr;
	virDomainInfo info;
	rte_spinlock_t config_spinlock;
	int allow_query;
	LIST_ENTRY(virtual_machine_info) vms_info;
};

LIST_HEAD(, virtual_machine_info) vm_list_head;

static struct virtual_machine_info *
find_domain_by_name(const char *name)
{
	struct virtual_machine_info *info;
	LIST_FOREACH(info, &vm_list_head, vms_info) {
		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
			return info;
	}
	return NULL;
}

static int
update_pcpus_mask(struct virtual_machine_info *vm_info)
{
	virVcpuInfoPtr cpuinfo;
	unsigned i, j;
	int n_vcpus;

	memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);

	if (!virDomainIsActive(vm_info->domainPtr)) {
		n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
				vm_info->info.nrVirtCpu, global_cpumaps, global_maplen,
				VIR_DOMAIN_AFFECT_CONFIG);
		if (n_vcpus < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
					"inactive VM '%s'\n", vm_info->name);
			return -1;
		}
		goto update_pcpus;
	}

	memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
			RTE_MAX_LCORE);

	cpuinfo = global_vircpuinfo;

	n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
			RTE_MAX_LCORE, global_cpumaps, global_maplen);
	if (n_vcpus < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
				"active VM '%s'\n", vm_info->name);
		return -1;
	}
update_pcpus:
	if (n_vcpus >= RTE_MAX_LCORE) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range "
				"0...%d\n", n_vcpus, RTE_MAX_LCORE-1);
		return -1;
	}
	if (n_vcpus != vm_info->info.nrVirtCpu) {
		RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s'"
				" from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu,
				n_vcpus);
		vm_info->info.nrVirtCpu = n_vcpus;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
		for (j = 0; j < global_n_host_cpus; j++) {
			if (VIR_CPU_USABLE(global_cpumaps,
					global_maplen, i, j) <= 0)
				continue;
			vm_info->pcpu_map[i] = j;
		}
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

int
set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
	int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
	struct virtual_machine_info *vm_info;

	if (vcpu >= RTE_MAX_LCORE) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
				vcpu, RTE_MAX_LCORE-1);
		return -1;
	}

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"for VM '%s', VM is not active\n",
				vcpu, vm_info->name);
		return -1;
	}

	if (vcpu >= vm_info->info.nrVirtCpu) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of "
				"vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
		return -1;
	}

	/* Validate the pCPU before writing it into the CPU map. */
	if (pcpu >= global_n_host_cpus) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
				"number of CPUs(%u)\n",
				pcpu, global_n_host_cpus);
		return -1;
	}

	memset(global_cpumaps, 0, RTE_MAX_LCORE * global_maplen);

	VIR_USE_CPU(global_cpumaps, pcpu);

	if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
			global_maplen, flags) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"for VM '%s'\n", vcpu,
				vm_info->name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->pcpu_map[vcpu] = pcpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

uint16_t
get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
	struct virtual_machine_info *vm_info =
			(struct virtual_machine_info *)chan_info->priv_info;

	if (global_hypervisor_available && (vm_info != NULL)) {
		uint16_t pcpu;
		rte_spinlock_lock(&(vm_info->config_spinlock));
		pcpu = vm_info->pcpu_map[vcpu];
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return pcpu;
	} else
		return 0;
}

static inline int
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
	rte_spinlock_lock(&(vm_info->config_spinlock));
	if (vm_info->channel_mask[channel_num] == 1) {
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return 1;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

static int
open_non_blocking_channel(struct channel_info *info)
{
	int ret, flags;
	struct sockaddr_un sock_addr;
	fd_set soc_fd_set;
	struct timeval tv;

	info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
				strerror(errno),
				info->channel_path);
		return -1;
	}
	sock_addr.sun_family = AF_UNIX;
	memcpy(&sock_addr.sun_path, info->channel_path,
			strlen(info->channel_path)+1);

	/* Get current flags */
	flags = fcntl(info->fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
				"'%s'\n", strerror(errno), info->channel_path);
		return 1;
	}
	/* Set to Non Blocking */
	flags |= O_NONBLOCK;
	if (fcntl(info->fd, F_SETFL, flags) < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking "
				"socket for '%s'\n",
				strerror(errno), info->channel_path);
		return -1;
	}
	ret = connect(info->fd, (struct sockaddr *)&sock_addr,
			sizeof(sock_addr));
	if (ret < 0) {
		/* ECONNREFUSED error is given when VM is not active */
		if (errno == ECONNREFUSED) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not "
					"activated its endpoint to channel %s\n",
					info->channel_path);
			return -1;
		}
		/* Wait for tv_sec if connect is in progress */
		else if (errno == EINPROGRESS) {
			tv.tv_sec = 2;
			tv.tv_usec = 0;
			FD_ZERO(&soc_fd_set);
			FD_SET(info->fd, &soc_fd_set);
			/* select() returns 0 on timeout and -1 on error */
			if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) <= 0) {
				RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel "
						"'%s'\n", info->channel_path);
				return -1;
			}
		} else {
			/* Any other error */
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket"
					" for '%s'\n", strerror(errno), info->channel_path);
			return -1;
		}
	}
	return 0;
}

static int
open_host_channel(struct channel_info *info)
{
	int flags;

	info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
	if (info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
				strerror(errno),
				info->channel_path);
		return -1;
	}

	/* Get current flags */
	flags = fcntl(info->fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
				"'%s'\n", strerror(errno), info->channel_path);
		return 1;
	}
	/* Set to Non Blocking */
	flags |= O_NONBLOCK;
	if (fcntl(info->fd, F_SETFL, flags) < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER,
				"Error(%s) setting non-blocking "
				"socket for '%s'\n",
				strerror(errno), info->channel_path);
		return -1;
	}
	return 0;
}

static int
setup_channel_info(struct virtual_machine_info **vm_info_dptr,
		struct channel_info **chan_info_dptr, unsigned channel_num)
{
	struct channel_info *chan_info = *chan_info_dptr;
	struct virtual_machine_info *vm_info = *vm_info_dptr;

	chan_info->channel_num = channel_num;
	chan_info->priv_info = (void *)vm_info;
	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
	chan_info->type = CHANNEL_TYPE_BINARY;
	if (open_non_blocking_channel(chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
				"'%s' for VM '%s'\n",
				chan_info->channel_path, vm_info->name);
		return -1;
	}
	if (add_channel_to_monitor(&chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
				"'%s' to epoll ctl for VM '%s'\n",
				chan_info->channel_path, vm_info->name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->num_channels++;
	vm_info->channel_mask[channel_num] = 1;
	vm_info->channels[channel_num] = chan_info;
	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

static int
fifo_path(char *dst, unsigned int len, unsigned int id)
{
	int cnt;

	cnt = snprintf(dst, len, "%s%s%d", CHANNEL_MGR_SOCKET_PATH,
			CHANNEL_MGR_FIFO_PATTERN_NAME, id);

	if ((cnt < 0) || (cnt > (int)len - 1)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not create proper "
				"string for fifo path\n");
		return -1;
	}

	return 0;
}

static int
setup_host_channel_info(struct channel_info **chan_info_dptr,
		unsigned int channel_num)
{
	struct channel_info *chan_info = *chan_info_dptr;

	chan_info->channel_num = channel_num;
	chan_info->priv_info = (void *)NULL;
	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
	chan_info->type = CHANNEL_TYPE_JSON;

	if (open_host_channel(chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
				"'%s'\n",
				chan_info->channel_path);
		return -1;
	}
	if (add_channel_to_monitor(&chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
				"'%s' to epoll ctl\n",
				chan_info->channel_path);
		return -1;
	}
	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
	return 0;
}

int
add_all_channels(const char *vm_name)
{
	DIR *d;
	struct dirent *dir;
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char *token, *remaining, *tail_ptr;
	char socket_name[PATH_MAX];
	unsigned channel_num;
	int num_channels_enabled = 0;

	/* verify VM exists */
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found"
				" during channel discovery\n", vm_name);
		return 0;
	}
	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}
	d = opendir(CHANNEL_MGR_SOCKET_PATH);
	if (d == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n",
				CHANNEL_MGR_SOCKET_PATH, strerror(errno));
		return -1;
	}
	while ((dir = readdir(d)) != NULL) {
		if (!strncmp(dir->d_name, ".", 1) ||
				!strncmp(dir->d_name, "..", 2))
			continue;

		strlcpy(socket_name, dir->d_name, sizeof(socket_name));
		remaining = socket_name;
		/* Extract vm_name from "<vm_name>.<channel_num>" */
		token = strsep(&remaining, ".");
		if (remaining == NULL)
			continue;
		if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN))
			continue;

		/* remaining should contain only <channel_num> */
		errno = 0;
		channel_num = (unsigned)strtol(remaining, &tail_ptr, 0);
		if ((errno != 0) || (remaining[0] == '\0') ||
				tail_ptr == NULL || (*tail_ptr != '\0')) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name "
					"'%s' found, it should be in the form of "
					"'<guest_name>.<channel_num>(decimal)'\n",
					dir->d_name);
			continue;
		}
		if (channel_num >= RTE_MAX_LCORE) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
					"greater than max allowable: %d, skipping '%s%s'\n",
					channel_num, RTE_MAX_LCORE-1,
					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}
		if (rte_lcore_index(channel_num) == -1)
			continue;

		/* if channel has not been added previously */
		if (channel_exists(vm_info, channel_num))
			continue;

		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}

		if ((size_t)snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s",
				CHANNEL_MGR_SOCKET_PATH, dir->d_name)
				>= sizeof(chan_info->channel_path)) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Pathname too long for channel '%s%s'\n",
					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			rte_free(chan_info);
			continue;
		}

		if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
			rte_free(chan_info);
			continue;
		}

		num_channels_enabled++;
	}
	closedir(d);
	return num_channels_enabled;
}

int
add_channels(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	unsigned i;
	int num_channels_enabled = 0;

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}

	for (i = 0; i < len_channel_list; i++) {
		if (rte_lcore_index(i) == -1)
			continue;

		if (channel_list[i] >= RTE_MAX_LCORE) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
					"0...%d\n", channel_list[i],
					RTE_MAX_LCORE-1);
			continue;
		}
		if (channel_exists(vm_info, channel_list[i])) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping "
					"'%s.%u'\n", vm_name, channel_list[i]);
			continue;
		}

		snprintf(socket_path, sizeof(socket_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		errno = 0;
		if (access(socket_path, F_OK) < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
					"%s\n", socket_path, strerror(errno));
			continue;
		}
		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s'\n", socket_path);
			continue;
		}
		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) {
			rte_free(chan_info);
			continue;
		}
		num_channels_enabled++;
	}
	return num_channels_enabled;
}

int
add_host_channels(void)
{
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	int num_channels_enabled = 0;
	int ret;
	struct core_info *ci;
	struct channel_info *chan_infos[RTE_MAX_LCORE];
	int i;

	for (i = 0; i < RTE_MAX_LCORE; i++)
		chan_infos[i] = NULL;

	ci = get_core_info();
	if (ci == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot allocate memory for core_info\n");
		return 0;
	}

	for (i = 0; i < ci->core_count; i++) {
		if (rte_lcore_index(i) == -1)
			continue;

		if (ci->cd[i].global_enabled_cpus == 0)
			continue;

		ret = fifo_path(socket_path, sizeof(socket_path), i);
		if (ret < 0)
			goto error;

		ret = mkfifo(socket_path, 0660);
		RTE_LOG(DEBUG, CHANNEL_MANAGER, "TRY CREATE fifo '%s'\n",
				socket_path);
		if ((errno != EEXIST) && (ret < 0)) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
					"%s\n", socket_path, strerror(errno));
			goto error;
		}
		chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s'\n", socket_path);
			goto error;
		}
		chan_infos[i] = chan_info;
		strlcpy(chan_info->channel_path, socket_path,
				sizeof(chan_info->channel_path));

		if (setup_host_channel_info(&chan_info, i) < 0) {
			rte_free(chan_info);
			chan_infos[i] = NULL;
			goto error;
		}
		num_channels_enabled++;
	}

	return num_channels_enabled;
error:
	/* Clean up the channels opened before we hit an error. */
	for (i = 0; i < ci->core_count; i++) {
		if (chan_infos[i] != NULL) {
			remove_channel_from_monitor(chan_infos[i]);
			close(chan_infos[i]->fd);
			rte_free(chan_infos[i]);
		}
	}
	return 0;
}

int
remove_channel(struct channel_info **chan_info_dptr)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info = *chan_info_dptr;

	close(chan_info->fd);

	vm_info = (struct virtual_machine_info *)chan_info->priv_info;

	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->channel_mask[chan_info->channel_num] = 0;
	vm_info->num_channels--;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	rte_free(chan_info);
	return 0;
}

int
set_channel_status_all(const char *vm_name, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	char mask[RTE_MAX_LCORE];
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to disable channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	rte_spinlock_lock(&(vm_info->config_spinlock));
	memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (mask[i] != 1)
			continue;
		vm_info->channels[i]->status = status;
		num_channels_changed++;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return num_channels_changed;
}

int
set_channel_status(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}
	for (i = 0; i < len_channel_list; i++) {
		if (channel_exists(vm_info, channel_list[i])) {
			rte_spinlock_lock(&(vm_info->config_spinlock));
			vm_info->channels[channel_list[i]]->status = status;
			rte_spinlock_unlock(&(vm_info->config_spinlock));
			num_channels_changed++;
		}
	}
	return num_channels_changed;
}

void
get_all_vm(int *num_vm, int *num_vcpu)
{
	virNodeInfo node_info;
	virDomainPtr *domptr;
	int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
	unsigned int jj;
	const char *vm_name;
	unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
				VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
	unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;

	if (!global_hypervisor_available)
		return;

	memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
	if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
		return;
	}

	/* Returns number of pcpus */
	global_n_host_cpus = (unsigned int)node_info.cpus;

	/* Returns number of active domains */
	*num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
			domain_flags);
	if (*num_vm <= 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
		return;
	}

	for (i = 0; i < *num_vm; i++) {

		/* Get Domain Names */
		vm_name = virDomainGetName(domptr[i]);
		lvm_info[i].vm_name = vm_name;

		/* Get Number of Vcpus */
		numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);

		/* Get Number of VCpus & VcpuPinInfo */
		n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
				numVcpus[i], global_cpumaps,
				global_maplen, domain_flag);

		if ((int)n_vcpus > 0) {
			*num_vcpu = n_vcpus;
			lvm_info[i].num_cpus = n_vcpus;
		}

		/* Save pcpu in use by libvirt VMs */
		for (ii = 0; ii < n_vcpus; ii++) {
			for (jj = 0; jj < global_n_host_cpus; jj++) {
				if (VIR_CPU_USABLE(global_cpumaps,
						global_maplen, ii, jj) > 0) {
					lvm_info[i].pcpus[ii] = jj;
				}
			}
		}
	}
}

int
get_info_vm(const char *vm_name, struct vm_info *info)
{
	struct virtual_machine_info *vm_info;
	unsigned i, channel_num = 0;
	char mask[RTE_MAX_LCORE];

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}
	info->status = CHANNEL_MGR_VM_ACTIVE;
	if (!virDomainIsActive(vm_info->domainPtr))
		info->status = CHANNEL_MGR_VM_INACTIVE;

	rte_spinlock_lock(&(vm_info->config_spinlock));

	memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (mask[i] != 1)
			continue;
		info->channels[channel_num].channel_num = i;
		memcpy(info->channels[channel_num].channel_path,
				vm_info->channels[i]->channel_path,
				UNIX_PATH_MAX);
		info->channels[channel_num].status =
				vm_info->channels[i]->status;
		info->channels[channel_num].fd =
				vm_info->channels[i]->fd;
		channel_num++;
	}

	info->allow_query = vm_info->allow_query;
	info->num_channels = channel_num;
	info->num_vcpus = vm_info->info.nrVirtCpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	memcpy(info->name, vm_info->name, sizeof(vm_info->name));
	rte_spinlock_lock(&(vm_info->config_spinlock));
	for (i = 0; i < info->num_vcpus; i++) {
		info->pcpu_map[i] = vm_info->pcpu_map[i];
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

int
add_vm(const char *vm_name)
{
	struct virtual_machine_info *new_domain;
	virDomainPtr dom_ptr;
	int i;

	if (find_domain_by_name(vm_name) != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' "
				"already exists\n", vm_name);
		return -1;
	}

	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n");
		return -1;
	}
	dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name);
	if (dom_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: "
				"VM '%s' not found\n", vm_name);
		return -1;
	}

	new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
			RTE_CACHE_LINE_SIZE);
	if (new_domain == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
				"info\n");
		return -1;
	}
	new_domain->domainPtr = dom_ptr;
	if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n");
		rte_free(new_domain);
		return -1;
	}
	if (new_domain->info.nrVirtCpu > RTE_MAX_LCORE) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error: the number of virtual CPUs(%u) is "
				"greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
				RTE_MAX_LCORE);
		rte_free(new_domain);
		return -1;
	}

	for (i = 0; i < RTE_MAX_LCORE; i++)
		new_domain->pcpu_map[i] = 0;

	if (update_pcpus_mask(new_domain) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
		rte_free(new_domain);
		return -1;
	}
	strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
	new_domain->name[sizeof(new_domain->name) - 1] = '\0';
	memset(new_domain->channel_mask, 0, RTE_MAX_LCORE);
	new_domain->num_channels = 0;

	if (!virDomainIsActive(dom_ptr))
		new_domain->status = CHANNEL_MGR_VM_INACTIVE;
	else
		new_domain->status = CHANNEL_MGR_VM_ACTIVE;

	new_domain->allow_query = 0;
	rte_spinlock_init(&(new_domain->config_spinlock));
	LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
	return 0;
}

int
remove_vm(const char *vm_name)
{
	struct virtual_machine_info *vm_info = find_domain_by_name(vm_name);

	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' "
				"not found\n", vm_name);
		return -1;
	}
	rte_spinlock_lock(&vm_info->config_spinlock);
	if (vm_info->num_channels != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are "
				"%"PRIu8" channels still active\n",
				vm_name, vm_info->num_channels);
		rte_spinlock_unlock(&vm_info->config_spinlock);
		return -1;
	}
	LIST_REMOVE(vm_info, vms_info);
	rte_spinlock_unlock(&vm_info->config_spinlock);
	rte_free(vm_info);
	return 0;
}

int
set_query_status(char *vm_name,
		bool allow_query)
{
	struct virtual_machine_info *vm_info;

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->allow_query = allow_query ? 1 : 0;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

static void
disconnect_hypervisor(void)
{
	if (global_vir_conn_ptr != NULL) {
		virConnectClose(global_vir_conn_ptr);
		global_vir_conn_ptr = NULL;
	}
}

static int
connect_hypervisor(const char *path)
{
	if (global_vir_conn_ptr != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection "
				"already established\n", path);
		return -1;
	}
	global_vir_conn_ptr = virConnectOpen(path);
	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error failed to open connection to "
				"Hypervisor '%s'\n", path);
		return -1;
	}
	return 0;
}

int
channel_manager_init(const char *path)
{
	virNodeInfo info;

	LIST_INIT(&vm_list_head);
	if (connect_hypervisor(path) < 0) {
		global_n_host_cpus = 64;
		global_hypervisor_available = 0;
		RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
	} else {
		global_hypervisor_available = 1;

		global_maplen = VIR_CPU_MAPLEN(RTE_MAX_LCORE);

		global_vircpuinfo = rte_zmalloc(NULL,
				sizeof(*global_vircpuinfo) *
				RTE_MAX_LCORE, RTE_CACHE_LINE_SIZE);
		if (global_vircpuinfo == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
			goto error;
		}
		global_cpumaps = rte_zmalloc(NULL,
				RTE_MAX_LCORE * global_maplen,
				RTE_CACHE_LINE_SIZE);
		if (global_cpumaps == NULL)
			goto error;

		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
			goto error;
		}
		global_n_host_cpus = (unsigned int)info.cpus;
	}

	if (global_n_host_cpus > RTE_MAX_LCORE) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
				"maximum of %u. No cores over %u should be used.\n",
				global_n_host_cpus, RTE_MAX_LCORE,
				RTE_MAX_LCORE - 1);
		global_n_host_cpus = RTE_MAX_LCORE;
	}

	return 0;
error:
	if (global_hypervisor_available)
		disconnect_hypervisor();
	return -1;
}

void
channel_manager_exit(void)
{
	unsigned i;
	char mask[RTE_MAX_LCORE];
	struct virtual_machine_info *vm_info;

	/* Pop entries one at a time so each node is unlinked before it is freed. */
	while (!LIST_EMPTY(&vm_list_head)) {
		vm_info = LIST_FIRST(&vm_list_head);

		rte_spinlock_lock(&(vm_info->config_spinlock));

		memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
		for (i = 0; i < RTE_MAX_LCORE; i++) {
			if (mask[i] != 1)
				continue;
			remove_channel_from_monitor(
					vm_info->channels[i]);
			close(vm_info->channels[i]->fd);
			rte_free(vm_info->channels[i]);
		}
		rte_spinlock_unlock(&(vm_info->config_spinlock));

		LIST_REMOVE(vm_info, vms_info);
		rte_free(vm_info);
	}

	if (global_hypervisor_available) {
		/* Only needed if hypervisor available */
		rte_free(global_cpumaps);
		rte_free(global_vircpuinfo);
		disconnect_hypervisor();
	}
}