/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <sys/un.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <dirent.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/select.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>

#include <libvirt/libvirt.h>

#include "channel_manager.h"
#include "channel_commands.h"
#include "channel_monitor.h"
#include "power_manager.h"


#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1

/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;

static unsigned char *global_cpumaps;
static virVcpuInfo *global_vircpuinfo;
static size_t global_maplen;

static unsigned int global_n_host_cpus;
static bool global_hypervisor_available;

/*
 * Represents a single Virtual Machine
 */
struct virtual_machine_info {
	char name[CHANNEL_MGR_MAX_NAME_LEN];
	uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
	struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
	char channel_mask[POWER_MGR_MAX_CPUS];
	uint8_t num_channels;
	enum vm_status status;
	virDomainPtr domainPtr;
	virDomainInfo info;
	rte_spinlock_t config_spinlock;
	LIST_ENTRY(virtual_machine_info) vms_info;
};

LIST_HEAD(, virtual_machine_info) vm_list_head;

static struct virtual_machine_info *
find_domain_by_name(const char *name)
{
	struct virtual_machine_info *info;
	LIST_FOREACH(info, &vm_list_head, vms_info) {
		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
			return info;
	}
	return NULL;
}

static int
update_pcpus_mask(struct virtual_machine_info *vm_info)
{
	virVcpuInfoPtr cpuinfo;
	unsigned i, j;
	int n_vcpus;

	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);

	if (!virDomainIsActive(vm_info->domainPtr)) {
		n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
				vm_info->info.nrVirtCpu, global_cpumaps, global_maplen,
				VIR_DOMAIN_AFFECT_CONFIG);
		if (n_vcpus < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
					"inactive VM '%s'\n", vm_info->name);
			return -1;
		}
		goto update_pcpus;
	}

	memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
			CHANNEL_CMDS_MAX_CPUS);

	cpuinfo = global_vircpuinfo;

	n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
			CHANNEL_CMDS_MAX_CPUS, global_cpumaps, global_maplen);
	if (n_vcpus < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
				"active VM '%s'\n", vm_info->name);
		return -1;
	}
update_pcpus:
	if (n_vcpus >= CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range "
				"0...%d\n", n_vcpus, CHANNEL_CMDS_MAX_CPUS-1);
		return -1;
	}
	if (n_vcpus != vm_info->info.nrVirtCpu) {
		RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s'"
				" from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu,
				n_vcpus);
		vm_info->info.nrVirtCpu = n_vcpus;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
		for (j = 0; j < global_n_host_cpus; j++) {
			if (VIR_CPU_USABLE(global_cpumaps,
					global_maplen, i, j) <= 0)
				continue;
			vm_info->pcpu_map[i] = j;
		}
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

int
set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
	int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
	struct virtual_machine_info *vm_info;

	if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
				vcpu, CHANNEL_CMDS_MAX_CPUS-1);
		return -1;
	}

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"for VM '%s', VM is not active\n",
				vcpu, vm_info->name);
		return -1;
	}

	if (vcpu >= vm_info->info.nrVirtCpu) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of "
				"vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
		return -1;
	}
	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS * global_maplen);

	VIR_USE_CPU(global_cpumaps, pcpu);

	if (pcpu >= global_n_host_cpus) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
				"number of CPUs(%u)\n",
				pcpu, global_n_host_cpus);
		return -1;
	}

	if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
			global_maplen, flags) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
				"for VM '%s'\n", vcpu,
				vm_info->name);
		return -1;
	}
	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->pcpu_map[vcpu] = pcpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

uint16_t
get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
	struct virtual_machine_info *vm_info =
			(struct virtual_machine_info *)chan_info->priv_info;

	if (global_hypervisor_available && (vm_info != NULL)) {
		uint16_t pcpu;
		rte_spinlock_lock(&(vm_info->config_spinlock));
		pcpu = vm_info->pcpu_map[vcpu];
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return pcpu;
	} else
		return 0;
}

static inline int
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
	rte_spinlock_lock(&(vm_info->config_spinlock));
	if (vm_info->channel_mask[channel_num] == 1) {
		rte_spinlock_unlock(&(vm_info->config_spinlock));
		return 1;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}



static int
open_non_blocking_channel(struct channel_info *info)
{
	int ret, flags;
	struct sockaddr_un sock_addr;
	fd_set soc_fd_set;
	struct timeval tv;

	info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
				strerror(errno),
				info->channel_path);
		return -1;
	}
	sock_addr.sun_family = AF_UNIX;
	memcpy(&sock_addr.sun_path, info->channel_path,
			strlen(info->channel_path)+1);

	/* Get current flags */
	flags = fcntl(info->fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for "
				"'%s'\n", strerror(errno), info->channel_path);
		return 1;
	}
	/* Set to Non Blocking */
	flags |= O_NONBLOCK;
	if (fcntl(info->fd, F_SETFL, flags) < 0) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking "
				"socket for '%s'\n", strerror(errno), info->channel_path);
		return -1;
	}
248 "socket for '%s'\n", strerror(errno), info->channel_path); 249 return -1; 250 } 251 ret = connect(info->fd, (struct sockaddr *)&sock_addr, 252 sizeof(sock_addr)); 253 if (ret < 0) { 254 /* ECONNREFUSED error is given when VM is not active */ 255 if (errno == ECONNREFUSED) { 256 RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not " 257 "activated its endpoint to channel %s\n", 258 info->channel_path); 259 return -1; 260 } 261 /* Wait for tv_sec if in progress */ 262 else if (errno == EINPROGRESS) { 263 tv.tv_sec = 2; 264 tv.tv_usec = 0; 265 FD_ZERO(&soc_fd_set); 266 FD_SET(info->fd, &soc_fd_set); 267 if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) > 0) { 268 RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel " 269 "'%s'\n", info->channel_path); 270 return -1; 271 } 272 } else { 273 /* Any other error */ 274 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket" 275 " for '%s'\n", strerror(errno), info->channel_path); 276 return -1; 277 } 278 } 279 return 0; 280 } 281 282 static int 283 open_host_channel(struct channel_info *info) 284 { 285 int flags; 286 287 info->fd = open(info->channel_path, O_RDWR | O_RSYNC); 288 if (info->fd < 0) { 289 RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n", 290 strerror(errno), 291 info->channel_path); 292 return -1; 293 } 294 295 /* Get current flags */ 296 flags = fcntl(info->fd, F_GETFL, 0); 297 if (flags < 0) { 298 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for" 299 "'%s'\n", strerror(errno), info->channel_path); 300 return 1; 301 } 302 /* Set to Non Blocking */ 303 flags |= O_NONBLOCK; 304 if (fcntl(info->fd, F_SETFL, flags) < 0) { 305 RTE_LOG(WARNING, CHANNEL_MANAGER, 306 "Error(%s) setting non-blocking " 307 "socket for '%s'\n", 308 strerror(errno), info->channel_path); 309 return -1; 310 } 311 return 0; 312 } 313 314 static int 315 setup_channel_info(struct virtual_machine_info **vm_info_dptr, 316 struct channel_info **chan_info_dptr, unsigned channel_num) 317 { 318 struct channel_info *chan_info = *chan_info_dptr; 319 struct virtual_machine_info *vm_info = *vm_info_dptr; 320 321 chan_info->channel_num = channel_num; 322 chan_info->priv_info = (void *)vm_info; 323 chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED; 324 chan_info->type = CHANNEL_TYPE_BINARY; 325 if (open_non_blocking_channel(chan_info) < 0) { 326 RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: " 327 "'%s' for VM '%s'\n", 328 chan_info->channel_path, vm_info->name); 329 return -1; 330 } 331 if (add_channel_to_monitor(&chan_info) < 0) { 332 RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: " 333 "'%s' to epoll ctl for VM '%s'\n", 334 chan_info->channel_path, vm_info->name); 335 return -1; 336 337 } 338 rte_spinlock_lock(&(vm_info->config_spinlock)); 339 vm_info->num_channels++; 340 vm_info->channel_mask[channel_num] = 1; 341 vm_info->channels[channel_num] = chan_info; 342 chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED; 343 rte_spinlock_unlock(&(vm_info->config_spinlock)); 344 return 0; 345 } 346 347 static void 348 fifo_path(char *dst, unsigned int len) 349 { 350 snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH); 351 } 352 353 static int 354 setup_host_channel_info(struct channel_info **chan_info_dptr, 355 unsigned int channel_num) 356 { 357 struct channel_info *chan_info = *chan_info_dptr; 358 359 chan_info->channel_num = channel_num; 360 chan_info->priv_info = (void *)NULL; 361 chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED; 362 chan_info->type = CHANNEL_TYPE_JSON; 363 364 
	fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));

	if (open_host_channel(chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
				"'%s'\n",
				chan_info->channel_path);
		return -1;
	}
	if (add_channel_to_monitor(&chan_info) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
				"'%s' to epoll ctl\n",
				chan_info->channel_path);
		return -1;

	}
	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
	return 0;
}

int
add_all_channels(const char *vm_name)
{
	DIR *d;
	struct dirent *dir;
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char *token, *remaining, *tail_ptr;
	char socket_name[PATH_MAX];
	unsigned channel_num;
	int num_channels_enabled = 0;

	/* verify VM exists */
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found"
				" during channel discovery\n", vm_name);
		return 0;
	}
	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}
	d = opendir(CHANNEL_MGR_SOCKET_PATH);
	if (d == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n",
				CHANNEL_MGR_SOCKET_PATH, strerror(errno));
		return -1;
	}
	while ((dir = readdir(d)) != NULL) {
		if (!strncmp(dir->d_name, ".", 1) ||
				!strncmp(dir->d_name, "..", 2))
			continue;

		snprintf(socket_name, sizeof(socket_name), "%s", dir->d_name);
		remaining = socket_name;
		/* Extract vm_name from "<vm_name>.<channel_num>" */
		token = strsep(&remaining, ".");
		if (remaining == NULL)
			continue;
		if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN))
			continue;

		/* remaining should contain only <channel_num> */
		errno = 0;
		channel_num = (unsigned)strtol(remaining, &tail_ptr, 0);
		if ((errno != 0) || (remaining[0] == '\0') ||
				tail_ptr == NULL || (*tail_ptr != '\0')) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name "
					"'%s' found, it should be in the form of "
					"'<guest_name>.<channel_num>(decimal)'\n",
					dir->d_name);
			continue;
		}
		if (channel_num >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
			RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
					"greater than max allowable: %d, skipping '%s%s'\n",
					channel_num, CHANNEL_CMDS_MAX_VM_CHANNELS-1,
					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}
		/* if channel has not been added previously */
		if (channel_exists(vm_info, channel_num))
			continue;

		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);
			continue;
		}

		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s",
				CHANNEL_MGR_SOCKET_PATH, dir->d_name);

		if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
			rte_free(chan_info);
			continue;
		}

		num_channels_enabled++;
	}
	closedir(d);
	return num_channels_enabled;
}

int
add_channels(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	unsigned i;
	int num_channels_enabled = 0;

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	if (!virDomainIsActive(vm_info->domainPtr)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
		vm_info->status = CHANNEL_MGR_VM_INACTIVE;
		return 0;
	}

	for (i = 0; i < len_channel_list; i++) {

		if (channel_list[i] >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
					"0...%d\n", channel_list[i],
					CHANNEL_CMDS_MAX_VM_CHANNELS-1);
			continue;
		}
		if (channel_exists(vm_info, channel_list[i])) {
			RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping "
					"'%s.%u'\n", vm_name, channel_list[i]);
			continue;
		}

		snprintf(socket_path, sizeof(socket_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		errno = 0;
		if (access(socket_path, F_OK) < 0) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
					"%s\n", socket_path, strerror(errno));
			continue;
		}
		chan_info = rte_malloc(NULL, sizeof(*chan_info),
				RTE_CACHE_LINE_SIZE);
		if (chan_info == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
					"channel '%s'\n", socket_path);
			continue;
		}
		snprintf(chan_info->channel_path,
				sizeof(chan_info->channel_path), "%s%s.%u",
				CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
		if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) {
			rte_free(chan_info);
			continue;
		}
		num_channels_enabled++;

	}
	return num_channels_enabled;
}

int
add_host_channel(void)
{
	struct channel_info *chan_info;
	char socket_path[PATH_MAX];
	int num_channels_enabled = 0;
	int ret;

	fifo_path(socket_path, sizeof(socket_path));

	ret = mkfifo(socket_path, 0660);
	if ((errno != EEXIST) && (ret < 0)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
				"%s\n", socket_path, strerror(errno));
		return 0;
	}

	if (access(socket_path, F_OK) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
				"%s\n", socket_path, strerror(errno));
		return 0;
	}
	chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
	if (chan_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
				"channel '%s'\n", socket_path);
		return 0;
	}
	snprintf(chan_info->channel_path,
			sizeof(chan_info->channel_path), "%s", socket_path);
	if (setup_host_channel_info(&chan_info, 0) < 0) {
		rte_free(chan_info);
		return 0;
	}
	num_channels_enabled++;

	return num_channels_enabled;
}

int
remove_channel(struct channel_info **chan_info_dptr)
{
	struct virtual_machine_info *vm_info;
	struct channel_info *chan_info = *chan_info_dptr;

	close(chan_info->fd);

	vm_info = (struct virtual_machine_info *)chan_info->priv_info;

	rte_spinlock_lock(&(vm_info->config_spinlock));
	vm_info->channel_mask[chan_info->channel_num] = 0;
	vm_info->num_channels--;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	rte_free(chan_info);
	return 0;
}

int
set_channel_status_all(const char *vm_name, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	char mask[POWER_MGR_MAX_CPUS];
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
		return 0;
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to disable channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}

	rte_spinlock_lock(&(vm_info->config_spinlock));
	memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
	for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
		if (mask[i] != 1)
			continue;
		vm_info->channels[i]->status = status;
		num_channels_changed++;
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return num_channels_changed;

}

int
set_channel_status(const char *vm_name, unsigned *channel_list,
		unsigned len_channel_list, enum channel_status status)
{
	struct virtual_machine_info *vm_info;
	unsigned i;
	int num_channels_changed = 0;

	if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
			status == CHANNEL_MGR_CHANNEL_DISABLED)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
				"disabled: Unable to change status for VM '%s'\n", vm_name);
		return 0;
	}
	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
				"not found\n", vm_name);
		return 0;
	}
	for (i = 0; i < len_channel_list; i++) {
		if (channel_exists(vm_info, channel_list[i])) {
			rte_spinlock_lock(&(vm_info->config_spinlock));
			vm_info->channels[channel_list[i]]->status = status;
			rte_spinlock_unlock(&(vm_info->config_spinlock));
			num_channels_changed++;
		}
	}
	return num_channels_changed;
}

void
get_all_vm(int *num_vm, int *num_vcpu)
{

	virNodeInfo node_info;
	virDomainPtr *domptr;
	int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
	unsigned int jj;
	const char *vm_name;
	unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
			VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
	unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;

	if (!global_hypervisor_available)
		return;

	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
	if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
		return;
	}

	/* Returns number of pcpus */
	global_n_host_cpus = (unsigned int)node_info.cpus;

	/* Returns number of active domains */
	*num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
			domain_flags);
	if (*num_vm <= 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
		return;
	}

	for (i = 0; i < *num_vm; i++) {

		/* Get Domain Names */
		vm_name = virDomainGetName(domptr[i]);
		lvm_info[i].vm_name = vm_name;

		/* Get Number of Vcpus */
		numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);

		/* Get Number of VCpus & VcpuPinInfo */
		n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
				numVcpus[i], global_cpumaps,
				global_maplen, domain_flag);

		if ((int)n_vcpus > 0) {
			*num_vcpu = n_vcpus;
			lvm_info[i].num_cpus = n_vcpus;
		}

		/* Save pcpu in use by libvirt VMs */
		for (ii = 0; ii < n_vcpus; ii++) {
			for (jj = 0; jj < global_n_host_cpus; jj++) {
				if (VIR_CPU_USABLE(global_cpumaps,
						global_maplen, ii, jj) > 0) {
					lvm_info[i].pcpus[ii] = jj;
				}
			}
		}
	}
}

int
get_info_vm(const char *vm_name, struct vm_info *info)
{
	struct virtual_machine_info *vm_info;
	unsigned i, channel_num = 0;
	char mask[POWER_MGR_MAX_CPUS];

	vm_info = find_domain_by_name(vm_name);
	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
		return -1;
	}
	info->status = CHANNEL_MGR_VM_ACTIVE;
	if (!virDomainIsActive(vm_info->domainPtr))
		info->status = CHANNEL_MGR_VM_INACTIVE;

	rte_spinlock_lock(&(vm_info->config_spinlock));

	memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
	for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
		if (mask[i] != 1)
			continue;
		info->channels[channel_num].channel_num = i;
		memcpy(info->channels[channel_num].channel_path,
				vm_info->channels[i]->channel_path,
				UNIX_PATH_MAX);
		info->channels[channel_num].status =
				vm_info->channels[i]->status;
		info->channels[channel_num].fd =
				vm_info->channels[i]->fd;
		channel_num++;
	}

	info->num_channels = channel_num;
	info->num_vcpus = vm_info->info.nrVirtCpu;
	rte_spinlock_unlock(&(vm_info->config_spinlock));

	memcpy(info->name, vm_info->name, sizeof(vm_info->name));
	rte_spinlock_lock(&(vm_info->config_spinlock));
	for (i = 0; i < info->num_vcpus; i++) {
		info->pcpu_map[i] = vm_info->pcpu_map[i];
	}
	rte_spinlock_unlock(&(vm_info->config_spinlock));
	return 0;
}

int
add_vm(const char *vm_name)
{
	struct virtual_machine_info *new_domain;
	virDomainPtr dom_ptr;
	int i;

	if (find_domain_by_name(vm_name) != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' "
				"already exists\n", vm_name);
		return -1;
	}

	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n");
		return -1;
	}
	dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name);
	if (dom_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: "
				"VM '%s' not found\n", vm_name);
		return -1;
	}

	new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
			RTE_CACHE_LINE_SIZE);
	if (new_domain == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
				"info\n");
		return -1;
	}
	new_domain->domainPtr = dom_ptr;
	if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n");
		rte_free(new_domain);
		return -1;
	}
	if (new_domain->info.nrVirtCpu > CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error the number of virtual CPUs(%u) is "
				"greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
				CHANNEL_CMDS_MAX_CPUS);
		rte_free(new_domain);
		return -1;
	}

	for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
		new_domain->pcpu_map[i] = 0;
	}
	if (update_pcpus_mask(new_domain) < 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
		rte_free(new_domain);
		return -1;
	}
	strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
	new_domain->name[sizeof(new_domain->name) - 1] = '\0';
	memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
	new_domain->num_channels = 0;

	if (!virDomainIsActive(dom_ptr))
		new_domain->status = CHANNEL_MGR_VM_INACTIVE;
	else
		new_domain->status = CHANNEL_MGR_VM_ACTIVE;

	rte_spinlock_init(&(new_domain->config_spinlock));
	LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
	return 0;
}

int
remove_vm(const char *vm_name)
{
	struct virtual_machine_info *vm_info = find_domain_by_name(vm_name);

	if (vm_info == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' "
				"not found\n", vm_name);
		return -1;
	}
	rte_spinlock_lock(&vm_info->config_spinlock);
	if (vm_info->num_channels != 0) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are "
				"%"PRId8" channels still active\n",
				vm_name, vm_info->num_channels);
		rte_spinlock_unlock(&vm_info->config_spinlock);
		return -1;
	}
	LIST_REMOVE(vm_info, vms_info);
	rte_spinlock_unlock(&vm_info->config_spinlock);
	rte_free(vm_info);
	return 0;
}

static void
disconnect_hypervisor(void)
{
	if (global_vir_conn_ptr != NULL) {
		virConnectClose(global_vir_conn_ptr);
		global_vir_conn_ptr = NULL;
	}
}

static int
connect_hypervisor(const char *path)
{
	if (global_vir_conn_ptr != NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection "
				"already established\n", path);
		return -1;
	}
	global_vir_conn_ptr = virConnectOpen(path);
	if (global_vir_conn_ptr == NULL) {
		RTE_LOG(ERR, CHANNEL_MANAGER, "Error failed to open connection to "
				"Hypervisor '%s'\n", path);
		return -1;
	}
	return 0;
}

int
channel_manager_init(const char *path __rte_unused)
{
	virNodeInfo info;

	LIST_INIT(&vm_list_head);
	if (connect_hypervisor(path) < 0) {
		global_n_host_cpus = 64;
		global_hypervisor_available = 0;
		RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
	} else {
		global_hypervisor_available = 1;

		global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);

		global_vircpuinfo = rte_zmalloc(NULL,
				sizeof(*global_vircpuinfo) *
				CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
		if (global_vircpuinfo == NULL) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
			goto error;
		}
		global_cpumaps = rte_zmalloc(NULL,
				CHANNEL_CMDS_MAX_CPUS * global_maplen,
				RTE_CACHE_LINE_SIZE);
		if (global_cpumaps == NULL)
			goto error;

		if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
			RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
			goto error;
		}
		global_n_host_cpus = (unsigned int)info.cpus;
	}



	if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
		RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
				"maximum of %u. No cores over %u should be used.\n",
				global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS,
				CHANNEL_CMDS_MAX_CPUS - 1);
		global_n_host_cpus = CHANNEL_CMDS_MAX_CPUS;
	}

	return 0;
error:
	if (global_hypervisor_available)
		disconnect_hypervisor();
	return -1;
}

void
channel_manager_exit(void)
{
	unsigned i;
	char mask[POWER_MGR_MAX_CPUS];
	struct virtual_machine_info *vm_info;

	/* Remove VMs one at a time from the list head so that a freed
	 * node is never dereferenced to find the next entry.
	 */
	while (!LIST_EMPTY(&vm_list_head)) {
		vm_info = LIST_FIRST(&vm_list_head);

		rte_spinlock_lock(&(vm_info->config_spinlock));

		memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
		for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
			if (mask[i] != 1)
				continue;
			remove_channel_from_monitor(
					vm_info->channels[i]);
			close(vm_info->channels[i]->fd);
			rte_free(vm_info->channels[i]);
		}
		rte_spinlock_unlock(&(vm_info->config_spinlock));

		LIST_REMOVE(vm_info, vms_info);
		rte_free(vm_info);
	}

	if (global_hypervisor_available) {
		/* Only needed if hypervisor available */
		rte_free(global_cpumaps);
		rte_free(global_vircpuinfo);
		disconnect_hypervisor();
	}
}
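
/*
 * Usage sketch (illustrative comment only, not part of the build): one
 * plausible call sequence for the API defined above, based solely on the
 * functions in this file. The libvirt URI "qemu:///system" and the guest
 * name "ubuntu_vm" are assumptions chosen for the example, not values
 * required by this code.
 *
 *	if (channel_manager_init("qemu:///system") < 0)
 *		return -1;                      // hypervisor optional, see above
 *	add_host_channel();                     // host-side JSON fifo channel
 *	if (add_vm("ubuntu_vm") == 0)
 *		add_all_channels("ubuntu_vm");  // scan CHANNEL_MGR_SOCKET_PATH
 *	...
 *	remove_vm("ubuntu_vm");
 *	channel_manager_exit();
 */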