1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk/env.h" 37 #include "spdk/likely.h" 38 #include "spdk/string.h" 39 #include "spdk/util.h" 40 #include "spdk/barrier.h" 41 #include "spdk/vhost.h" 42 #include "vhost_internal.h" 43 44 #include "spdk_internal/memory.h" 45 46 struct vhost_poll_group { 47 struct spdk_thread *thread; 48 unsigned ref; 49 TAILQ_ENTRY(vhost_poll_group) tailq; 50 }; 51 52 static TAILQ_HEAD(, vhost_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups); 53 54 /* Temporary cpuset for poll group assignment */ 55 static struct spdk_cpuset *g_tmp_cpuset; 56 57 /* Path to folder where character device will be created. Can be set by user. */ 58 static char dev_dirname[PATH_MAX] = ""; 59 60 /* Thread performing all vhost management operations */ 61 static struct spdk_thread *g_vhost_init_thread; 62 63 static spdk_vhost_fini_cb g_fini_cpl_cb; 64 65 /** 66 * DPDK calls our callbacks synchronously but the work those callbacks 67 * perform needs to be async. Luckily, all DPDK callbacks are called on 68 * a DPDK-internal pthread, so we'll just wait on a semaphore in there. 69 */ 70 static sem_t g_dpdk_sem; 71 72 /** Return code for the current DPDK callback */ 73 static int g_dpdk_response; 74 75 struct spdk_vhost_session_fn_ctx { 76 /** Device pointer obtained before enqueuing the event */ 77 struct spdk_vhost_dev *vdev; 78 79 /** ID of the session to send event to. */ 80 uint32_t vsession_id; 81 82 /** User callback function to be executed on given thread. */ 83 spdk_vhost_session_fn cb_fn; 84 85 /** Custom user context */ 86 void *user_ctx; 87 }; 88 89 static int new_connection(int vid); 90 static int start_device(int vid); 91 static void stop_device(int vid); 92 static void destroy_connection(int vid); 93 94 #ifdef SPDK_CONFIG_VHOST_INTERNAL_LIB 95 static int get_config(int vid, uint8_t *config, uint32_t len); 96 static int set_config(int vid, uint8_t *config, uint32_t offset, 97 uint32_t size, uint32_t flags); 98 #endif 99 100 const struct vhost_device_ops g_spdk_vhost_ops = { 101 .new_device = start_device, 102 .destroy_device = stop_device, 103 .new_connection = new_connection, 104 .destroy_connection = destroy_connection, 105 #ifdef SPDK_CONFIG_VHOST_INTERNAL_LIB 106 .get_config = get_config, 107 .set_config = set_config, 108 .vhost_nvme_admin_passthrough = spdk_vhost_nvme_admin_passthrough, 109 .vhost_nvme_set_cq_call = spdk_vhost_nvme_set_cq_call, 110 .vhost_nvme_get_cap = spdk_vhost_nvme_get_cap, 111 .vhost_nvme_set_bar_mr = spdk_vhost_nvme_set_bar_mr, 112 #endif 113 }; 114 115 static TAILQ_HEAD(, spdk_vhost_dev) g_spdk_vhost_devices = TAILQ_HEAD_INITIALIZER( 116 g_spdk_vhost_devices); 117 static pthread_mutex_t g_spdk_vhost_mutex = PTHREAD_MUTEX_INITIALIZER; 118 119 void *spdk_vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len) 120 { 121 void *vva; 122 uint64_t newlen; 123 124 newlen = len; 125 vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &newlen); 126 if (newlen != len) { 127 return NULL; 128 } 129 130 return vva; 131 132 } 133 134 static void 135 spdk_vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue, 136 uint16_t req_id) 137 { 138 struct vring_desc *desc, *desc_table; 139 uint32_t desc_table_size; 140 int rc; 141 142 if (spdk_likely(!spdk_vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) { 143 return; 144 } 145 146 rc = spdk_vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size); 147 if (spdk_unlikely(rc != 0)) { 148 SPDK_ERRLOG("Can't log used ring descriptors!\n"); 149 return; 150 } 151 152 do { 153 if (spdk_vhost_vring_desc_is_wr(desc)) { 154 /* To be honest, only pages realy touched should be logged, but 155 * doing so would require tracking those changes in each backed. 156 * Also backend most likely will touch all/most of those pages so 157 * for lets assume we touched all pages passed to as writeable buffers. */ 158 rte_vhost_log_write(vsession->vid, desc->addr, desc->len); 159 } 160 spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size); 161 } while (desc); 162 } 163 164 static void 165 spdk_vhost_log_used_vring_elem(struct spdk_vhost_session *vsession, 166 struct spdk_vhost_virtqueue *virtqueue, 167 uint16_t idx) 168 { 169 uint64_t offset, len; 170 uint16_t vq_idx; 171 172 if (spdk_likely(!spdk_vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) { 173 return; 174 } 175 176 offset = offsetof(struct vring_used, ring[idx]); 177 len = sizeof(virtqueue->vring.used->ring[idx]); 178 vq_idx = virtqueue - vsession->virtqueue; 179 180 rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len); 181 } 182 183 static void 184 spdk_vhost_log_used_vring_idx(struct spdk_vhost_session *vsession, 185 struct spdk_vhost_virtqueue *virtqueue) 186 { 187 uint64_t offset, len; 188 uint16_t vq_idx; 189 190 if (spdk_likely(!spdk_vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) { 191 return; 192 } 193 194 offset = offsetof(struct vring_used, idx); 195 len = sizeof(virtqueue->vring.used->idx); 196 vq_idx = virtqueue - vsession->virtqueue; 197 198 rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len); 199 } 200 201 /* 202 * Get available requests from avail ring. 203 */ 204 uint16_t 205 spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs, 206 uint16_t reqs_len) 207 { 208 struct rte_vhost_vring *vring = &virtqueue->vring; 209 struct vring_avail *avail = vring->avail; 210 uint16_t size_mask = vring->size - 1; 211 uint16_t last_idx = virtqueue->last_avail_idx, avail_idx = avail->idx; 212 uint16_t count, i; 213 214 count = avail_idx - last_idx; 215 if (spdk_likely(count == 0)) { 216 return 0; 217 } 218 219 if (spdk_unlikely(count > vring->size)) { 220 /* TODO: the queue is unrecoverably broken and should be marked so. 221 * For now we will fail silently and report there are no new avail entries. 222 */ 223 return 0; 224 } 225 226 count = spdk_min(count, reqs_len); 227 virtqueue->last_avail_idx += count; 228 for (i = 0; i < count; i++) { 229 reqs[i] = vring->avail->ring[(last_idx + i) & size_mask]; 230 } 231 232 SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING, 233 "AVAIL: last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n", 234 last_idx, avail_idx, count); 235 236 return count; 237 } 238 239 static bool 240 spdk_vhost_vring_desc_is_indirect(struct vring_desc *cur_desc) 241 { 242 return !!(cur_desc->flags & VRING_DESC_F_INDIRECT); 243 } 244 245 int 246 spdk_vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue, 247 uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table, 248 uint32_t *desc_table_size) 249 { 250 if (spdk_unlikely(req_idx >= virtqueue->vring.size)) { 251 return -1; 252 } 253 254 *desc = &virtqueue->vring.desc[req_idx]; 255 256 if (spdk_vhost_vring_desc_is_indirect(*desc)) { 257 *desc_table_size = (*desc)->len / sizeof(**desc); 258 *desc_table = spdk_vhost_gpa_to_vva(vsession, (*desc)->addr, 259 sizeof(**desc) * *desc_table_size); 260 *desc = *desc_table; 261 if (*desc == NULL) { 262 return -1; 263 } 264 265 return 0; 266 } 267 268 *desc_table = virtqueue->vring.desc; 269 *desc_table_size = virtqueue->vring.size; 270 271 return 0; 272 } 273 274 int 275 spdk_vhost_vq_used_signal(struct spdk_vhost_session *vsession, 276 struct spdk_vhost_virtqueue *virtqueue) 277 { 278 if (virtqueue->used_req_cnt == 0) { 279 return 0; 280 } 281 282 virtqueue->req_cnt += virtqueue->used_req_cnt; 283 virtqueue->used_req_cnt = 0; 284 285 SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING, 286 "Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n", 287 virtqueue - vsession->virtqueue, virtqueue->last_used_idx); 288 289 if (rte_vhost_vring_call(vsession->vid, virtqueue->vring_idx) == 0) { 290 /* interrupt signalled */ 291 return 1; 292 } else { 293 /* interrupt not signalled */ 294 return 0; 295 } 296 } 297 298 299 static void 300 check_session_io_stats(struct spdk_vhost_session *vsession, uint64_t now) 301 { 302 struct spdk_vhost_virtqueue *virtqueue; 303 uint32_t irq_delay_base = vsession->coalescing_delay_time_base; 304 uint32_t io_threshold = vsession->coalescing_io_rate_threshold; 305 int32_t irq_delay; 306 uint32_t req_cnt; 307 uint16_t q_idx; 308 309 if (now < vsession->next_stats_check_time) { 310 return; 311 } 312 313 vsession->next_stats_check_time = now + vsession->stats_check_interval; 314 for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) { 315 virtqueue = &vsession->virtqueue[q_idx]; 316 317 req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt; 318 if (req_cnt <= io_threshold) { 319 continue; 320 } 321 322 irq_delay = (irq_delay_base * (req_cnt - io_threshold)) / io_threshold; 323 virtqueue->irq_delay_time = (uint32_t) spdk_max(0, irq_delay); 324 325 virtqueue->req_cnt = 0; 326 virtqueue->next_event_time = now; 327 } 328 } 329 330 void 331 spdk_vhost_session_used_signal(struct spdk_vhost_session *vsession) 332 { 333 struct spdk_vhost_virtqueue *virtqueue; 334 uint64_t now; 335 uint16_t q_idx; 336 337 if (vsession->coalescing_delay_time_base == 0) { 338 for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) { 339 virtqueue = &vsession->virtqueue[q_idx]; 340 341 if (virtqueue->vring.desc == NULL || 342 (virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { 343 continue; 344 } 345 346 spdk_vhost_vq_used_signal(vsession, virtqueue); 347 } 348 } else { 349 now = spdk_get_ticks(); 350 check_session_io_stats(vsession, now); 351 352 for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) { 353 virtqueue = &vsession->virtqueue[q_idx]; 354 355 /* No need for event right now */ 356 if (now < virtqueue->next_event_time || 357 (virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { 358 continue; 359 } 360 361 if (!spdk_vhost_vq_used_signal(vsession, virtqueue)) { 362 continue; 363 } 364 365 /* Syscall is quite long so update time */ 366 now = spdk_get_ticks(); 367 virtqueue->next_event_time = now + virtqueue->irq_delay_time; 368 } 369 } 370 } 371 372 static int 373 spdk_vhost_session_set_coalescing(struct spdk_vhost_dev *vdev, 374 struct spdk_vhost_session *vsession, void *ctx) 375 { 376 if (vdev == NULL || vsession == NULL) { 377 /* nothing to do */ 378 return 0; 379 } 380 381 vsession->coalescing_delay_time_base = 382 vdev->coalescing_delay_us * spdk_get_ticks_hz() / 1000000ULL; 383 vsession->coalescing_io_rate_threshold = 384 vdev->coalescing_iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U; 385 return 0; 386 } 387 388 static int 389 vhost_dev_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us, 390 uint32_t iops_threshold) 391 { 392 uint64_t delay_time_base = delay_base_us * spdk_get_ticks_hz() / 1000000ULL; 393 uint32_t io_rate = iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U; 394 395 if (delay_time_base >= UINT32_MAX) { 396 SPDK_ERRLOG("Delay time of %"PRIu32" is to big\n", delay_base_us); 397 return -EINVAL; 398 } else if (io_rate == 0) { 399 SPDK_ERRLOG("IOPS rate of %"PRIu32" is too low. Min is %u\n", io_rate, 400 1000U / SPDK_VHOST_STATS_CHECK_INTERVAL_MS); 401 return -EINVAL; 402 } 403 404 vdev->coalescing_delay_us = delay_base_us; 405 vdev->coalescing_iops_threshold = iops_threshold; 406 return 0; 407 } 408 409 int 410 spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us, 411 uint32_t iops_threshold) 412 { 413 int rc; 414 415 rc = vhost_dev_set_coalescing(vdev, delay_base_us, iops_threshold); 416 if (rc != 0) { 417 return rc; 418 } 419 420 spdk_vhost_dev_foreach_session(vdev, spdk_vhost_session_set_coalescing, NULL); 421 return 0; 422 } 423 424 void 425 spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us, 426 uint32_t *iops_threshold) 427 { 428 if (delay_base_us) { 429 *delay_base_us = vdev->coalescing_delay_us; 430 } 431 432 if (iops_threshold) { 433 *iops_threshold = vdev->coalescing_iops_threshold; 434 } 435 } 436 437 /* 438 * Enqueue id and len to used ring. 439 */ 440 void 441 spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession, 442 struct spdk_vhost_virtqueue *virtqueue, 443 uint16_t id, uint32_t len) 444 { 445 struct rte_vhost_vring *vring = &virtqueue->vring; 446 struct vring_used *used = vring->used; 447 uint16_t last_idx = virtqueue->last_used_idx & (vring->size - 1); 448 449 SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING, 450 "Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n", 451 virtqueue - vsession->virtqueue, virtqueue->last_used_idx, id, len); 452 453 spdk_vhost_log_req_desc(vsession, virtqueue, id); 454 455 virtqueue->last_used_idx++; 456 used->ring[last_idx].id = id; 457 used->ring[last_idx].len = len; 458 459 /* Ensure the used ring is updated before we log it or increment used->idx. */ 460 spdk_smp_wmb(); 461 462 spdk_vhost_log_used_vring_elem(vsession, virtqueue, last_idx); 463 * (volatile uint16_t *) &used->idx = virtqueue->last_used_idx; 464 spdk_vhost_log_used_vring_idx(vsession, virtqueue); 465 466 virtqueue->used_req_cnt++; 467 } 468 469 int 470 spdk_vhost_vring_desc_get_next(struct vring_desc **desc, 471 struct vring_desc *desc_table, uint32_t desc_table_size) 472 { 473 struct vring_desc *old_desc = *desc; 474 uint16_t next_idx; 475 476 if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) { 477 *desc = NULL; 478 return 0; 479 } 480 481 next_idx = old_desc->next; 482 if (spdk_unlikely(next_idx >= desc_table_size)) { 483 *desc = NULL; 484 return -1; 485 } 486 487 *desc = &desc_table[next_idx]; 488 return 0; 489 } 490 491 bool 492 spdk_vhost_vring_desc_is_wr(struct vring_desc *cur_desc) 493 { 494 return !!(cur_desc->flags & VRING_DESC_F_WRITE); 495 } 496 497 int 498 spdk_vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov, 499 uint16_t *iov_index, const struct vring_desc *desc) 500 { 501 uint64_t len; 502 uint64_t remaining = desc->len; 503 uintptr_t payload = desc->addr; 504 uintptr_t vva; 505 506 do { 507 if (*iov_index >= SPDK_VHOST_IOVS_MAX) { 508 SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX); 509 return -1; 510 } 511 len = remaining; 512 vva = (uintptr_t)rte_vhost_va_from_guest_pa(vsession->mem, payload, &len); 513 if (vva == 0 || len == 0) { 514 SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload); 515 return -1; 516 } 517 iov[*iov_index].iov_base = (void *)vva; 518 iov[*iov_index].iov_len = len; 519 remaining -= len; 520 payload += len; 521 (*iov_index)++; 522 } while (remaining); 523 524 return 0; 525 } 526 527 static struct spdk_vhost_session * 528 spdk_vhost_session_find_by_id(struct spdk_vhost_dev *vdev, unsigned id) 529 { 530 struct spdk_vhost_session *vsession; 531 532 TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) { 533 if (vsession->id == id) { 534 return vsession; 535 } 536 } 537 538 return NULL; 539 } 540 541 struct spdk_vhost_session * 542 spdk_vhost_session_find_by_vid(int vid) 543 { 544 struct spdk_vhost_dev *vdev; 545 struct spdk_vhost_session *vsession; 546 547 TAILQ_FOREACH(vdev, &g_spdk_vhost_devices, tailq) { 548 TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) { 549 if (vsession->vid == vid) { 550 return vsession; 551 } 552 } 553 } 554 555 return NULL; 556 } 557 558 #define SHIFT_2MB 21 559 #define SIZE_2MB (1ULL << SHIFT_2MB) 560 #define FLOOR_2MB(x) (((uintptr_t)x) / SIZE_2MB) << SHIFT_2MB 561 #define CEIL_2MB(x) ((((uintptr_t)x) + SIZE_2MB - 1) / SIZE_2MB) << SHIFT_2MB 562 563 static void 564 spdk_vhost_session_mem_register(struct spdk_vhost_session *vsession) 565 { 566 struct rte_vhost_mem_region *region; 567 uint32_t i; 568 uint64_t previous_start = UINT64_MAX; 569 570 for (i = 0; i < vsession->mem->nregions; i++) { 571 uint64_t start, end, len; 572 region = &vsession->mem->regions[i]; 573 start = FLOOR_2MB(region->mmap_addr); 574 end = CEIL_2MB(region->mmap_addr + region->mmap_size); 575 if (start == previous_start) { 576 start += (size_t) SIZE_2MB; 577 } 578 previous_start = start; 579 len = end - start; 580 SPDK_INFOLOG(SPDK_LOG_VHOST, "Registering VM memory for vtophys translation - 0x%jx len:0x%jx\n", 581 start, len); 582 583 if (spdk_mem_register((void *)start, len) != 0) { 584 SPDK_WARNLOG("Failed to register memory region %"PRIu32". Future vtophys translation might fail.\n", 585 i); 586 continue; 587 } 588 } 589 } 590 591 static void 592 spdk_vhost_session_mem_unregister(struct spdk_vhost_session *vsession) 593 { 594 struct rte_vhost_mem_region *region; 595 uint32_t i; 596 uint64_t previous_start = UINT64_MAX; 597 598 for (i = 0; i < vsession->mem->nregions; i++) { 599 uint64_t start, end, len; 600 region = &vsession->mem->regions[i]; 601 start = FLOOR_2MB(region->mmap_addr); 602 end = CEIL_2MB(region->mmap_addr + region->mmap_size); 603 if (start == previous_start) { 604 start += (size_t) SIZE_2MB; 605 } 606 previous_start = start; 607 len = end - start; 608 609 if (spdk_vtophys((void *) start, NULL) == SPDK_VTOPHYS_ERROR) { 610 continue; /* region has not been registered */ 611 } 612 613 if (spdk_mem_unregister((void *)start, len) != 0) { 614 assert(false); 615 } 616 } 617 618 } 619 620 struct spdk_vhost_dev * 621 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev) 622 { 623 if (vdev == NULL) { 624 return TAILQ_FIRST(&g_spdk_vhost_devices); 625 } 626 627 return TAILQ_NEXT(vdev, tailq); 628 } 629 630 struct spdk_vhost_dev * 631 spdk_vhost_dev_find(const char *ctrlr_name) 632 { 633 struct spdk_vhost_dev *vdev; 634 size_t dev_dirname_len = strlen(dev_dirname); 635 636 if (strncmp(ctrlr_name, dev_dirname, dev_dirname_len) == 0) { 637 ctrlr_name += dev_dirname_len; 638 } 639 640 TAILQ_FOREACH(vdev, &g_spdk_vhost_devices, tailq) { 641 if (strcmp(vdev->name, ctrlr_name) == 0) { 642 return vdev; 643 } 644 } 645 646 return NULL; 647 } 648 649 static int 650 spdk_vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask) 651 { 652 int rc; 653 654 if (cpumask == NULL) { 655 return -1; 656 } 657 658 if (mask == NULL) { 659 spdk_cpuset_copy(cpumask, spdk_app_get_core_mask()); 660 return 0; 661 } 662 663 rc = spdk_app_parse_core_mask(mask, cpumask); 664 if (rc < 0) { 665 SPDK_ERRLOG("invalid cpumask %s\n", mask); 666 return -1; 667 } 668 669 if (spdk_cpuset_count(cpumask) == 0) { 670 SPDK_ERRLOG("no cpu is selected among reactor mask(=%s)\n", 671 spdk_cpuset_fmt(spdk_app_get_core_mask())); 672 return -1; 673 } 674 675 return 0; 676 } 677 678 static void * 679 _start_rte_driver(void *arg) 680 { 681 char *path = arg; 682 683 if (rte_vhost_driver_start(path) != 0) { 684 return NULL; 685 } 686 687 return path; 688 } 689 690 int 691 spdk_vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str, 692 const struct spdk_vhost_dev_backend *backend) 693 { 694 char path[PATH_MAX]; 695 struct stat file_stat; 696 struct spdk_cpuset *cpumask; 697 int rc; 698 699 assert(vdev); 700 if (name == NULL) { 701 SPDK_ERRLOG("Can't register controller with no name\n"); 702 return -EINVAL; 703 } 704 705 cpumask = spdk_cpuset_alloc(); 706 if (!cpumask) { 707 SPDK_ERRLOG("spdk_cpuset_alloc failed\n"); 708 return -ENOMEM; 709 } 710 711 if (spdk_vhost_parse_core_mask(mask_str, cpumask) != 0) { 712 SPDK_ERRLOG("cpumask %s is invalid (app mask is 0x%s)\n", 713 mask_str, spdk_cpuset_fmt(spdk_app_get_core_mask())); 714 rc = -EINVAL; 715 goto out; 716 } 717 718 if (spdk_vhost_dev_find(name)) { 719 SPDK_ERRLOG("vhost controller %s already exists.\n", name); 720 rc = -EEXIST; 721 goto out; 722 } 723 724 if (snprintf(path, sizeof(path), "%s%s", dev_dirname, name) >= (int)sizeof(path)) { 725 SPDK_ERRLOG("Resulting socket path for controller %s is too long: %s%s\n", name, dev_dirname, 726 name); 727 rc = -EINVAL; 728 goto out; 729 } 730 731 /* Register vhost driver to handle vhost messages. */ 732 if (stat(path, &file_stat) != -1) { 733 if (!S_ISSOCK(file_stat.st_mode)) { 734 SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": " 735 "The file already exists and is not a socket.\n", 736 path); 737 rc = -EIO; 738 goto out; 739 } else if (unlink(path) != 0) { 740 SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": " 741 "The socket already exists and failed to unlink.\n", 742 path); 743 rc = -EIO; 744 goto out; 745 } 746 } 747 748 if (rte_vhost_driver_register(path, 0) != 0) { 749 SPDK_ERRLOG("Could not register controller %s with vhost library\n", name); 750 SPDK_ERRLOG("Check if domain socket %s already exists\n", path); 751 rc = -EIO; 752 goto out; 753 } 754 if (rte_vhost_driver_set_features(path, backend->virtio_features) || 755 rte_vhost_driver_disable_features(path, backend->disabled_features)) { 756 SPDK_ERRLOG("Couldn't set vhost features for controller %s\n", name); 757 758 rte_vhost_driver_unregister(path); 759 rc = -EIO; 760 goto out; 761 } 762 763 if (rte_vhost_driver_callback_register(path, &g_spdk_vhost_ops) != 0) { 764 rte_vhost_driver_unregister(path); 765 SPDK_ERRLOG("Couldn't register callbacks for controller %s\n", name); 766 rc = -EIO; 767 goto out; 768 } 769 770 vdev->name = strdup(name); 771 vdev->path = strdup(path); 772 if (vdev->name == NULL || vdev->path == NULL) { 773 free(vdev->name); 774 free(vdev->path); 775 rte_vhost_driver_unregister(path); 776 rc = -EIO; 777 goto out; 778 } 779 780 vdev->cpumask = cpumask; 781 vdev->registered = true; 782 vdev->backend = backend; 783 TAILQ_INIT(&vdev->vsessions); 784 TAILQ_INSERT_TAIL(&g_spdk_vhost_devices, vdev, tailq); 785 786 vhost_dev_set_coalescing(vdev, SPDK_VHOST_COALESCING_DELAY_BASE_US, 787 SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD); 788 789 spdk_vhost_dev_install_rte_compat_hooks(vdev); 790 791 /* The following might start a POSIX thread that polls for incoming 792 * socket connections and calls backend->start/stop_device. These backend 793 * callbacks are also protected by the global SPDK vhost mutex, so we're 794 * safe with not initializing the vdev just yet. 795 */ 796 if (spdk_call_unaffinitized(_start_rte_driver, path) == NULL) { 797 SPDK_ERRLOG("Failed to start vhost driver for controller %s (%d): %s\n", 798 name, errno, spdk_strerror(errno)); 799 rte_vhost_driver_unregister(path); 800 TAILQ_REMOVE(&g_spdk_vhost_devices, vdev, tailq); 801 free(vdev->name); 802 free(vdev->path); 803 rc = -EIO; 804 goto out; 805 } 806 807 SPDK_INFOLOG(SPDK_LOG_VHOST, "Controller %s: new controller added\n", vdev->name); 808 return 0; 809 810 out: 811 spdk_cpuset_free(cpumask); 812 return rc; 813 } 814 815 int 816 spdk_vhost_dev_unregister(struct spdk_vhost_dev *vdev) 817 { 818 if (!TAILQ_EMPTY(&vdev->vsessions)) { 819 SPDK_ERRLOG("Controller %s has still valid connection.\n", vdev->name); 820 return -EBUSY; 821 } 822 823 if (vdev->registered && rte_vhost_driver_unregister(vdev->path) != 0) { 824 SPDK_ERRLOG("Could not unregister controller %s with vhost library\n" 825 "Check if domain socket %s still exists\n", 826 vdev->name, vdev->path); 827 return -EIO; 828 } 829 830 SPDK_INFOLOG(SPDK_LOG_VHOST, "Controller %s: removed\n", vdev->name); 831 832 free(vdev->name); 833 free(vdev->path); 834 spdk_cpuset_free(vdev->cpumask); 835 TAILQ_REMOVE(&g_spdk_vhost_devices, vdev, tailq); 836 return 0; 837 } 838 839 static struct spdk_vhost_session * 840 spdk_vhost_session_next(struct spdk_vhost_dev *vdev, unsigned prev_id) 841 { 842 struct spdk_vhost_session *vsession; 843 844 TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) { 845 if (vsession->id > prev_id) { 846 return vsession; 847 } 848 } 849 850 return NULL; 851 } 852 853 const char * 854 spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev) 855 { 856 assert(vdev != NULL); 857 return vdev->name; 858 } 859 860 const struct spdk_cpuset * 861 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev) 862 { 863 assert(vdev != NULL); 864 return vdev->cpumask; 865 } 866 867 struct vhost_poll_group * 868 spdk_vhost_get_poll_group(struct spdk_cpuset *cpumask) 869 { 870 struct vhost_poll_group *pg, *selected_pg; 871 uint32_t min_ctrlrs; 872 873 min_ctrlrs = INT_MAX; 874 selected_pg = TAILQ_FIRST(&g_poll_groups); 875 876 TAILQ_FOREACH(pg, &g_poll_groups, tailq) { 877 spdk_cpuset_copy(g_tmp_cpuset, cpumask); 878 spdk_cpuset_and(g_tmp_cpuset, spdk_thread_get_cpumask(pg->thread)); 879 880 /* ignore threads which could be relocated to a non-masked cpu. */ 881 if (!spdk_cpuset_equal(g_tmp_cpuset, spdk_thread_get_cpumask(pg->thread))) { 882 continue; 883 } 884 885 if (pg->ref < min_ctrlrs) { 886 selected_pg = pg; 887 min_ctrlrs = pg->ref; 888 } 889 } 890 891 assert(selected_pg != NULL); 892 assert(selected_pg->ref < UINT_MAX); 893 selected_pg->ref++; 894 return selected_pg; 895 } 896 897 void 898 spdk_vhost_put_poll_group(struct vhost_poll_group *pg) 899 { 900 assert(pg->ref > 0); 901 pg->ref--; 902 } 903 904 void 905 spdk_vhost_session_start_done(struct spdk_vhost_session *vsession, int response) 906 { 907 if (response == 0) { 908 vsession->started = true; 909 assert(vsession->vdev->active_session_num < UINT32_MAX); 910 vsession->vdev->active_session_num++; 911 } 912 913 g_dpdk_response = response; 914 sem_post(&g_dpdk_sem); 915 } 916 917 void 918 spdk_vhost_session_stop_done(struct spdk_vhost_session *vsession, int response) 919 { 920 if (response == 0) { 921 vsession->started = false; 922 assert(vsession->vdev->active_session_num > 0); 923 vsession->vdev->active_session_num--; 924 } 925 926 g_dpdk_response = response; 927 sem_post(&g_dpdk_sem); 928 } 929 930 static void 931 spdk_vhost_event_cb(void *arg1) 932 { 933 struct spdk_vhost_session_fn_ctx *ctx = arg1; 934 struct spdk_vhost_session *vsession; 935 936 if (pthread_mutex_trylock(&g_spdk_vhost_mutex) != 0) { 937 spdk_thread_send_msg(spdk_get_thread(), spdk_vhost_event_cb, arg1); 938 return; 939 } 940 941 vsession = spdk_vhost_session_find_by_id(ctx->vdev, ctx->vsession_id); 942 ctx->cb_fn(ctx->vdev, vsession, NULL); 943 pthread_mutex_unlock(&g_spdk_vhost_mutex); 944 } 945 946 int 947 spdk_vhost_session_send_event(struct vhost_poll_group *pg, 948 struct spdk_vhost_session *vsession, 949 spdk_vhost_session_fn cb_fn, unsigned timeout_sec, 950 const char *errmsg) 951 { 952 struct spdk_vhost_session_fn_ctx ev_ctx = {0}; 953 struct timespec timeout; 954 int rc; 955 956 ev_ctx.vdev = vsession->vdev; 957 ev_ctx.vsession_id = vsession->id; 958 ev_ctx.cb_fn = cb_fn; 959 960 vsession->poll_group = pg; 961 spdk_thread_send_msg(pg->thread, spdk_vhost_event_cb, &ev_ctx); 962 pthread_mutex_unlock(&g_spdk_vhost_mutex); 963 964 clock_gettime(CLOCK_REALTIME, &timeout); 965 timeout.tv_sec += timeout_sec; 966 967 rc = sem_timedwait(&g_dpdk_sem, &timeout); 968 if (rc != 0) { 969 SPDK_ERRLOG("Timeout waiting for event: %s.\n", errmsg); 970 sem_wait(&g_dpdk_sem); 971 } 972 973 pthread_mutex_lock(&g_spdk_vhost_mutex); 974 return g_dpdk_response; 975 } 976 977 static void foreach_session_continue(struct spdk_vhost_session_fn_ctx *ev_ctx, 978 struct spdk_vhost_session *vsession); 979 980 static void 981 foreach_session_finish_cb(void *arg1) 982 { 983 struct spdk_vhost_session_fn_ctx *ctx = arg1; 984 struct spdk_vhost_dev *vdev = ctx->vdev; 985 986 if (pthread_mutex_trylock(&g_spdk_vhost_mutex) != 0) { 987 spdk_thread_send_msg(spdk_get_thread(), 988 foreach_session_finish_cb, arg1); 989 return; 990 } 991 992 assert(vdev->pending_async_op_num > 0); 993 vdev->pending_async_op_num--; 994 /* Call fn one last time with vsession == NULL */ 995 ctx->cb_fn(vdev, NULL, ctx->user_ctx); 996 997 pthread_mutex_unlock(&g_spdk_vhost_mutex); 998 free(ctx); 999 } 1000 1001 static void 1002 foreach_session_continue_cb(void *arg1) 1003 { 1004 struct spdk_vhost_session_fn_ctx *ctx = arg1; 1005 struct spdk_vhost_session *vsession = NULL; 1006 struct spdk_vhost_dev *vdev = ctx->vdev; 1007 int rc; 1008 1009 if (pthread_mutex_trylock(&g_spdk_vhost_mutex) != 0) { 1010 spdk_thread_send_msg(spdk_get_thread(), 1011 foreach_session_continue_cb, arg1); 1012 return; 1013 } 1014 1015 vsession = spdk_vhost_session_find_by_id(vdev, ctx->vsession_id); 1016 if (vsession == NULL || !vsession->initialized) { 1017 /* The session must have been removed in the meantime, so we 1018 * just skip it in our foreach chain 1019 */ 1020 goto out_unlock_continue; 1021 } 1022 1023 if (vsession->started && vsession->poll_group->thread != spdk_get_thread()) { 1024 /* if session has been relocated to other thread, it is no longer thread-safe 1025 * to access its contents here. Even though we're running under the global 1026 * vhost mutex, the session itself (and its pollers) are not. We need to chase 1027 * the session thread as many times as necessary. 1028 */ 1029 spdk_thread_send_msg(vsession->poll_group->thread, 1030 foreach_session_continue_cb, arg1); 1031 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1032 return; 1033 } 1034 1035 rc = ctx->cb_fn(vdev, vsession, ctx->user_ctx); 1036 if (rc < 0) { 1037 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1038 free(ctx); 1039 return; 1040 } 1041 1042 out_unlock_continue: 1043 vsession = spdk_vhost_session_next(vdev, ctx->vsession_id); 1044 foreach_session_continue(ctx, vsession); 1045 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1046 } 1047 1048 static void 1049 foreach_session_continue(struct spdk_vhost_session_fn_ctx *ev_ctx, 1050 struct spdk_vhost_session *vsession) 1051 { 1052 struct spdk_vhost_dev *vdev = ev_ctx->vdev; 1053 int rc; 1054 1055 while (vsession != NULL && !vsession->started) { 1056 if (vsession->initialized) { 1057 rc = ev_ctx->cb_fn(vdev, vsession, ev_ctx->user_ctx); 1058 if (rc < 0) { 1059 return; 1060 } 1061 } 1062 1063 vsession = spdk_vhost_session_next(vdev, vsession->id); 1064 } 1065 1066 if (vsession != NULL) { 1067 ev_ctx->vsession_id = vsession->id; 1068 spdk_thread_send_msg(vsession->poll_group->thread, 1069 foreach_session_continue_cb, ev_ctx); 1070 } else { 1071 ev_ctx->vsession_id = UINT32_MAX; 1072 spdk_thread_send_msg(g_vhost_init_thread, 1073 foreach_session_finish_cb, ev_ctx); 1074 } 1075 } 1076 1077 void 1078 spdk_vhost_dev_foreach_session(struct spdk_vhost_dev *vdev, 1079 spdk_vhost_session_fn fn, void *arg) 1080 { 1081 struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions); 1082 struct spdk_vhost_session_fn_ctx *ev_ctx; 1083 1084 ev_ctx = calloc(1, sizeof(*ev_ctx)); 1085 if (ev_ctx == NULL) { 1086 SPDK_ERRLOG("Failed to alloc vhost event.\n"); 1087 assert(false); 1088 return; 1089 } 1090 1091 ev_ctx->vdev = vdev; 1092 ev_ctx->cb_fn = fn; 1093 ev_ctx->user_ctx = arg; 1094 1095 assert(vdev->pending_async_op_num < UINT32_MAX); 1096 vdev->pending_async_op_num++; 1097 foreach_session_continue(ev_ctx, vsession); 1098 } 1099 1100 static void 1101 _stop_session(struct spdk_vhost_session *vsession) 1102 { 1103 struct spdk_vhost_dev *vdev = vsession->vdev; 1104 struct spdk_vhost_virtqueue *q; 1105 int rc; 1106 uint16_t i; 1107 1108 rc = vdev->backend->stop_session(vsession); 1109 if (rc != 0) { 1110 SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vsession->vid); 1111 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1112 return; 1113 } 1114 1115 for (i = 0; i < vsession->max_queues; i++) { 1116 q = &vsession->virtqueue[i]; 1117 if (q->vring.desc == NULL) { 1118 continue; 1119 } 1120 rte_vhost_set_vring_base(vsession->vid, i, q->last_avail_idx, q->last_used_idx); 1121 } 1122 1123 spdk_vhost_session_mem_unregister(vsession); 1124 free(vsession->mem); 1125 } 1126 1127 static void 1128 stop_device(int vid) 1129 { 1130 struct spdk_vhost_session *vsession; 1131 1132 pthread_mutex_lock(&g_spdk_vhost_mutex); 1133 vsession = spdk_vhost_session_find_by_vid(vid); 1134 if (vsession == NULL) { 1135 SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid); 1136 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1137 return; 1138 } 1139 1140 if (!vsession->started) { 1141 /* already stopped, nothing to do */ 1142 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1143 return; 1144 } 1145 1146 _stop_session(vsession); 1147 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1148 } 1149 1150 static int 1151 start_device(int vid) 1152 { 1153 struct spdk_vhost_dev *vdev; 1154 struct spdk_vhost_session *vsession; 1155 int rc = -1; 1156 uint16_t i; 1157 1158 pthread_mutex_lock(&g_spdk_vhost_mutex); 1159 1160 vsession = spdk_vhost_session_find_by_vid(vid); 1161 if (vsession == NULL) { 1162 SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid); 1163 goto out; 1164 } 1165 1166 vdev = vsession->vdev; 1167 if (vsession->started) { 1168 /* already started, nothing to do */ 1169 rc = 0; 1170 goto out; 1171 } 1172 1173 vsession->max_queues = 0; 1174 memset(vsession->virtqueue, 0, sizeof(vsession->virtqueue)); 1175 for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) { 1176 struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i]; 1177 1178 q->vring_idx = -1; 1179 if (rte_vhost_get_vhost_vring(vid, i, &q->vring)) { 1180 continue; 1181 } 1182 q->vring_idx = i; 1183 1184 if (q->vring.desc == NULL || q->vring.size == 0) { 1185 continue; 1186 } 1187 1188 if (rte_vhost_get_vring_base(vsession->vid, i, &q->last_avail_idx, &q->last_used_idx)) { 1189 q->vring.desc = NULL; 1190 continue; 1191 } 1192 1193 /* Disable notifications. */ 1194 if (rte_vhost_enable_guest_notification(vid, i, 0) != 0) { 1195 SPDK_ERRLOG("vhost device %d: Failed to disable guest notification on queue %"PRIu16"\n", vid, i); 1196 goto out; 1197 } 1198 1199 vsession->max_queues = i + 1; 1200 } 1201 1202 if (rte_vhost_get_negotiated_features(vid, &vsession->negotiated_features) != 0) { 1203 SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid); 1204 goto out; 1205 } 1206 1207 if (rte_vhost_get_mem_table(vid, &vsession->mem) != 0) { 1208 SPDK_ERRLOG("vhost device %d: Failed to get guest memory table\n", vid); 1209 goto out; 1210 } 1211 1212 /* 1213 * Not sure right now but this look like some kind of QEMU bug and guest IO 1214 * might be frozed without kicking all queues after live-migration. This look like 1215 * the previous vhost instance failed to effectively deliver all interrupts before 1216 * the GET_VRING_BASE message. This shouldn't harm guest since spurious interrupts 1217 * should be ignored by guest virtio driver. 1218 * 1219 * Tested on QEMU 2.10.91 and 2.11.50. 1220 */ 1221 for (i = 0; i < vsession->max_queues; i++) { 1222 struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i]; 1223 1224 if (q->vring.desc != NULL && q->vring.size > 0) { 1225 rte_vhost_vring_call(vsession->vid, q->vring_idx); 1226 } 1227 } 1228 1229 spdk_vhost_session_set_coalescing(vdev, vsession, NULL); 1230 spdk_vhost_session_mem_register(vsession); 1231 vsession->initialized = true; 1232 rc = vdev->backend->start_session(vsession); 1233 if (rc != 0) { 1234 spdk_vhost_session_mem_unregister(vsession); 1235 free(vsession->mem); 1236 goto out; 1237 } 1238 1239 out: 1240 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1241 return rc; 1242 } 1243 1244 #ifdef SPDK_CONFIG_VHOST_INTERNAL_LIB 1245 static int 1246 get_config(int vid, uint8_t *config, uint32_t len) 1247 { 1248 struct spdk_vhost_session *vsession; 1249 struct spdk_vhost_dev *vdev; 1250 int rc = -1; 1251 1252 pthread_mutex_lock(&g_spdk_vhost_mutex); 1253 vsession = spdk_vhost_session_find_by_vid(vid); 1254 if (vsession == NULL) { 1255 SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid); 1256 goto out; 1257 } 1258 1259 vdev = vsession->vdev; 1260 if (vdev->backend->vhost_get_config) { 1261 rc = vdev->backend->vhost_get_config(vdev, config, len); 1262 } 1263 1264 out: 1265 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1266 return rc; 1267 } 1268 1269 static int 1270 set_config(int vid, uint8_t *config, uint32_t offset, uint32_t size, uint32_t flags) 1271 { 1272 struct spdk_vhost_session *vsession; 1273 struct spdk_vhost_dev *vdev; 1274 int rc = -1; 1275 1276 pthread_mutex_lock(&g_spdk_vhost_mutex); 1277 vsession = spdk_vhost_session_find_by_vid(vid); 1278 if (vsession == NULL) { 1279 SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid); 1280 goto out; 1281 } 1282 1283 vdev = vsession->vdev; 1284 if (vdev->backend->vhost_set_config) { 1285 rc = vdev->backend->vhost_set_config(vdev, config, offset, size, flags); 1286 } 1287 1288 out: 1289 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1290 return rc; 1291 } 1292 #endif 1293 1294 int 1295 spdk_vhost_set_socket_path(const char *basename) 1296 { 1297 int ret; 1298 1299 if (basename && strlen(basename) > 0) { 1300 ret = snprintf(dev_dirname, sizeof(dev_dirname) - 2, "%s", basename); 1301 if (ret <= 0) { 1302 return -EINVAL; 1303 } 1304 if ((size_t)ret >= sizeof(dev_dirname) - 2) { 1305 SPDK_ERRLOG("Char dev dir path length %d is too long\n", ret); 1306 return -EINVAL; 1307 } 1308 1309 if (dev_dirname[ret - 1] != '/') { 1310 dev_dirname[ret] = '/'; 1311 dev_dirname[ret + 1] = '\0'; 1312 } 1313 } 1314 1315 return 0; 1316 } 1317 1318 void 1319 spdk_vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w) 1320 { 1321 assert(vdev->backend->dump_info_json != NULL); 1322 vdev->backend->dump_info_json(vdev, w); 1323 } 1324 1325 int 1326 spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev) 1327 { 1328 if (vdev->pending_async_op_num) { 1329 return -EBUSY; 1330 } 1331 1332 return vdev->backend->remove_device(vdev); 1333 } 1334 1335 static int 1336 new_connection(int vid) 1337 { 1338 struct spdk_vhost_dev *vdev; 1339 struct spdk_vhost_session *vsession; 1340 char ifname[PATH_MAX]; 1341 1342 pthread_mutex_lock(&g_spdk_vhost_mutex); 1343 1344 if (rte_vhost_get_ifname(vid, ifname, PATH_MAX) < 0) { 1345 SPDK_ERRLOG("Couldn't get a valid ifname for device with vid %d\n", vid); 1346 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1347 return -1; 1348 } 1349 1350 vdev = spdk_vhost_dev_find(ifname); 1351 if (vdev == NULL) { 1352 SPDK_ERRLOG("Couldn't find device with vid %d to create connection for.\n", vid); 1353 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1354 return -1; 1355 } 1356 1357 /* We expect sessions inside vdev->vsessions to be sorted in ascending 1358 * order in regard of vsession->id. For now we always set id = vsessions_cnt++ 1359 * and append each session to the very end of the vsessions list. 1360 * This is required for spdk_vhost_dev_foreach_session() to work. 1361 */ 1362 if (vdev->vsessions_num == UINT_MAX) { 1363 assert(false); 1364 return -EINVAL; 1365 } 1366 1367 if (posix_memalign((void **)&vsession, SPDK_CACHE_LINE_SIZE, sizeof(*vsession) + 1368 vdev->backend->session_ctx_size)) { 1369 SPDK_ERRLOG("vsession alloc failed\n"); 1370 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1371 return -1; 1372 } 1373 memset(vsession, 0, sizeof(*vsession) + vdev->backend->session_ctx_size); 1374 1375 vsession->vdev = vdev; 1376 vsession->vid = vid; 1377 vsession->id = vdev->vsessions_num++; 1378 vsession->name = spdk_sprintf_alloc("%ss%u", vdev->name, vsession->vid); 1379 if (vsession->name == NULL) { 1380 SPDK_ERRLOG("vsession alloc failed\n"); 1381 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1382 free(vsession); 1383 return -1; 1384 } 1385 vsession->poll_group = NULL; 1386 vsession->started = false; 1387 vsession->initialized = false; 1388 vsession->next_stats_check_time = 0; 1389 vsession->stats_check_interval = SPDK_VHOST_STATS_CHECK_INTERVAL_MS * 1390 spdk_get_ticks_hz() / 1000UL; 1391 TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq); 1392 1393 spdk_vhost_session_install_rte_compat_hooks(vsession); 1394 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1395 return 0; 1396 } 1397 1398 static void 1399 destroy_connection(int vid) 1400 { 1401 struct spdk_vhost_session *vsession; 1402 1403 pthread_mutex_lock(&g_spdk_vhost_mutex); 1404 vsession = spdk_vhost_session_find_by_vid(vid); 1405 if (vsession == NULL) { 1406 SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid); 1407 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1408 return; 1409 } 1410 1411 if (vsession->started) { 1412 _stop_session(vsession); 1413 } 1414 1415 TAILQ_REMOVE(&vsession->vdev->vsessions, vsession, tailq); 1416 free(vsession->name); 1417 free(vsession); 1418 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1419 } 1420 1421 void 1422 spdk_vhost_lock(void) 1423 { 1424 pthread_mutex_lock(&g_spdk_vhost_mutex); 1425 } 1426 1427 int 1428 spdk_vhost_trylock(void) 1429 { 1430 return -pthread_mutex_trylock(&g_spdk_vhost_mutex); 1431 } 1432 1433 void 1434 spdk_vhost_unlock(void) 1435 { 1436 pthread_mutex_unlock(&g_spdk_vhost_mutex); 1437 } 1438 1439 static void 1440 vhost_create_poll_group_done(void *ctx) 1441 { 1442 spdk_vhost_init_cb init_cb = ctx; 1443 int ret; 1444 1445 if (TAILQ_EMPTY(&g_poll_groups)) { 1446 /* No threads? Iteration failed? */ 1447 init_cb(-ECHILD); 1448 return; 1449 } 1450 1451 ret = spdk_vhost_scsi_controller_construct(); 1452 if (ret != 0) { 1453 SPDK_ERRLOG("Cannot construct vhost controllers\n"); 1454 goto out; 1455 } 1456 1457 ret = spdk_vhost_blk_controller_construct(); 1458 if (ret != 0) { 1459 SPDK_ERRLOG("Cannot construct vhost block controllers\n"); 1460 goto out; 1461 } 1462 1463 #ifdef SPDK_CONFIG_VHOST_INTERNAL_LIB 1464 ret = spdk_vhost_nvme_controller_construct(); 1465 if (ret != 0) { 1466 SPDK_ERRLOG("Cannot construct vhost NVMe controllers\n"); 1467 goto out; 1468 } 1469 #endif 1470 1471 out: 1472 init_cb(ret); 1473 } 1474 1475 static void 1476 vhost_create_poll_group(void *ctx) 1477 { 1478 struct vhost_poll_group *pg; 1479 1480 pg = calloc(1, sizeof(*pg)); 1481 if (!pg) { 1482 SPDK_ERRLOG("Not enough memory to allocate poll groups\n"); 1483 spdk_app_stop(-ENOMEM); 1484 return; 1485 } 1486 1487 pg->thread = spdk_get_thread(); 1488 TAILQ_INSERT_TAIL(&g_poll_groups, pg, tailq); 1489 } 1490 1491 void 1492 spdk_vhost_init(spdk_vhost_init_cb init_cb) 1493 { 1494 size_t len; 1495 int ret; 1496 1497 g_vhost_init_thread = spdk_get_thread(); 1498 assert(g_vhost_init_thread != NULL); 1499 1500 if (dev_dirname[0] == '\0') { 1501 if (getcwd(dev_dirname, sizeof(dev_dirname) - 1) == NULL) { 1502 SPDK_ERRLOG("getcwd failed (%d): %s\n", errno, spdk_strerror(errno)); 1503 ret = -1; 1504 goto err_out; 1505 } 1506 1507 len = strlen(dev_dirname); 1508 if (dev_dirname[len - 1] != '/') { 1509 dev_dirname[len] = '/'; 1510 dev_dirname[len + 1] = '\0'; 1511 } 1512 } 1513 1514 g_tmp_cpuset = spdk_cpuset_alloc(); 1515 if (g_tmp_cpuset == NULL) { 1516 ret = -1; 1517 goto err_out; 1518 } 1519 1520 ret = sem_init(&g_dpdk_sem, 0, 0); 1521 if (ret != 0) { 1522 SPDK_ERRLOG("Failed to initialize semaphore for rte_vhost pthread.\n"); 1523 spdk_cpuset_free(g_tmp_cpuset); 1524 ret = -1; 1525 goto err_out; 1526 } 1527 1528 spdk_for_each_thread(vhost_create_poll_group, 1529 init_cb, 1530 vhost_create_poll_group_done); 1531 return; 1532 err_out: 1533 init_cb(ret); 1534 } 1535 1536 static void 1537 _spdk_vhost_fini(void *arg1) 1538 { 1539 struct spdk_vhost_dev *vdev, *tmp; 1540 struct vhost_poll_group *pg, *tpg; 1541 1542 spdk_vhost_lock(); 1543 vdev = spdk_vhost_dev_next(NULL); 1544 while (vdev != NULL) { 1545 tmp = spdk_vhost_dev_next(vdev); 1546 spdk_vhost_dev_remove(vdev); 1547 /* don't care if it fails, there's nothing we can do for now */ 1548 vdev = tmp; 1549 } 1550 spdk_vhost_unlock(); 1551 1552 /* All devices are removed now. */ 1553 sem_destroy(&g_dpdk_sem); 1554 spdk_cpuset_free(g_tmp_cpuset); 1555 TAILQ_FOREACH_SAFE(pg, &g_poll_groups, tailq, tpg) { 1556 TAILQ_REMOVE(&g_poll_groups, pg, tailq); 1557 free(pg); 1558 } 1559 g_fini_cpl_cb(); 1560 } 1561 1562 static void * 1563 session_shutdown(void *arg) 1564 { 1565 struct spdk_vhost_dev *vdev = NULL; 1566 1567 TAILQ_FOREACH(vdev, &g_spdk_vhost_devices, tailq) { 1568 rte_vhost_driver_unregister(vdev->path); 1569 vdev->registered = false; 1570 } 1571 1572 SPDK_INFOLOG(SPDK_LOG_VHOST, "Exiting\n"); 1573 spdk_thread_send_msg(g_vhost_init_thread, _spdk_vhost_fini, NULL); 1574 return NULL; 1575 } 1576 1577 void 1578 spdk_vhost_fini(spdk_vhost_fini_cb fini_cb) 1579 { 1580 pthread_t tid; 1581 int rc; 1582 1583 assert(spdk_get_thread() == g_vhost_init_thread); 1584 g_fini_cpl_cb = fini_cb; 1585 1586 /* rte_vhost API for removing sockets is not asynchronous. Since it may call SPDK 1587 * ops for stopping a device or removing a connection, we need to call it from 1588 * a separate thread to avoid deadlock. 1589 */ 1590 rc = pthread_create(&tid, NULL, &session_shutdown, NULL); 1591 if (rc < 0) { 1592 SPDK_ERRLOG("Failed to start session shutdown thread (%d): %s\n", rc, spdk_strerror(rc)); 1593 abort(); 1594 } 1595 pthread_detach(tid); 1596 } 1597 1598 void 1599 spdk_vhost_config_json(struct spdk_json_write_ctx *w) 1600 { 1601 struct spdk_vhost_dev *vdev; 1602 uint32_t delay_base_us; 1603 uint32_t iops_threshold; 1604 1605 spdk_json_write_array_begin(w); 1606 1607 spdk_vhost_lock(); 1608 vdev = spdk_vhost_dev_next(NULL); 1609 while (vdev != NULL) { 1610 vdev->backend->write_config_json(vdev, w); 1611 1612 spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold); 1613 if (delay_base_us) { 1614 spdk_json_write_object_begin(w); 1615 spdk_json_write_named_string(w, "method", "set_vhost_controller_coalescing"); 1616 1617 spdk_json_write_named_object_begin(w, "params"); 1618 spdk_json_write_named_string(w, "ctrlr", vdev->name); 1619 spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us); 1620 spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold); 1621 spdk_json_write_object_end(w); 1622 1623 spdk_json_write_object_end(w); 1624 } 1625 vdev = spdk_vhost_dev_next(vdev); 1626 } 1627 spdk_vhost_unlock(); 1628 1629 spdk_json_write_array_end(w); 1630 } 1631 1632 SPDK_LOG_REGISTER_COMPONENT("vhost", SPDK_LOG_VHOST) 1633 SPDK_LOG_REGISTER_COMPONENT("vhost_ring", SPDK_LOG_VHOST_RING) 1634