/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/barrier.h"
#include "spdk/vhost.h"
#include "vhost_internal.h"

bool g_packed_ring_recovery = false;

static struct spdk_cpuset g_vhost_core_mask;

/* Path to folder where character device will be created. Can be set by user. */
static char dev_dirname[PATH_MAX] = "";

/* Thread performing all vhost management operations */
static struct spdk_thread *g_vhost_init_thread;

static spdk_vhost_fini_cb g_fini_cpl_cb;

/**
 * DPDK calls our callbacks synchronously but the work those callbacks
 * perform needs to be async. Luckily, all DPDK callbacks are called on
 * a DPDK-internal pthread, so we'll just wait on a semaphore in there.
 */
static sem_t g_dpdk_sem;

/** Return code for the current DPDK callback */
static int g_dpdk_response;
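
/*
 * Illustrative flow of that handshake (a summary of the mechanism used by
 * vhost_session_send_event() further below, not additional behavior):
 *
 *   DPDK-internal pthread                  SPDK thread
 *   ---------------------                  -----------
 *   post work via spdk_thread_send_msg()
 *   sem_timedwait(&g_dpdk_sem)        -->  run the session callback
 *                                          g_dpdk_response = rc
 *                                     <--  sem_post(&g_dpdk_sem)
 *   return g_dpdk_response
 */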

struct vhost_session_fn_ctx {
	/** Device pointer obtained before enqueueing the event */
	struct spdk_vhost_dev *vdev;

	/** ID of the session to send event to. */
	uint32_t vsession_id;

	/** User provided function to be executed on session's thread. */
	spdk_vhost_session_fn cb_fn;

	/**
	 * User provided function to be called on the init thread
	 * after iterating through all sessions.
	 */
	spdk_vhost_dev_fn cpl_fn;

	/** Custom user context */
	void *user_ctx;
};

static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

void *
vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len)
{
	void *vva;
	uint64_t newlen;

	newlen = len;
	vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &newlen);
	if (newlen != len) {
		return NULL;
	}

	return vva;
}
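
/*
 * Example of the contiguity check above (hypothetical addresses): if a guest
 * buffer at guest-physical address 0x1000 with len 8192 straddles two
 * non-adjacent host mappings, rte_vhost_va_from_guest_pa() shrinks newlen to
 * the contiguous part (e.g. 4096). Since newlen != len, vhost_gpa_to_vva()
 * returns NULL and the caller must fall back to per-region translation such
 * as vhost_vring_desc_payload_to_iov() below.
 */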

static void
vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		   uint16_t req_id)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size;
	int rc;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	rc = vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Can't log used ring descriptors!\n");
		return;
	}

	do {
		if (vhost_vring_desc_is_wr(desc)) {
			/* To be precise, only the pages actually touched should be logged,
			 * but tracking those writes would require support in each backend.
			 * The backend will most likely touch all/most of those pages anyway,
			 * so let's assume we touched all pages passed to us as writable buffers.
			 */
			rte_vhost_log_write(vsession->vid, desc->addr, desc->len);
		}
		vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	} while (desc);
}

static void
vhost_log_used_vring_elem(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue,
			  uint16_t idx)
{
	uint64_t offset, len;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	if (spdk_unlikely(virtqueue->packed.packed_ring)) {
		offset = idx * sizeof(struct vring_packed_desc);
		len = sizeof(struct vring_packed_desc);
	} else {
		offset = offsetof(struct vring_used, ring[idx]);
		len = sizeof(virtqueue->vring.used->ring[idx]);
	}

	rte_vhost_log_used_vring(vsession->vid, virtqueue->vring_idx, offset, len);
}

static void
vhost_log_used_vring_idx(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue)
{
	uint64_t offset, len;
	uint16_t vq_idx;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	offset = offsetof(struct vring_used, idx);
	len = sizeof(virtqueue->vring.used->idx);
	vq_idx = virtqueue - vsession->virtqueue;

	rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len);
}

/*
 * Get available requests from the avail ring.
 */
uint16_t
vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs,
			uint16_t reqs_len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_avail *avail = vring->avail;
	uint16_t size_mask = vring->size - 1;
	uint16_t last_idx = virtqueue->last_avail_idx, avail_idx = avail->idx;
	uint16_t count, i;
	int rc;
	uint64_t u64_value;

	spdk_smp_rmb();

	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
		/* Read to clear vring's kickfd */
		rc = read(vring->kickfd, &u64_value, sizeof(u64_value));
		if (rc < 0) {
			/* This function returns an unsigned count, so treat a failed
			 * acknowledgement as "no new entries" instead of returning -errno.
			 */
			SPDK_ERRLOG("failed to acknowledge kickfd: %s.\n", spdk_strerror(errno));
			return 0;
		}
	}

	count = avail_idx - last_idx;
	if (spdk_likely(count == 0)) {
		return 0;
	}

	if (spdk_unlikely(count > vring->size)) {
		/* TODO: the queue is unrecoverably broken and should be marked so.
		 * For now we will fail silently and report there are no new avail entries.
		 */
		return 0;
	}

	count = spdk_min(count, reqs_len);

	virtqueue->last_avail_idx += count;
	/* Check whether there are unprocessed reqs in vq, then kick vq manually */
	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
		/* If avail_idx is larger than virtqueue's last_avail_idx, there are
		 * unprocessed reqs. avail_idx should be re-read from memory here,
		 * in case of a race with the guest.
		 */
		avail_idx = *(volatile uint16_t *) &avail->idx;
		if (avail_idx > virtqueue->last_avail_idx) {
			/* Write to notify vring's kickfd */
			rc = write(vring->kickfd, &u64_value, sizeof(u64_value));
			if (rc < 0) {
				/* The self-kick is best-effort; the reqs gathered below
				 * are still valid, so only log the failure.
				 */
				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
			}
		}
	}

	for (i = 0; i < count; i++) {
		reqs[i] = vring->avail->ring[(last_idx + i) & size_mask];
	}

	SPDK_DEBUGLOG(vhost_ring,
		      "AVAIL: last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
		      last_idx, avail_idx, count);

	return count;
}
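
/*
 * Worked example of the free-running index arithmetic above: avail->idx and
 * last_avail_idx are uint16_t values that wrap naturally. With
 * last_idx = 65534 and avail_idx = 1, count = (uint16_t)(1 - 65534) = 3, and
 * the slots read are (65534 & size_mask), (65535 & size_mask) and
 * (0 & size_mask). Only count > vring->size indicates a broken queue.
 */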

static bool
vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
{
	return !!(cur_desc->flags & VRING_DESC_F_INDIRECT);
}

static bool
vhost_vring_packed_desc_is_indirect(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
}

static bool
vhost_inflight_packed_desc_is_indirect(spdk_vhost_inflight_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
}

int
vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		  uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		  uint32_t *desc_table_size)
{
	if (spdk_unlikely(req_idx >= virtqueue->vring.size)) {
		return -1;
	}

	*desc = &virtqueue->vring.desc[req_idx];

	if (vhost_vring_desc_is_indirect(*desc)) {
		*desc_table_size = (*desc)->len / sizeof(**desc);
		*desc_table = vhost_gpa_to_vva(vsession, (*desc)->addr,
					       sizeof(**desc) * *desc_table_size);
		*desc = *desc_table;
		if (*desc == NULL) {
			return -1;
		}

		return 0;
	}

	*desc_table = virtqueue->vring.desc;
	*desc_table_size = virtqueue->vring.size;

	return 0;
}

static bool
vhost_packed_desc_indirect_to_desc_table(struct spdk_vhost_session *vsession,
		uint64_t addr, uint32_t len,
		struct vring_packed_desc **desc_table,
		uint32_t *desc_table_size)
{
	*desc_table_size = len / sizeof(struct vring_packed_desc);

	*desc_table = vhost_gpa_to_vva(vsession, addr, len);
	if (spdk_unlikely(*desc_table == NULL)) {
		return false;
	}

	return true;
}

int
vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue,
			 uint16_t req_idx, struct vring_packed_desc **desc,
			 struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &virtqueue->vring.desc_packed[req_idx];

	/* In a packed ring, when the desc is non-indirect we get the next desc
	 * by checking (desc->flags & VRING_DESC_F_NEXT) != 0. When the desc is
	 * indirect we get the next desc by idx and desc_table_size. This differs
	 * from the split ring.
	 */
	if (vhost_vring_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		*desc = *desc_table;
	} else {
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}
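
/*
 * Worked example for the indirect paths above (hypothetical values): a
 * descriptor with VRING_DESC_F_INDIRECT set and len = 256 points at a table
 * of 256 / sizeof(struct vring_desc) = 256 / 16 = 16 descriptors. The whole
 * 256-byte table must be contiguous in host memory, otherwise
 * vhost_gpa_to_vva() returns NULL and the request fails.
 */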

int
vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
			      spdk_vhost_inflight_desc *desc_array,
			      uint16_t req_idx, spdk_vhost_inflight_desc **desc,
			      struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &desc_array[req_idx];

	if (vhost_inflight_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		/* This desc is an inflight desc, not a packed desc. When F_INDIRECT
		 * is set, the table entries are packed descs, so set the inflight
		 * desc to NULL.
		 */
		*desc = NULL;
	} else {
		/* When F_INDIRECT is not set, there is no packed desc table. */
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}

int
vhost_vq_used_signal(struct spdk_vhost_session *vsession,
		     struct spdk_vhost_virtqueue *virtqueue)
{
	if (virtqueue->used_req_cnt == 0) {
		return 0;
	}

	virtqueue->req_cnt += virtqueue->used_req_cnt;
	virtqueue->used_req_cnt = 0;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx);

	if (rte_vhost_vring_call(vsession->vid, virtqueue->vring_idx) == 0) {
		/* interrupt signalled */
		return 1;
	} else {
		/* interrupt not signalled */
		return 0;
	}
}

static void
session_vq_io_stats_update(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	uint32_t irq_delay_base = vsession->coalescing_delay_time_base;
	uint32_t io_threshold = vsession->coalescing_io_rate_threshold;
	int32_t irq_delay;
	uint32_t req_cnt;

	req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
	if (req_cnt <= io_threshold) {
		return;
	}

	irq_delay = (irq_delay_base * (req_cnt - io_threshold)) / io_threshold;
	virtqueue->irq_delay_time = (uint32_t) spdk_max(0, irq_delay);

	virtqueue->req_cnt = 0;
	virtqueue->next_event_time = now;
}
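
/*
 * Worked example of the adaptive delay above (hypothetical numbers): with
 * io_threshold = 600 and irq_delay_base = 200000 ticks, a stats window that
 * saw req_cnt = 900 requests yields
 *   irq_delay = 200000 * (900 - 600) / 600 = 100000 ticks,
 * i.e. the busier the queue, the longer interrupts are held back.
 */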

static void
check_session_vq_io_stats(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	if (now < vsession->next_stats_check_time) {
		return;
	}

	vsession->next_stats_check_time = now + vsession->stats_check_interval;
	session_vq_io_stats_update(vsession, virtqueue, now);
}

static inline bool
vhost_vq_event_is_suppressed(struct spdk_vhost_virtqueue *vq)
{
	if (spdk_unlikely(vq->packed.packed_ring)) {
		if (vq->vring.driver_event->flags & VRING_PACKED_EVENT_FLAG_DISABLE) {
			return true;
		}
	} else {
		if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
			return true;
		}
	}

	return false;
}

void
vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue)
{
	struct spdk_vhost_session *vsession = virtqueue->vsession;
	uint64_t now;

	if (vsession->coalescing_delay_time_base == 0) {
		if (virtqueue->vring.desc == NULL) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	} else {
		now = spdk_get_ticks();
		check_session_vq_io_stats(vsession, virtqueue, now);

		/* No need for an event right now */
		if (now < virtqueue->next_event_time) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		if (!vhost_vq_used_signal(vsession, virtqueue)) {
			return;
		}

		/* The syscall is quite long, so update the time */
		now = spdk_get_ticks();
		virtqueue->next_event_time = now + virtqueue->irq_delay_time;
	}
}

void
vhost_session_used_signal(struct spdk_vhost_session *vsession)
{
	struct spdk_vhost_virtqueue *virtqueue;
	uint16_t q_idx;

	for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
		virtqueue = &vsession->virtqueue[q_idx];
		vhost_session_vq_used_signal(virtqueue);
	}
}

static int
vhost_session_set_coalescing(struct spdk_vhost_dev *vdev,
			     struct spdk_vhost_session *vsession, void *ctx)
{
	vsession->coalescing_delay_time_base =
		vdev->coalescing_delay_us * spdk_get_ticks_hz() / 1000000ULL;
	vsession->coalescing_io_rate_threshold =
		vdev->coalescing_iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U;
	return 0;
}

static int
vhost_dev_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			 uint32_t iops_threshold)
{
	uint64_t delay_time_base = delay_base_us * spdk_get_ticks_hz() / 1000000ULL;
	uint32_t io_rate = iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U;

	if (delay_time_base >= UINT32_MAX) {
		SPDK_ERRLOG("Delay time of %"PRIu32" is too big\n", delay_base_us);
		return -EINVAL;
	} else if (io_rate == 0) {
		SPDK_ERRLOG("IOPS rate of %"PRIu32" is too low. Min is %u\n", io_rate,
			    1000U / SPDK_VHOST_STATS_CHECK_INTERVAL_MS);
		return -EINVAL;
	}

	vdev->coalescing_delay_us = delay_base_us;
	vdev->coalescing_iops_threshold = iops_threshold;
	return 0;
}

int
spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			  uint32_t iops_threshold)
{
	int rc;

	rc = vhost_dev_set_coalescing(vdev, delay_base_us, iops_threshold);
	if (rc != 0) {
		return rc;
	}

	vhost_dev_foreach_session(vdev, vhost_session_set_coalescing, NULL, NULL);
	return 0;
}

void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			  uint32_t *iops_threshold)
{
	if (delay_base_us) {
		*delay_base_us = vdev->coalescing_delay_us;
	}

	if (iops_threshold) {
		*iops_threshold = vdev->coalescing_iops_threshold;
	}
}
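
/*
 * Worked example of the unit conversions above (hypothetical numbers,
 * assuming SPDK_VHOST_STATS_CHECK_INTERVAL_MS is 10 and a 2 GHz tick rate):
 * delay_base_us = 100 gives delay_time_base = 100 * 2000000000 / 1000000 =
 * 200000 ticks, and iops_threshold = 60000 gives io_rate = 60000 * 10 / 1000
 * = 600 requests per 10 ms stats window.
 */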

/*
 * Enqueue id and len to the used ring.
 */
void
vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue,
			   uint16_t id, uint32_t len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_used *used = vring->used;
	uint16_t last_idx = virtqueue->last_used_idx & (vring->size - 1);
	uint16_t vq_idx = virtqueue->vring_idx;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx, id, len);

	vhost_log_req_desc(vsession, virtqueue, id);

	virtqueue->last_used_idx++;
	used->ring[last_idx].id = id;
	used->ring[last_idx].len = len;

	/* Ensure the used ring is updated before we log it or increment used->idx. */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_split(vsession->vid, vq_idx, id);

	vhost_log_used_vring_elem(vsession, virtqueue, last_idx);
	*(volatile uint16_t *) &used->idx = virtqueue->last_used_idx;
	vhost_log_used_vring_idx(vsession, virtqueue);

	rte_vhost_clr_inflight_desc_split(vsession->vid, vq_idx, virtqueue->last_used_idx, id);

	virtqueue->used_req_cnt++;

	if (vsession->interrupt_mode) {
		if (virtqueue->vring.desc == NULL || vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	}
}

void
vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t num_descs, uint16_t buffer_id,
			     uint32_t length, uint16_t inflight_head)
{
	struct vring_packed_desc *desc = &virtqueue->vring.desc_packed[virtqueue->last_used_idx];
	bool used, avail;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - RING: buffer_id=%"PRIu16"\n",
		      virtqueue - vsession->virtqueue, buffer_id);

	/* When a descriptor has been used, its avail flag and used flag are
	 * set to equal values, and the used flag equals used_wrap_counter.
	 */
	used = !!(desc->flags & VRING_DESC_F_USED);
	avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	if (spdk_unlikely(used == virtqueue->packed.used_phase && used == avail)) {
		SPDK_ERRLOG("descriptor has been used before\n");
		return;
	}

	/* In a used desc, addr is unused and len specifies the buffer length
	 * that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = length;

	/* This bit specifies whether any data has been written by the device */
	if (length != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_packed(vsession->vid, virtqueue->vring_idx, inflight_head);
	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (virtqueue->packed.used_phase) {
		desc->flags |= VRING_DESC_F_AVAIL_USED;
	} else {
		desc->flags &= ~VRING_DESC_F_AVAIL_USED;
	}
	rte_vhost_clr_inflight_desc_packed(vsession->vid, virtqueue->vring_idx, inflight_head);

	vhost_log_used_vring_elem(vsession, virtqueue, virtqueue->last_used_idx);
	virtqueue->last_used_idx += num_descs;
	if (virtqueue->last_used_idx >= virtqueue->vring.size) {
		virtqueue->last_used_idx -= virtqueue->vring.size;
		virtqueue->packed.used_phase = !virtqueue->packed.used_phase;
	}

	virtqueue->used_req_cnt++;
}
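
/*
 * Worked example of the phase handling above (per the virtio 1.1 spec, both
 * ring wrap counters start at 1 on a freshly started ring): while used_phase
 * is 1, marking a descriptor used sets both F_AVAIL and F_USED
 * (VRING_DESC_F_AVAIL_USED). Once last_used_idx wraps past vring.size,
 * used_phase flips to 0 and marking a descriptor used clears both bits
 * instead. The driver detects completion by comparing F_USED against its own
 * wrap counter, never against an absolute index.
 */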

bool
vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue)
{
	uint16_t flags = virtqueue->vring.desc_packed[virtqueue->last_avail_idx].flags;

	/* To mark a desc as available, the driver sets the F_AVAIL bit in flags
	 * to match the internal avail wrap counter. It also sets the F_USED bit
	 * to match the inverse value, but that is not mandatory.
	 */
	return (!!(flags & VRING_DESC_F_AVAIL) == virtqueue->packed.avail_phase);
}

bool
vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}

bool
vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}

int
vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				 struct spdk_vhost_virtqueue *vq,
				 struct vring_packed_desc *desc_table,
				 uint32_t desc_table_size)
{
	if (desc_table != NULL) {
		/* A non-NULL desc_table means the request is indirect and we get
		 * the next desc from req_idx and desc_table_size. Returning a NULL
		 * desc means we reached the last desc of this request.
		 */
		(*req_idx)++;
		if (*req_idx < desc_table_size) {
			*desc = &desc_table[*req_idx];
		} else {
			*desc = NULL;
		}
	} else {
		/* A NULL desc_table means the request is non-indirect and we get
		 * the next desc from req_idx and the F_NEXT flag. Returning a NULL
		 * desc means we reached the last desc of this request. When we
		 * return a new desc, we also update req_idx.
		 */
		if (((*desc)->flags & VRING_DESC_F_NEXT) == 0) {
			*desc = NULL;
			return 0;
		}

		*req_idx = (*req_idx + 1) % vq->vring.size;
		*desc = &vq->vring.desc_packed[*req_idx];
	}

	return 0;
}

static int
vhost_vring_desc_payload_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				uint16_t *iov_index, uintptr_t payload, uint64_t remaining)
{
	uintptr_t vva;
	uint64_t len;

	do {
		if (*iov_index >= SPDK_VHOST_IOVS_MAX) {
			SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
			return -1;
		}
		len = remaining;
		vva = (uintptr_t)rte_vhost_va_from_guest_pa(vsession->mem, payload, &len);
		if (vva == 0 || len == 0) {
			SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
			return -1;
		}
		iov[*iov_index].iov_base = (void *)vva;
		iov[*iov_index].iov_len = len;
		remaining -= len;
		payload += len;
		(*iov_index)++;
	} while (remaining);

	return 0;
}

int
vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			       uint16_t *iov_index, const struct vring_packed_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

int
vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				 uint16_t *iov_index, const spdk_vhost_inflight_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}
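
/*
 * Example of the splitting behavior above (hypothetical layout): a 16 KiB
 * payload whose guest-physical range spans two host memory regions comes back
 * as two iovecs, e.g. iov[0] = {vva0, 12288} and iov[1] = {vva1, 4096}. The
 * loop returns -1 only when a translation fails or more than
 * SPDK_VHOST_IOVS_MAX iovecs would be needed.
 */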

/* 1. Traverse the desc chain to get the buffer_id and return buffer_id as task_idx.
 * 2. Update vq->last_avail_idx to point to the next available desc chain.
 * 3. Toggle the avail_wrap_counter if last_avail_idx wraps around.
 */
uint16_t
vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
				      uint16_t *num_descs)
{
	struct vring_packed_desc *desc;
	uint16_t desc_head = req_idx;

	*num_descs = 1;

	desc = &vq->vring.desc_packed[req_idx];
	if (!vhost_vring_packed_desc_is_indirect(desc)) {
		while ((desc->flags & VRING_DESC_F_NEXT) != 0) {
			req_idx = (req_idx + 1) % vq->vring.size;
			desc = &vq->vring.desc_packed[req_idx];
			(*num_descs)++;
		}
	}

	/* The queue size doesn't have to be a power of 2.
	 * The device maintains last_avail_idx, so we can make sure
	 * the value stays valid (0 to vring.size - 1).
	 */
	vq->last_avail_idx = (req_idx + 1) % vq->vring.size;
	if (vq->last_avail_idx < desc_head) {
		vq->packed.avail_phase = !vq->packed.avail_phase;
	}

	return desc->id;
}

int
vhost_vring_desc_get_next(struct vring_desc **desc,
			  struct vring_desc *desc_table, uint32_t desc_table_size)
{
	struct vring_desc *old_desc = *desc;
	uint16_t next_idx;

	if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
		*desc = NULL;
		return 0;
	}

	next_idx = old_desc->next;
	if (spdk_unlikely(next_idx >= desc_table_size)) {
		*desc = NULL;
		return -1;
	}

	*desc = &desc_table[next_idx];
	return 0;
}

int
vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			uint16_t *iov_index, const struct vring_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

static struct spdk_vhost_session *
vhost_session_find_by_id(struct spdk_vhost_dev *vdev, unsigned id)
{
	struct spdk_vhost_session *vsession;

	TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
		if (vsession->id == id) {
			return vsession;
		}
	}

	return NULL;
}

struct spdk_vhost_session *
vhost_session_find_by_vid(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;

	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
		TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
			if (vsession->vid == vid) {
				return vsession;
			}
		}
	}

	return NULL;
}

struct spdk_vhost_dev *
spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
{
	if (vdev == NULL) {
		return TAILQ_FIRST(&g_vhost_devices);
	}

	return TAILQ_NEXT(vdev, tailq);
}

struct spdk_vhost_dev *
spdk_vhost_dev_find(const char *ctrlr_name)
{
	struct spdk_vhost_dev *vdev;
	size_t dev_dirname_len = strlen(dev_dirname);

	if (strncmp(ctrlr_name, dev_dirname, dev_dirname_len) == 0) {
		ctrlr_name += dev_dirname_len;
	}

	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
		if (strcmp(vdev->name, ctrlr_name) == 0) {
			return vdev;
		}
	}

	return NULL;
}
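
/*
 * Example of the prefix handling above: with dev_dirname = "/var/tmp/", both
 * spdk_vhost_dev_find("vhost.0") and spdk_vhost_dev_find("/var/tmp/vhost.0")
 * resolve to the controller registered under the name "vhost.0".
 */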

static int
vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int rc;
	struct spdk_cpuset negative_vhost_mask;

	if (cpumask == NULL) {
		return -1;
	}

	if (mask == NULL) {
		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
		return 0;
	}

	rc = spdk_cpuset_parse(cpumask, mask);
	if (rc < 0) {
		SPDK_ERRLOG("invalid cpumask %s\n", mask);
		return -1;
	}

	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
	spdk_cpuset_negate(&negative_vhost_mask);
	spdk_cpuset_and(&negative_vhost_mask, cpumask);

	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
		SPDK_ERRLOG("one of the selected cpus is outside of the core mask(=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	spdk_cpuset_and(cpumask, &g_vhost_core_mask);

	if (spdk_cpuset_count(cpumask) == 0) {
		SPDK_ERRLOG("no cpu is selected from the core mask(=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	return 0;
}
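
/*
 * Example of the validation above (hypothetical masks): with
 * g_vhost_core_mask = 0xF (cores 0-3), parsing "0x3" succeeds and leaves
 * cpumask = 0x3, while "0x30" fails because cores 4-5 lie outside the vhost
 * core mask and therefore end up in negative_vhost_mask.
 */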

static void
vhost_dev_thread_exit(void *arg1)
{
	spdk_thread_exit(spdk_get_thread());
}

int
vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		   const struct spdk_vhost_dev_backend *backend)
{
	char path[PATH_MAX];
	struct spdk_cpuset cpumask = {};
	int rc;

	assert(vdev);
	if (name == NULL) {
		SPDK_ERRLOG("Can't register controller with no name\n");
		return -EINVAL;
	}

	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
		return -EINVAL;
	}

	if (spdk_vhost_dev_find(name)) {
		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
		return -EEXIST;
	}

	if (snprintf(path, sizeof(path), "%s%s", dev_dirname, name) >= (int)sizeof(path)) {
		SPDK_ERRLOG("Resulting socket path for controller %s is too long: %s%s\n",
			    name, dev_dirname, name);
		return -EINVAL;
	}

	vdev->name = strdup(name);
	vdev->path = strdup(path);
	if (vdev->name == NULL || vdev->path == NULL) {
		rc = -EIO;
		goto out;
	}

	vdev->thread = spdk_thread_create(vdev->name, &cpumask);
	if (vdev->thread == NULL) {
		SPDK_ERRLOG("Failed to create thread for vhost controller %s.\n", name);
		rc = -EIO;
		goto out;
	}

	vdev->registered = true;
	vdev->backend = backend;
	TAILQ_INIT(&vdev->vsessions);

	vhost_dev_set_coalescing(vdev, SPDK_VHOST_COALESCING_DELAY_BASE_US,
				 SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD);

	if (vhost_register_unix_socket(path, name, vdev->virtio_features, vdev->disabled_features,
				       vdev->protocol_features)) {
		spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);
		rc = -EIO;
		goto out;
	}

	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);

	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
	return 0;

out:
	free(vdev->name);
	free(vdev->path);
	return rc;
}

int
vhost_dev_unregister(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		SPDK_ERRLOG("Controller %s still has valid connections.\n", vdev->name);
		return -EBUSY;
	}

	if (vdev->registered && vhost_driver_unregister(vdev->path) != 0) {
		SPDK_ERRLOG("Could not unregister controller %s with vhost library\n"
			    "Check if domain socket %s still exists\n",
			    vdev->name, vdev->path);
		return -EIO;
	}

	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);

	spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);

	free(vdev->name);
	free(vdev->path);
	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
	return 0;
}

const char *
spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->name;
}

const struct spdk_cpuset *
spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return spdk_thread_get_cpumask(vdev->thread);
}

static void
wait_for_semaphore(int timeout_sec, const char *errmsg)
{
	struct timespec timeout;
	int rc;

	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_sec += timeout_sec;
	rc = sem_timedwait(&g_dpdk_sem, &timeout);
	if (rc != 0) {
		SPDK_ERRLOG("Timeout waiting for event: %s.\n", errmsg);
		sem_wait(&g_dpdk_sem);
	}
}

static void
vhost_session_cb_done(int rc)
{
	g_dpdk_response = rc;
	sem_post(&g_dpdk_sem);
}

void
vhost_session_start_done(struct spdk_vhost_session *vsession, int response)
{
	if (response == 0) {
		vsession->started = true;

		assert(vsession->vdev->active_session_num < UINT32_MAX);
		vsession->vdev->active_session_num++;
	}

	vhost_session_cb_done(response);
}

void
vhost_session_stop_done(struct spdk_vhost_session *vsession, int response)
{
	if (response == 0) {
		vsession->started = false;

		assert(vsession->vdev->active_session_num > 0);
		vsession->vdev->active_session_num--;
	}

	vhost_session_cb_done(response);
}

static void
vhost_event_cb(void *arg1)
{
	struct vhost_session_fn_ctx *ctx = arg1;
	struct spdk_vhost_session *vsession;

	if (pthread_mutex_trylock(&g_vhost_mutex) != 0) {
		spdk_thread_send_msg(spdk_get_thread(), vhost_event_cb, arg1);
		return;
	}

	vsession = vhost_session_find_by_id(ctx->vdev, ctx->vsession_id);
	ctx->cb_fn(ctx->vdev, vsession, NULL);
	pthread_mutex_unlock(&g_vhost_mutex);
}

int
vhost_session_send_event(struct spdk_vhost_session *vsession,
			 spdk_vhost_session_fn cb_fn, unsigned timeout_sec,
			 const char *errmsg)
{
	struct vhost_session_fn_ctx ev_ctx = {0};
	struct spdk_vhost_dev *vdev = vsession->vdev;

	ev_ctx.vdev = vdev;
	ev_ctx.vsession_id = vsession->id;
	ev_ctx.cb_fn = cb_fn;

	spdk_thread_send_msg(vdev->thread, vhost_event_cb, &ev_ctx);

	pthread_mutex_unlock(&g_vhost_mutex);
	wait_for_semaphore(timeout_sec, errmsg);
	pthread_mutex_lock(&g_vhost_mutex);

	return g_dpdk_response;
}
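
/*
 * Sequence sketch for vhost_session_send_event() (a summary of the code
 * above, not additional behavior): the caller holds g_vhost_mutex; the event
 * is posted to the device thread; the mutex is dropped so vhost_event_cb()
 * can take it; the DPDK pthread blocks on g_dpdk_sem until the session
 * callback reports completion via vhost_session_start_done() or
 * vhost_session_stop_done(); and the mutex is re-acquired before returning
 * g_dpdk_response.
 */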

static void
foreach_session_finish_cb(void *arg1)
{
	struct vhost_session_fn_ctx *ev_ctx = arg1;
	struct spdk_vhost_dev *vdev = ev_ctx->vdev;

	if (pthread_mutex_trylock(&g_vhost_mutex) != 0) {
		spdk_thread_send_msg(spdk_get_thread(),
				     foreach_session_finish_cb, arg1);
		return;
	}

	assert(vdev->pending_async_op_num > 0);
	vdev->pending_async_op_num--;
	if (ev_ctx->cpl_fn != NULL) {
		ev_ctx->cpl_fn(vdev, ev_ctx->user_ctx);
	}

	pthread_mutex_unlock(&g_vhost_mutex);
	free(ev_ctx);
}

static void
foreach_session(void *arg1)
{
	struct vhost_session_fn_ctx *ev_ctx = arg1;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_dev *vdev = ev_ctx->vdev;
	int rc;

	if (pthread_mutex_trylock(&g_vhost_mutex) != 0) {
		spdk_thread_send_msg(spdk_get_thread(), foreach_session, arg1);
		return;
	}

	TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
		if (vsession->initialized) {
			rc = ev_ctx->cb_fn(vdev, vsession, ev_ctx->user_ctx);
			if (rc < 0) {
				goto out;
			}
		}
	}

out:
	pthread_mutex_unlock(&g_vhost_mutex);

	spdk_thread_send_msg(g_vhost_init_thread, foreach_session_finish_cb, arg1);
}

void
vhost_dev_foreach_session(struct spdk_vhost_dev *vdev,
			  spdk_vhost_session_fn fn,
			  spdk_vhost_dev_fn cpl_fn,
			  void *arg)
{
	struct vhost_session_fn_ctx *ev_ctx;

	ev_ctx = calloc(1, sizeof(*ev_ctx));
	if (ev_ctx == NULL) {
		SPDK_ERRLOG("Failed to alloc vhost event.\n");
		assert(false);
		return;
	}

	ev_ctx->vdev = vdev;
	ev_ctx->cb_fn = fn;
	ev_ctx->cpl_fn = cpl_fn;
	ev_ctx->user_ctx = arg;

	assert(vdev->pending_async_op_num < UINT32_MAX);
	vdev->pending_async_op_num++;

	spdk_thread_send_msg(vdev->thread, foreach_session, ev_ctx);
}

static int
_stop_session(struct spdk_vhost_session *vsession)
{
	struct spdk_vhost_dev *vdev = vsession->vdev;
	struct spdk_vhost_virtqueue *q;
	int rc;
	uint16_t i;

	rc = vdev->backend->stop_session(vsession);
	if (rc != 0) {
		SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vsession->vid);
		return rc;
	}

	for (i = 0; i < vsession->max_queues; i++) {
		q = &vsession->virtqueue[i];

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL) {
			continue;
		}

		/* Packed virtqueues support up to 2^15 entries each,
		 * so the leftover top bit can be used as the wrap counter.
		 */
		if (q->packed.packed_ring) {
			q->last_avail_idx = q->last_avail_idx |
					    ((uint16_t)q->packed.avail_phase << 15);
			q->last_used_idx = q->last_used_idx |
					   ((uint16_t)q->packed.used_phase << 15);
		}

		rte_vhost_set_vring_base(vsession->vid, i, q->last_avail_idx, q->last_used_idx);
	}

	vhost_session_mem_unregister(vsession->mem);
	free(vsession->mem);

	return 0;
}
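
/*
 * Worked example of the bit packing above: with last_avail_idx = 5 and
 * avail_phase = 1, the value handed to rte_vhost_set_vring_base() is
 * 5 | (1 << 15) = 0x8005. vhost_start_device_cb() performs the inverse split
 * (phase = value >> 15, index = value & 0x7FFF) when the session is restored.
 */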

int
vhost_stop_device_cb(int vid)
{
	struct spdk_vhost_session *vsession;
	int rc;

	pthread_mutex_lock(&g_vhost_mutex);
	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		pthread_mutex_unlock(&g_vhost_mutex);
		return -EINVAL;
	}

	if (!vsession->started) {
		/* already stopped, nothing to do */
		pthread_mutex_unlock(&g_vhost_mutex);
		return -EALREADY;
	}

	rc = _stop_session(vsession);
	pthread_mutex_unlock(&g_vhost_mutex);

	return rc;
}

int
vhost_start_device_cb(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	int rc = -1;
	uint16_t i;
	bool packed_ring;

	pthread_mutex_lock(&g_vhost_mutex);

	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		goto out;
	}

	vdev = vsession->vdev;
	if (vsession->started) {
		/* already started, nothing to do */
		rc = 0;
		goto out;
	}

	if (vhost_get_negotiated_features(vid, &vsession->negotiated_features) != 0) {
		SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid);
		goto out;
	}

	packed_ring = ((vsession->negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) != 0);

	vsession->max_queues = 0;
	memset(vsession->virtqueue, 0, sizeof(vsession->virtqueue));
	for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];

		q->vsession = vsession;
		q->vring_idx = -1;
		if (rte_vhost_get_vhost_vring(vid, i, &q->vring)) {
			continue;
		}
		q->vring_idx = i;
		rte_vhost_get_vhost_ring_inflight(vid, i, &q->vring_inflight);

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL || q->vring.size == 0) {
			continue;
		}

		if (rte_vhost_get_vring_base(vsession->vid, i, &q->last_avail_idx, &q->last_used_idx)) {
			q->vring.desc = NULL;
			continue;
		}

		if (packed_ring) {
			/* Use the inflight mem to restore last_avail_idx and last_used_idx.
			 * When the vring format is packed, there is no used_idx in the
			 * used ring, so the VM can't resend the used_idx to vhost when
			 * reconnecting. QEMU supports packed inflight starting with
			 * version 5.2.0; before that it only supports split ring inflight
			 * because it doesn't send the negotiated features before getting
			 * the inflight fd. Users can enable this behavior via RPC.
			 */
			if (spdk_unlikely(g_packed_ring_recovery)) {
				rte_vhost_get_vring_base_from_inflight(vsession->vid, i,
								       &q->last_avail_idx,
								       &q->last_used_idx);
			}

			/* Packed virtqueues support up to 2^15 entries each,
			 * so the leftover top bit can be used as the wrap counter.
			 */
			q->packed.avail_phase = q->last_avail_idx >> 15;
			q->last_avail_idx = q->last_avail_idx & 0x7FFF;
			q->packed.used_phase = q->last_used_idx >> 15;
			q->last_used_idx = q->last_used_idx & 0x7FFF;

			if (!vsession->interrupt_mode) {
				/* Disable I/O submission notifications, we'll be polling. */
				q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
			}
		} else {
			if (!vsession->interrupt_mode) {
				/* Disable I/O submission notifications, we'll be polling. */
				q->vring.used->flags = VRING_USED_F_NO_NOTIFY;
			}
		}

		q->packed.packed_ring = packed_ring;
		vsession->max_queues = i + 1;
	}

	if (vhost_get_mem_table(vid, &vsession->mem) != 0) {
		SPDK_ERRLOG("vhost device %d: Failed to get guest memory table\n", vid);
		goto out;
	}

	/*
	 * Not entirely sure, but this looks like some kind of QEMU bug: without
	 * kicking all queues after live migration, guest I/O might be frozen.
	 * It looks like the previous vhost instance failed to effectively deliver
	 * all interrupts before the GET_VRING_BASE message. This shouldn't harm
	 * the guest, since spurious interrupts should be ignored by the guest
	 * virtio driver.
	 *
	 * Tested on QEMU 2.10.91 and 2.11.50.
	 */
	for (i = 0; i < vsession->max_queues; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc != NULL && q->vring.size > 0) {
			rte_vhost_vring_call(vsession->vid, q->vring_idx);
		}
	}

	vhost_session_set_coalescing(vdev, vsession, NULL);
	vhost_session_mem_register(vsession->mem);
	vsession->initialized = true;
	rc = vdev->backend->start_session(vsession);
	if (rc != 0) {
		vhost_session_mem_unregister(vsession->mem);
		free(vsession->mem);
		goto out;
	}

out:
	pthread_mutex_unlock(&g_vhost_mutex);
	return rc;
}
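
/*
 * Callback lifecycle sketch (a summary of the callbacks in this file, in the
 * order rte_vhost typically invokes them for one QEMU connection):
 *
 *   vhost_new_connection_cb()     - socket accepted, session allocated
 *   vhost_start_device_cb()       - rings negotiated, backend session started
 *   vhost_stop_device_cb()        - device stopped, ring bases saved
 *   vhost_destroy_connection_cb() - session freed
 */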

void
vhost_session_set_interrupt_mode(struct spdk_vhost_session *vsession, bool interrupt_mode)
{
	uint16_t i;
	bool packed_ring;
	int rc = 0;

	packed_ring = ((vsession->negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) != 0);

	for (i = 0; i < vsession->max_queues; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];
		uint64_t num_events = 1;

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL || q->vring.size == 0) {
			continue;
		}

		if (interrupt_mode) {
			/* Enable I/O submission notifications, we'll be interrupting. */
			if (packed_ring) {
				*(volatile uint16_t *) &q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_ENABLE;
			} else {
				*(volatile uint16_t *) &q->vring.used->flags = 0;
			}

			/* In case of a race condition, always kick the vring when
			 * switching to interrupt mode.
			 */
			rc = write(q->vring.kickfd, &num_events, sizeof(num_events));
			if (rc < 0) {
				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
			}

			vsession->interrupt_mode = true;
		} else {
			/* Disable I/O submission notifications, we'll be polling. */
			if (packed_ring) {
				*(volatile uint16_t *) &q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
			} else {
				*(volatile uint16_t *) &q->vring.used->flags = VRING_USED_F_NO_NOTIFY;
			}

			vsession->interrupt_mode = false;
		}
	}
}

int
spdk_vhost_set_socket_path(const char *basename)
{
	int ret;

	if (basename && strlen(basename) > 0) {
		ret = snprintf(dev_dirname, sizeof(dev_dirname) - 2, "%s", basename);
		if (ret <= 0) {
			return -EINVAL;
		}
		if ((size_t)ret >= sizeof(dev_dirname) - 2) {
			SPDK_ERRLOG("Char dev dir path length %d is too long\n", ret);
			return -EINVAL;
		}

		if (dev_dirname[ret - 1] != '/') {
			dev_dirname[ret] = '/';
			dev_dirname[ret + 1] = '\0';
		}
	}

	return 0;
}
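
/*
 * Usage example (hypothetical path): spdk_vhost_set_socket_path("/var/tmp")
 * stores dev_dirname = "/var/tmp/" (a trailing '/' is appended when missing),
 * so a controller registered as "vhost.0" listens on "/var/tmp/vhost.0".
 */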

void
vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	assert(vdev->backend->dump_info_json != NULL);
	vdev->backend->dump_info_json(vdev, w);
}

int
spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
{
	if (vdev->pending_async_op_num) {
		return -EBUSY;
	}

	return vdev->backend->remove_device(vdev);
}

int
vhost_new_connection_cb(int vid, const char *ifname)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;

	pthread_mutex_lock(&g_vhost_mutex);

	vdev = spdk_vhost_dev_find(ifname);
	if (vdev == NULL) {
		SPDK_ERRLOG("Couldn't find device with vid %d to create connection for.\n", vid);
		pthread_mutex_unlock(&g_vhost_mutex);
		return -1;
	}

	/* We expect sessions inside vdev->vsessions to be sorted in ascending
	 * order of vsession->id. For now we always set id = vsessions_num++
	 * and append each session to the very end of the vsessions list.
	 * This is required for vhost_dev_foreach_session() to work.
	 */
	if (vdev->vsessions_num == UINT_MAX) {
		assert(false);
		pthread_mutex_unlock(&g_vhost_mutex);
		return -EINVAL;
	}

	if (posix_memalign((void **)&vsession, SPDK_CACHE_LINE_SIZE, sizeof(*vsession) +
			   vdev->backend->session_ctx_size)) {
		SPDK_ERRLOG("vsession alloc failed\n");
		pthread_mutex_unlock(&g_vhost_mutex);
		return -1;
	}
	memset(vsession, 0, sizeof(*vsession) + vdev->backend->session_ctx_size);

	vsession->vdev = vdev;
	vsession->vid = vid;
	vsession->id = vdev->vsessions_num++;
	vsession->name = spdk_sprintf_alloc("%ss%u", vdev->name, vsession->vid);
	if (vsession->name == NULL) {
		SPDK_ERRLOG("vsession alloc failed\n");
		pthread_mutex_unlock(&g_vhost_mutex);
		free(vsession);
		return -1;
	}
	vsession->started = false;
	vsession->initialized = false;
	vsession->next_stats_check_time = 0;
	vsession->stats_check_interval = SPDK_VHOST_STATS_CHECK_INTERVAL_MS *
					 spdk_get_ticks_hz() / 1000UL;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);

	vhost_session_install_rte_compat_hooks(vsession);
	pthread_mutex_unlock(&g_vhost_mutex);
	return 0;
}

int
vhost_destroy_connection_cb(int vid)
{
	struct spdk_vhost_session *vsession;
	int rc = 0;

	pthread_mutex_lock(&g_vhost_mutex);
	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		pthread_mutex_unlock(&g_vhost_mutex);
		return -EINVAL;
	}

	if (vsession->started) {
		rc = _stop_session(vsession);
	}

	TAILQ_REMOVE(&vsession->vdev->vsessions, vsession, tailq);
	free(vsession->name);
	free(vsession);
	pthread_mutex_unlock(&g_vhost_mutex);

	return rc;
}

void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}

int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}

void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}

void
spdk_vhost_init(spdk_vhost_init_cb init_cb)
{
	size_t len;
	uint32_t i;
	int ret = 0;

	g_vhost_init_thread = spdk_get_thread();
	assert(g_vhost_init_thread != NULL);

	if (dev_dirname[0] == '\0') {
		if (getcwd(dev_dirname, sizeof(dev_dirname) - 1) == NULL) {
			SPDK_ERRLOG("getcwd failed (%d): %s\n", errno, spdk_strerror(errno));
			ret = -1;
			goto out;
		}

		len = strlen(dev_dirname);
		if (dev_dirname[len - 1] != '/') {
			dev_dirname[len] = '/';
			dev_dirname[len + 1] = '\0';
		}
	}

	ret = sem_init(&g_dpdk_sem, 0, 0);
	if (ret != 0) {
		SPDK_ERRLOG("Failed to initialize semaphore for rte_vhost pthread.\n");
		ret = -1;
		goto out;
	}

	spdk_cpuset_zero(&g_vhost_core_mask);
	SPDK_ENV_FOREACH_CORE(i) {
		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
	}
out:
	init_cb(ret);
}
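
/*
 * Minimal usage sketch for the initialization above (hypothetical callback;
 * spdk_vhost_init() must be called from an SPDK thread, which becomes the
 * vhost init thread):
 *
 *   static void
 *   init_done(int rc)
 *   {
 *           if (rc != 0) {
 *                   SPDK_ERRLOG("vhost init failed: %d\n", rc);
 *           }
 *   }
 *
 *   spdk_vhost_init(init_done);
 */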

static void
vhost_fini(void *arg1)
{
	struct spdk_vhost_dev *vdev, *tmp;

	spdk_vhost_lock();
	vdev = spdk_vhost_dev_next(NULL);
	while (vdev != NULL) {
		tmp = spdk_vhost_dev_next(vdev);
		spdk_vhost_dev_remove(vdev);
		/* don't care if it fails, there's nothing we can do for now */
		vdev = tmp;
	}
	spdk_vhost_unlock();

	spdk_cpuset_zero(&g_vhost_core_mask);

	/* All devices are removed now. */
	sem_destroy(&g_dpdk_sem);

	g_fini_cpl_cb();
}

static void *
session_shutdown(void *arg)
{
	struct spdk_vhost_dev *vdev = NULL;
	struct spdk_vhost_session *vsession;

	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
		pthread_mutex_lock(&g_vhost_mutex);
		TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
			if (vsession->started) {
				_stop_session(vsession);
			}
		}
		pthread_mutex_unlock(&g_vhost_mutex);
		vhost_driver_unregister(vdev->path);
		vdev->registered = false;
	}

	SPDK_INFOLOG(vhost, "Exiting\n");
	spdk_thread_send_msg(g_vhost_init_thread, vhost_fini, NULL);
	return NULL;
}

void
spdk_vhost_fini(spdk_vhost_fini_cb fini_cb)
{
	pthread_t tid;
	int rc;

	assert(spdk_get_thread() == g_vhost_init_thread);
	g_fini_cpl_cb = fini_cb;

	/* The rte_vhost API for removing sockets is not asynchronous. Since it may
	 * call SPDK ops for stopping a device or removing a connection, we need to
	 * call it from a separate thread to avoid deadlock.
	 */
	rc = pthread_create(&tid, NULL, &session_shutdown, NULL);
	if (rc != 0) {
		/* pthread_create() returns a positive errno on failure */
		SPDK_ERRLOG("Failed to start session shutdown thread (%d): %s\n", rc, spdk_strerror(rc));
		abort();
	}
	pthread_detach(tid);
}

void
spdk_vhost_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_vhost_dev *vdev;
	uint32_t delay_base_us;
	uint32_t iops_threshold;

	spdk_json_write_array_begin(w);

	spdk_vhost_lock();
	vdev = spdk_vhost_dev_next(NULL);
	while (vdev != NULL) {
		vdev->backend->write_config_json(vdev, w);

		spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
		if (delay_base_us) {
			spdk_json_write_object_begin(w);
			spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");

			spdk_json_write_named_object_begin(w, "params");
			spdk_json_write_named_string(w, "ctrlr", vdev->name);
			spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
			spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
			spdk_json_write_object_end(w);

			spdk_json_write_object_end(w);
		}
		vdev = spdk_vhost_dev_next(vdev);
	}
	spdk_vhost_unlock();

	spdk_json_write_array_end(w);
}

SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)