/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <inttypes.h>
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t vhost_dma_lock = PTHREAD_MUTEX_INITIALIZER;

struct vhost_vq_stats_name_off {
	char name[RTE_VHOST_STATS_NAME_SIZE];
	unsigned int offset;
};

static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
	{"good_packets", offsetof(struct vhost_virtqueue, stats.packets)},
	{"good_bytes", offsetof(struct vhost_virtqueue, stats.bytes)},
	{"multicast_packets", offsetof(struct vhost_virtqueue, stats.multicast)},
	{"broadcast_packets", offsetof(struct vhost_virtqueue, stats.broadcast)},
	{"undersize_packets", offsetof(struct vhost_virtqueue, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct vhost_virtqueue, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct vhost_virtqueue, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct vhost_virtqueue, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct vhost_virtqueue, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct vhost_virtqueue, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct vhost_virtqueue, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct vhost_virtqueue, stats.size_bins[7])},
	{"guest_notifications", offsetof(struct vhost_virtqueue, stats.guest_notifications)},
	{"iotlb_hits", offsetof(struct vhost_virtqueue, stats.iotlb_hits)},
	{"iotlb_misses", offsetof(struct vhost_virtqueue, stats.iotlb_misses)},
	{"inflight_submitted", offsetof(struct vhost_virtqueue, stats.inflight_submitted)},
	{"inflight_completed", offsetof(struct vhost_virtqueue, stats.inflight_completed)},
};

#define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!*size))
		return 0;

	tmp_size = *size;

	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == *size) {
		if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
			vq->stats.iotlb_hits++;
		return vva;
	}

	if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
		vq->stats.iotlb_misses++;

	iova += tmp_size;

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm)) {
			VHOST_LOG_DATA(dev->ifname, ERR,
				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
				iova);
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		}

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
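
/*
 * Caller pattern (illustrative sketch, not a helper defined in this file):
 * a translation may cover only part of the requested range, in which case
 * *size is shrunk to the length actually mapped. Data-path callers
 * therefore translate chunk by chunk, giving up once a miss request has
 * been issued:
 *
 *	uint64_t remain = len, chunk, vva;
 *
 *	while (remain) {
 *		chunk = remain;
 *		vva = vhost_iova_to_vva(dev, vq, iova, &chunk, VHOST_ACCESS_RO);
 *		if (unlikely(!vva || !chunk))
 *			break;	// miss requested, retry once it is handled
 *		// ... consume 'chunk' bytes at 'vva' ...
 *		iova += chunk;
 *		remain -= chunk;
 *	}
 *
 * vhost_alloc_copy_ind_table() below follows exactly this pattern.
 */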

#define VHOST_LOG_PAGE	4096

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * __sync_ built-ins are deprecated, but __atomic_ ones
	 * are sub-optimized in older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}

static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	vhost_set_bit(page % 8, &log_base[page / 8]);
}

void
__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}
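
/*
 * Worked example (illustrative): for addr = 0x2010 and len = 0x2000,
 * addr + len = 0x4010, so the loop above marks pages 2, 3 and 4
 * (0x2010 / 4096 = 2; the loop runs while page * 4096 < 0x4010).
 * One page maps to one bit of the dirty log: vhost_log_page() sets
 * bit (page % 8) of byte (page / 8), here bits 2, 3 and 4 of
 * log_base[0].
 */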

void
__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len)
{
	uint64_t hva, gpa, map_len;
	map_len = len;

	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(dev->ifname, ERR,
			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
			iova);
		return;
	}

	gpa = hva_to_gpa(dev, hva, len);
	if (gpa)
		__vhost_log_write(dev, gpa, len);
}

void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (unlikely(!dev->log_base))
		return;

	/* No cache, nothing to sync */
	if (unlikely(!vq->log_cache))
		return;

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * '__sync' builtins are deprecated, but '__atomic' ones
		 * are sub-optimized in older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vq->log_cache_nb_elem = 0;
}

static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	if (unlikely(!vq->log_cache)) {
		/* No logging cache allocated, write dirty log map directly */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}

void
__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}

void
__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len)
{
	uint64_t hva, gpa, map_len;
	map_len = len;

	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(dev->ifname, ERR,
			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
			iova);
		return;
	}

	gpa = hva_to_gpa(dev, hva, len);
	if (gpa)
		__vhost_log_cache_write(dev, vq, gpa, len);
}
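
/*
 * Typical datapath sequence (sketch): the cached variants above batch
 * dirty-bitmap updates in a small per-virtqueue cache, which is flushed
 * to the shared log once per burst:
 *
 *	__vhost_log_cache_write_iova(dev, vq, iova, len);	// per write
 *	...
 *	__vhost_log_cache_sync(dev, vq);			// per burst
 *
 * This trades one atomic RMW per dirtied page for one per cached entry.
 */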

void *
vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len)
{
	void *idesc;
	uint64_t src, dst;
	uint64_t len, remain = desc_len;

	idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
	if (unlikely(!idesc))
		return NULL;

	dst = (uint64_t)(uintptr_t)idesc;

	while (remain) {
		len = remain;
		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !len)) {
			rte_free(idesc);
			return NULL;
		}

		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		desc_addr += len;
	}

	return idesc;
}

void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

void
cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
		return;

	if (vq_is_packed(dev)) {
		if (vq->inflight_packed)
			vq->inflight_packed = NULL;
	} else {
		if (vq->inflight_split)
			vq->inflight_split = NULL;
	}

	if (vq->resubmit_inflight) {
		if (vq->resubmit_inflight->resubmit_list) {
			rte_free(vq->resubmit_inflight->resubmit_list);
			vq->resubmit_inflight->resubmit_list = NULL;
		}
		rte_free(vq->resubmit_inflight);
		vq->resubmit_inflight = NULL;
	}
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++) {
		cleanup_vq(dev->virtqueue[i], destroy);
		cleanup_vq_inflight(dev, dev->virtqueue[i]);
	}
}

static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
	if (!vq->async)
		return;

	rte_free(vq->async->pkts_info);
	rte_free(vq->async->pkts_cmpl_flag);

	rte_free(vq->async->buffers_packed);
	vq->async->buffers_packed = NULL;
	rte_free(vq->async->descs_split);
	vq->async->descs_split = NULL;

	rte_free(vq->async);
	vq->async = NULL;
}

void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (vq_is_packed(dev))
		rte_free(vq->shadow_used_packed);
	else
		rte_free(vq->shadow_used_split);

	vhost_free_async_mem(vq);
	rte_free(vq->batch_copy_elems);
	vhost_user_iotlb_destroy(vq);
	rte_free(vq->log_cache);
	rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++)
		free_vq(dev, dev->virtqueue[i]);

	rte_free(dev);
}

static __rte_always_inline int
log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
		return 0;

	vq->log_guest_addr = translate_log_addr(dev, vq,
			vq->ring_addrs.log_guest_addr);
	if (vq->log_guest_addr == 0)
		return -1;

	return 0;
}

/*
 * Converts vring log address to GPA
 * If IOMMU is enabled, the log address is IOVA
 * If IOMMU not enabled, the log address is already GPA
 *
 * Caller should have iotlb_lock read-locked
 */
uint64_t
translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
		const uint64_t exp_size = sizeof(uint64_t);
		uint64_t hva, gpa;
		uint64_t size = exp_size;

		hva = vhost_iova_to_vva(dev, vq, log_addr,
				&size, VHOST_ACCESS_RW);

		if (size != exp_size)
			return 0;

		gpa = hva_to_gpa(dev, hva, exp_size);
		if (!gpa) {
			VHOST_LOG_DATA(dev->ifname, ERR,
				"failed to find GPA for log_addr: 0x%"
				PRIx64 " hva: 0x%" PRIx64 "\n",
				log_addr, hva);
			return 0;
		}
		return gpa;

	} else
		return log_addr;
}

/* Caller should have iotlb_lock read-locked */
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_desc) * vq->size;
	size = req_size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
			vq->ring_addrs.desc_user_addr,
			&size, VHOST_ACCESS_RW);
	if (!vq->desc || size != req_size)
		return -1;

	req_size = sizeof(struct vring_avail);
	req_size += sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
			vq->ring_addrs.avail_user_addr,
			&size, VHOST_ACCESS_RW);
	if (!vq->avail || size != req_size)
		return -1;

	req_size = sizeof(struct vring_used);
	req_size += sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
			vq->ring_addrs.used_user_addr,
			&size, VHOST_ACCESS_RW);
	if (!vq->used || size != req_size)
		return -1;

	return 0;
}
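
/*
 * Size check example (illustrative): for a split ring with vq->size = 256
 * and VIRTIO_RING_F_EVENT_IDX negotiated, the required sizes above are:
 *
 *	desc:  16 B * 256             = 4096 B
 *	avail:  4 B + 2 B * 256 + 2 B =  518 B
 *	used:   4 B + 8 B * 256 + 2 B = 2054 B
 *
 * Each area must be covered by one contiguous IOTLB mapping; if the
 * translated size comes back short, the ring is rejected with -1.
 */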

/* Caller should have iotlb_lock read-locked */
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_packed_desc) * vq->size;
	size = req_size;
	vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->desc_packed || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->driver_event || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->device_event || size != req_size)
		return -1;

	return 0;
}

int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Without an IOMMU, ring addresses are already translated. */
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	if (vq_is_packed(dev)) {
		if (vring_translate_packed(dev, vq) < 0)
			return -1;
	} else {
		if (vring_translate_split(dev, vq) < 0)
			return -1;
	}

out:
	if (log_translate(dev, vq) < 0)
		return -1;

	vq->access_ok = true;

	return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = false;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->log_guest_addr = 0;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}

static void
init_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint32_t vring_idx)
{
	int numa_node = SOCKET_ID_ANY;

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->index = vring_idx;
	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;

#ifdef RTE_LIBRTE_VHOST_NUMA
	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
			rte_strerror(errno));
		numa_node = SOCKET_ID_ANY;
	}
#endif
	vq->numa_node = numa_node;

	vhost_user_iotlb_init(dev, vq);
}

static void
reset_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	int callfd;

	callfd = vq->callfd;
	init_vring_queue(dev, vq, vq->index);
	vq->callfd = callfd;
}

int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	uint32_t i;

	/* Also allocate holes, if any, up to requested vring index. */
	for (i = 0; i <= vring_idx; i++) {
		if (dev->virtqueue[i])
			continue;

		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
		if (vq == NULL) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate memory for vring %u.\n",
				i);
			return -1;
		}

		dev->virtqueue[i] = vq;
		init_vring_queue(dev, vq, i);
		rte_spinlock_init(&vq->access_lock);
		vq->avail_wrap_counter = 1;
		vq->used_wrap_counter = 1;
		vq->signalled_used_valid = false;
	}

	dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);

	return 0;
}

/*
 * Reset some variables in the device structure, while keeping a few
 * others untouched, such as vid, ifname and nr_vring: they should
 * stay the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (!vq) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to reset vring, virtqueue not allocated (%d)\n", i);
			continue;
		}
		reset_vring_queue(dev, vq);
	}
}

/*
 * Invoked when a new vhost-user connection is established (when
 * a new virtio device is being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	pthread_mutex_lock(&vhost_dev_lock);
	for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}

	if (i == RTE_MAX_VHOST_DEVICE) {
		VHOST_LOG_CONFIG("device", ERR, "failed to find a free slot for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		VHOST_LOG_CONFIG("device", ERR, "failed to allocate memory for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	vhost_devices[i] = dev;
	pthread_mutex_unlock(&vhost_dev_lock);

	dev->vid = i;
	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	dev->slave_req_fd = -1;
	dev->postcopy_ufd = -1;
	rte_spinlock_init(&dev->slave_req_lock);

	return i;
}

void
vhost_destroy_device_notify(struct virtio_net *dev)
{
	struct rte_vdpa_device *vdpa_dev;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		vdpa_dev = dev->vdpa_dev;
		if (vdpa_dev)
			vdpa_dev->ops->dev_close(dev->vid);
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(dev->vid);
	}
}

/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}

void
vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->vdpa_dev = vdpa_dev;
}

void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

void
vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled,
	bool support_iommu)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (enable)
		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	else
		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	if (!compliant_ol_flags)
		dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
	else
		dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
	if (stats_enabled)
		dev->flags |= VIRTIO_DEV_STATS_ENABLED;
	else
		dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
	if (support_iommu)
		dev->flags |= VIRTIO_DEV_SUPPORT_IOMMU;
	else
		dev->flags &= ~VIRTIO_DEV_SUPPORT_IOMMU;
}

void
vhost_enable_extbuf(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->extbuf = 1;
}

void
vhost_enable_linearbuf(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->linearbuf = 1;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || mtu == NULL)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL || numa_available() != 0)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
			rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || buf == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || features == NULL)
		return -1;

	*features = dev->features;
	return 0;
}

int
rte_vhost_get_negotiated_protocol_features(int vid,
		uint64_t *protocol_features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || protocol_features == NULL)
		return -1;

	*protocol_features = dev->protocol_features;
	return 0;
}
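
/*
 * Usage sketch (illustrative): after negotiation, an application can
 * inspect the negotiated feature bits, e.g. to check for mergeable RX
 * buffers:
 *
 *	uint64_t features;
 *
 *	if (rte_vhost_get_negotiated_features(vid, &features) == 0 &&
 *			(features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))) {
 *		// mergeable RX buffers were negotiated
 *	}
 */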

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (dev == NULL || mem == NULL)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}

int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
		struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL || vring == NULL)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		vring->desc_packed = vq->desc_packed;
		vring->driver_event = vq->driver_event;
		vring->device_event = vq->device_event;
	} else {
		vring->desc = vq->desc;
		vring->avail = vq->avail;
		vring->used = vq->used;
	}
	vring->log_guest_addr = vq->log_guest_addr;

	vring->callfd = vq->callfd;
	vring->kickfd = vq->kickfd;
	vring->size = vq->size;

	return 0;
}

int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
		struct rte_vhost_ring_inflight *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (vq_is_packed(dev)) {
		if (unlikely(!vq->inflight_packed))
			return -1;

		vring->inflight_packed = vq->inflight_packed;
	} else {
		if (unlikely(!vq->inflight_split))
			return -1;

		vring->inflight_split = vq->inflight_split;
	}

	vring->resubmit_inflight = vq->resubmit_inflight;

	return 0;
}

int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
		uint16_t idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	vq->inflight_split->desc[idx].counter = vq->global_counter++;
	vq->inflight_split->desc[idx].inflight = 1;
	return 0;
}

int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
		uint16_t head, uint16_t last,
		uint16_t *inflight_entry)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	struct vring_packed_desc *desc;
	uint16_t old_free_head, free_head;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	desc = vq->desc_packed;
	old_free_head = inflight_info->old_free_head;
	if (unlikely(old_free_head >= vq->size))
		return -1;

	free_head = old_free_head;

	/* init header descriptor */
	inflight_info->desc[old_free_head].num = 0;
	inflight_info->desc[old_free_head].counter = vq->global_counter++;
	inflight_info->desc[old_free_head].inflight = 1;

	/* save desc entry in flight entry */
	while (head != ((last + 1) % vq->size)) {
		inflight_info->desc[old_free_head].num++;
		inflight_info->desc[free_head].addr = desc[head].addr;
		inflight_info->desc[free_head].len = desc[head].len;
		inflight_info->desc[free_head].flags = desc[head].flags;
		inflight_info->desc[free_head].id = desc[head].id;

		inflight_info->desc[old_free_head].last = free_head;
		free_head = inflight_info->desc[free_head].next;
		inflight_info->free_head = free_head;
		head = (head + 1) % vq->size;
	}

	inflight_info->old_free_head = free_head;
	*inflight_entry = old_free_head;

	return 0;
}

int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
		uint16_t last_used_idx, uint16_t idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	vq->inflight_split->desc[idx].inflight = 0;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	vq->inflight_split->used_idx = last_used_idx;
	return 0;
}

int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
		uint16_t head)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	inflight_info->desc[head].inflight = 0;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	inflight_info->old_free_head = inflight_info->free_head;
	inflight_info->old_used_idx = inflight_info->used_idx;
	inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;

	return 0;
}
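
/*
 * Recovery workflow sketch (illustrative): with the INFLIGHT_SHMFD
 * protocol feature, a backend brackets split-ring descriptor processing
 * as follows:
 *
 *	rte_vhost_set_inflight_desc_split(vid, vring_idx, idx);
 *	// ... process the descriptor and update the used ring ...
 *	rte_vhost_clr_inflight_desc_split(vid, vring_idx, last_used_idx, idx);
 *
 * After a crash, the new backend instance can then detect and resubmit
 * descriptors still marked inflight in the shared memory region.
 */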

int
rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
		uint16_t idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	vq->inflight_split->last_inflight_io = idx;
	return 0;
}

int
rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
		uint16_t head)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint16_t last;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	last = inflight_info->desc[head].last;
	if (unlikely(last >= vq->size))
		return -1;

	inflight_info->desc[last].next = inflight_info->free_head;
	inflight_info->free_head = head;
	inflight_info->used_idx += inflight_info->desc[head].num;
	if (inflight_info->used_idx >= inflight_info->desc_num) {
		inflight_info->used_idx -= inflight_info->desc_num;
		inflight_info->used_wrap_counter =
			!inflight_info->used_wrap_counter;
	}

	return 0;
}

int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	rte_spinlock_lock(&vq->access_lock);

	if (vq_is_packed(dev))
		vhost_vring_call_packed(dev, vq);
	else
		vhost_vring_call_split(dev, vq);

	rte_spinlock_unlock(&vq->access_lock);

	return 0;
}

int
rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	if (!rte_spinlock_trylock(&vq->access_lock))
		return -EAGAIN;

	if (vq_is_packed(dev))
		vhost_vring_call_packed(dev, vq);
	else
		vhost_vring_call_split(dev, vq);

	rte_spinlock_unlock(&vq->access_lock);

	return 0;
}

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint16_t ret = 0;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (queue_id >= VHOST_MAX_VRING)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return 0;

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}
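
/*
 * Polling sketch (illustrative): an external backend may combine these
 * helpers to process a queue and then notify the guest:
 *
 *	if (rte_vhost_avail_entries(vid, queue_id) > 0) {
 *		// ... dequeue and process descriptors ...
 *		rte_vhost_vring_call(vid, queue_id);
 *	}
 */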

static inline int
vhost_enable_notify_split(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	if (vq->used == NULL)
		return -1;

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (enable)
			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
		else
			vq->used->flags |= VRING_USED_F_NO_NOTIFY;
	} else {
		if (enable)
			vhost_avail_event(vq) = vq->last_avail_idx;
	}
	return 0;
}

static inline int
vhost_enable_notify_packed(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	uint16_t flags;

	if (vq->device_event == NULL)
		return -1;

	if (!enable) {
		vq->device_event->flags = VRING_EVENT_F_DISABLE;
		return 0;
	}

	flags = VRING_EVENT_F_ENABLE;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		flags = VRING_EVENT_F_DESC;
		vq->device_event->off_wrap = vq->last_avail_idx |
			vq->avail_wrap_counter << 15;
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vq->device_event->flags = flags;
	return 0;
}

int
vhost_enable_guest_notification(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	/*
	 * If the virtqueue is not ready yet, the request will be
	 * applied when it becomes ready.
	 */
	if (!vq->ready)
		return 0;

	if (vq_is_packed(dev))
		return vhost_enable_notify_packed(dev, vq, enable);
	else
		return vhost_enable_notify_split(dev, vq, enable);
}

int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	int ret;

	if (!dev)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	rte_spinlock_lock(&vq->access_lock);

	vq->notif_enable = enable;
	ret = vhost_enable_guest_notification(dev, vq, enable);

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;
	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint32_t ret = 0;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		VHOST_LOG_DATA(dev->ifname, ERR,
			"%s: invalid virtqueue idx %d.\n",
			__func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}

struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return NULL;

	return dev->vdpa_dev;
}

int
rte_vhost_get_log_base(int vid, uint64_t *log_base,
		uint64_t *log_size)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || log_base == NULL || log_size == NULL)
		return -1;

	*log_base = dev->log_base;
	*log_size = dev->log_size;

	return 0;
}

int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		*last_avail_idx = (vq->avail_wrap_counter << 15) |
			vq->last_avail_idx;
		*last_used_idx = (vq->used_wrap_counter << 15) |
			vq->last_used_idx;
	} else {
		*last_avail_idx = vq->last_avail_idx;
		*last_used_idx = vq->last_used_idx;
	}

	return 0;
}

int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		vq->last_avail_idx = last_avail_idx & 0x7fff;
		vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
		vq->last_used_idx = last_used_idx & 0x7fff;
		vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
	} else {
		vq->last_avail_idx = last_avail_idx;
		vq->last_used_idx = last_used_idx;
	}

	return 0;
}

int
rte_vhost_get_vring_base_from_inflight(int vid,
		uint16_t queue_id,
		uint16_t *last_avail_idx,
		uint16_t *last_used_idx)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (!vq_is_packed(dev))
		return -1;

	inflight_info = vq->inflight_packed;
	if (!inflight_info)
		return -1;

	*last_avail_idx = (inflight_info->old_used_wrap_counter << 15) |
		inflight_info->old_used_idx;
	*last_used_idx = *last_avail_idx;

	return 0;
}

int
rte_vhost_extern_callback_register(int vid,
		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || ops == NULL)
		return -1;

	dev->extern_ops = *ops;
	dev->extern_data = ctx;
	return 0;
}

static __rte_always_inline int
async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct vhost_async *async;
	int node = vq->numa_node;

	if (unlikely(vq->async)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"async register failed: already registered (qid: %d)\n",
			vq->index);
		return -1;
	}

	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
	if (!async) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate async metadata (qid: %d)\n",
			vq->index);
		return -1;
	}

	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
			RTE_CACHE_LINE_SIZE, node);
	if (!async->pkts_info) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate async_pkts_info (qid: %d)\n",
			vq->index);
		goto out_free_async;
	}

	async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
			RTE_CACHE_LINE_SIZE, node);
	if (!async->pkts_cmpl_flag) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate async pkts_cmpl_flag (qid: %d)\n",
			vq->index);
		goto out_free_inflight;
	}

	if (vq_is_packed(dev)) {
		async->buffers_packed = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem_packed),
				RTE_CACHE_LINE_SIZE, node);
		if (!async->buffers_packed) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate async buffers (qid: %d)\n",
				vq->index);
			goto out_free_inflight;
		}
	} else {
		async->descs_split = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem),
				RTE_CACHE_LINE_SIZE, node);
		if (!async->descs_split) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate async descs (qid: %d)\n",
				vq->index);
			goto out_free_inflight;
		}
	}

	vq->async = async;

	return 0;
out_free_inflight:
	rte_free(async->pkts_cmpl_flag);
	rte_free(async->pkts_info);
out_free_async:
	rte_free(async);

	return -1;
}

int
rte_vhost_async_channel_register(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret;

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

	rte_spinlock_lock(&vq->access_lock);
	ret = async_channel_register(dev, vq);
	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

int
rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

	if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
			__func__);
		return -1;
	}

	return async_channel_register(dev, vq);
}
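
/*
 * Setup sketch (illustrative): a DMA-accelerated application typically
 * configures the DMA vChannel and registers the async channel before
 * using the async data path:
 *
 *	if (rte_vhost_async_dma_configure(dma_id, vchan_id) == 0 &&
 *			rte_vhost_async_channel_register(vid, queue_id) == 0) {
 *		// queue may now use the vhost async data path
 *	}
 */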

int
rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to unregister async channel, virtqueue busy.\n");
		return ret;
	}

	if (!vq->async) {
		ret = 0;
	} else if (vq->async->pkts_inflight_n) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"inflight packets must be completed before unregistration.\n");
	} else {
		vhost_free_async_mem(vq);
		ret = 0;
	}

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

int
rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return -1;

	if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
			__func__);
		return -1;
	}

	if (!vq->async)
		return 0;

	if (vq->async->pkts_inflight_n) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"inflight packets must be completed before unregistration.\n");
		return -1;
	}

	vhost_free_async_mem(vq);

	return 0;
}

int
rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
{
	struct rte_dma_info info;
	void *pkts_cmpl_flag_addr;
	uint16_t max_desc;

	pthread_mutex_lock(&vhost_dma_lock);

	if (!rte_dma_is_valid(dma_id)) {
		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
		goto error;
	}

	if (rte_dma_info_get(dma_id, &info) != 0) {
		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
		goto error;
	}

	if (vchan_id >= info.max_vchans) {
		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
		goto error;
	}

	if (!dma_copy_track[dma_id].vchans) {
		struct async_dma_vchan_info *vchans;

		vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
				RTE_CACHE_LINE_SIZE);
		if (vchans == NULL) {
			VHOST_LOG_CONFIG("dma", ERR,
				"Failed to allocate vchans for DMA %d vChannel %u.\n",
				dma_id, vchan_id);
			goto error;
		}

		dma_copy_track[dma_id].vchans = vchans;
	}

	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
		VHOST_LOG_CONFIG("dma", INFO, "DMA %d vChannel %u already registered.\n",
			dma_id, vchan_id);
		pthread_mutex_unlock(&vhost_dma_lock);
		return 0;
	}

	max_desc = info.max_desc;
	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);

	pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
	if (!pkts_cmpl_flag_addr) {
		VHOST_LOG_CONFIG("dma", ERR,
			"Failed to allocate pkts_cmpl_flag_addr for DMA %d vChannel %u.\n",
			dma_id, vchan_id);

		if (dma_copy_track[dma_id].nr_vchans == 0) {
			rte_free(dma_copy_track[dma_id].vchans);
			dma_copy_track[dma_id].vchans = NULL;
		}
		goto error;
	}

	dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
	dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
	dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
	dma_copy_track[dma_id].nr_vchans++;

	pthread_mutex_unlock(&vhost_dma_lock);
	return 0;

error:
	pthread_mutex_unlock(&vhost_dma_lock);
	return -1;
}

int
rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
			"failed to check in-flight packets. virtqueue busy.\n");
		return ret;
	}

	if (vq->async)
		ret = vq->async->pkts_inflight_n;

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

int
rte_vhost_async_get_inflight_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
			__func__);
		return -1;
	}

	if (!vq->async)
		return ret;

	ret = vq->async->pkts_inflight_n;

	return ret;
}

int
rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
		struct rte_vhost_power_monitor_cond *pmc)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;

	if (dev == NULL)
		return -1;
	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (vq == NULL)
		return -1;

	if (vq_is_packed(dev)) {
		struct vring_packed_desc *desc;
		desc = vq->desc_packed;
		pmc->addr = &desc[vq->last_avail_idx].flags;
		if (vq->avail_wrap_counter)
			pmc->val = VRING_DESC_F_AVAIL;
		else
			pmc->val = VRING_DESC_F_USED;
		pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
		pmc->size = sizeof(desc[vq->last_avail_idx].flags);
		pmc->match = 1;
	} else {
		pmc->addr = &vq->avail->idx;
		pmc->val = vq->last_avail_idx & (vq->size - 1);
		pmc->mask = vq->size - 1;
		pmc->size = sizeof(vq->avail->idx);
		pmc->match = 0;
	}

	return 0;
}

int
rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id,
		struct rte_vhost_stat_name *name, unsigned int size)
{
	struct virtio_net *dev = get_device(vid);
	unsigned int i;

	if (dev == NULL)
		return -1;

	if (queue_id >= dev->nr_vring)
		return -1;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return -1;

	if (name == NULL || size < VHOST_NB_VQ_STATS)
		return VHOST_NB_VQ_STATS;

	for (i = 0; i < VHOST_NB_VQ_STATS; i++)
		snprintf(name[i].name, sizeof(name[i].name), "%s_q%u_%s",
			(queue_id & 1) ? "rx" : "tx",
			queue_id / 2, vhost_vq_stat_strings[i].name);

	return VHOST_NB_VQ_STATS;
}

int
rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
		struct rte_vhost_stat *stats, unsigned int n)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	unsigned int i;

	if (dev == NULL)
		return -1;

	if (queue_id >= dev->nr_vring)
		return -1;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return -1;

	if (stats == NULL || n < VHOST_NB_VQ_STATS)
		return VHOST_NB_VQ_STATS;

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);
	for (i = 0; i < VHOST_NB_VQ_STATS; i++) {
		stats[i].value =
			*(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
		stats[i].id = i;
	}
	rte_spinlock_unlock(&vq->access_lock);

	return VHOST_NB_VQ_STATS;
}

int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;

	if (dev == NULL)
		return -1;

	if (queue_id >= dev->nr_vring)
		return -1;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return -1;

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);
	memset(&vq->stats, 0, sizeof(vq->stats));
	rte_spinlock_unlock(&vq->access_lock);

	return 0;
}
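
/*
 * Retrieval sketch (illustrative): the stats API follows the usual
 * two-call convention, where a NULL or undersized buffer makes both
 * calls return the required count:
 *
 *	int n = rte_vhost_vring_stats_get_names(vid, queue_id, NULL, 0);
 *	// allocate 'n' name and stat entries, then:
 *	rte_vhost_vring_stats_get_names(vid, queue_id, names, n);
 *	rte_vhost_vring_stats_get(vid, queue_id, stats, n);
 */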
"rx" : "tx", 2072 queue_id / 2, vhost_vq_stat_strings[i].name); 2073 2074 return VHOST_NB_VQ_STATS; 2075 } 2076 2077 int 2078 rte_vhost_vring_stats_get(int vid, uint16_t queue_id, 2079 struct rte_vhost_stat *stats, unsigned int n) 2080 { 2081 struct virtio_net *dev = get_device(vid); 2082 struct vhost_virtqueue *vq; 2083 unsigned int i; 2084 2085 if (dev == NULL) 2086 return -1; 2087 2088 if (queue_id >= dev->nr_vring) 2089 return -1; 2090 2091 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED)) 2092 return -1; 2093 2094 if (stats == NULL || n < VHOST_NB_VQ_STATS) 2095 return VHOST_NB_VQ_STATS; 2096 2097 vq = dev->virtqueue[queue_id]; 2098 2099 rte_spinlock_lock(&vq->access_lock); 2100 for (i = 0; i < VHOST_NB_VQ_STATS; i++) { 2101 stats[i].value = 2102 *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset); 2103 stats[i].id = i; 2104 } 2105 rte_spinlock_unlock(&vq->access_lock); 2106 2107 return VHOST_NB_VQ_STATS; 2108 } 2109 2110 int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id) 2111 { 2112 struct virtio_net *dev = get_device(vid); 2113 struct vhost_virtqueue *vq; 2114 2115 if (dev == NULL) 2116 return -1; 2117 2118 if (queue_id >= dev->nr_vring) 2119 return -1; 2120 2121 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED)) 2122 return -1; 2123 2124 vq = dev->virtqueue[queue_id]; 2125 2126 rte_spinlock_lock(&vq->access_lock); 2127 memset(&vq->stats, 0, sizeof(vq->stats)); 2128 rte_spinlock_unlock(&vq->access_lock); 2129 2130 return 0; 2131 } 2132 2133 int 2134 rte_vhost_async_dma_unconfigure(int16_t dma_id, uint16_t vchan_id) 2135 { 2136 struct rte_dma_info info; 2137 struct rte_dma_stats stats = { 0 }; 2138 2139 pthread_mutex_lock(&vhost_dma_lock); 2140 2141 if (!rte_dma_is_valid(dma_id)) { 2142 VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id); 2143 goto error; 2144 } 2145 2146 if (rte_dma_info_get(dma_id, &info) != 0) { 2147 VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id); 2148 goto error; 2149 } 2150 2151 if (vchan_id >= info.max_vchans || !dma_copy_track[dma_id].vchans || 2152 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) { 2153 VHOST_LOG_CONFIG("dma", ERR, "Invalid channel %d:%u.\n", dma_id, vchan_id); 2154 goto error; 2155 } 2156 2157 if (rte_dma_stats_get(dma_id, vchan_id, &stats) != 0) { 2158 VHOST_LOG_CONFIG("dma", ERR, 2159 "Failed to get stats for DMA %d vChannel %u.\n", dma_id, vchan_id); 2160 goto error; 2161 } 2162 2163 if (stats.submitted - stats.completed != 0) { 2164 VHOST_LOG_CONFIG("dma", ERR, 2165 "Do not unconfigure when there are inflight packets.\n"); 2166 goto error; 2167 } 2168 2169 rte_free(dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr); 2170 dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = NULL; 2171 dma_copy_track[dma_id].nr_vchans--; 2172 2173 if (dma_copy_track[dma_id].nr_vchans == 0) { 2174 rte_free(dma_copy_track[dma_id].vchans); 2175 dma_copy_track[dma_id].vchans = NULL; 2176 } 2177 2178 pthread_mutex_unlock(&vhost_dma_lock); 2179 return 0; 2180 2181 error: 2182 pthread_mutex_unlock(&vhost_dma_lock); 2183 return -1; 2184 } 2185 2186 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO); 2187 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING); 2188