/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;

struct vhost_vq_stats_name_off {
	char name[RTE_VHOST_STATS_NAME_SIZE];
	unsigned int offset;
};

static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
	{"good_packets", offsetof(struct vhost_virtqueue, stats.packets)},
	{"good_bytes", offsetof(struct vhost_virtqueue, stats.bytes)},
	{"multicast_packets", offsetof(struct vhost_virtqueue, stats.multicast)},
	{"broadcast_packets", offsetof(struct vhost_virtqueue, stats.broadcast)},
	{"undersize_packets", offsetof(struct vhost_virtqueue, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct vhost_virtqueue, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct vhost_virtqueue, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct vhost_virtqueue, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct vhost_virtqueue, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct vhost_virtqueue, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct vhost_virtqueue, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct vhost_virtqueue, stats.size_bins[7])},
	{"guest_notifications", offsetof(struct vhost_virtqueue, stats.guest_notifications)},
	{"iotlb_hits", offsetof(struct vhost_virtqueue, stats.iotlb_hits)},
	{"iotlb_misses", offsetof(struct vhost_virtqueue, stats.iotlb_misses)},
	{"inflight_submitted", offsetof(struct vhost_virtqueue, stats.inflight_submitted)},
	{"inflight_completed", offsetof(struct vhost_virtqueue, stats.inflight_completed)},
};

#define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t iova, uint64_t *size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!*size))
		return 0;

	tmp_size = *size;

	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == *size) {
		if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
			vq->stats.iotlb_hits++;
		return vva;
	}

	if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
		vq->stats.iotlb_misses++;

	iova += tmp_size;

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm)) {
			VHOST_LOG_DATA(dev->ifname, ERR,
				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
				iova);
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		}

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
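/*
 * Illustrative sketch (not part of this file): how data-path code is
 * expected to use the translation helper above. The caller holds the
 * IOTLB read-lock for the whole burst and must cope with a miss, in
 * which case the miss request has already been sent and the descriptor
 * can be retried once the front-end pushes the IOTLB update. "dev",
 * "vq", "desc_iova" and "desc_len" are assumed to come from the caller.
 *
 *	uint64_t len = desc_len;
 *	uint64_t vva;
 *
 *	vhost_user_iotlb_rd_lock(vq);
 *	vva = vhost_iova_to_vva(dev, vq, desc_iova, &len, VHOST_ACCESS_RO);
 *	if (!vva || len != desc_len) {
 *		// miss or partial mapping: skip this descriptor for now
 *	}
 *	vhost_user_iotlb_rd_unlock(vq);
 */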
#define VHOST_LOG_PAGE	4096

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * __sync_ built-ins are deprecated, but __atomic_ ones
	 * are sub-optimal in older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}

static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	vhost_set_bit(page % 8, &log_base[page / 8]);
}

void
__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}

void
__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		       uint64_t iova, uint64_t len)
{
	uint64_t hva, gpa, map_len;
	map_len = len;

	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(dev->ifname, ERR,
			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
			iova);
		return;
	}

	gpa = hva_to_gpa(dev, hva, len);
	if (gpa)
		__vhost_log_write(dev, gpa, len);
}
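/*
 * Worked example (informative): with VHOST_LOG_PAGE = 4096, a 1-byte
 * write at guest physical address 0x2345 dirties page 0x2345 / 4096 = 2,
 * i.e. __vhost_log_write() sets bit (2 % 8) = 2 of byte (2 / 8) = 0
 * of the dirty log bitmap.
 */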
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (unlikely(!dev->log_base))
		return;

	/* No cache, nothing to sync */
	if (unlikely(!vq->log_cache))
		return;

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * '__sync' builtins are deprecated, but '__atomic' ones
		 * are sub-optimal in older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vq->log_cache_nb_elem = 0;
}

static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	if (unlikely(!vq->log_cache)) {
		/* No logging cache allocated, write dirty log map directly */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}

void
__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}

void
__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			     uint64_t iova, uint64_t len)
{
	uint64_t hva, gpa, map_len;
	map_len = len;

	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(dev->ifname, ERR,
			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
			iova);
		return;
	}

	gpa = hva_to_gpa(dev, hva, len);
	if (gpa)
		__vhost_log_cache_write(dev, vq, gpa, len);
}
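/*
 * Usage sketch (illustrative): the cached variants above are meant to be
 * paired by the data path, which batches bitmap updates per burst:
 *
 *	__vhost_log_cache_write_iova(dev, vq, iova, len);  // per descriptor
 *	...
 *	vhost_log_cache_sync(dev, vq);  // once, at the end of the burst
 *
 * vhost_log_cache_sync() is the inline wrapper in vhost.h around
 * __vhost_log_cache_sync().
 */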
void *
vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t desc_addr, uint64_t desc_len)
{
	void *idesc;
	uint64_t src, dst;
	uint64_t len, remain = desc_len;

	idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
	if (unlikely(!idesc))
		return NULL;

	dst = (uint64_t)(uintptr_t)idesc;

	while (remain) {
		len = remain;
		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !len)) {
			rte_free(idesc);
			return NULL;
		}

		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		desc_addr += len;
	}

	return idesc;
}

void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

void
cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
		return;

	if (vq_is_packed(dev)) {
		if (vq->inflight_packed)
			vq->inflight_packed = NULL;
	} else {
		if (vq->inflight_split)
			vq->inflight_split = NULL;
	}

	if (vq->resubmit_inflight) {
		if (vq->resubmit_inflight->resubmit_list) {
			rte_free(vq->resubmit_inflight->resubmit_list);
			vq->resubmit_inflight->resubmit_list = NULL;
		}
		rte_free(vq->resubmit_inflight);
		vq->resubmit_inflight = NULL;
	}
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++) {
		cleanup_vq(dev->virtqueue[i], destroy);
		cleanup_vq_inflight(dev, dev->virtqueue[i]);
	}
}

static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
	if (!vq->async)
		return;

	rte_free(vq->async->pkts_info);
	rte_free(vq->async->pkts_cmpl_flag);

	rte_free(vq->async->buffers_packed);
	vq->async->buffers_packed = NULL;
	rte_free(vq->async->descs_split);
	vq->async->descs_split = NULL;

	rte_free(vq->async);
	vq->async = NULL;
}

void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (vq_is_packed(dev))
		rte_free(vq->shadow_used_packed);
	else
		rte_free(vq->shadow_used_split);

	vhost_free_async_mem(vq);
	rte_free(vq->batch_copy_elems);
	rte_mempool_free(vq->iotlb_pool);
	rte_free(vq->log_cache);
	rte_free(vq);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++)
		free_vq(dev, dev->virtqueue[i]);

	rte_free(dev);
}

static __rte_always_inline int
log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
		return 0;

	vq->log_guest_addr = translate_log_addr(dev, vq,
			vq->ring_addrs.log_guest_addr);
	if (vq->log_guest_addr == 0)
		return -1;

	return 0;
}

/*
 * Converts vring log address to GPA
 * If IOMMU is enabled, the log address is IOVA
 * If IOMMU not enabled, the log address is already GPA
 *
 * Caller should have iotlb_lock read-locked
 */
uint64_t
translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
		const uint64_t exp_size = sizeof(uint64_t);
		uint64_t hva, gpa;
		uint64_t size = exp_size;

		hva = vhost_iova_to_vva(dev, vq, log_addr,
					&size, VHOST_ACCESS_RW);

		if (size != exp_size)
			return 0;

		gpa = hva_to_gpa(dev, hva, exp_size);
		if (!gpa) {
			VHOST_LOG_DATA(dev->ifname, ERR,
				"failed to find GPA for log_addr: 0x%"
				PRIx64 " hva: 0x%" PRIx64 "\n",
				log_addr, hva);
			return 0;
		}
		return gpa;

	} else
		return log_addr;
}

/* Caller should have iotlb_lock read-locked */
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_desc) * vq->size;
	size = req_size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
			vq->ring_addrs.desc_user_addr,
			&size, VHOST_ACCESS_RW);
	if (!vq->desc || size != req_size)
		return -1;

	req_size = sizeof(struct vring_avail);
	req_size += sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
			vq->ring_addrs.avail_user_addr,
			&size, VHOST_ACCESS_RW);
	if (!vq->avail || size != req_size)
		return -1;

	req_size = sizeof(struct vring_used);
	req_size += sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
			vq->ring_addrs.used_user_addr,
			&size, VHOST_ACCESS_RW);
	if (!vq->used || size != req_size)
		return -1;

	return 0;
}

/* Caller should have iotlb_lock read-locked */
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_packed_desc) * vq->size;
	size = req_size;
	vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->desc_packed || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->driver_event || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->device_event || size != req_size)
		return -1;

	return 0;
}
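/*
 * Size-check example (informative): for a split ring with vq->size = 256
 * and VIRTIO_RING_F_EVENT_IDX negotiated, vring_translate_split() above
 * requires contiguous IOVA mappings of:
 *   desc:  16 * 256        = 4096 bytes
 *   avail: 4 + 2 * 256 + 2 =  518 bytes (incl. used_event)
 *   used:  4 + 8 * 256 + 2 = 2054 bytes (incl. avail_event)
 * Any shorter mapping makes the corresponding translation fail.
 */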
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{

	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	if (vq_is_packed(dev)) {
		if (vring_translate_packed(dev, vq) < 0)
			return -1;
	} else {
		if (vring_translate_split(dev, vq) < 0)
			return -1;
	}

	if (log_translate(dev, vq) < 0)
		return -1;

out:
	vq->access_ok = true;

	return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = false;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->log_guest_addr = 0;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}

static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int numa_node = SOCKET_ID_ANY;

	if (vring_idx >= VHOST_MAX_VRING) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to init vring, out of bounds (%d)\n",
				vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	if (!vq) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "virtqueue not allocated (%d)\n", vring_idx);
		return;
	}

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;

#ifdef RTE_LIBRTE_VHOST_NUMA
	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
			rte_strerror(errno));
		numa_node = SOCKET_ID_ANY;
	}
#endif
	vq->numa_node = numa_node;

	vhost_user_iotlb_init(dev, vring_idx);
}

static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int callfd;

	if (vring_idx >= VHOST_MAX_VRING) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to reset vring, out of bounds (%d)\n",
				vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	if (!vq) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to reset vring, virtqueue not allocated (%d)\n",
			vring_idx);
		return;
	}

	callfd = vq->callfd;
	init_vring_queue(dev, vring_idx);
	vq->callfd = callfd;
}

int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	uint32_t i;

	/* Also allocate holes, if any, up to requested vring index. */
	for (i = 0; i <= vring_idx; i++) {
		if (dev->virtqueue[i])
			continue;

		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
		if (vq == NULL) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate memory for vring %u.\n",
				i);
			return -1;
		}

		dev->virtqueue[i] = vq;
		init_vring_queue(dev, i);
		rte_spinlock_init(&vq->access_lock);
		vq->avail_wrap_counter = 1;
		vq->used_wrap_counter = 1;
		vq->signalled_used_valid = false;
	}

	dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);

	return 0;
}
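/*
 * Example (informative): if the front-end sets up vring 3 before vrings
 * 0..2, alloc_vring_queue() above allocates and initializes all four so
 * dev->virtqueue[] has no NULL holes below nr_vring, which becomes 4.
 */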
/*
 * Reset some variables in device structure, while keeping a few
 * others untouched, such as vid, ifname, nr_vring: they
 * should remain the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

	for (i = 0; i < dev->nr_vring; i++)
		reset_vring_queue(dev, i);
}

/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	pthread_mutex_lock(&vhost_dev_lock);
	for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}

	if (i == RTE_MAX_VHOST_DEVICE) {
		VHOST_LOG_CONFIG("device", ERR, "failed to find a free slot for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		VHOST_LOG_CONFIG("device", ERR, "failed to allocate memory for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	vhost_devices[i] = dev;
	pthread_mutex_unlock(&vhost_dev_lock);

	dev->vid = i;
	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	dev->slave_req_fd = -1;
	dev->postcopy_ufd = -1;
	rte_spinlock_init(&dev->slave_req_lock);

	return i;
}

void
vhost_destroy_device_notify(struct virtio_net *dev)
{
	struct rte_vdpa_device *vdpa_dev;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		vdpa_dev = dev->vdpa_dev;
		if (vdpa_dev)
			vdpa_dev->ops->dev_close(dev->vid);
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(dev->vid);
	}
}

/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}

void
vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->vdpa_dev = vdpa_dev;
}

void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
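/*
 * Lifecycle sketch (illustrative, internal API): a new vhost-user
 * connection allocates a device slot; tear-down releases it. The
 * interface name below is only an example.
 *
 *	int vid = vhost_new_device();
 *
 *	if (vid >= 0) {
 *		vhost_set_ifname(vid, "vhost-net-0", strlen("vhost-net-0"));
 *		...
 *		vhost_destroy_device(vid);
 *	}
 */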
void
vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (enable)
		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	else
		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	if (!compliant_ol_flags)
		dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
	else
		dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
	if (stats_enabled)
		dev->flags |= VIRTIO_DEV_STATS_ENABLED;
	else
		dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
}

void
vhost_enable_extbuf(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->extbuf = 1;
}

void
vhost_enable_linearbuf(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->linearbuf = 1;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || mtu == NULL)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL || numa_available() != 0)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
			rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}

uint32_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || buf == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || features == NULL)
		return -1;

	*features = dev->features;
	return 0;
}

int
rte_vhost_get_negotiated_protocol_features(int vid,
		uint64_t *protocol_features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || protocol_features == NULL)
		return -1;

	*protocol_features = dev->protocol_features;
	return 0;
}

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (dev == NULL || mem == NULL)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}
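/*
 * Usage sketch (illustrative): the returned table is malloc()'ed, so the
 * caller is expected to free() it when done.
 *
 *	struct rte_vhost_memory *mem = NULL;
 *	uint32_t i;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) == 0) {
 *		for (i = 0; i < mem->nregions; i++)
 *			printf("region %u: GPA 0x%" PRIx64 " size 0x%" PRIx64 "\n",
 *				i, mem->regions[i].guest_phys_addr,
 *				mem->regions[i].size);
 *		free(mem);
 *	}
 */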
int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			  struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL || vring == NULL)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		vring->desc_packed = vq->desc_packed;
		vring->driver_event = vq->driver_event;
		vring->device_event = vq->device_event;
	} else {
		vring->desc = vq->desc;
		vring->avail = vq->avail;
		vring->used = vq->used;
	}
	vring->log_guest_addr = vq->log_guest_addr;

	vring->callfd = vq->callfd;
	vring->kickfd = vq->kickfd;
	vring->size = vq->size;

	return 0;
}

int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
				  struct rte_vhost_ring_inflight *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (vq_is_packed(dev)) {
		if (unlikely(!vq->inflight_packed))
			return -1;

		vring->inflight_packed = vq->inflight_packed;
	} else {
		if (unlikely(!vq->inflight_split))
			return -1;

		vring->inflight_split = vq->inflight_split;
	}

	vring->resubmit_inflight = vq->resubmit_inflight;

	return 0;
}

int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
				  uint16_t idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	vq->inflight_split->desc[idx].counter = vq->global_counter++;
	vq->inflight_split->desc[idx].inflight = 1;
	return 0;
}

int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
				   uint16_t head, uint16_t last,
				   uint16_t *inflight_entry)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	struct vring_packed_desc *desc;
	uint16_t old_free_head, free_head;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	desc = vq->desc_packed;
	old_free_head = inflight_info->old_free_head;
	if (unlikely(old_free_head >= vq->size))
		return -1;

	free_head = old_free_head;

	/* init header descriptor */
	inflight_info->desc[old_free_head].num = 0;
	inflight_info->desc[old_free_head].counter = vq->global_counter++;
	inflight_info->desc[old_free_head].inflight = 1;

	/* save desc entry in flight entry */
	while (head != ((last + 1) % vq->size)) {
		inflight_info->desc[old_free_head].num++;
		inflight_info->desc[free_head].addr = desc[head].addr;
		inflight_info->desc[free_head].len = desc[head].len;
		inflight_info->desc[free_head].flags = desc[head].flags;
		inflight_info->desc[free_head].id = desc[head].id;

		inflight_info->desc[old_free_head].last = free_head;
		free_head = inflight_info->desc[free_head].next;
		inflight_info->free_head = free_head;
		head = (head + 1) % vq->size;
	}

	inflight_info->old_free_head = free_head;
	*inflight_entry = old_free_head;

	return 0;
}
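/*
 * Flow sketch (illustrative, following the vhost-user inflight I/O
 * tracking description): a back-end using the split inflight helpers
 * typically does, per request:
 *
 *	rte_vhost_set_inflight_desc_split(vid, vring_idx, desc_idx);
 *	// ... process the request, fill the used ring entry ...
 *	rte_vhost_set_last_inflight_io_split(vid, vring_idx, desc_idx);
 *	// ... update used->idx ...
 *	rte_vhost_clr_inflight_desc_split(vid, vring_idx, used_idx, desc_idx);
 */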
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
				  uint16_t last_used_idx, uint16_t idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	vq->inflight_split->desc[idx].inflight = 0;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	vq->inflight_split->used_idx = last_used_idx;
	return 0;
}

int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
				   uint16_t head)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	inflight_info->desc[head].inflight = 0;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	inflight_info->old_free_head = inflight_info->free_head;
	inflight_info->old_used_idx = inflight_info->used_idx;
	inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;

	return 0;
}

int
rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
				     uint16_t idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	vq->inflight_split->last_inflight_io = idx;
	return 0;
}
int
rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
				      uint16_t head)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint16_t last;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	last = inflight_info->desc[head].last;
	if (unlikely(last >= vq->size))
		return -1;

	inflight_info->desc[last].next = inflight_info->free_head;
	inflight_info->free_head = head;
	inflight_info->used_idx += inflight_info->desc[head].num;
	if (inflight_info->used_idx >= inflight_info->desc_num) {
		inflight_info->used_idx -= inflight_info->desc_num;
		inflight_info->used_wrap_counter =
			!inflight_info->used_wrap_counter;
	}

	return 0;
}

int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	rte_spinlock_lock(&vq->access_lock);

	if (vq_is_packed(dev))
		vhost_vring_call_packed(dev, vq);
	else
		vhost_vring_call_split(dev, vq);

	rte_spinlock_unlock(&vq->access_lock);

	return 0;
}

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint16_t ret = 0;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (queue_id >= VHOST_MAX_VRING)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return 0;

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}

static inline int
vhost_enable_notify_split(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	if (vq->used == NULL)
		return -1;

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (enable)
			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
		else
			vq->used->flags |= VRING_USED_F_NO_NOTIFY;
	} else {
		if (enable)
			vhost_avail_event(vq) = vq->last_avail_idx;
	}
	return 0;
}
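/*
 * Informative note on the split path above: with VIRTIO_RING_F_EVENT_IDX,
 * writing vhost_avail_event(vq) = vq->last_avail_idx asks the guest to
 * kick once it posts a descriptor at that index; "disable" then simply
 * means not advancing the event index, which is why the else branch has
 * no explicit disable write.
 */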
static inline int
vhost_enable_notify_packed(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	uint16_t flags;

	if (vq->device_event == NULL)
		return -1;

	if (!enable) {
		vq->device_event->flags = VRING_EVENT_F_DISABLE;
		return 0;
	}

	flags = VRING_EVENT_F_ENABLE;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		flags = VRING_EVENT_F_DESC;
		vq->device_event->off_wrap = vq->last_avail_idx |
			vq->avail_wrap_counter << 15;
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vq->device_event->flags = flags;
	return 0;
}

int
vhost_enable_guest_notification(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	/*
	 * If the virtqueue is not ready yet, the setting will be
	 * applied once it becomes ready.
	 */
	if (!vq->ready)
		return 0;

	if (vq_is_packed(dev))
		return vhost_enable_notify_packed(dev, vq, enable);
	else
		return vhost_enable_notify_split(dev, vq, enable);
}

int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	int ret;

	if (!dev)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	rte_spinlock_lock(&vq->access_lock);

	vq->notif_enable = enable;
	ret = vhost_enable_guest_notification(dev, vq, enable);

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			 uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;
	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint32_t ret = 0;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		VHOST_LOG_DATA(dev->ifname, ERR,
			"%s: invalid virtqueue idx %d.\n",
			__func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}

struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return NULL;

	return dev->vdpa_dev;
}

int
rte_vhost_get_log_base(int vid, uint64_t *log_base,
		uint64_t *log_size)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || log_base == NULL || log_size == NULL)
		return -1;

	*log_base = dev->log_base;
	*log_size = dev->log_size;

	return 0;
}
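/*
 * Usage sketch (illustrative): a live-migration helper can locate the
 * dirty log once logging has been negotiated. "base" is the host virtual
 * address of the bitmap and "size" its length in bytes; each bit covers
 * one VHOST_LOG_PAGE (4 KiB) of guest memory.
 *
 *	uint64_t base, size;
 *
 *	if (rte_vhost_get_log_base(vid, &base, &size) == 0 && size != 0) {
 *		// scan the bitmap at (uintptr_t)base
 *	}
 */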
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		*last_avail_idx = (vq->avail_wrap_counter << 15) |
				  vq->last_avail_idx;
		*last_used_idx = (vq->used_wrap_counter << 15) |
				 vq->last_used_idx;
	} else {
		*last_avail_idx = vq->last_avail_idx;
		*last_used_idx = vq->last_used_idx;
	}

	return 0;
}

int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		vq->last_avail_idx = last_avail_idx & 0x7fff;
		vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
		vq->last_used_idx = last_used_idx & 0x7fff;
		vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
	} else {
		vq->last_avail_idx = last_avail_idx;
		vq->last_used_idx = last_used_idx;
	}

	return 0;
}

int
rte_vhost_get_vring_base_from_inflight(int vid,
		uint16_t queue_id,
		uint16_t *last_avail_idx,
		uint16_t *last_used_idx)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (!vq_is_packed(dev))
		return -1;

	inflight_info = vq->inflight_packed;
	if (!inflight_info)
		return -1;

	*last_avail_idx = (inflight_info->old_used_wrap_counter << 15) |
			  inflight_info->old_used_idx;
	*last_used_idx = *last_avail_idx;

	return 0;
}

int
rte_vhost_extern_callback_register(int vid,
		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || ops == NULL)
		return -1;

	dev->extern_ops = *ops;
	dev->extern_data = ctx;
	return 0;
}

static __rte_always_inline int
async_channel_register(int vid, uint16_t queue_id)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
	struct vhost_async *async;
	int node = vq->numa_node;

	if (unlikely(vq->async)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"async register failed: already registered (qid: %d)\n",
			queue_id);
		return -1;
	}

	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
	if (!async) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate async metadata (qid: %d)\n",
			queue_id);
		return -1;
	}

	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
			RTE_CACHE_LINE_SIZE, node);
	if (!async->pkts_info) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate async_pkts_info (qid: %d)\n",
			queue_id);
		goto out_free_async;
	}

	async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
			RTE_CACHE_LINE_SIZE, node);
	if (!async->pkts_cmpl_flag) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate async pkts_cmpl_flag (qid: %d)\n",
			queue_id);
		/* also free pkts_info, which would otherwise leak */
		goto out_free_inflight;
	}

	if (vq_is_packed(dev)) {
		async->buffers_packed = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem_packed),
				RTE_CACHE_LINE_SIZE, node);
		if (!async->buffers_packed) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate async buffers (qid: %d)\n",
				queue_id);
			goto out_free_inflight;
		}
	} else {
		async->descs_split = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem),
				RTE_CACHE_LINE_SIZE, node);
		if (!async->descs_split) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate async descs (qid: %d)\n",
				queue_id);
			goto out_free_inflight;
		}
	}

	vq->async = async;

	return 0;
out_free_inflight:
	rte_free(async->pkts_info);
out_free_async:
	rte_free(async);

	return -1;
}
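/*
 * Usage sketch (illustrative): enabling the async data path on a vring
 * once a DMA vchannel has been configured; "dma_id" and "vchan" are
 * assumed to identify a DMA device set up by the application.
 *
 *	if (rte_vhost_async_dma_configure(dma_id, vchan) == 0 &&
 *	    rte_vhost_async_channel_register(vid, queue_id) == 0) {
 *		// rte_vhost_submit_enqueue_burst() may now be used
 *		// on this virtqueue
 *	}
 */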
int
rte_vhost_async_channel_register(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret;

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

	rte_spinlock_lock(&vq->access_lock);
	ret = async_channel_register(vid, queue_id);
	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

int
rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

	if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
			__func__);
		return -1;
	}

	return async_channel_register(vid, queue_id);
}

int
rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to unregister async channel, virtqueue busy.\n");
		return ret;
	}

	if (!vq->async) {
		ret = 0;
	} else if (vq->async->pkts_inflight_n) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"inflight packets must be completed before unregistration.\n");
	} else {
		vhost_free_async_mem(vq);
		ret = 0;
	}

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}
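/*
 * Usage sketch (illustrative): unregistration fails while the virtqueue
 * is busy or packets are still in flight, so a back-end usually drains
 * completions (e.g. with rte_vhost_poll_enqueue_completed()) and retries:
 *
 *	while (rte_vhost_async_get_inflight(vid, queue_id) > 0)
 *		; // drain completions here
 *	rte_vhost_async_channel_unregister(vid, queue_id);
 */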
int
rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return -1;

	if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
			__func__);
		return -1;
	}

	if (!vq->async)
		return 0;

	if (vq->async->pkts_inflight_n) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"inflight packets must be completed before unregistration.\n");
		return -1;
	}

	vhost_free_async_mem(vq);

	return 0;
}

int
rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
{
	struct rte_dma_info info;
	void *pkts_cmpl_flag_addr;
	uint16_t max_desc;

	if (!rte_dma_is_valid(dma_id)) {
		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
		return -1;
	}

	if (rte_dma_info_get(dma_id, &info) != 0) {
		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
		return -1;
	}

	if (vchan_id >= info.max_vchans) {
		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
		return -1;
	}

	if (!dma_copy_track[dma_id].vchans) {
		struct async_dma_vchan_info *vchans;

		vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
				RTE_CACHE_LINE_SIZE);
		if (vchans == NULL) {
			VHOST_LOG_CONFIG("dma", ERR,
				"Failed to allocate vchans for DMA %d vChannel %u.\n",
				dma_id, vchan_id);
			return -1;
		}

		dma_copy_track[dma_id].vchans = vchans;
	}

	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
		VHOST_LOG_CONFIG("dma", INFO, "DMA %d vChannel %u already registered.\n",
			dma_id, vchan_id);
		return 0;
	}

	max_desc = info.max_desc;
	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);

	pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
	if (!pkts_cmpl_flag_addr) {
		VHOST_LOG_CONFIG("dma", ERR,
			"Failed to allocate pkts_cmpl_flag_addr for DMA %d vChannel %u.\n",
			dma_id, vchan_id);

		if (dma_copy_track[dma_id].nr_vchans == 0) {
			rte_free(dma_copy_track[dma_id].vchans);
			dma_copy_track[dma_id].vchans = NULL;
		}
		return -1;
	}

	dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
	dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
	dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
	dma_copy_track[dma_id].nr_vchans++;

	return 0;
}

int
rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
			"failed to check in-flight packets. virtqueue busy.\n");
		return ret;
	}

	if (vq->async)
		ret = vq->async->pkts_inflight_n;

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}
virtqueue busy.\n"); 1955 return ret; 1956 } 1957 1958 if (vq->async) 1959 ret = vq->async->pkts_inflight_n; 1960 1961 rte_spinlock_unlock(&vq->access_lock); 1962 1963 return ret; 1964 } 1965 1966 int 1967 rte_vhost_async_get_inflight_thread_unsafe(int vid, uint16_t queue_id) 1968 { 1969 struct vhost_virtqueue *vq; 1970 struct virtio_net *dev = get_device(vid); 1971 int ret = -1; 1972 1973 if (dev == NULL) 1974 return ret; 1975 1976 if (queue_id >= VHOST_MAX_VRING) 1977 return ret; 1978 1979 vq = dev->virtqueue[queue_id]; 1980 1981 if (vq == NULL) 1982 return ret; 1983 1984 if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) { 1985 VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n", 1986 __func__); 1987 return -1; 1988 } 1989 1990 if (!vq->async) 1991 return ret; 1992 1993 ret = vq->async->pkts_inflight_n; 1994 1995 return ret; 1996 } 1997 1998 int 1999 rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, 2000 struct rte_vhost_power_monitor_cond *pmc) 2001 { 2002 struct virtio_net *dev = get_device(vid); 2003 struct vhost_virtqueue *vq; 2004 2005 if (dev == NULL) 2006 return -1; 2007 if (queue_id >= VHOST_MAX_VRING) 2008 return -1; 2009 2010 vq = dev->virtqueue[queue_id]; 2011 if (vq == NULL) 2012 return -1; 2013 2014 if (vq_is_packed(dev)) { 2015 struct vring_packed_desc *desc; 2016 desc = vq->desc_packed; 2017 pmc->addr = &desc[vq->last_avail_idx].flags; 2018 if (vq->avail_wrap_counter) 2019 pmc->val = VRING_DESC_F_AVAIL; 2020 else 2021 pmc->val = VRING_DESC_F_USED; 2022 pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED; 2023 pmc->size = sizeof(desc[vq->last_avail_idx].flags); 2024 pmc->match = 1; 2025 } else { 2026 pmc->addr = &vq->avail->idx; 2027 pmc->val = vq->last_avail_idx & (vq->size - 1); 2028 pmc->mask = vq->size - 1; 2029 pmc->size = sizeof(vq->avail->idx); 2030 pmc->match = 0; 2031 } 2032 2033 return 0; 2034 } 2035 2036 2037 int 2038 rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id, 2039 struct rte_vhost_stat_name *name, unsigned int size) 2040 { 2041 struct virtio_net *dev = get_device(vid); 2042 unsigned int i; 2043 2044 if (dev == NULL) 2045 return -1; 2046 2047 if (queue_id >= dev->nr_vring) 2048 return -1; 2049 2050 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED)) 2051 return -1; 2052 2053 if (name == NULL || size < VHOST_NB_VQ_STATS) 2054 return VHOST_NB_VQ_STATS; 2055 2056 for (i = 0; i < VHOST_NB_VQ_STATS; i++) 2057 snprintf(name[i].name, sizeof(name[i].name), "%s_q%u_%s", 2058 (queue_id & 1) ? 
"rx" : "tx", 2059 queue_id / 2, vhost_vq_stat_strings[i].name); 2060 2061 return VHOST_NB_VQ_STATS; 2062 } 2063 2064 int 2065 rte_vhost_vring_stats_get(int vid, uint16_t queue_id, 2066 struct rte_vhost_stat *stats, unsigned int n) 2067 { 2068 struct virtio_net *dev = get_device(vid); 2069 struct vhost_virtqueue *vq; 2070 unsigned int i; 2071 2072 if (dev == NULL) 2073 return -1; 2074 2075 if (queue_id >= dev->nr_vring) 2076 return -1; 2077 2078 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED)) 2079 return -1; 2080 2081 if (stats == NULL || n < VHOST_NB_VQ_STATS) 2082 return VHOST_NB_VQ_STATS; 2083 2084 vq = dev->virtqueue[queue_id]; 2085 2086 rte_spinlock_lock(&vq->access_lock); 2087 for (i = 0; i < VHOST_NB_VQ_STATS; i++) { 2088 stats[i].value = 2089 *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset); 2090 stats[i].id = i; 2091 } 2092 rte_spinlock_unlock(&vq->access_lock); 2093 2094 return VHOST_NB_VQ_STATS; 2095 } 2096 2097 int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id) 2098 { 2099 struct virtio_net *dev = get_device(vid); 2100 struct vhost_virtqueue *vq; 2101 2102 if (dev == NULL) 2103 return -1; 2104 2105 if (queue_id >= dev->nr_vring) 2106 return -1; 2107 2108 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED)) 2109 return -1; 2110 2111 vq = dev->virtqueue[queue_id]; 2112 2113 rte_spinlock_lock(&vq->access_lock); 2114 memset(&vq->stats, 0, sizeof(vq->stats)); 2115 rte_spinlock_unlock(&vq->access_lock); 2116 2117 return 0; 2118 } 2119 2120 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO); 2121 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING); 2122