1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2018 Intel Corporation 3 */ 4 5 /* Security model 6 * -------------- 7 * The vhost-user protocol connection is an external interface, so it must be 8 * robust against invalid inputs. 9 * 10 * This is important because the vhost-user master is only one step removed 11 * from the guest. Malicious guests that have escaped will then launch further 12 * attacks from the vhost-user master. 13 * 14 * Even in deployments where guests are trusted, a bug in the vhost-user master 15 * can still cause invalid messages to be sent. Such messages must not 16 * compromise the stability of the DPDK application by causing crashes, memory 17 * corruption, or other problematic behavior. 18 * 19 * Do not assume received VhostUserMsg fields contain sensible values! 20 */ 21 22 #include <stdint.h> 23 #include <stdio.h> 24 #include <stdlib.h> 25 #include <string.h> 26 #include <unistd.h> 27 #include <fcntl.h> 28 #include <sys/ioctl.h> 29 #include <sys/mman.h> 30 #include <sys/stat.h> 31 #include <sys/syscall.h> 32 #ifdef RTE_LIBRTE_VHOST_NUMA 33 #include <numaif.h> 34 #endif 35 #ifdef RTE_LIBRTE_VHOST_POSTCOPY 36 #include <linux/userfaultfd.h> 37 #endif 38 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */ 39 #include <linux/memfd.h> 40 #define MEMFD_SUPPORTED 41 #endif 42 43 #include <rte_common.h> 44 #include <rte_malloc.h> 45 #include <rte_log.h> 46 #include <rte_vfio.h> 47 #include <rte_errno.h> 48 49 #include "iotlb.h" 50 #include "vhost.h" 51 #include "vhost_user.h" 52 53 #define VIRTIO_MIN_MTU 68 54 #define VIRTIO_MAX_MTU 65535 55 56 #define INFLIGHT_ALIGNMENT 64 57 #define INFLIGHT_VERSION 0x1 58 59 typedef struct vhost_message_handler { 60 const char *description; 61 int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx, 62 int main_fd); 63 bool accepts_fd; 64 } vhost_message_handler_t; 65 static vhost_message_handler_t vhost_message_handlers[]; 66 67 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx); 68 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx); 69 70 static void 71 close_msg_fds(struct vhu_msg_context *ctx) 72 { 73 int i; 74 75 for (i = 0; i < ctx->fd_num; i++) { 76 int fd = ctx->fds[i]; 77 78 if (fd == -1) 79 continue; 80 81 ctx->fds[i] = -1; 82 close(fd); 83 } 84 } 85 86 /* 87 * Ensure the expected number of FDs is received, 88 * close all FDs and return an error if this is not the case. 89 */ 90 static int 91 validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds) 92 { 93 if (ctx->fd_num == expected_fds) 94 return 0; 95 96 VHOST_LOG_CONFIG(dev->ifname, ERR, 97 "expect %d FDs for request %s, received %d\n", 98 expected_fds, vhost_message_handlers[ctx->msg.request.master].description, 99 ctx->fd_num); 100 101 close_msg_fds(ctx); 102 103 return -1; 104 } 105 106 static uint64_t 107 get_blk_size(int fd) 108 { 109 struct stat stat; 110 int ret; 111 112 ret = fstat(fd, &stat); 113 return ret == -1 ? 
(uint64_t)-1 : (uint64_t)stat.st_blksize; 114 } 115 116 static void 117 async_dma_map(struct virtio_net *dev, bool do_map) 118 { 119 int ret = 0; 120 uint32_t i; 121 struct guest_page *page; 122 123 if (do_map) { 124 for (i = 0; i < dev->nr_guest_pages; i++) { 125 page = &dev->guest_pages[i]; 126 ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD, 127 page->host_user_addr, 128 page->host_iova, 129 page->size); 130 if (ret) { 131 /* 132 * DMA device may bind with kernel driver, in this case, 133 * we don't need to program IOMMU manually. However, if no 134 * device is bound with vfio/uio in DPDK, and vfio kernel 135 * module is loaded, the API will still be called and return 136 * with ENODEV. 137 * 138 * DPDK vfio only returns ENODEV in very similar situations 139 * (vfio either unsupported, or supported but no devices found). 140 * Either way, no mappings could be performed. We treat it as 141 * normal case in async path. This is a workaround. 142 */ 143 if (rte_errno == ENODEV) 144 return; 145 146 /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */ 147 VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine map failed\n"); 148 } 149 } 150 151 } else { 152 for (i = 0; i < dev->nr_guest_pages; i++) { 153 page = &dev->guest_pages[i]; 154 ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD, 155 page->host_user_addr, 156 page->host_iova, 157 page->size); 158 if (ret) { 159 /* like DMA map, ignore the kernel driver case when unmap. */ 160 if (rte_errno == EINVAL) 161 return; 162 163 VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine unmap failed\n"); 164 } 165 } 166 } 167 } 168 169 static void 170 free_mem_region(struct virtio_net *dev) 171 { 172 uint32_t i; 173 struct rte_vhost_mem_region *reg; 174 175 if (!dev || !dev->mem) 176 return; 177 178 if (dev->async_copy && rte_vfio_is_enabled("vfio")) 179 async_dma_map(dev, false); 180 181 for (i = 0; i < dev->mem->nregions; i++) { 182 reg = &dev->mem->regions[i]; 183 if (reg->host_user_addr) { 184 munmap(reg->mmap_addr, reg->mmap_size); 185 close(reg->fd); 186 } 187 } 188 } 189 190 void 191 vhost_backend_cleanup(struct virtio_net *dev) 192 { 193 struct rte_vdpa_device *vdpa_dev; 194 195 vdpa_dev = dev->vdpa_dev; 196 if (vdpa_dev && vdpa_dev->ops->dev_cleanup != NULL) 197 vdpa_dev->ops->dev_cleanup(dev->vid); 198 199 if (dev->mem) { 200 free_mem_region(dev); 201 rte_free(dev->mem); 202 dev->mem = NULL; 203 } 204 205 rte_free(dev->guest_pages); 206 dev->guest_pages = NULL; 207 208 if (dev->log_addr) { 209 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); 210 dev->log_addr = 0; 211 } 212 213 if (dev->inflight_info) { 214 if (dev->inflight_info->addr) { 215 munmap(dev->inflight_info->addr, 216 dev->inflight_info->size); 217 dev->inflight_info->addr = NULL; 218 } 219 220 if (dev->inflight_info->fd >= 0) { 221 close(dev->inflight_info->fd); 222 dev->inflight_info->fd = -1; 223 } 224 225 rte_free(dev->inflight_info); 226 dev->inflight_info = NULL; 227 } 228 229 if (dev->slave_req_fd >= 0) { 230 close(dev->slave_req_fd); 231 dev->slave_req_fd = -1; 232 } 233 234 if (dev->postcopy_ufd >= 0) { 235 close(dev->postcopy_ufd); 236 dev->postcopy_ufd = -1; 237 } 238 239 dev->postcopy_listening = 0; 240 } 241 242 static void 243 vhost_user_notify_queue_state(struct virtio_net *dev, struct vhost_virtqueue *vq, 244 int enable) 245 { 246 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; 247 248 /* Configure guest notifications on enable */ 249 if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF) 250 
vhost_enable_guest_notification(dev, vq, vq->notif_enable); 251 252 if (vdpa_dev && vdpa_dev->ops->set_vring_state) 253 vdpa_dev->ops->set_vring_state(dev->vid, vq->index, enable); 254 255 if (dev->notify_ops->vring_state_changed) 256 dev->notify_ops->vring_state_changed(dev->vid, vq->index, enable); 257 } 258 259 /* 260 * This function just returns success at the moment unless 261 * the device hasn't been initialised. 262 */ 263 static int 264 vhost_user_set_owner(struct virtio_net **pdev __rte_unused, 265 struct vhu_msg_context *ctx __rte_unused, 266 int main_fd __rte_unused) 267 { 268 return RTE_VHOST_MSG_RESULT_OK; 269 } 270 271 static int 272 vhost_user_reset_owner(struct virtio_net **pdev, 273 struct vhu_msg_context *ctx __rte_unused, 274 int main_fd __rte_unused) 275 { 276 struct virtio_net *dev = *pdev; 277 278 vhost_destroy_device_notify(dev); 279 280 cleanup_device(dev, 0); 281 reset_device(dev); 282 return RTE_VHOST_MSG_RESULT_OK; 283 } 284 285 /* 286 * The features that we support are requested. 287 */ 288 static int 289 vhost_user_get_features(struct virtio_net **pdev, 290 struct vhu_msg_context *ctx, 291 int main_fd __rte_unused) 292 { 293 struct virtio_net *dev = *pdev; 294 uint64_t features = 0; 295 296 rte_vhost_driver_get_features(dev->ifname, &features); 297 298 ctx->msg.payload.u64 = features; 299 ctx->msg.size = sizeof(ctx->msg.payload.u64); 300 ctx->fd_num = 0; 301 302 return RTE_VHOST_MSG_RESULT_REPLY; 303 } 304 305 /* 306 * The queue number that we support are requested. 307 */ 308 static int 309 vhost_user_get_queue_num(struct virtio_net **pdev, 310 struct vhu_msg_context *ctx, 311 int main_fd __rte_unused) 312 { 313 struct virtio_net *dev = *pdev; 314 uint32_t queue_num = 0; 315 316 rte_vhost_driver_get_queue_num(dev->ifname, &queue_num); 317 318 ctx->msg.payload.u64 = (uint64_t)queue_num; 319 ctx->msg.size = sizeof(ctx->msg.payload.u64); 320 ctx->fd_num = 0; 321 322 return RTE_VHOST_MSG_RESULT_REPLY; 323 } 324 325 /* 326 * We receive the negotiated features supported by us and the virtio device. 327 */ 328 static int 329 vhost_user_set_features(struct virtio_net **pdev, 330 struct vhu_msg_context *ctx, 331 int main_fd __rte_unused) 332 { 333 struct virtio_net *dev = *pdev; 334 uint64_t features = ctx->msg.payload.u64; 335 uint64_t vhost_features = 0; 336 struct rte_vdpa_device *vdpa_dev; 337 338 rte_vhost_driver_get_features(dev->ifname, &vhost_features); 339 if (features & ~vhost_features) { 340 VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid negotiated features.\n"); 341 dev->flags |= VIRTIO_DEV_FEATURES_FAILED; 342 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; 343 344 return RTE_VHOST_MSG_RESULT_ERR; 345 } 346 347 if (dev->flags & VIRTIO_DEV_RUNNING) { 348 if (dev->features == features) 349 return RTE_VHOST_MSG_RESULT_OK; 350 351 /* 352 * Error out if master tries to change features while device is 353 * in running state. The exception being VHOST_F_LOG_ALL, which 354 * is enabled when the live-migration starts. 
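	 * Illustrative example: if only the VHOST_F_LOG_ALL bit differs,
	 * (dev->features ^ features) is exactly (1ULL << VHOST_F_LOG_ALL),
	 * the masked check below evaluates to zero, and the change is accepted.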
355 */ 356 if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) { 357 VHOST_LOG_CONFIG(dev->ifname, ERR, 358 "features changed while device is running.\n"); 359 return RTE_VHOST_MSG_RESULT_ERR; 360 } 361 362 if (dev->notify_ops->features_changed) 363 dev->notify_ops->features_changed(dev->vid, features); 364 } 365 366 dev->features = features; 367 if (dev->features & 368 ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | 369 (1ULL << VIRTIO_F_VERSION_1) | 370 (1ULL << VIRTIO_F_RING_PACKED))) { 371 dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); 372 } else { 373 dev->vhost_hlen = sizeof(struct virtio_net_hdr); 374 } 375 VHOST_LOG_CONFIG(dev->ifname, INFO, 376 "negotiated Virtio features: 0x%" PRIx64 "\n", 377 dev->features); 378 VHOST_LOG_CONFIG(dev->ifname, DEBUG, 379 "mergeable RX buffers %s, virtio 1 %s\n", 380 (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", 381 (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off"); 382 383 if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) && 384 !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) { 385 /* 386 * Remove all but first queue pair if MQ hasn't been 387 * negotiated. This is safe because the device is not 388 * running at this stage. 389 */ 390 while (dev->nr_vring > 2) { 391 struct vhost_virtqueue *vq; 392 393 vq = dev->virtqueue[--dev->nr_vring]; 394 if (!vq) 395 continue; 396 397 dev->virtqueue[dev->nr_vring] = NULL; 398 cleanup_vq(vq, 1); 399 cleanup_vq_inflight(dev, vq); 400 free_vq(dev, vq); 401 } 402 } 403 404 vdpa_dev = dev->vdpa_dev; 405 if (vdpa_dev) 406 vdpa_dev->ops->set_features(dev->vid); 407 408 dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED; 409 return RTE_VHOST_MSG_RESULT_OK; 410 } 411 412 /* 413 * The virtio device sends us the size of the descriptor ring. 414 */ 415 static int 416 vhost_user_set_vring_num(struct virtio_net **pdev, 417 struct vhu_msg_context *ctx, 418 int main_fd __rte_unused) 419 { 420 struct virtio_net *dev = *pdev; 421 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; 422 423 if (ctx->msg.payload.state.num > 32768) { 424 VHOST_LOG_CONFIG(dev->ifname, ERR, 425 "invalid virtqueue size %u\n", 426 ctx->msg.payload.state.num); 427 return RTE_VHOST_MSG_RESULT_ERR; 428 } 429 430 vq->size = ctx->msg.payload.state.num; 431 432 /* VIRTIO 1.0, 2.4 Virtqueues says: 433 * 434 * Queue Size value is always a power of 2. The maximum Queue Size 435 * value is 32768. 436 * 437 * VIRTIO 1.1 2.7 Virtqueues says: 438 * 439 * Packed virtqueues support up to 2^15 entries each. 
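	 *
	 * Illustrative example: a queue size of 256 passes the power-of-two
	 * check below (256 & 255 == 0), whereas e.g. 384 would be rejected
	 * for split rings.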
 */
	if (!vq_is_packed(dev)) {
		if (vq->size & (vq->size - 1)) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"invalid virtqueue size %u\n",
				vq->size);
			return RTE_VHOST_MSG_RESULT_ERR;
		}
	}

	if (vq_is_packed(dev)) {
		rte_free(vq->shadow_used_packed);
		vq->shadow_used_packed = rte_malloc_socket(NULL,
				vq->size *
				sizeof(struct vring_used_elem_packed),
				RTE_CACHE_LINE_SIZE, vq->numa_node);
		if (!vq->shadow_used_packed) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate memory for shadow used ring.\n");
			return RTE_VHOST_MSG_RESULT_ERR;
		}

	} else {
		rte_free(vq->shadow_used_split);

		vq->shadow_used_split = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem),
				RTE_CACHE_LINE_SIZE, vq->numa_node);

		if (!vq->shadow_used_split) {
			VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to allocate memory for vq internal data.\n");
			return RTE_VHOST_MSG_RESULT_ERR;
		}
	}

	rte_free(vq->batch_copy_elems);
	vq->batch_copy_elems = rte_malloc_socket(NULL,
		vq->size * sizeof(struct batch_copy_elem),
		RTE_CACHE_LINE_SIZE, vq->numa_node);
	if (!vq->batch_copy_elems) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to allocate memory for batching copy.\n");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * Reallocate virtio_dev, vhost_virtqueue and related data structures so that
 * they are placed on the same NUMA node as the memory backing the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static void
numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
{
	int node, dev_node;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	struct batch_copy_elem *bce;
	struct guest_page *gp;
	struct rte_vhost_memory *mem;
	size_t mem_size;
	int ret;

	dev = *pdev;
	vq = *pvq;

	/*
	 * If the VQ is ready, it is too late to reallocate: it certainly
	 * already happened on VHOST_USER_SET_VRING_ADDR anyway.
512 */ 513 if (vq->ready) 514 return; 515 516 ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR); 517 if (ret) { 518 VHOST_LOG_CONFIG(dev->ifname, ERR, 519 "unable to get virtqueue %d numa information.\n", 520 vq->index); 521 return; 522 } 523 524 if (node == vq->numa_node) 525 goto out_dev_realloc; 526 527 vq = rte_realloc_socket(*pvq, sizeof(**pvq), 0, node); 528 if (!vq) { 529 VHOST_LOG_CONFIG(dev->ifname, ERR, 530 "failed to realloc virtqueue %d on node %d\n", 531 (*pvq)->index, node); 532 return; 533 } 534 *pvq = vq; 535 536 if (vq != dev->virtqueue[vq->index]) { 537 VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated virtqueue on node %d\n", node); 538 dev->virtqueue[vq->index] = vq; 539 vhost_user_iotlb_init(dev, vq); 540 } 541 542 if (vq_is_packed(dev)) { 543 struct vring_used_elem_packed *sup; 544 545 sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup), 546 RTE_CACHE_LINE_SIZE, node); 547 if (!sup) { 548 VHOST_LOG_CONFIG(dev->ifname, ERR, 549 "failed to realloc shadow packed on node %d\n", 550 node); 551 return; 552 } 553 vq->shadow_used_packed = sup; 554 } else { 555 struct vring_used_elem *sus; 556 557 sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus), 558 RTE_CACHE_LINE_SIZE, node); 559 if (!sus) { 560 VHOST_LOG_CONFIG(dev->ifname, ERR, 561 "failed to realloc shadow split on node %d\n", 562 node); 563 return; 564 } 565 vq->shadow_used_split = sus; 566 } 567 568 bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce), 569 RTE_CACHE_LINE_SIZE, node); 570 if (!bce) { 571 VHOST_LOG_CONFIG(dev->ifname, ERR, 572 "failed to realloc batch copy elem on node %d\n", 573 node); 574 return; 575 } 576 vq->batch_copy_elems = bce; 577 578 if (vq->log_cache) { 579 struct log_cache_entry *lc; 580 581 lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node); 582 if (!lc) { 583 VHOST_LOG_CONFIG(dev->ifname, ERR, 584 "failed to realloc log cache on node %d\n", 585 node); 586 return; 587 } 588 vq->log_cache = lc; 589 } 590 591 if (vq->resubmit_inflight) { 592 struct rte_vhost_resubmit_info *ri; 593 594 ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node); 595 if (!ri) { 596 VHOST_LOG_CONFIG(dev->ifname, ERR, 597 "failed to realloc resubmit inflight on node %d\n", 598 node); 599 return; 600 } 601 vq->resubmit_inflight = ri; 602 603 if (ri->resubmit_list) { 604 struct rte_vhost_resubmit_desc *rd; 605 606 rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num, 607 0, node); 608 if (!rd) { 609 VHOST_LOG_CONFIG(dev->ifname, ERR, 610 "failed to realloc resubmit list on node %d\n", 611 node); 612 return; 613 } 614 ri->resubmit_list = rd; 615 } 616 } 617 618 vq->numa_node = node; 619 620 out_dev_realloc: 621 622 if (dev->flags & VIRTIO_DEV_RUNNING) 623 return; 624 625 ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR); 626 if (ret) { 627 VHOST_LOG_CONFIG(dev->ifname, ERR, "unable to get numa information.\n"); 628 return; 629 } 630 631 if (dev_node == node) 632 return; 633 634 dev = rte_realloc_socket(*pdev, sizeof(**pdev), 0, node); 635 if (!dev) { 636 VHOST_LOG_CONFIG((*pdev)->ifname, ERR, "failed to realloc dev on node %d\n", node); 637 return; 638 } 639 *pdev = dev; 640 641 VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated device on node %d\n", node); 642 vhost_devices[dev->vid] = dev; 643 644 mem_size = sizeof(struct rte_vhost_memory) + 645 sizeof(struct rte_vhost_mem_region) * dev->mem->nregions; 646 mem = rte_realloc_socket(dev->mem, mem_size, 0, 
node); 647 if (!mem) { 648 VHOST_LOG_CONFIG(dev->ifname, ERR, 649 "failed to realloc mem table on node %d\n", 650 node); 651 return; 652 } 653 dev->mem = mem; 654 655 gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp), 656 RTE_CACHE_LINE_SIZE, node); 657 if (!gp) { 658 VHOST_LOG_CONFIG(dev->ifname, ERR, 659 "failed to realloc guest pages on node %d\n", 660 node); 661 return; 662 } 663 dev->guest_pages = gp; 664 } 665 #else 666 static void 667 numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq) 668 { 669 RTE_SET_USED(pdev); 670 RTE_SET_USED(pvq); 671 } 672 #endif 673 674 /* Converts QEMU virtual address to Vhost virtual address. */ 675 static uint64_t 676 qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len) 677 { 678 struct rte_vhost_mem_region *r; 679 uint32_t i; 680 681 if (unlikely(!dev || !dev->mem)) 682 goto out_error; 683 684 /* Find the region where the address lives. */ 685 for (i = 0; i < dev->mem->nregions; i++) { 686 r = &dev->mem->regions[i]; 687 688 if (qva >= r->guest_user_addr && 689 qva < r->guest_user_addr + r->size) { 690 691 if (unlikely(*len > r->guest_user_addr + r->size - qva)) 692 *len = r->guest_user_addr + r->size - qva; 693 694 return qva - r->guest_user_addr + 695 r->host_user_addr; 696 } 697 } 698 out_error: 699 *len = 0; 700 701 return 0; 702 } 703 704 705 /* 706 * Converts ring address to Vhost virtual address. 707 * If IOMMU is enabled, the ring address is a guest IO virtual address, 708 * else it is a QEMU virtual address. 709 */ 710 static uint64_t 711 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, 712 uint64_t ra, uint64_t *size) 713 { 714 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) { 715 uint64_t vva; 716 717 vhost_user_iotlb_rd_lock(vq); 718 vva = vhost_iova_to_vva(dev, vq, ra, 719 size, VHOST_ACCESS_RW); 720 vhost_user_iotlb_rd_unlock(vq); 721 722 return vva; 723 } 724 725 return qva_to_vva(dev, ra, size); 726 } 727 728 static uint64_t 729 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq) 730 { 731 uint64_t log_gpa; 732 733 vhost_user_iotlb_rd_lock(vq); 734 log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr); 735 vhost_user_iotlb_rd_unlock(vq); 736 737 return log_gpa; 738 } 739 740 static void 741 translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq) 742 { 743 struct vhost_virtqueue *vq; 744 struct virtio_net *dev; 745 uint64_t len, expected_len; 746 747 dev = *pdev; 748 vq = *pvq; 749 750 if (vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG)) { 751 vq->log_guest_addr = 752 log_addr_to_gpa(dev, vq); 753 if (vq->log_guest_addr == 0) { 754 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map log_guest_addr.\n"); 755 return; 756 } 757 } 758 759 if (vq_is_packed(dev)) { 760 len = sizeof(struct vring_packed_desc) * vq->size; 761 vq->desc_packed = (struct vring_packed_desc *)(uintptr_t) 762 ring_addr_to_vva(dev, vq, vq->ring_addrs.desc_user_addr, &len); 763 if (vq->desc_packed == NULL || 764 len != sizeof(struct vring_packed_desc) * 765 vq->size) { 766 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map desc_packed ring.\n"); 767 return; 768 } 769 770 numa_realloc(&dev, &vq); 771 *pdev = dev; 772 *pvq = vq; 773 774 len = sizeof(struct vring_packed_desc_event); 775 vq->driver_event = (struct vring_packed_desc_event *) 776 (uintptr_t)ring_addr_to_vva(dev, 777 vq, vq->ring_addrs.avail_user_addr, &len); 778 if (vq->driver_event == NULL || 779 len != sizeof(struct vring_packed_desc_event)) { 780 
			VHOST_LOG_CONFIG(dev->ifname, DEBUG,
				"failed to find driver area address.\n");
			return;
		}

		len = sizeof(struct vring_packed_desc_event);
		vq->device_event = (struct vring_packed_desc_event *)
					(uintptr_t)ring_addr_to_vva(dev,
					vq, vq->ring_addrs.used_user_addr, &len);
		if (vq->device_event == NULL ||
				len != sizeof(struct vring_packed_desc_event)) {
			VHOST_LOG_CONFIG(dev->ifname, DEBUG,
				"failed to find device area address.\n");
			return;
		}

		vq->access_ok = true;
		return;
	}

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	if (vq->desc && vq->avail && vq->used)
		return;

	len = sizeof(struct vring_desc) * vq->size;
	vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
			vq, vq->ring_addrs.desc_user_addr, &len);
	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map desc ring.\n");
		return;
	}

	numa_realloc(&dev, &vq);
	*pdev = dev;
	*pvq = vq;

	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		len += sizeof(uint16_t);
	expected_len = len;
	vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
			vq, vq->ring_addrs.avail_user_addr, &len);
	if (vq->avail == 0 || len != expected_len) {
		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map avail ring.\n");
		return;
	}

	len = sizeof(struct vring_used) +
		sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		len += sizeof(uint16_t);
	expected_len = len;
	vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
			vq, vq->ring_addrs.used_user_addr, &len);
	if (vq->used == 0 || len != expected_len) {
		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map used ring.\n");
		return;
	}

	if (vq->last_used_idx != vq->used->idx) {
		VHOST_LOG_CONFIG(dev->ifname, WARNING,
			"last_used_idx (%u) and vq->used->idx (%u) mismatch\n",
			vq->last_used_idx, vq->used->idx);
		vq->last_used_idx = vq->used->idx;
		vq->last_avail_idx = vq->used->idx;
		VHOST_LOG_CONFIG(dev->ifname, WARNING,
			"some packets may be resent for Tx and dropped for Rx\n");
	}

	vq->access_ok = true;

	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address desc: %p\n", vq->desc);
	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address avail: %p\n", vq->avail);
	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address used: %p\n", vq->used);
	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "log_guest_addr: %" PRIx64 "\n", vq->log_guest_addr);
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev,
			struct vhu_msg_context *ctx,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	struct vhost_virtqueue *vq;
	struct vhost_vring_addr *addr = &ctx->msg.payload.addr;
	bool access_ok;

	if (dev->mem == NULL)
		return RTE_VHOST_MSG_RESULT_ERR;

	/* addr->index refers to the queue index. The txq is 1, rxq is 0.
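	 * (For the builtin virtio-net backend the queue indices alternate per
	 * pair: even indices are Rx queues, odd indices are Tx queues.)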
*/ 875 vq = dev->virtqueue[ctx->msg.payload.addr.index]; 876 877 access_ok = vq->access_ok; 878 879 /* 880 * Rings addresses should not be interpreted as long as the ring is not 881 * started and enabled 882 */ 883 memcpy(&vq->ring_addrs, addr, sizeof(*addr)); 884 885 vring_invalidate(dev, vq); 886 887 if ((vq->enabled && (dev->features & 888 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) || 889 access_ok) { 890 translate_ring_addresses(&dev, &vq); 891 *pdev = dev; 892 } 893 894 return RTE_VHOST_MSG_RESULT_OK; 895 } 896 897 /* 898 * The virtio device sends us the available ring last used index. 899 */ 900 static int 901 vhost_user_set_vring_base(struct virtio_net **pdev, 902 struct vhu_msg_context *ctx, 903 int main_fd __rte_unused) 904 { 905 struct virtio_net *dev = *pdev; 906 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; 907 uint64_t val = ctx->msg.payload.state.num; 908 909 if (vq_is_packed(dev)) { 910 /* 911 * Bit[0:14]: avail index 912 * Bit[15]: avail wrap counter 913 */ 914 vq->last_avail_idx = val & 0x7fff; 915 vq->avail_wrap_counter = !!(val & (0x1 << 15)); 916 /* 917 * Set used index to same value as available one, as 918 * their values should be the same since ring processing 919 * was stopped at get time. 920 */ 921 vq->last_used_idx = vq->last_avail_idx; 922 vq->used_wrap_counter = vq->avail_wrap_counter; 923 } else { 924 vq->last_used_idx = ctx->msg.payload.state.num; 925 vq->last_avail_idx = ctx->msg.payload.state.num; 926 } 927 928 VHOST_LOG_CONFIG(dev->ifname, INFO, 929 "vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n", 930 ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx); 931 932 return RTE_VHOST_MSG_RESULT_OK; 933 } 934 935 static int 936 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, 937 uint64_t host_iova, uint64_t host_user_addr, uint64_t size) 938 { 939 struct guest_page *page, *last_page; 940 struct guest_page *old_pages; 941 942 if (dev->nr_guest_pages == dev->max_guest_pages) { 943 dev->max_guest_pages *= 2; 944 old_pages = dev->guest_pages; 945 dev->guest_pages = rte_realloc(dev->guest_pages, 946 dev->max_guest_pages * sizeof(*page), 947 RTE_CACHE_LINE_SIZE); 948 if (dev->guest_pages == NULL) { 949 VHOST_LOG_CONFIG(dev->ifname, ERR, "cannot realloc guest_pages\n"); 950 rte_free(old_pages); 951 return -1; 952 } 953 } 954 955 if (dev->nr_guest_pages > 0) { 956 last_page = &dev->guest_pages[dev->nr_guest_pages - 1]; 957 /* merge if the two pages are continuous */ 958 if (host_iova == last_page->host_iova + last_page->size && 959 guest_phys_addr == last_page->guest_phys_addr + last_page->size && 960 host_user_addr == last_page->host_user_addr + last_page->size) { 961 last_page->size += size; 962 return 0; 963 } 964 } 965 966 page = &dev->guest_pages[dev->nr_guest_pages++]; 967 page->guest_phys_addr = guest_phys_addr; 968 page->host_iova = host_iova; 969 page->host_user_addr = host_user_addr; 970 page->size = size; 971 972 return 0; 973 } 974 975 static int 976 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, 977 uint64_t page_size) 978 { 979 uint64_t reg_size = reg->size; 980 uint64_t host_user_addr = reg->host_user_addr; 981 uint64_t guest_phys_addr = reg->guest_phys_addr; 982 uint64_t host_iova; 983 uint64_t size; 984 985 host_iova = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr); 986 size = page_size - (guest_phys_addr & (page_size - 1)); 987 size = RTE_MIN(size, reg_size); 988 989 if (add_one_guest_page(dev, guest_phys_addr, host_iova, 990 host_user_addr, 
size) < 0) 991 return -1; 992 993 host_user_addr += size; 994 guest_phys_addr += size; 995 reg_size -= size; 996 997 while (reg_size > 0) { 998 size = RTE_MIN(reg_size, page_size); 999 host_iova = rte_mem_virt2iova((void *)(uintptr_t) 1000 host_user_addr); 1001 if (add_one_guest_page(dev, guest_phys_addr, host_iova, 1002 host_user_addr, size) < 0) 1003 return -1; 1004 1005 host_user_addr += size; 1006 guest_phys_addr += size; 1007 reg_size -= size; 1008 } 1009 1010 /* sort guest page array if over binary search threshold */ 1011 if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { 1012 qsort((void *)dev->guest_pages, dev->nr_guest_pages, 1013 sizeof(struct guest_page), guest_page_addrcmp); 1014 } 1015 1016 return 0; 1017 } 1018 1019 #ifdef RTE_LIBRTE_VHOST_DEBUG 1020 /* TODO: enable it only in debug mode? */ 1021 static void 1022 dump_guest_pages(struct virtio_net *dev) 1023 { 1024 uint32_t i; 1025 struct guest_page *page; 1026 1027 for (i = 0; i < dev->nr_guest_pages; i++) { 1028 page = &dev->guest_pages[i]; 1029 1030 VHOST_LOG_CONFIG(dev->ifname, INFO, "guest physical page region %u\n", i); 1031 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tguest_phys_addr: %" PRIx64 "\n", 1032 page->guest_phys_addr); 1033 VHOST_LOG_CONFIG(dev->ifname, INFO, "\thost_iova : %" PRIx64 "\n", 1034 page->host_iova); 1035 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tsize : %" PRIx64 "\n", 1036 page->size); 1037 } 1038 } 1039 #else 1040 #define dump_guest_pages(dev) 1041 #endif 1042 1043 static bool 1044 vhost_memory_changed(struct VhostUserMemory *new, 1045 struct rte_vhost_memory *old) 1046 { 1047 uint32_t i; 1048 1049 if (new->nregions != old->nregions) 1050 return true; 1051 1052 for (i = 0; i < new->nregions; ++i) { 1053 VhostUserMemoryRegion *new_r = &new->regions[i]; 1054 struct rte_vhost_mem_region *old_r = &old->regions[i]; 1055 1056 if (new_r->guest_phys_addr != old_r->guest_phys_addr) 1057 return true; 1058 if (new_r->memory_size != old_r->size) 1059 return true; 1060 if (new_r->userspace_addr != old_r->guest_user_addr) 1061 return true; 1062 } 1063 1064 return false; 1065 } 1066 1067 #ifdef RTE_LIBRTE_VHOST_POSTCOPY 1068 static int 1069 vhost_user_postcopy_region_register(struct virtio_net *dev, 1070 struct rte_vhost_mem_region *reg) 1071 { 1072 struct uffdio_register reg_struct; 1073 1074 /* 1075 * Let's register all the mmapped area to ensure 1076 * alignment on page boundary. 
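	 * (The mmap_addr/mmap_size pair covers the whole mapping, which is
	 * aligned to the backing page size, whereas the region's own size and
	 * offset may not be.)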
 */
	reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
	reg_struct.range.len = reg->mmap_size;
	reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

	if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
				&reg_struct)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to register ufd for region "
			"%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
			(uint64_t)reg_struct.range.start,
			(uint64_t)reg_struct.range.start +
			(uint64_t)reg_struct.range.len - 1,
			dev->postcopy_ufd,
			strerror(errno));
		return -1;
	}

	VHOST_LOG_CONFIG(dev->ifname, INFO,
		"\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
		(uint64_t)reg_struct.range.start,
		(uint64_t)reg_struct.range.start +
		(uint64_t)reg_struct.range.len - 1);

	return 0;
}
#else
static int
vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused,
		struct rte_vhost_mem_region *reg __rte_unused)
{
	return -1;
}
#endif

static int
vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
		struct vhu_msg_context *ctx)
{
	struct VhostUserMemory *memory;
	struct rte_vhost_mem_region *reg;
	struct vhu_msg_context ack_ctx;
	uint32_t i;

	if (!dev->postcopy_listening)
		return 0;

	/*
	 * We have no better way right now than sharing
	 * DPDK's virtual addresses with QEMU, so that QEMU can
	 * retrieve the region offset when handling userfaults.
	 */
	memory = &ctx->msg.payload.memory;
	for (i = 0; i < memory->nregions; i++) {
		reg = &dev->mem->regions[i];
		memory->regions[i].userspace_addr = reg->host_user_addr;
	}

	/* Send the addresses back to qemu */
	ctx->fd_num = 0;
	send_vhost_reply(dev, main_fd, ctx);

	/* Wait for QEMU to acknowledge it got the addresses;
	 * we have to wait before we're allowed to generate faults.
	 */
	if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"failed to read qemu ack on postcopy set-mem-table\n");
		return -1;
	}

	if (validate_msg_fds(dev, &ack_ctx, 0) != 0)
		return -1;

	if (ack_ctx.msg.request.master != VHOST_USER_SET_MEM_TABLE) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"bad qemu ack on postcopy set-mem-table (%d)\n",
			ack_ctx.msg.request.master);
		return -1;
	}

	/* Now do the userfault registration so we can use the memory */
	for (i = 0; i < memory->nregions; i++) {
		reg = &dev->mem->regions[i];
		if (vhost_user_postcopy_region_register(dev, reg) < 0)
			return -1;
	}

	return 0;
}

static int
vhost_user_mmap_region(struct virtio_net *dev,
		struct rte_vhost_mem_region *region,
		uint64_t mmap_offset)
{
	void *mmap_addr;
	uint64_t mmap_size;
	uint64_t alignment;
	int populate;

	/* Check for memory_size + mmap_offset overflow */
	if (mmap_offset >= -region->size) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
			"mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow\n",
			mmap_offset, region->size);
		return -1;
	}

	mmap_size = region->size + mmap_offset;

	/* On older longterm Linux kernels (e.g. 2.6.32 and 3.2.72), mmap()
	 * without the MAP_ANONYMOUS flag must be called with a length
	 * aligned to the hugepage size, or it fails with EINVAL.
1191 * 1192 * To avoid failure, make sure in caller to keep length aligned. 1193 */ 1194 alignment = get_blk_size(region->fd); 1195 if (alignment == (uint64_t)-1) { 1196 VHOST_LOG_CONFIG(dev->ifname, ERR, "couldn't get hugepage size through fstat\n"); 1197 return -1; 1198 } 1199 mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment); 1200 if (mmap_size == 0) { 1201 /* 1202 * It could happen if initial mmap_size + alignment overflows 1203 * the sizeof uint64, which could happen if either mmap_size or 1204 * alignment value is wrong. 1205 * 1206 * mmap() kernel implementation would return an error, but 1207 * better catch it before and provide useful info in the logs. 1208 */ 1209 VHOST_LOG_CONFIG(dev->ifname, ERR, 1210 "mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid\n", 1211 region->size + mmap_offset, alignment); 1212 return -1; 1213 } 1214 1215 populate = dev->async_copy ? MAP_POPULATE : 0; 1216 mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, 1217 MAP_SHARED | populate, region->fd, 0); 1218 1219 if (mmap_addr == MAP_FAILED) { 1220 VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap failed (%s).\n", strerror(errno)); 1221 return -1; 1222 } 1223 1224 region->mmap_addr = mmap_addr; 1225 region->mmap_size = mmap_size; 1226 region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset; 1227 1228 if (dev->async_copy) { 1229 if (add_guest_pages(dev, region, alignment) < 0) { 1230 VHOST_LOG_CONFIG(dev->ifname, ERR, 1231 "adding guest pages to region failed.\n"); 1232 return -1; 1233 } 1234 } 1235 1236 VHOST_LOG_CONFIG(dev->ifname, INFO, 1237 "guest memory region size: 0x%" PRIx64 "\n", 1238 region->size); 1239 VHOST_LOG_CONFIG(dev->ifname, INFO, 1240 "\t guest physical addr: 0x%" PRIx64 "\n", 1241 region->guest_phys_addr); 1242 VHOST_LOG_CONFIG(dev->ifname, INFO, 1243 "\t guest virtual addr: 0x%" PRIx64 "\n", 1244 region->guest_user_addr); 1245 VHOST_LOG_CONFIG(dev->ifname, INFO, 1246 "\t host virtual addr: 0x%" PRIx64 "\n", 1247 region->host_user_addr); 1248 VHOST_LOG_CONFIG(dev->ifname, INFO, 1249 "\t mmap addr : 0x%" PRIx64 "\n", 1250 (uint64_t)(uintptr_t)mmap_addr); 1251 VHOST_LOG_CONFIG(dev->ifname, INFO, 1252 "\t mmap size : 0x%" PRIx64 "\n", 1253 mmap_size); 1254 VHOST_LOG_CONFIG(dev->ifname, INFO, 1255 "\t mmap align: 0x%" PRIx64 "\n", 1256 alignment); 1257 VHOST_LOG_CONFIG(dev->ifname, INFO, 1258 "\t mmap off : 0x%" PRIx64 "\n", 1259 mmap_offset); 1260 1261 return 0; 1262 } 1263 1264 static int 1265 vhost_user_set_mem_table(struct virtio_net **pdev, 1266 struct vhu_msg_context *ctx, 1267 int main_fd) 1268 { 1269 struct virtio_net *dev = *pdev; 1270 struct VhostUserMemory *memory = &ctx->msg.payload.memory; 1271 struct rte_vhost_mem_region *reg; 1272 int numa_node = SOCKET_ID_ANY; 1273 uint64_t mmap_offset; 1274 uint32_t i; 1275 bool async_notify = false; 1276 1277 if (validate_msg_fds(dev, ctx, memory->nregions) != 0) 1278 return RTE_VHOST_MSG_RESULT_ERR; 1279 1280 if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) { 1281 VHOST_LOG_CONFIG(dev->ifname, ERR, 1282 "too many memory regions (%u)\n", 1283 memory->nregions); 1284 goto close_msg_fds; 1285 } 1286 1287 if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { 1288 VHOST_LOG_CONFIG(dev->ifname, INFO, "memory regions not changed\n"); 1289 1290 close_msg_fds(ctx); 1291 1292 return RTE_VHOST_MSG_RESULT_OK; 1293 } 1294 1295 if (dev->mem) { 1296 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) { 1297 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; 1298 1299 if (vdpa_dev && vdpa_dev->ops->dev_close) 1300 
vdpa_dev->ops->dev_close(dev->vid); 1301 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; 1302 } 1303 1304 /* notify the vhost application to stop DMA transfers */ 1305 if (dev->async_copy && dev->notify_ops->vring_state_changed) { 1306 for (i = 0; i < dev->nr_vring; i++) { 1307 dev->notify_ops->vring_state_changed(dev->vid, 1308 i, 0); 1309 } 1310 async_notify = true; 1311 } 1312 1313 free_mem_region(dev); 1314 rte_free(dev->mem); 1315 dev->mem = NULL; 1316 } 1317 1318 /* Flush IOTLB cache as previous HVAs are now invalid */ 1319 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) 1320 for (i = 0; i < dev->nr_vring; i++) 1321 vhost_user_iotlb_flush_all(dev->virtqueue[i]); 1322 1323 /* 1324 * If VQ 0 has already been allocated, try to allocate on the same 1325 * NUMA node. It can be reallocated later in numa_realloc(). 1326 */ 1327 if (dev->nr_vring > 0) 1328 numa_node = dev->virtqueue[0]->numa_node; 1329 1330 dev->nr_guest_pages = 0; 1331 if (dev->guest_pages == NULL) { 1332 dev->max_guest_pages = 8; 1333 dev->guest_pages = rte_zmalloc_socket(NULL, 1334 dev->max_guest_pages * 1335 sizeof(struct guest_page), 1336 RTE_CACHE_LINE_SIZE, 1337 numa_node); 1338 if (dev->guest_pages == NULL) { 1339 VHOST_LOG_CONFIG(dev->ifname, ERR, 1340 "failed to allocate memory for dev->guest_pages\n"); 1341 goto close_msg_fds; 1342 } 1343 } 1344 1345 dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) + 1346 sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node); 1347 if (dev->mem == NULL) { 1348 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to allocate memory for dev->mem\n"); 1349 goto free_guest_pages; 1350 } 1351 1352 for (i = 0; i < memory->nregions; i++) { 1353 reg = &dev->mem->regions[i]; 1354 1355 reg->guest_phys_addr = memory->regions[i].guest_phys_addr; 1356 reg->guest_user_addr = memory->regions[i].userspace_addr; 1357 reg->size = memory->regions[i].memory_size; 1358 reg->fd = ctx->fds[i]; 1359 1360 /* 1361 * Assign invalid file descriptor value to avoid double 1362 * closing on error path. 1363 */ 1364 ctx->fds[i] = -1; 1365 1366 mmap_offset = memory->regions[i].mmap_offset; 1367 1368 if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) { 1369 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap region %u\n", i); 1370 goto free_mem_table; 1371 } 1372 1373 dev->mem->nregions++; 1374 } 1375 1376 if (dev->async_copy && rte_vfio_is_enabled("vfio")) 1377 async_dma_map(dev, true); 1378 1379 if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0) 1380 goto free_mem_table; 1381 1382 for (i = 0; i < dev->nr_vring; i++) { 1383 struct vhost_virtqueue *vq = dev->virtqueue[i]; 1384 1385 if (!vq) 1386 continue; 1387 1388 if (vq->desc || vq->avail || vq->used) { 1389 /* 1390 * If the memory table got updated, the ring addresses 1391 * need to be translated again as virtual addresses have 1392 * changed. 
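			 * (Even if the guest-physical layout is unchanged, the new
			 * mmap() may return different host virtual addresses, so the
			 * cached desc/avail/used pointers would otherwise be stale.)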
1393 */ 1394 vring_invalidate(dev, vq); 1395 1396 translate_ring_addresses(&dev, &vq); 1397 *pdev = dev; 1398 } 1399 } 1400 1401 dump_guest_pages(dev); 1402 1403 if (async_notify) { 1404 for (i = 0; i < dev->nr_vring; i++) 1405 dev->notify_ops->vring_state_changed(dev->vid, i, 1); 1406 } 1407 1408 return RTE_VHOST_MSG_RESULT_OK; 1409 1410 free_mem_table: 1411 free_mem_region(dev); 1412 rte_free(dev->mem); 1413 dev->mem = NULL; 1414 1415 free_guest_pages: 1416 rte_free(dev->guest_pages); 1417 dev->guest_pages = NULL; 1418 close_msg_fds: 1419 close_msg_fds(ctx); 1420 return RTE_VHOST_MSG_RESULT_ERR; 1421 } 1422 1423 static bool 1424 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) 1425 { 1426 bool rings_ok; 1427 1428 if (!vq) 1429 return false; 1430 1431 if (vq_is_packed(dev)) 1432 rings_ok = vq->desc_packed && vq->driver_event && 1433 vq->device_event; 1434 else 1435 rings_ok = vq->desc && vq->avail && vq->used; 1436 1437 return rings_ok && 1438 vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD && 1439 vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD && 1440 vq->enabled; 1441 } 1442 1443 #define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u 1444 1445 static int 1446 virtio_is_ready(struct virtio_net *dev) 1447 { 1448 struct vhost_virtqueue *vq; 1449 uint32_t i, nr_vring = dev->nr_vring; 1450 1451 if (dev->flags & VIRTIO_DEV_READY) 1452 return 1; 1453 1454 if (!dev->nr_vring) 1455 return 0; 1456 1457 if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) { 1458 nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY; 1459 1460 if (dev->nr_vring < nr_vring) 1461 return 0; 1462 } 1463 1464 for (i = 0; i < nr_vring; i++) { 1465 vq = dev->virtqueue[i]; 1466 1467 if (!vq_is_ready(dev, vq)) 1468 return 0; 1469 } 1470 1471 /* If supported, ensure the frontend is really done with config */ 1472 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)) 1473 if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)) 1474 return 0; 1475 1476 dev->flags |= VIRTIO_DEV_READY; 1477 1478 if (!(dev->flags & VIRTIO_DEV_RUNNING)) 1479 VHOST_LOG_CONFIG(dev->ifname, INFO, "virtio is now ready for processing.\n"); 1480 return 1; 1481 } 1482 1483 static void * 1484 inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *fd) 1485 { 1486 void *ptr; 1487 int mfd = -1; 1488 char fname[20] = "/tmp/memfd-XXXXXX"; 1489 1490 *fd = -1; 1491 #ifdef MEMFD_SUPPORTED 1492 mfd = memfd_create(name, MFD_CLOEXEC); 1493 #else 1494 RTE_SET_USED(name); 1495 #endif 1496 if (mfd == -1) { 1497 mfd = mkstemp(fname); 1498 if (mfd == -1) { 1499 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to get inflight buffer fd\n"); 1500 return NULL; 1501 } 1502 1503 unlink(fname); 1504 } 1505 1506 if (ftruncate(mfd, size) == -1) { 1507 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc inflight buffer\n"); 1508 close(mfd); 1509 return NULL; 1510 } 1511 1512 ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0); 1513 if (ptr == MAP_FAILED) { 1514 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap inflight buffer\n"); 1515 close(mfd); 1516 return NULL; 1517 } 1518 1519 *fd = mfd; 1520 return ptr; 1521 } 1522 1523 static uint32_t 1524 get_pervq_shm_size_split(uint16_t queue_size) 1525 { 1526 return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) * 1527 queue_size + sizeof(uint64_t) + 1528 sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT); 1529 } 1530 1531 static uint32_t 1532 get_pervq_shm_size_packed(uint16_t queue_size) 1533 { 1534 return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed) 1535 * queue_size + 
sizeof(uint64_t) + 1536 sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9, 1537 INFLIGHT_ALIGNMENT); 1538 } 1539 1540 static int 1541 vhost_user_get_inflight_fd(struct virtio_net **pdev, 1542 struct vhu_msg_context *ctx, 1543 int main_fd __rte_unused) 1544 { 1545 struct rte_vhost_inflight_info_packed *inflight_packed; 1546 uint64_t pervq_inflight_size, mmap_size; 1547 uint16_t num_queues, queue_size; 1548 struct virtio_net *dev = *pdev; 1549 int fd, i, j; 1550 int numa_node = SOCKET_ID_ANY; 1551 void *addr; 1552 1553 if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) { 1554 VHOST_LOG_CONFIG(dev->ifname, ERR, 1555 "invalid get_inflight_fd message size is %d\n", 1556 ctx->msg.size); 1557 return RTE_VHOST_MSG_RESULT_ERR; 1558 } 1559 1560 /* 1561 * If VQ 0 has already been allocated, try to allocate on the same 1562 * NUMA node. It can be reallocated later in numa_realloc(). 1563 */ 1564 if (dev->nr_vring > 0) 1565 numa_node = dev->virtqueue[0]->numa_node; 1566 1567 if (dev->inflight_info == NULL) { 1568 dev->inflight_info = rte_zmalloc_socket("inflight_info", 1569 sizeof(struct inflight_mem_info), 0, numa_node); 1570 if (!dev->inflight_info) { 1571 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc dev inflight area\n"); 1572 return RTE_VHOST_MSG_RESULT_ERR; 1573 } 1574 dev->inflight_info->fd = -1; 1575 } 1576 1577 num_queues = ctx->msg.payload.inflight.num_queues; 1578 queue_size = ctx->msg.payload.inflight.queue_size; 1579 1580 VHOST_LOG_CONFIG(dev->ifname, INFO, 1581 "get_inflight_fd num_queues: %u\n", 1582 ctx->msg.payload.inflight.num_queues); 1583 VHOST_LOG_CONFIG(dev->ifname, INFO, 1584 "get_inflight_fd queue_size: %u\n", 1585 ctx->msg.payload.inflight.queue_size); 1586 1587 if (vq_is_packed(dev)) 1588 pervq_inflight_size = get_pervq_shm_size_packed(queue_size); 1589 else 1590 pervq_inflight_size = get_pervq_shm_size_split(queue_size); 1591 1592 mmap_size = num_queues * pervq_inflight_size; 1593 addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd); 1594 if (!addr) { 1595 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc vhost inflight area\n"); 1596 ctx->msg.payload.inflight.mmap_size = 0; 1597 return RTE_VHOST_MSG_RESULT_ERR; 1598 } 1599 memset(addr, 0, mmap_size); 1600 1601 if (dev->inflight_info->addr) { 1602 munmap(dev->inflight_info->addr, dev->inflight_info->size); 1603 dev->inflight_info->addr = NULL; 1604 } 1605 1606 if (dev->inflight_info->fd >= 0) { 1607 close(dev->inflight_info->fd); 1608 dev->inflight_info->fd = -1; 1609 } 1610 1611 dev->inflight_info->addr = addr; 1612 dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size; 1613 dev->inflight_info->fd = ctx->fds[0] = fd; 1614 ctx->msg.payload.inflight.mmap_offset = 0; 1615 ctx->fd_num = 1; 1616 1617 if (vq_is_packed(dev)) { 1618 for (i = 0; i < num_queues; i++) { 1619 inflight_packed = 1620 (struct rte_vhost_inflight_info_packed *)addr; 1621 inflight_packed->used_wrap_counter = 1; 1622 inflight_packed->old_used_wrap_counter = 1; 1623 for (j = 0; j < queue_size; j++) 1624 inflight_packed->desc[j].next = j + 1; 1625 addr = (void *)((char *)addr + pervq_inflight_size); 1626 } 1627 } 1628 1629 VHOST_LOG_CONFIG(dev->ifname, INFO, 1630 "send inflight mmap_size: %"PRIu64"\n", 1631 ctx->msg.payload.inflight.mmap_size); 1632 VHOST_LOG_CONFIG(dev->ifname, INFO, 1633 "send inflight mmap_offset: %"PRIu64"\n", 1634 ctx->msg.payload.inflight.mmap_offset); 1635 VHOST_LOG_CONFIG(dev->ifname, INFO, 1636 "send inflight fd: %d\n", ctx->fds[0]); 1637 1638 return RTE_VHOST_MSG_RESULT_REPLY; 1639 } 1640 1641 
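/*
 * Illustrative note: the inflight area is laid out as one slot per virtqueue,
 * each pervq_inflight_size bytes (rounded up to INFLIGHT_ALIGNMENT), so for
 * example num_queues = 2 gives mmap_size = 2 * pervq_inflight_size, exactly
 * as computed in the GET_INFLIGHT_FD handler above.
 */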
static int 1642 vhost_user_set_inflight_fd(struct virtio_net **pdev, 1643 struct vhu_msg_context *ctx, 1644 int main_fd __rte_unused) 1645 { 1646 uint64_t mmap_size, mmap_offset; 1647 uint16_t num_queues, queue_size; 1648 struct virtio_net *dev = *pdev; 1649 uint32_t pervq_inflight_size; 1650 struct vhost_virtqueue *vq; 1651 void *addr; 1652 int fd, i; 1653 int numa_node = SOCKET_ID_ANY; 1654 1655 if (validate_msg_fds(dev, ctx, 1) != 0) 1656 return RTE_VHOST_MSG_RESULT_ERR; 1657 1658 fd = ctx->fds[0]; 1659 if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) { 1660 VHOST_LOG_CONFIG(dev->ifname, ERR, 1661 "invalid set_inflight_fd message size is %d,fd is %d\n", 1662 ctx->msg.size, fd); 1663 return RTE_VHOST_MSG_RESULT_ERR; 1664 } 1665 1666 mmap_size = ctx->msg.payload.inflight.mmap_size; 1667 mmap_offset = ctx->msg.payload.inflight.mmap_offset; 1668 num_queues = ctx->msg.payload.inflight.num_queues; 1669 queue_size = ctx->msg.payload.inflight.queue_size; 1670 1671 if (vq_is_packed(dev)) 1672 pervq_inflight_size = get_pervq_shm_size_packed(queue_size); 1673 else 1674 pervq_inflight_size = get_pervq_shm_size_split(queue_size); 1675 1676 VHOST_LOG_CONFIG(dev->ifname, INFO, "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size); 1677 VHOST_LOG_CONFIG(dev->ifname, INFO, 1678 "set_inflight_fd mmap_offset: %"PRIu64"\n", 1679 mmap_offset); 1680 VHOST_LOG_CONFIG(dev->ifname, INFO, 1681 "set_inflight_fd num_queues: %u\n", 1682 num_queues); 1683 VHOST_LOG_CONFIG(dev->ifname, INFO, 1684 "set_inflight_fd queue_size: %u\n", 1685 queue_size); 1686 VHOST_LOG_CONFIG(dev->ifname, INFO, 1687 "set_inflight_fd fd: %d\n", 1688 fd); 1689 VHOST_LOG_CONFIG(dev->ifname, INFO, 1690 "set_inflight_fd pervq_inflight_size: %d\n", 1691 pervq_inflight_size); 1692 1693 /* 1694 * If VQ 0 has already been allocated, try to allocate on the same 1695 * NUMA node. It can be reallocated later in numa_realloc(). 
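	 * (When no virtqueue exists yet, numa_node stays SOCKET_ID_ANY and the
	 * allocator may pick any NUMA node.)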
1696 */ 1697 if (dev->nr_vring > 0) 1698 numa_node = dev->virtqueue[0]->numa_node; 1699 1700 if (!dev->inflight_info) { 1701 dev->inflight_info = rte_zmalloc_socket("inflight_info", 1702 sizeof(struct inflight_mem_info), 0, numa_node); 1703 if (dev->inflight_info == NULL) { 1704 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc dev inflight area\n"); 1705 return RTE_VHOST_MSG_RESULT_ERR; 1706 } 1707 dev->inflight_info->fd = -1; 1708 } 1709 1710 if (dev->inflight_info->addr) { 1711 munmap(dev->inflight_info->addr, dev->inflight_info->size); 1712 dev->inflight_info->addr = NULL; 1713 } 1714 1715 addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, 1716 fd, mmap_offset); 1717 if (addr == MAP_FAILED) { 1718 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap share memory.\n"); 1719 return RTE_VHOST_MSG_RESULT_ERR; 1720 } 1721 1722 if (dev->inflight_info->fd >= 0) { 1723 close(dev->inflight_info->fd); 1724 dev->inflight_info->fd = -1; 1725 } 1726 1727 dev->inflight_info->fd = fd; 1728 dev->inflight_info->addr = addr; 1729 dev->inflight_info->size = mmap_size; 1730 1731 for (i = 0; i < num_queues; i++) { 1732 vq = dev->virtqueue[i]; 1733 if (!vq) 1734 continue; 1735 1736 if (vq_is_packed(dev)) { 1737 vq->inflight_packed = addr; 1738 vq->inflight_packed->desc_num = queue_size; 1739 } else { 1740 vq->inflight_split = addr; 1741 vq->inflight_split->desc_num = queue_size; 1742 } 1743 addr = (void *)((char *)addr + pervq_inflight_size); 1744 } 1745 1746 return RTE_VHOST_MSG_RESULT_OK; 1747 } 1748 1749 static int 1750 vhost_user_set_vring_call(struct virtio_net **pdev, 1751 struct vhu_msg_context *ctx, 1752 int main_fd __rte_unused) 1753 { 1754 struct virtio_net *dev = *pdev; 1755 struct vhost_vring_file file; 1756 struct vhost_virtqueue *vq; 1757 int expected_fds; 1758 1759 expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; 1760 if (validate_msg_fds(dev, ctx, expected_fds) != 0) 1761 return RTE_VHOST_MSG_RESULT_ERR; 1762 1763 file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; 1764 if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) 1765 file.fd = VIRTIO_INVALID_EVENTFD; 1766 else 1767 file.fd = ctx->fds[0]; 1768 VHOST_LOG_CONFIG(dev->ifname, INFO, 1769 "vring call idx:%d file:%d\n", 1770 file.index, file.fd); 1771 1772 vq = dev->virtqueue[file.index]; 1773 1774 if (vq->ready) { 1775 vq->ready = false; 1776 vhost_user_notify_queue_state(dev, vq, 0); 1777 } 1778 1779 if (vq->callfd >= 0) 1780 close(vq->callfd); 1781 1782 vq->callfd = file.fd; 1783 1784 return RTE_VHOST_MSG_RESULT_OK; 1785 } 1786 1787 static int vhost_user_set_vring_err(struct virtio_net **pdev, 1788 struct vhu_msg_context *ctx, 1789 int main_fd __rte_unused) 1790 { 1791 struct virtio_net *dev = *pdev; 1792 int expected_fds; 1793 1794 expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 
0 : 1; 1795 if (validate_msg_fds(dev, ctx, expected_fds) != 0) 1796 return RTE_VHOST_MSG_RESULT_ERR; 1797 1798 if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) 1799 close(ctx->fds[0]); 1800 VHOST_LOG_CONFIG(dev->ifname, INFO, "not implemented\n"); 1801 1802 return RTE_VHOST_MSG_RESULT_OK; 1803 } 1804 1805 static int 1806 resubmit_desc_compare(const void *a, const void *b) 1807 { 1808 const struct rte_vhost_resubmit_desc *desc0 = a; 1809 const struct rte_vhost_resubmit_desc *desc1 = b; 1810 1811 if (desc1->counter > desc0->counter) 1812 return 1; 1813 1814 return -1; 1815 } 1816 1817 static int 1818 vhost_check_queue_inflights_split(struct virtio_net *dev, 1819 struct vhost_virtqueue *vq) 1820 { 1821 uint16_t i; 1822 uint16_t resubmit_num = 0, last_io, num; 1823 struct vring_used *used = vq->used; 1824 struct rte_vhost_resubmit_info *resubmit; 1825 struct rte_vhost_inflight_info_split *inflight_split; 1826 1827 if (!(dev->protocol_features & 1828 (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) 1829 return RTE_VHOST_MSG_RESULT_OK; 1830 1831 /* The frontend may still not support the inflight feature 1832 * although we negotiate the protocol feature. 1833 */ 1834 if ((!vq->inflight_split)) 1835 return RTE_VHOST_MSG_RESULT_OK; 1836 1837 if (!vq->inflight_split->version) { 1838 vq->inflight_split->version = INFLIGHT_VERSION; 1839 return RTE_VHOST_MSG_RESULT_OK; 1840 } 1841 1842 if (vq->resubmit_inflight) 1843 return RTE_VHOST_MSG_RESULT_OK; 1844 1845 inflight_split = vq->inflight_split; 1846 vq->global_counter = 0; 1847 last_io = inflight_split->last_inflight_io; 1848 1849 if (inflight_split->used_idx != used->idx) { 1850 inflight_split->desc[last_io].inflight = 0; 1851 rte_atomic_thread_fence(__ATOMIC_SEQ_CST); 1852 inflight_split->used_idx = used->idx; 1853 } 1854 1855 for (i = 0; i < inflight_split->desc_num; i++) { 1856 if (inflight_split->desc[i].inflight == 1) 1857 resubmit_num++; 1858 } 1859 1860 vq->last_avail_idx += resubmit_num; 1861 1862 if (resubmit_num) { 1863 resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info), 1864 0, vq->numa_node); 1865 if (!resubmit) { 1866 VHOST_LOG_CONFIG(dev->ifname, ERR, 1867 "failed to allocate memory for resubmit info.\n"); 1868 return RTE_VHOST_MSG_RESULT_ERR; 1869 } 1870 1871 resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list", 1872 resubmit_num * sizeof(struct rte_vhost_resubmit_desc), 1873 0, vq->numa_node); 1874 if (!resubmit->resubmit_list) { 1875 VHOST_LOG_CONFIG(dev->ifname, ERR, 1876 "failed to allocate memory for inflight desc.\n"); 1877 rte_free(resubmit); 1878 return RTE_VHOST_MSG_RESULT_ERR; 1879 } 1880 1881 num = 0; 1882 for (i = 0; i < vq->inflight_split->desc_num; i++) { 1883 if (vq->inflight_split->desc[i].inflight == 1) { 1884 resubmit->resubmit_list[num].index = i; 1885 resubmit->resubmit_list[num].counter = 1886 inflight_split->desc[i].counter; 1887 num++; 1888 } 1889 } 1890 resubmit->resubmit_num = num; 1891 1892 if (resubmit->resubmit_num > 1) 1893 qsort(resubmit->resubmit_list, resubmit->resubmit_num, 1894 sizeof(struct rte_vhost_resubmit_desc), 1895 resubmit_desc_compare); 1896 1897 vq->global_counter = resubmit->resubmit_list[0].counter + 1; 1898 vq->resubmit_inflight = resubmit; 1899 } 1900 1901 return RTE_VHOST_MSG_RESULT_OK; 1902 } 1903 1904 static int 1905 vhost_check_queue_inflights_packed(struct virtio_net *dev, 1906 struct vhost_virtqueue *vq) 1907 { 1908 uint16_t i; 1909 uint16_t resubmit_num = 0, old_used_idx, num; 1910 struct rte_vhost_resubmit_info *resubmit; 1911 struct 
rte_vhost_inflight_info_packed *inflight_packed; 1912 1913 if (!(dev->protocol_features & 1914 (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) 1915 return RTE_VHOST_MSG_RESULT_OK; 1916 1917 /* The frontend may still not support the inflight feature 1918 * although we negotiate the protocol feature. 1919 */ 1920 if ((!vq->inflight_packed)) 1921 return RTE_VHOST_MSG_RESULT_OK; 1922 1923 if (!vq->inflight_packed->version) { 1924 vq->inflight_packed->version = INFLIGHT_VERSION; 1925 return RTE_VHOST_MSG_RESULT_OK; 1926 } 1927 1928 if (vq->resubmit_inflight) 1929 return RTE_VHOST_MSG_RESULT_OK; 1930 1931 inflight_packed = vq->inflight_packed; 1932 vq->global_counter = 0; 1933 old_used_idx = inflight_packed->old_used_idx; 1934 1935 if (inflight_packed->used_idx != old_used_idx) { 1936 if (inflight_packed->desc[old_used_idx].inflight == 0) { 1937 inflight_packed->old_used_idx = 1938 inflight_packed->used_idx; 1939 inflight_packed->old_used_wrap_counter = 1940 inflight_packed->used_wrap_counter; 1941 inflight_packed->old_free_head = 1942 inflight_packed->free_head; 1943 } else { 1944 inflight_packed->used_idx = 1945 inflight_packed->old_used_idx; 1946 inflight_packed->used_wrap_counter = 1947 inflight_packed->old_used_wrap_counter; 1948 inflight_packed->free_head = 1949 inflight_packed->old_free_head; 1950 } 1951 } 1952 1953 for (i = 0; i < inflight_packed->desc_num; i++) { 1954 if (inflight_packed->desc[i].inflight == 1) 1955 resubmit_num++; 1956 } 1957 1958 if (resubmit_num) { 1959 resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info), 1960 0, vq->numa_node); 1961 if (resubmit == NULL) { 1962 VHOST_LOG_CONFIG(dev->ifname, ERR, 1963 "failed to allocate memory for resubmit info.\n"); 1964 return RTE_VHOST_MSG_RESULT_ERR; 1965 } 1966 1967 resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list", 1968 resubmit_num * sizeof(struct rte_vhost_resubmit_desc), 1969 0, vq->numa_node); 1970 if (resubmit->resubmit_list == NULL) { 1971 VHOST_LOG_CONFIG(dev->ifname, ERR, 1972 "failed to allocate memory for resubmit desc.\n"); 1973 rte_free(resubmit); 1974 return RTE_VHOST_MSG_RESULT_ERR; 1975 } 1976 1977 num = 0; 1978 for (i = 0; i < inflight_packed->desc_num; i++) { 1979 if (vq->inflight_packed->desc[i].inflight == 1) { 1980 resubmit->resubmit_list[num].index = i; 1981 resubmit->resubmit_list[num].counter = 1982 inflight_packed->desc[i].counter; 1983 num++; 1984 } 1985 } 1986 resubmit->resubmit_num = num; 1987 1988 if (resubmit->resubmit_num > 1) 1989 qsort(resubmit->resubmit_list, resubmit->resubmit_num, 1990 sizeof(struct rte_vhost_resubmit_desc), 1991 resubmit_desc_compare); 1992 1993 vq->global_counter = resubmit->resubmit_list[0].counter + 1; 1994 vq->resubmit_inflight = resubmit; 1995 } 1996 1997 return RTE_VHOST_MSG_RESULT_OK; 1998 } 1999 2000 static int 2001 vhost_user_set_vring_kick(struct virtio_net **pdev, 2002 struct vhu_msg_context *ctx, 2003 int main_fd __rte_unused) 2004 { 2005 struct virtio_net *dev = *pdev; 2006 struct vhost_vring_file file; 2007 struct vhost_virtqueue *vq; 2008 int expected_fds; 2009 2010 expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 
0 : 1;
2011 	if (validate_msg_fds(dev, ctx, expected_fds) != 0)
2012 		return RTE_VHOST_MSG_RESULT_ERR;
2013 
2014 	file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK;
2015 	if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)
2016 		file.fd = VIRTIO_INVALID_EVENTFD;
2017 	else
2018 		file.fd = ctx->fds[0];
2019 	VHOST_LOG_CONFIG(dev->ifname, INFO,
2020 		"vring kick idx:%d file:%d\n",
2021 		file.index, file.fd);
2022 
2023 	/* Interpret ring addresses only when ring is started. */
2024 	vq = dev->virtqueue[file.index];
2025 	translate_ring_addresses(&dev, &vq);
2026 	*pdev = dev;
2027 
2028 	/*
2029 	 * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
2030 	 * the ring starts already enabled. Otherwise, it is enabled via
2031 	 * the SET_VRING_ENABLE message.
2032 	 */
2033 	if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
2034 		vq->enabled = true;
2035 	}
2036 
2037 	if (vq->ready) {
2038 		vq->ready = false;
2039 		vhost_user_notify_queue_state(dev, vq, 0);
2040 	}
2041 
2042 	if (vq->kickfd >= 0)
2043 		close(vq->kickfd);
2044 	vq->kickfd = file.fd;
2045 
2046 	if (vq_is_packed(dev)) {
2047 		if (vhost_check_queue_inflights_packed(dev, vq)) {
2048 			VHOST_LOG_CONFIG(dev->ifname, ERR,
2049 				"failed to check inflights for vq: %d\n",
2050 				file.index);
2051 			return RTE_VHOST_MSG_RESULT_ERR;
2052 		}
2053 	} else {
2054 		if (vhost_check_queue_inflights_split(dev, vq)) {
2055 			VHOST_LOG_CONFIG(dev->ifname, ERR,
2056 				"failed to check inflights for vq: %d\n",
2057 				file.index);
2058 			return RTE_VHOST_MSG_RESULT_ERR;
2059 		}
2060 	}
2061 
2062 	return RTE_VHOST_MSG_RESULT_OK;
2063 }
2064 
2065 /*
2066  * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
2067  */
2068 static int
2069 vhost_user_get_vring_base(struct virtio_net **pdev,
2070 			struct vhu_msg_context *ctx,
2071 			int main_fd __rte_unused)
2072 {
2073 	struct virtio_net *dev = *pdev;
2074 	struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
2075 	uint64_t val;
2076 
2077 	/* We have to stop the queue (virtio) if it is running. */
2078 	vhost_destroy_device_notify(dev);
2079 
2080 	dev->flags &= ~VIRTIO_DEV_READY;
2081 	dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
2082 
2083 	/* Here we are safe to get the indexes */
2084 	if (vq_is_packed(dev)) {
2085 		/*
2086 		 * Bit[0:14]: avail index
2087 		 * Bit[15]: avail wrap counter
2088 		 */
2089 		val = vq->last_avail_idx & 0x7fff;
2090 		val |= vq->avail_wrap_counter << 15;
2091 		ctx->msg.payload.state.num = val;
2092 	} else {
2093 		ctx->msg.payload.state.num = vq->last_avail_idx;
2094 	}
2095 
2096 	VHOST_LOG_CONFIG(dev->ifname, INFO,
2097 		"vring base idx:%d num:%d\n",
2098 		ctx->msg.payload.state.index, ctx->msg.payload.state.num);
2099 	/*
2100 	 * Based on the current QEMU vhost-user implementation, this message
2101 	 * is only ever sent in vhost_vring_stop.
2102 	 * TODO: clean up the vring; it isn't usable from this point on.
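	 *
	 * Note: for a packed ring, the reply built above folds the avail index
	 * and the avail wrap counter into the single 16-bit "num" field. A
	 * minimal decoding sketch for the peer (variable names here are
	 * illustrative, not taken from this file):
	 *
	 *	last_avail_idx     = num & 0x7fff;      bits 0-14
	 *	avail_wrap_counter = !!(num & 0x8000);  bit 15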
2103 	 */
2104 	if (vq->kickfd >= 0)
2105 		close(vq->kickfd);
2106 
2107 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
2108 
2109 	if (vq->callfd >= 0)
2110 		close(vq->callfd);
2111 
2112 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
2113 
2114 	vq->signalled_used_valid = false;
2115 
2116 	if (vq_is_packed(dev)) {
2117 		rte_free(vq->shadow_used_packed);
2118 		vq->shadow_used_packed = NULL;
2119 	} else {
2120 		rte_free(vq->shadow_used_split);
2121 		vq->shadow_used_split = NULL;
2122 	}
2123 
2124 	rte_free(vq->batch_copy_elems);
2125 	vq->batch_copy_elems = NULL;
2126 
2127 	rte_free(vq->log_cache);
2128 	vq->log_cache = NULL;
2129 
2130 	ctx->msg.size = sizeof(ctx->msg.payload.state);
2131 	ctx->fd_num = 0;
2132 
2133 	vhost_user_iotlb_flush_all(vq);
2134 
2135 	vring_invalidate(dev, vq);
2136 
2137 	return RTE_VHOST_MSG_RESULT_REPLY;
2138 }
2139 
2140 /*
2141  * When the virtio queues are ready to work, QEMU sends this message to
2142  * enable the virtio queue pair.
2143  */
2144 static int
2145 vhost_user_set_vring_enable(struct virtio_net **pdev,
2146 			struct vhu_msg_context *ctx,
2147 			int main_fd __rte_unused)
2148 {
2149 	struct virtio_net *dev = *pdev;
2150 	bool enable = !!ctx->msg.payload.state.num;
2151 	int index = (int)ctx->msg.payload.state.index;
2152 
2153 	VHOST_LOG_CONFIG(dev->ifname, INFO,
2154 		"set queue enable: %d to qp idx: %d\n",
2155 		enable, index);
2156 
2157 	if (enable && dev->virtqueue[index]->async) {
2158 		if (dev->virtqueue[index]->async->pkts_inflight_n) {
2159 			VHOST_LOG_CONFIG(dev->ifname, ERR,
2160 				"failed to enable vring. Inflight packets must be completed first\n");
2161 			return RTE_VHOST_MSG_RESULT_ERR;
2162 		}
2163 	}
2164 
2165 	dev->virtqueue[index]->enabled = enable;
2166 
2167 	return RTE_VHOST_MSG_RESULT_OK;
2168 }
2169 
2170 static int
2171 vhost_user_get_protocol_features(struct virtio_net **pdev,
2172 			struct vhu_msg_context *ctx,
2173 			int main_fd __rte_unused)
2174 {
2175 	struct virtio_net *dev = *pdev;
2176 	uint64_t features, protocol_features;
2177 
2178 	rte_vhost_driver_get_features(dev->ifname, &features);
2179 	rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2180 
2181 	ctx->msg.payload.u64 = protocol_features;
2182 	ctx->msg.size = sizeof(ctx->msg.payload.u64);
2183 	ctx->fd_num = 0;
2184 
2185 	return RTE_VHOST_MSG_RESULT_REPLY;
2186 }
2187 
2188 static int
2189 vhost_user_set_protocol_features(struct virtio_net **pdev,
2190 			struct vhu_msg_context *ctx,
2191 			int main_fd __rte_unused)
2192 {
2193 	struct virtio_net *dev = *pdev;
2194 	uint64_t protocol_features = ctx->msg.payload.u64;
2195 	uint64_t slave_protocol_features = 0;
2196 
2197 	rte_vhost_driver_get_protocol_features(dev->ifname,
2198 			&slave_protocol_features);
2199 	if (protocol_features & ~slave_protocol_features) {
2200 		VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid protocol features.\n");
2201 		return RTE_VHOST_MSG_RESULT_ERR;
2202 	}
2203 
2204 	dev->protocol_features = protocol_features;
2205 	VHOST_LOG_CONFIG(dev->ifname, INFO,
2206 		"negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2207 		dev->protocol_features);
2208 
2209 	return RTE_VHOST_MSG_RESULT_OK;
2210 }
2211 
2212 static int
2213 vhost_user_set_log_base(struct virtio_net **pdev,
2214 			struct vhu_msg_context *ctx,
2215 			int main_fd __rte_unused)
2216 {
2217 	struct virtio_net *dev = *pdev;
2218 	int fd = ctx->fds[0];
2219 	uint64_t size, off;
2220 	void *addr;
2221 	uint32_t i;
2222 
2223 	if (validate_msg_fds(dev, ctx, 1) != 0)
2224 		return RTE_VHOST_MSG_RESULT_ERR;
2225 
2226 	if (fd < 0) {
2227 		VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid log fd: %d\n", fd);
2228 		return RTE_VHOST_MSG_RESULT_ERR;
2229 	}
2230 
2231 	if (ctx->msg.size != sizeof(VhostUserLog)) {
2232 		VHOST_LOG_CONFIG(dev->ifname, ERR,
2233 			"invalid log base msg size: %"PRId32" != %d\n",
2234 			ctx->msg.size, (int)sizeof(VhostUserLog));
2235 		goto close_msg_fds;
2236 	}
2237 
2238 	size = ctx->msg.payload.log.mmap_size;
2239 	off = ctx->msg.payload.log.mmap_offset;
2240 
2241 	/* Check for mmap size and offset overflow. */
2242 	if (off >= -size) {
2243 		VHOST_LOG_CONFIG(dev->ifname, ERR,
2244 			"log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2245 			off, size);
2246 		goto close_msg_fds;
2247 	}
2248 
2249 	VHOST_LOG_CONFIG(dev->ifname, INFO,
2250 		"log mmap size: %"PRId64", offset: %"PRId64"\n",
2251 		size, off);
2252 
2253 	/*
2254 	 * mmap from 0 to work around a hugepage mmap bug: mmap will
2255 	 * fail when the offset is not page size aligned.
2256 	 */
2257 	addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2258 	close(fd);
2259 	if (addr == MAP_FAILED) {
2260 		VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap log base failed!\n");
2261 		return RTE_VHOST_MSG_RESULT_ERR;
2262 	}
2263 
2264 	/*
2265 	 * Free the previously mapped log memory in case
2266 	 * VHOST_USER_SET_LOG_BASE is received more than once.
2267 	 */
2268 	if (dev->log_addr) {
2269 		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2270 	}
2271 	dev->log_addr = (uint64_t)(uintptr_t)addr;
2272 	dev->log_base = dev->log_addr + off;
2273 	dev->log_size = size;
2274 
2275 	for (i = 0; i < dev->nr_vring; i++) {
2276 		struct vhost_virtqueue *vq = dev->virtqueue[i];
2277 
2278 		rte_free(vq->log_cache);
2279 		vq->log_cache = NULL;
2280 		vq->log_cache_nb_elem = 0;
2281 		vq->log_cache = rte_malloc_socket("vq log cache",
2282 				sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
2283 				0, vq->numa_node);
2284 		/*
2285 		 * If the log cache allocation fails, don't fail migration;
2286 		 * no caching will be done, which will impact performance.
2287 		 */
2288 		if (!vq->log_cache)
2289 			VHOST_LOG_CONFIG(dev->ifname, ERR,
2290 				"failed to allocate VQ logging cache\n");
2291 	}
2292 
2293 	/*
2294 	 * The spec is not clear about it (yet), but QEMU doesn't expect
2295 	 * any payload in the reply.
2296 	 */
2297 	ctx->msg.size = 0;
2298 	ctx->fd_num = 0;
2299 
2300 	return RTE_VHOST_MSG_RESULT_REPLY;
2301 
2302 close_msg_fds:
2303 	close_msg_fds(ctx);
2304 	return RTE_VHOST_MSG_RESULT_ERR;
2305 }
2306 
2307 static int vhost_user_set_log_fd(struct virtio_net **pdev,
2308 			struct vhu_msg_context *ctx,
2309 			int main_fd __rte_unused)
2310 {
2311 	struct virtio_net *dev = *pdev;
2312 
2313 	if (validate_msg_fds(dev, ctx, 1) != 0)
2314 		return RTE_VHOST_MSG_RESULT_ERR;
2315 
2316 	close(ctx->fds[0]);
2317 	VHOST_LOG_CONFIG(dev->ifname, INFO, "not implemented.\n");
2318 
2319 	return RTE_VHOST_MSG_RESULT_OK;
2320 }
2321 
2322 /*
2323  * A RARP packet is constructed and broadcast to notify switches of the
2324  * new location of the migrated VM, so that packets from outside are not
2325  * lost after migration.
2326  *
2327  * However, we don't actually "send" a RARP packet here; instead, we set
2328  * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
2329 */ 2330 static int 2331 vhost_user_send_rarp(struct virtio_net **pdev, 2332 struct vhu_msg_context *ctx, 2333 int main_fd __rte_unused) 2334 { 2335 struct virtio_net *dev = *pdev; 2336 uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64; 2337 struct rte_vdpa_device *vdpa_dev; 2338 2339 VHOST_LOG_CONFIG(dev->ifname, DEBUG, 2340 "MAC: " RTE_ETHER_ADDR_PRT_FMT "\n", 2341 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 2342 memcpy(dev->mac.addr_bytes, mac, 6); 2343 2344 /* 2345 * Set the flag to inject a RARP broadcast packet at 2346 * rte_vhost_dequeue_burst(). 2347 * 2348 * __ATOMIC_RELEASE ordering is for making sure the mac is 2349 * copied before the flag is set. 2350 */ 2351 __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE); 2352 vdpa_dev = dev->vdpa_dev; 2353 if (vdpa_dev && vdpa_dev->ops->migration_done) 2354 vdpa_dev->ops->migration_done(dev->vid); 2355 2356 return RTE_VHOST_MSG_RESULT_OK; 2357 } 2358 2359 static int 2360 vhost_user_net_set_mtu(struct virtio_net **pdev, 2361 struct vhu_msg_context *ctx, 2362 int main_fd __rte_unused) 2363 { 2364 struct virtio_net *dev = *pdev; 2365 2366 if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU || 2367 ctx->msg.payload.u64 > VIRTIO_MAX_MTU) { 2368 VHOST_LOG_CONFIG(dev->ifname, ERR, 2369 "invalid MTU size (%"PRIu64")\n", 2370 ctx->msg.payload.u64); 2371 2372 return RTE_VHOST_MSG_RESULT_ERR; 2373 } 2374 2375 dev->mtu = ctx->msg.payload.u64; 2376 2377 return RTE_VHOST_MSG_RESULT_OK; 2378 } 2379 2380 static int 2381 vhost_user_set_req_fd(struct virtio_net **pdev, 2382 struct vhu_msg_context *ctx, 2383 int main_fd __rte_unused) 2384 { 2385 struct virtio_net *dev = *pdev; 2386 int fd = ctx->fds[0]; 2387 2388 if (validate_msg_fds(dev, ctx, 1) != 0) 2389 return RTE_VHOST_MSG_RESULT_ERR; 2390 2391 if (fd < 0) { 2392 VHOST_LOG_CONFIG(dev->ifname, ERR, 2393 "invalid file descriptor for slave channel (%d)\n", fd); 2394 return RTE_VHOST_MSG_RESULT_ERR; 2395 } 2396 2397 if (dev->slave_req_fd >= 0) 2398 close(dev->slave_req_fd); 2399 2400 dev->slave_req_fd = fd; 2401 2402 return RTE_VHOST_MSG_RESULT_OK; 2403 } 2404 2405 static int 2406 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) 2407 { 2408 struct vhost_vring_addr *ra; 2409 uint64_t start, end, len; 2410 2411 start = imsg->iova; 2412 end = start + imsg->size; 2413 2414 ra = &vq->ring_addrs; 2415 len = sizeof(struct vring_desc) * vq->size; 2416 if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start) 2417 return 1; 2418 2419 len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size; 2420 if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start) 2421 return 1; 2422 2423 len = sizeof(struct vring_used) + 2424 sizeof(struct vring_used_elem) * vq->size; 2425 if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) 2426 return 1; 2427 2428 if (ra->flags & (1 << VHOST_VRING_F_LOG)) { 2429 len = sizeof(uint64_t); 2430 if (ra->log_guest_addr < end && 2431 (ra->log_guest_addr + len) > start) 2432 return 1; 2433 } 2434 2435 return 0; 2436 } 2437 2438 static int 2439 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) 2440 { 2441 struct vhost_vring_addr *ra; 2442 uint64_t start, end, len; 2443 2444 start = imsg->iova; 2445 end = start + imsg->size; 2446 2447 ra = &vq->ring_addrs; 2448 len = sizeof(struct vring_packed_desc) * vq->size; 2449 if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start) 2450 return 1; 2451 2452 len = sizeof(struct vring_packed_desc_event); 2453 if (ra->avail_user_addr < end && 
(ra->avail_user_addr + len) > start) 2454 return 1; 2455 2456 len = sizeof(struct vring_packed_desc_event); 2457 if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) 2458 return 1; 2459 2460 if (ra->flags & (1 << VHOST_VRING_F_LOG)) { 2461 len = sizeof(uint64_t); 2462 if (ra->log_guest_addr < end && 2463 (ra->log_guest_addr + len) > start) 2464 return 1; 2465 } 2466 2467 return 0; 2468 } 2469 2470 static int is_vring_iotlb(struct virtio_net *dev, 2471 struct vhost_virtqueue *vq, 2472 struct vhost_iotlb_msg *imsg) 2473 { 2474 if (vq_is_packed(dev)) 2475 return is_vring_iotlb_packed(vq, imsg); 2476 else 2477 return is_vring_iotlb_split(vq, imsg); 2478 } 2479 2480 static int 2481 vhost_user_get_config(struct virtio_net **pdev, 2482 struct vhu_msg_context *ctx, 2483 int main_fd __rte_unused) 2484 { 2485 struct virtio_net *dev = *pdev; 2486 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; 2487 int ret = 0; 2488 2489 if (validate_msg_fds(dev, ctx, 0) != 0) 2490 return RTE_VHOST_MSG_RESULT_ERR; 2491 2492 if (!vdpa_dev) { 2493 VHOST_LOG_CONFIG(dev->ifname, ERR, "is not vDPA device!\n"); 2494 return RTE_VHOST_MSG_RESULT_ERR; 2495 } 2496 2497 if (vdpa_dev->ops->get_config) { 2498 ret = vdpa_dev->ops->get_config(dev->vid, 2499 ctx->msg.payload.cfg.region, 2500 ctx->msg.payload.cfg.size); 2501 if (ret != 0) { 2502 ctx->msg.size = 0; 2503 VHOST_LOG_CONFIG(dev->ifname, ERR, "get_config() return error!\n"); 2504 } 2505 } else { 2506 VHOST_LOG_CONFIG(dev->ifname, ERR, "get_config() not supported!\n"); 2507 } 2508 2509 return RTE_VHOST_MSG_RESULT_REPLY; 2510 } 2511 2512 static int 2513 vhost_user_set_config(struct virtio_net **pdev, 2514 struct vhu_msg_context *ctx, 2515 int main_fd __rte_unused) 2516 { 2517 struct virtio_net *dev = *pdev; 2518 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; 2519 int ret = 0; 2520 2521 if (validate_msg_fds(dev, ctx, 0) != 0) 2522 return RTE_VHOST_MSG_RESULT_ERR; 2523 2524 if (ctx->msg.payload.cfg.size > VHOST_USER_MAX_CONFIG_SIZE) { 2525 VHOST_LOG_CONFIG(dev->ifname, ERR, 2526 "vhost_user_config size: %"PRIu32", should not be larger than %d\n", 2527 ctx->msg.payload.cfg.size, VHOST_USER_MAX_CONFIG_SIZE); 2528 goto out; 2529 } 2530 2531 if (!vdpa_dev) { 2532 VHOST_LOG_CONFIG(dev->ifname, ERR, "is not vDPA device!\n"); 2533 goto out; 2534 } 2535 2536 if (vdpa_dev->ops->set_config) { 2537 ret = vdpa_dev->ops->set_config(dev->vid, 2538 ctx->msg.payload.cfg.region, 2539 ctx->msg.payload.cfg.offset, 2540 ctx->msg.payload.cfg.size, 2541 ctx->msg.payload.cfg.flags); 2542 if (ret) 2543 VHOST_LOG_CONFIG(dev->ifname, ERR, "set_config() return error!\n"); 2544 } else { 2545 VHOST_LOG_CONFIG(dev->ifname, ERR, "set_config() not supported!\n"); 2546 } 2547 2548 return RTE_VHOST_MSG_RESULT_OK; 2549 2550 out: 2551 return RTE_VHOST_MSG_RESULT_ERR; 2552 } 2553 2554 static int 2555 vhost_user_iotlb_msg(struct virtio_net **pdev, 2556 struct vhu_msg_context *ctx, 2557 int main_fd __rte_unused) 2558 { 2559 struct virtio_net *dev = *pdev; 2560 struct vhost_iotlb_msg *imsg = &ctx->msg.payload.iotlb; 2561 uint16_t i; 2562 uint64_t vva, len; 2563 2564 switch (imsg->type) { 2565 case VHOST_IOTLB_UPDATE: 2566 len = imsg->size; 2567 vva = qva_to_vva(dev, imsg->uaddr, &len); 2568 if (!vva) 2569 return RTE_VHOST_MSG_RESULT_ERR; 2570 2571 for (i = 0; i < dev->nr_vring; i++) { 2572 struct vhost_virtqueue *vq = dev->virtqueue[i]; 2573 2574 if (!vq) 2575 continue; 2576 2577 vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva, 2578 len, imsg->perm); 2579 2580 if (is_vring_iotlb(dev, vq, 
imsg)) { 2581 rte_spinlock_lock(&vq->access_lock); 2582 translate_ring_addresses(&dev, &vq); 2583 *pdev = dev; 2584 rte_spinlock_unlock(&vq->access_lock); 2585 } 2586 } 2587 break; 2588 case VHOST_IOTLB_INVALIDATE: 2589 for (i = 0; i < dev->nr_vring; i++) { 2590 struct vhost_virtqueue *vq = dev->virtqueue[i]; 2591 2592 if (!vq) 2593 continue; 2594 2595 vhost_user_iotlb_cache_remove(vq, imsg->iova, 2596 imsg->size); 2597 2598 if (is_vring_iotlb(dev, vq, imsg)) { 2599 rte_spinlock_lock(&vq->access_lock); 2600 vring_invalidate(dev, vq); 2601 rte_spinlock_unlock(&vq->access_lock); 2602 } 2603 } 2604 break; 2605 default: 2606 VHOST_LOG_CONFIG(dev->ifname, ERR, 2607 "invalid IOTLB message type (%d)\n", 2608 imsg->type); 2609 return RTE_VHOST_MSG_RESULT_ERR; 2610 } 2611 2612 return RTE_VHOST_MSG_RESULT_OK; 2613 } 2614 2615 static int 2616 vhost_user_set_postcopy_advise(struct virtio_net **pdev, 2617 struct vhu_msg_context *ctx, 2618 int main_fd __rte_unused) 2619 { 2620 struct virtio_net *dev = *pdev; 2621 #ifdef RTE_LIBRTE_VHOST_POSTCOPY 2622 struct uffdio_api api_struct; 2623 2624 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 2625 2626 if (dev->postcopy_ufd == -1) { 2627 VHOST_LOG_CONFIG(dev->ifname, ERR, 2628 "userfaultfd not available: %s\n", 2629 strerror(errno)); 2630 return RTE_VHOST_MSG_RESULT_ERR; 2631 } 2632 api_struct.api = UFFD_API; 2633 api_struct.features = 0; 2634 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { 2635 VHOST_LOG_CONFIG(dev->ifname, ERR, 2636 "UFFDIO_API ioctl failure: %s\n", 2637 strerror(errno)); 2638 close(dev->postcopy_ufd); 2639 dev->postcopy_ufd = -1; 2640 return RTE_VHOST_MSG_RESULT_ERR; 2641 } 2642 ctx->fds[0] = dev->postcopy_ufd; 2643 ctx->fd_num = 1; 2644 2645 return RTE_VHOST_MSG_RESULT_REPLY; 2646 #else 2647 dev->postcopy_ufd = -1; 2648 ctx->fd_num = 0; 2649 2650 return RTE_VHOST_MSG_RESULT_ERR; 2651 #endif 2652 } 2653 2654 static int 2655 vhost_user_set_postcopy_listen(struct virtio_net **pdev, 2656 struct vhu_msg_context *ctx __rte_unused, 2657 int main_fd __rte_unused) 2658 { 2659 struct virtio_net *dev = *pdev; 2660 2661 if (dev->mem && dev->mem->nregions) { 2662 VHOST_LOG_CONFIG(dev->ifname, ERR, 2663 "regions already registered at postcopy-listen\n"); 2664 return RTE_VHOST_MSG_RESULT_ERR; 2665 } 2666 dev->postcopy_listening = 1; 2667 2668 return RTE_VHOST_MSG_RESULT_OK; 2669 } 2670 2671 static int 2672 vhost_user_postcopy_end(struct virtio_net **pdev, 2673 struct vhu_msg_context *ctx, 2674 int main_fd __rte_unused) 2675 { 2676 struct virtio_net *dev = *pdev; 2677 2678 dev->postcopy_listening = 0; 2679 if (dev->postcopy_ufd >= 0) { 2680 close(dev->postcopy_ufd); 2681 dev->postcopy_ufd = -1; 2682 } 2683 2684 ctx->msg.payload.u64 = 0; 2685 ctx->msg.size = sizeof(ctx->msg.payload.u64); 2686 ctx->fd_num = 0; 2687 2688 return RTE_VHOST_MSG_RESULT_REPLY; 2689 } 2690 2691 static int 2692 vhost_user_get_status(struct virtio_net **pdev, 2693 struct vhu_msg_context *ctx, 2694 int main_fd __rte_unused) 2695 { 2696 struct virtio_net *dev = *pdev; 2697 2698 ctx->msg.payload.u64 = dev->status; 2699 ctx->msg.size = sizeof(ctx->msg.payload.u64); 2700 ctx->fd_num = 0; 2701 2702 return RTE_VHOST_MSG_RESULT_REPLY; 2703 } 2704 2705 static int 2706 vhost_user_set_status(struct virtio_net **pdev, 2707 struct vhu_msg_context *ctx, 2708 int main_fd __rte_unused) 2709 { 2710 struct virtio_net *dev = *pdev; 2711 2712 /* As per Virtio specification, the device status is 8bits long */ 2713 if (ctx->msg.payload.u64 > UINT8_MAX) { 2714 
VHOST_LOG_CONFIG(dev->ifname, ERR, 2715 "invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n", 2716 ctx->msg.payload.u64); 2717 return RTE_VHOST_MSG_RESULT_ERR; 2718 } 2719 2720 dev->status = ctx->msg.payload.u64; 2721 2722 if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) && 2723 (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) { 2724 VHOST_LOG_CONFIG(dev->ifname, ERR, 2725 "FEATURES_OK bit is set but feature negotiation failed\n"); 2726 /* 2727 * Clear the bit to let the driver know about the feature 2728 * negotiation failure 2729 */ 2730 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; 2731 } 2732 2733 VHOST_LOG_CONFIG(dev->ifname, INFO, "new device status(0x%08x):\n", dev->status); 2734 VHOST_LOG_CONFIG(dev->ifname, INFO, 2735 "\t-RESET: %u\n", 2736 (dev->status == VIRTIO_DEVICE_STATUS_RESET)); 2737 VHOST_LOG_CONFIG(dev->ifname, INFO, 2738 "\t-ACKNOWLEDGE: %u\n", 2739 !!(dev->status & VIRTIO_DEVICE_STATUS_ACK)); 2740 VHOST_LOG_CONFIG(dev->ifname, INFO, 2741 "\t-DRIVER: %u\n", 2742 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER)); 2743 VHOST_LOG_CONFIG(dev->ifname, INFO, 2744 "\t-FEATURES_OK: %u\n", 2745 !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK)); 2746 VHOST_LOG_CONFIG(dev->ifname, INFO, 2747 "\t-DRIVER_OK: %u\n", 2748 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)); 2749 VHOST_LOG_CONFIG(dev->ifname, INFO, 2750 "\t-DEVICE_NEED_RESET: %u\n", 2751 !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET)); 2752 VHOST_LOG_CONFIG(dev->ifname, INFO, 2753 "\t-FAILED: %u\n", 2754 !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED)); 2755 2756 return RTE_VHOST_MSG_RESULT_OK; 2757 } 2758 2759 #define VHOST_MESSAGE_HANDLERS \ 2760 VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \ 2761 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \ 2762 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \ 2763 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \ 2764 VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \ 2765 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \ 2766 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \ 2767 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \ 2768 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \ 2769 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \ 2770 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \ 2771 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \ 2772 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \ 2773 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \ 2774 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \ 2775 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \ 2776 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \ 2777 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \ 2778 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \ 2779 VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \ 2780 VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \ 2781 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_SLAVE_REQ_FD, vhost_user_set_req_fd, 
true) \ 2782 VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \ 2783 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \ 2784 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \ 2785 VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \ 2786 VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \ 2787 VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \ 2788 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \ 2789 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \ 2790 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \ 2791 VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false) 2792 2793 #define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \ 2794 [id] = { #id, handler, accepts_fd }, 2795 static vhost_message_handler_t vhost_message_handlers[] = { 2796 VHOST_MESSAGE_HANDLERS 2797 }; 2798 #undef VHOST_MESSAGE_HANDLER 2799 2800 /* return bytes# of read on success or negative val on failure. */ 2801 static int 2802 read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) 2803 { 2804 int ret; 2805 2806 ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE, 2807 ctx->fds, VHOST_MEMORY_MAX_NREGIONS, &ctx->fd_num); 2808 if (ret <= 0) { 2809 return ret; 2810 } else if (ret != VHOST_USER_HDR_SIZE) { 2811 VHOST_LOG_CONFIG(dev->ifname, ERR, "Unexpected header size read\n"); 2812 close_msg_fds(ctx); 2813 return -1; 2814 } 2815 2816 if (ctx->msg.size) { 2817 if (ctx->msg.size > sizeof(ctx->msg.payload)) { 2818 VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid msg size: %d\n", 2819 ctx->msg.size); 2820 return -1; 2821 } 2822 ret = read(sockfd, &ctx->msg.payload, ctx->msg.size); 2823 if (ret <= 0) 2824 return ret; 2825 if (ret != (int)ctx->msg.size) { 2826 VHOST_LOG_CONFIG(dev->ifname, ERR, "read control message failed\n"); 2827 return -1; 2828 } 2829 } 2830 2831 return ret; 2832 } 2833 2834 static int 2835 send_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) 2836 { 2837 if (!ctx) 2838 return 0; 2839 2840 return send_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, 2841 VHOST_USER_HDR_SIZE + ctx->msg.size, ctx->fds, ctx->fd_num); 2842 } 2843 2844 static int 2845 send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) 2846 { 2847 if (!ctx) 2848 return 0; 2849 2850 ctx->msg.flags &= ~VHOST_USER_VERSION_MASK; 2851 ctx->msg.flags &= ~VHOST_USER_NEED_REPLY; 2852 ctx->msg.flags |= VHOST_USER_VERSION; 2853 ctx->msg.flags |= VHOST_USER_REPLY_MASK; 2854 2855 return send_vhost_message(dev, sockfd, ctx); 2856 } 2857 2858 static int 2859 send_vhost_slave_message(struct virtio_net *dev, 2860 struct vhu_msg_context *ctx) 2861 { 2862 int ret; 2863 2864 if (ctx->msg.flags & VHOST_USER_NEED_REPLY) 2865 rte_spinlock_lock(&dev->slave_req_lock); 2866 2867 ret = send_vhost_message(dev, dev->slave_req_fd, ctx); 2868 if (ret < 0 && (ctx->msg.flags & VHOST_USER_NEED_REPLY)) 2869 rte_spinlock_unlock(&dev->slave_req_lock); 2870 2871 return ret; 2872 } 2873 2874 /* 2875 * Allocate a queue pair if it hasn't been allocated yet 2876 */ 2877 static int 2878 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, 2879 struct vhu_msg_context *ctx) 2880 { 2881 uint32_t vring_idx; 2882 2883 switch (ctx->msg.request.master) { 2884 case 
VHOST_USER_SET_VRING_KICK: 2885 case VHOST_USER_SET_VRING_CALL: 2886 case VHOST_USER_SET_VRING_ERR: 2887 vring_idx = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; 2888 break; 2889 case VHOST_USER_SET_VRING_NUM: 2890 case VHOST_USER_SET_VRING_BASE: 2891 case VHOST_USER_GET_VRING_BASE: 2892 case VHOST_USER_SET_VRING_ENABLE: 2893 vring_idx = ctx->msg.payload.state.index; 2894 break; 2895 case VHOST_USER_SET_VRING_ADDR: 2896 vring_idx = ctx->msg.payload.addr.index; 2897 break; 2898 case VHOST_USER_SET_INFLIGHT_FD: 2899 vring_idx = ctx->msg.payload.inflight.num_queues - 1; 2900 break; 2901 default: 2902 return 0; 2903 } 2904 2905 if (vring_idx >= VHOST_MAX_VRING) { 2906 VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid vring index: %u\n", vring_idx); 2907 return -1; 2908 } 2909 2910 if (dev->virtqueue[vring_idx]) 2911 return 0; 2912 2913 return alloc_vring_queue(dev, vring_idx); 2914 } 2915 2916 static void 2917 vhost_user_lock_all_queue_pairs(struct virtio_net *dev) 2918 { 2919 unsigned int i = 0; 2920 unsigned int vq_num = 0; 2921 2922 while (vq_num < dev->nr_vring) { 2923 struct vhost_virtqueue *vq = dev->virtqueue[i]; 2924 2925 if (vq) { 2926 rte_spinlock_lock(&vq->access_lock); 2927 vq_num++; 2928 } 2929 i++; 2930 } 2931 } 2932 2933 static void 2934 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) 2935 { 2936 unsigned int i = 0; 2937 unsigned int vq_num = 0; 2938 2939 while (vq_num < dev->nr_vring) { 2940 struct vhost_virtqueue *vq = dev->virtqueue[i]; 2941 2942 if (vq) { 2943 rte_spinlock_unlock(&vq->access_lock); 2944 vq_num++; 2945 } 2946 i++; 2947 } 2948 } 2949 2950 int 2951 vhost_user_msg_handler(int vid, int fd) 2952 { 2953 struct virtio_net *dev; 2954 struct vhu_msg_context ctx; 2955 vhost_message_handler_t *msg_handler; 2956 struct rte_vdpa_device *vdpa_dev; 2957 int msg_result = RTE_VHOST_MSG_RESULT_OK; 2958 int ret; 2959 int unlock_required = 0; 2960 bool handled; 2961 uint32_t vdpa_type = 0; 2962 uint32_t request; 2963 uint32_t i; 2964 2965 dev = get_device(vid); 2966 if (dev == NULL) 2967 return -1; 2968 2969 if (!dev->notify_ops) { 2970 dev->notify_ops = vhost_driver_callback_get(dev->ifname); 2971 if (!dev->notify_ops) { 2972 VHOST_LOG_CONFIG(dev->ifname, ERR, 2973 "failed to get callback ops for driver\n"); 2974 return -1; 2975 } 2976 } 2977 2978 ret = read_vhost_message(dev, fd, &ctx); 2979 if (ret <= 0) { 2980 if (ret < 0) 2981 VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost read message failed\n"); 2982 else 2983 VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n"); 2984 2985 return -1; 2986 } 2987 2988 request = ctx.msg.request.master; 2989 if (request > VHOST_USER_NONE && request < RTE_DIM(vhost_message_handlers)) 2990 msg_handler = &vhost_message_handlers[request]; 2991 else 2992 msg_handler = NULL; 2993 2994 if (msg_handler != NULL && msg_handler->description != NULL) { 2995 if (request != VHOST_USER_IOTLB_MSG) 2996 VHOST_LOG_CONFIG(dev->ifname, INFO, 2997 "read message %s\n", 2998 msg_handler->description); 2999 else 3000 VHOST_LOG_CONFIG(dev->ifname, DEBUG, 3001 "read message %s\n", 3002 msg_handler->description); 3003 } else { 3004 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "external request %d\n", request); 3005 } 3006 3007 ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx); 3008 if (ret < 0) { 3009 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc queue\n"); 3010 return -1; 3011 } 3012 3013 /* 3014 * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE 3015 * and VHOST_USER_RESET_OWNER, since it is sent when virtio stops 3016 * and device is 
destroyed. destroy_device waits for queues to be 3017 * inactive, so it is safe. Otherwise taking the access_lock 3018 * would cause a dead lock. 3019 */ 3020 switch (request) { 3021 case VHOST_USER_SET_FEATURES: 3022 case VHOST_USER_SET_PROTOCOL_FEATURES: 3023 case VHOST_USER_SET_OWNER: 3024 case VHOST_USER_SET_MEM_TABLE: 3025 case VHOST_USER_SET_LOG_BASE: 3026 case VHOST_USER_SET_LOG_FD: 3027 case VHOST_USER_SET_VRING_NUM: 3028 case VHOST_USER_SET_VRING_ADDR: 3029 case VHOST_USER_SET_VRING_BASE: 3030 case VHOST_USER_SET_VRING_KICK: 3031 case VHOST_USER_SET_VRING_CALL: 3032 case VHOST_USER_SET_VRING_ERR: 3033 case VHOST_USER_SET_VRING_ENABLE: 3034 case VHOST_USER_SEND_RARP: 3035 case VHOST_USER_NET_SET_MTU: 3036 case VHOST_USER_SET_SLAVE_REQ_FD: 3037 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { 3038 vhost_user_lock_all_queue_pairs(dev); 3039 unlock_required = 1; 3040 } 3041 break; 3042 default: 3043 break; 3044 3045 } 3046 3047 handled = false; 3048 if (dev->extern_ops.pre_msg_handle) { 3049 RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0); 3050 msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx); 3051 switch (msg_result) { 3052 case RTE_VHOST_MSG_RESULT_REPLY: 3053 send_vhost_reply(dev, fd, &ctx); 3054 /* Fall-through */ 3055 case RTE_VHOST_MSG_RESULT_ERR: 3056 case RTE_VHOST_MSG_RESULT_OK: 3057 handled = true; 3058 goto skip_to_post_handle; 3059 case RTE_VHOST_MSG_RESULT_NOT_HANDLED: 3060 default: 3061 break; 3062 } 3063 } 3064 3065 if (msg_handler == NULL || msg_handler->callback == NULL) 3066 goto skip_to_post_handle; 3067 3068 if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) { 3069 msg_result = RTE_VHOST_MSG_RESULT_ERR; 3070 } else { 3071 msg_result = msg_handler->callback(&dev, &ctx, fd); 3072 } 3073 3074 switch (msg_result) { 3075 case RTE_VHOST_MSG_RESULT_ERR: 3076 VHOST_LOG_CONFIG(dev->ifname, ERR, 3077 "processing %s failed.\n", 3078 msg_handler->description); 3079 handled = true; 3080 break; 3081 case RTE_VHOST_MSG_RESULT_OK: 3082 VHOST_LOG_CONFIG(dev->ifname, DEBUG, 3083 "processing %s succeeded.\n", 3084 msg_handler->description); 3085 handled = true; 3086 break; 3087 case RTE_VHOST_MSG_RESULT_REPLY: 3088 VHOST_LOG_CONFIG(dev->ifname, DEBUG, 3089 "processing %s succeeded and needs reply.\n", 3090 msg_handler->description); 3091 send_vhost_reply(dev, fd, &ctx); 3092 handled = true; 3093 break; 3094 default: 3095 break; 3096 } 3097 3098 skip_to_post_handle: 3099 if (msg_result != RTE_VHOST_MSG_RESULT_ERR && 3100 dev->extern_ops.post_msg_handle) { 3101 RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0); 3102 msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx); 3103 switch (msg_result) { 3104 case RTE_VHOST_MSG_RESULT_REPLY: 3105 send_vhost_reply(dev, fd, &ctx); 3106 /* Fall-through */ 3107 case RTE_VHOST_MSG_RESULT_ERR: 3108 case RTE_VHOST_MSG_RESULT_OK: 3109 handled = true; 3110 case RTE_VHOST_MSG_RESULT_NOT_HANDLED: 3111 default: 3112 break; 3113 } 3114 } 3115 3116 /* If message was not handled at this stage, treat it as an error */ 3117 if (!handled) { 3118 VHOST_LOG_CONFIG(dev->ifname, ERR, 3119 "vhost message (req: %d) was not handled.\n", 3120 request); 3121 close_msg_fds(&ctx); 3122 msg_result = RTE_VHOST_MSG_RESULT_ERR; 3123 } 3124 3125 /* 3126 * If the request required a reply that was already sent, 3127 * this optional reply-ack won't be sent as the 3128 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply(). 
3129 */ 3130 if (ctx.msg.flags & VHOST_USER_NEED_REPLY) { 3131 ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR; 3132 ctx.msg.size = sizeof(ctx.msg.payload.u64); 3133 ctx.fd_num = 0; 3134 send_vhost_reply(dev, fd, &ctx); 3135 } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) { 3136 VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n"); 3137 ret = -1; 3138 goto unlock; 3139 } 3140 3141 for (i = 0; i < dev->nr_vring; i++) { 3142 struct vhost_virtqueue *vq = dev->virtqueue[i]; 3143 bool cur_ready = vq_is_ready(dev, vq); 3144 3145 if (cur_ready != (vq && vq->ready)) { 3146 vq->ready = cur_ready; 3147 vhost_user_notify_queue_state(dev, vq, cur_ready); 3148 } 3149 } 3150 3151 unlock: 3152 if (unlock_required) 3153 vhost_user_unlock_all_queue_pairs(dev); 3154 3155 if (ret != 0 || !virtio_is_ready(dev)) 3156 goto out; 3157 3158 /* 3159 * Virtio is now ready. If not done already, it is time 3160 * to notify the application it can process the rings and 3161 * configure the vDPA device if present. 3162 */ 3163 3164 if (!(dev->flags & VIRTIO_DEV_RUNNING)) { 3165 if (dev->notify_ops->new_device(dev->vid) == 0) 3166 dev->flags |= VIRTIO_DEV_RUNNING; 3167 } 3168 3169 vdpa_dev = dev->vdpa_dev; 3170 if (!vdpa_dev) 3171 goto out; 3172 3173 if (vdpa_dev->ops->get_dev_type) { 3174 ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type); 3175 if (ret) { 3176 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to get vdpa dev type.\n"); 3177 ret = -1; 3178 goto out; 3179 } 3180 } else { 3181 vdpa_type = RTE_VHOST_VDPA_DEVICE_TYPE_NET; 3182 } 3183 if (vdpa_type == RTE_VHOST_VDPA_DEVICE_TYPE_BLK 3184 && request != VHOST_USER_SET_VRING_CALL) 3185 goto out; 3186 3187 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { 3188 if (vdpa_dev->ops->dev_conf(dev->vid)) 3189 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to configure vDPA device\n"); 3190 else 3191 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; 3192 } 3193 3194 out: 3195 return ret; 3196 } 3197 3198 static int process_slave_message_reply(struct virtio_net *dev, 3199 const struct vhu_msg_context *ctx) 3200 { 3201 struct vhu_msg_context msg_reply; 3202 int ret; 3203 3204 if ((ctx->msg.flags & VHOST_USER_NEED_REPLY) == 0) 3205 return 0; 3206 3207 ret = read_vhost_message(dev, dev->slave_req_fd, &msg_reply); 3208 if (ret <= 0) { 3209 if (ret < 0) 3210 VHOST_LOG_CONFIG(dev->ifname, ERR, 3211 "vhost read slave message reply failed\n"); 3212 else 3213 VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n"); 3214 ret = -1; 3215 goto out; 3216 } 3217 3218 ret = 0; 3219 if (msg_reply.msg.request.slave != ctx->msg.request.slave) { 3220 VHOST_LOG_CONFIG(dev->ifname, ERR, 3221 "received unexpected msg type (%u), expected %u\n", 3222 msg_reply.msg.request.slave, ctx->msg.request.slave); 3223 ret = -1; 3224 goto out; 3225 } 3226 3227 ret = msg_reply.msg.payload.u64 ? 
-1 : 0; 3228 3229 out: 3230 rte_spinlock_unlock(&dev->slave_req_lock); 3231 return ret; 3232 } 3233 3234 int 3235 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm) 3236 { 3237 int ret; 3238 struct vhu_msg_context ctx = { 3239 .msg = { 3240 .request.slave = VHOST_USER_SLAVE_IOTLB_MSG, 3241 .flags = VHOST_USER_VERSION, 3242 .size = sizeof(ctx.msg.payload.iotlb), 3243 .payload.iotlb = { 3244 .iova = iova, 3245 .perm = perm, 3246 .type = VHOST_IOTLB_MISS, 3247 }, 3248 }, 3249 }; 3250 3251 ret = send_vhost_message(dev, dev->slave_req_fd, &ctx); 3252 if (ret < 0) { 3253 VHOST_LOG_CONFIG(dev->ifname, ERR, 3254 "failed to send IOTLB miss message (%d)\n", 3255 ret); 3256 return ret; 3257 } 3258 3259 return 0; 3260 } 3261 3262 static int 3263 vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply) 3264 { 3265 int ret; 3266 struct vhu_msg_context ctx = { 3267 .msg = { 3268 .request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG, 3269 .flags = VHOST_USER_VERSION, 3270 .size = 0, 3271 } 3272 }; 3273 3274 if (need_reply) 3275 ctx.msg.flags |= VHOST_USER_NEED_REPLY; 3276 3277 ret = send_vhost_slave_message(dev, &ctx); 3278 if (ret < 0) { 3279 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to send config change (%d)\n", ret); 3280 return ret; 3281 } 3282 3283 return process_slave_message_reply(dev, &ctx); 3284 } 3285 3286 int 3287 rte_vhost_slave_config_change(int vid, bool need_reply) 3288 { 3289 struct virtio_net *dev; 3290 3291 dev = get_device(vid); 3292 if (!dev) 3293 return -ENODEV; 3294 3295 return vhost_user_slave_config_change(dev, need_reply); 3296 } 3297 3298 static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev, 3299 int index, int fd, 3300 uint64_t offset, 3301 uint64_t size) 3302 { 3303 int ret; 3304 struct vhu_msg_context ctx = { 3305 .msg = { 3306 .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG, 3307 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY, 3308 .size = sizeof(ctx.msg.payload.area), 3309 .payload.area = { 3310 .u64 = index & VHOST_USER_VRING_IDX_MASK, 3311 .size = size, 3312 .offset = offset, 3313 }, 3314 }, 3315 }; 3316 3317 if (fd < 0) 3318 ctx.msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK; 3319 else { 3320 ctx.fds[0] = fd; 3321 ctx.fd_num = 1; 3322 } 3323 3324 ret = send_vhost_slave_message(dev, &ctx); 3325 if (ret < 0) { 3326 VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to set host notifier (%d)\n", ret); 3327 return ret; 3328 } 3329 3330 return process_slave_message_reply(dev, &ctx); 3331 } 3332 3333 int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable) 3334 { 3335 struct virtio_net *dev; 3336 struct rte_vdpa_device *vdpa_dev; 3337 int vfio_device_fd, ret = 0; 3338 uint64_t offset, size; 3339 unsigned int i, q_start, q_last; 3340 3341 dev = get_device(vid); 3342 if (!dev) 3343 return -ENODEV; 3344 3345 vdpa_dev = dev->vdpa_dev; 3346 if (vdpa_dev == NULL) 3347 return -ENODEV; 3348 3349 if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) || 3350 !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || 3351 !(dev->protocol_features & 3352 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) || 3353 !(dev->protocol_features & 3354 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) || 3355 !(dev->protocol_features & 3356 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER))) 3357 return -ENOTSUP; 3358 3359 if (qid == RTE_VHOST_QUEUE_ALL) { 3360 q_start = 0; 3361 q_last = dev->nr_vring - 1; 3362 } else { 3363 if (qid >= dev->nr_vring) 3364 return -EINVAL; 3365 q_start = qid; 3366 q_last = qid; 3367 } 3368 3369 if 
(vdpa_dev->ops->get_vfio_device_fd == NULL) 3370 return -ENOTSUP; 3371 if (vdpa_dev->ops->get_notify_area == NULL) 3372 return -ENOTSUP; 3373 3374 vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid); 3375 if (vfio_device_fd < 0) 3376 return -ENOTSUP; 3377 3378 if (enable) { 3379 for (i = q_start; i <= q_last; i++) { 3380 if (vdpa_dev->ops->get_notify_area(vid, i, &offset, 3381 &size) < 0) { 3382 ret = -ENOTSUP; 3383 goto disable; 3384 } 3385 3386 if (vhost_user_slave_set_vring_host_notifier(dev, i, 3387 vfio_device_fd, offset, size) < 0) { 3388 ret = -EFAULT; 3389 goto disable; 3390 } 3391 } 3392 } else { 3393 disable: 3394 for (i = q_start; i <= q_last; i++) { 3395 vhost_user_slave_set_vring_host_notifier(dev, i, -1, 3396 0, 0); 3397 } 3398 } 3399 3400 return ret; 3401 } 3402
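
/*
 * Usage sketch for the host notifier control API implemented above. This is
 * illustrative only and not part of the library: the function name, the way
 * "vid" is obtained and the error handling are assumptions. It shows how an
 * application embedding a vDPA-backed vhost-user port could ask the backend
 * to map (or unmap) the device notification areas for all queues once
 * negotiation has completed:
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *	#include <rte_vhost.h>
 *
 *	static int
 *	app_toggle_host_notifiers(int vid, bool enable)
 *	{
 *		int ret;
 *
 *		ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL,
 *				enable);
 *		if (ret != 0)
 *			printf("host notifier ctrl failed for vid %d: %d\n",
 *				vid, ret);
 *
 *		return ret;
 *	}
 *
 * As implemented above, rte_vhost_host_notifier_ctrl() returns 0 on success,
 * -ENODEV when the device or its vDPA driver is missing, -ENOTSUP when the
 * required features, protocol features or driver callbacks are absent,
 * -EINVAL for an out-of-range queue id, and -ENOTSUP or -EFAULT when mapping
 * an individual queue fails.
 */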