Lines Matching defs:dev
121 #define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
124 vq_assert_lock(dev, vq); \
127 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
128 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
151 validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds)
156 VHOST_CONFIG_LOG(dev->ifname, ERR,
177 async_dma_map(struct virtio_net *dev, bool do_map)
184 for (i = 0; i < dev->nr_guest_pages; i++) {
185 page = &dev->guest_pages[i];
207 VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine map failed");
212 for (i = 0; i < dev->nr_guest_pages; i++) {
213 page = &dev->guest_pages[i];
223 VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine unmap failed");
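
A minimal sketch of what this per-page map/unmap loop amounts to, assuming DPDK's public VFIO container API; the helper name is illustrative, and the page fields (host_user_addr, host_iova, size) are the ones dumped later in this listing:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_vfio.h>

    /* Illustrative helper: map or unmap one guest page for DMA through
     * the default VFIO container, as async_dma_map() appears to do. */
    static int
    map_one_guest_page(uint64_t host_user_addr, uint64_t host_iova,
                       uint64_t size, bool do_map)
    {
        if (do_map)
            return rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
                                              host_user_addr, host_iova, size);
        return rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
                                            host_user_addr, host_iova, size);
    }
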
230 free_mem_region(struct virtio_net *dev)
235 if (!dev || !dev->mem)
238 if (dev->async_copy && rte_vfio_is_enabled("vfio"))
239 async_dma_map(dev, false);
241 for (i = 0; i < dev->mem->nregions; i++) {
242 reg = &dev->mem->regions[i];
251 vhost_backend_cleanup(struct virtio_net *dev)
255 vdpa_dev = dev->vdpa_dev;
257 vdpa_dev->ops->dev_cleanup(dev->vid);
259 if (dev->mem) {
260 free_mem_region(dev);
261 rte_free(dev->mem);
262 dev->mem = NULL;
265 rte_free(dev->guest_pages);
266 dev->guest_pages = NULL;
268 if (dev->log_addr) {
269 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
270 dev->log_addr = 0;
273 if (dev->inflight_info) {
274 if (dev->inflight_info->addr) {
275 munmap(dev->inflight_info->addr,
276 dev->inflight_info->size);
277 dev->inflight_info->addr = NULL;
280 if (dev->inflight_info->fd >= 0) {
281 close(dev->inflight_info->fd);
282 dev->inflight_info->fd = -1;
285 rte_free(dev->inflight_info);
286 dev->inflight_info = NULL;
289 if (dev->backend_req_fd >= 0) {
290 close(dev->backend_req_fd);
291 dev->backend_req_fd = -1;
294 if (dev->postcopy_ufd >= 0) {
295 close(dev->postcopy_ufd);
296 dev->postcopy_ufd = -1;
299 dev->postcopy_listening = 0;
301 vhost_user_iotlb_destroy(dev);
305 vhost_user_notify_queue_state(struct virtio_net *dev, struct vhost_virtqueue *vq,
308 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
312 vhost_enable_guest_notification(dev, vq, vq->notif_enable);
315 vdpa_dev->ops->set_vring_state(dev->vid, vq->index, enable);
317 if (dev->notify_ops->vring_state_changed)
318 dev->notify_ops->vring_state_changed(dev->vid, vq->index, enable);
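
The vring_state_changed() hook invoked above is the application-side callback from struct rte_vhost_device_ops. A minimal usage sketch (callback body and socket path are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <rte_vhost.h>

    static int
    on_vring_state_changed(int vid, uint16_t queue_id, int enable)
    {
        printf("vid %d: queue %u %s\n", vid, queue_id,
               enable ? "enabled" : "disabled");
        return 0;
    }

    static const struct rte_vhost_device_ops dev_ops = {
        .vring_state_changed = on_vring_state_changed,
    };

    /* After rte_vhost_driver_register("/tmp/vhost.sock", 0):
     * rte_vhost_driver_callback_register("/tmp/vhost.sock", &dev_ops); */
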
338 struct virtio_net *dev = *pdev;
340 vhost_destroy_device_notify(dev);
342 cleanup_device(dev, 0);
343 reset_device(dev);
355 struct virtio_net *dev = *pdev;
358 rte_vhost_driver_get_features(dev->ifname, &features);
375 struct virtio_net *dev = *pdev;
378 rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
395 struct virtio_net *dev = *pdev;
400 rte_vhost_driver_get_features(dev->ifname, &vhost_features);
402 VHOST_CONFIG_LOG(dev->ifname, ERR, "received invalid negotiated features.");
403 dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
404 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
409 if (dev->flags & VIRTIO_DEV_RUNNING) {
410 if (dev->features == features)
418 if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
419 VHOST_CONFIG_LOG(dev->ifname, ERR,
424 if (dev->notify_ops->features_changed)
425 dev->notify_ops->features_changed(dev->vid, features);
428 dev->features = features;
429 if (dev->features &
433 dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
435 dev->vhost_hlen = sizeof(struct virtio_net_hdr);
437 VHOST_CONFIG_LOG(dev->ifname, INFO,
439 dev->features);
440 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
442 (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
443 (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
445 if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
446 !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
452 while (dev->nr_vring > 2) {
455 vq = dev->virtqueue[--dev->nr_vring];
459 dev->virtqueue[dev->nr_vring] = NULL;
461 cleanup_vq_inflight(dev, vq);
463 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
465 free_vq(dev, vq);
469 vdpa_dev = dev->vdpa_dev;
471 vdpa_dev->ops->set_features(dev->vid);
473 dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED;
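
The header-size choice above (lines 429-435) follows the virtio rule that VIRTIO_NET_F_MRG_RXBUF or VIRTIO_F_VERSION_1 implies the mergeable-rxbuf header layout. A sketch of the same decision from the application side, assuming DPDK's rte_vhost_get_negotiated_features(); the helper name is illustrative:

    #include <stdint.h>
    #include <stddef.h>
    #include <linux/virtio_net.h>     /* header structs, VIRTIO_NET_F_MRG_RXBUF */
    #include <linux/virtio_config.h>  /* VIRTIO_F_VERSION_1 */
    #include <rte_vhost.h>

    /* Illustrative: per-packet virtio-net header length for a given vid. */
    static size_t
    vhost_hdr_len(int vid)
    {
        uint64_t features = 0;

        if (rte_vhost_get_negotiated_features(vid, &features) != 0)
            return 0;
        if (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
                        (1ULL << VIRTIO_F_VERSION_1)))
            return sizeof(struct virtio_net_hdr_mrg_rxbuf);
        return sizeof(struct virtio_net_hdr);
    }
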
485 struct virtio_net *dev = *pdev;
486 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
489 VHOST_CONFIG_LOG(dev->ifname, ERR,
506 if (!vq_is_packed(dev)) {
508 VHOST_CONFIG_LOG(dev->ifname, ERR,
515 if (vq_is_packed(dev)) {
522 VHOST_CONFIG_LOG(dev->ifname, ERR,
535 VHOST_CONFIG_LOG(dev->ifname, ERR,
546 VHOST_CONFIG_LOG(dev->ifname, ERR,
563 struct virtio_net *dev;
571 dev = *pdev;
583 VHOST_CONFIG_LOG(dev->ifname, ERR,
594 VHOST_CONFIG_LOG(dev->ifname, ERR,
601 if (vq != dev->virtqueue[vq->index]) {
602 VHOST_CONFIG_LOG(dev->ifname, INFO, "reallocated virtqueue on node %d", node);
603 dev->virtqueue[vq->index] = vq;
606 if (vq_is_packed(dev)) {
612 VHOST_CONFIG_LOG(dev->ifname, ERR,
624 VHOST_CONFIG_LOG(dev->ifname, ERR,
635 VHOST_CONFIG_LOG(dev->ifname, ERR,
647 VHOST_CONFIG_LOG(dev->ifname, ERR,
660 VHOST_CONFIG_LOG(dev->ifname, ERR,
673 VHOST_CONFIG_LOG(dev->ifname, ERR,
686 if (dev->flags & VIRTIO_DEV_RUNNING)
689 ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
691 VHOST_CONFIG_LOG(dev->ifname, ERR, "unable to get numa information.");
698 dev = rte_realloc_socket(*pdev, sizeof(**pdev), 0, node);
699 if (!dev) {
700 VHOST_CONFIG_LOG((*pdev)->ifname, ERR, "failed to realloc dev on node %d", node);
703 *pdev = dev;
705 VHOST_CONFIG_LOG(dev->ifname, INFO, "reallocated device on node %d", node);
706 vhost_devices[dev->vid] = dev;
709 sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
710 mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
712 VHOST_CONFIG_LOG(dev->ifname, ERR,
717 dev->mem = mem;
719 gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
722 VHOST_CONFIG_LOG(dev->ifname, ERR,
727 dev->guest_pages = gp;
729 vhost_user_iotlb_init(dev);
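
The numa_realloc() path keys off the get_mempolicy() probe at line 689: with MPOL_F_NODE | MPOL_F_ADDR it reports which node physically backs an address, so the device, memory table and guest-page array can be moved next to the guest's memory. A standalone sketch (link with -lnuma; helper name is illustrative):

    #include <numaif.h>  /* get_mempolicy(), MPOL_F_NODE, MPOL_F_ADDR */

    /* Illustrative: return the NUMA node backing 'addr', or -1 on error. */
    static int
    addr_numa_node(const void *addr)
    {
        int node = -1;

        if (get_mempolicy(&node, NULL, 0, (void *)addr,
                          MPOL_F_NODE | MPOL_F_ADDR) < 0)
            return -1;
        return node;
    }
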
742 qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
747 if (unlikely(!dev || !dev->mem))
751 for (i = 0; i < dev->mem->nregions; i++) {
752 r = &dev->mem->regions[i];
777 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
780 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
784 vva = vhost_iova_to_vva(dev, vq, ra,
791 return qva_to_vva(dev, ra, size);
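
qva_to_vva() and ring_addr_to_vva() are the two translation flavors: guest virtual address through the memory table when no vIOMMU is negotiated, IOVA through the IOTLB when VIRTIO_F_IOMMU_PLATFORM is. A sketch of the memory-table walk, using the public rte_vhost_mem_region layout (function name is illustrative):

    #include <stdint.h>
    #include <rte_vhost.h>

    /* Illustrative: translate a QEMU virtual address to a host virtual
     * address, clamping *len so the result never crosses a region end. */
    static uint64_t
    guest_va_to_host_va(struct rte_vhost_memory *mem, uint64_t qva, uint64_t *len)
    {
        uint32_t i;

        for (i = 0; i < mem->nregions; i++) {
            struct rte_vhost_mem_region *r = &mem->regions[i];

            if (qva >= r->guest_user_addr &&
                qva < r->guest_user_addr + r->size) {
                if (qva + *len > r->guest_user_addr + r->size)
                    *len = r->guest_user_addr + r->size - qva;
                return qva - r->guest_user_addr + r->host_user_addr;
            }
        }
        *len = 0;
        return 0;
    }
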
795 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
800 log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
826 mem_set_dump(struct virtio_net *dev, void *ptr, size_t size, bool enable, uint64_t pagesz)
834 VHOST_CONFIG_LOG(dev->ifname, INFO,
844 struct virtio_net *dev;
847 dev = *pdev;
850 vq_assert_lock(dev, vq);
854 log_addr_to_gpa(dev, vq);
856 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map log_guest_addr.");
861 if (vq_is_packed(dev)) {
864 ring_addr_to_vva(dev, vq, vq->ring_addrs.desc_user_addr, &len);
868 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map desc_packed ring.");
872 mem_set_dump(dev, vq->desc_packed, len, true,
873 hua_to_alignment(dev->mem, vq->desc_packed));
874 numa_realloc(&dev, &vq);
875 *pdev = dev;
880 (uintptr_t)ring_addr_to_vva(dev,
884 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
889 mem_set_dump(dev, vq->driver_event, len, true,
890 hua_to_alignment(dev->mem, vq->driver_event));
893 (uintptr_t)ring_addr_to_vva(dev,
897 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
902 mem_set_dump(dev, vq->device_event, len, true,
903 hua_to_alignment(dev->mem, vq->device_event));
913 vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
916 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map desc ring.");
920 mem_set_dump(dev, vq->desc, len, true, hua_to_alignment(dev->mem, vq->desc));
921 numa_realloc(&dev, &vq);
922 *pdev = dev;
926 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
929 vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
932 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map avail ring.");
936 mem_set_dump(dev, vq->avail, len, true, hua_to_alignment(dev->mem, vq->avail));
939 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
942 vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
945 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map used ring.");
949 mem_set_dump(dev, vq->used, len, true, hua_to_alignment(dev->mem, vq->used));
952 VHOST_CONFIG_LOG(dev->ifname, WARNING,
958 VHOST_CONFIG_LOG(dev->ifname, WARNING,
964 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address desc: %p", vq->desc);
965 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address avail: %p", vq->avail);
966 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address used: %p", vq->used);
967 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "log_guest_addr: %" PRIx64, vq->log_guest_addr);
979 struct virtio_net *dev = *pdev;
984 if (dev->mem == NULL)
988 vq = dev->virtqueue[ctx->msg.payload.addr.index];
996 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)
1000 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
1004 vring_invalidate(dev, vq);
1006 if ((vq->enabled && (dev->features &
1009 translate_ring_addresses(&dev, &vq);
1010 *pdev = dev;
1025 struct virtio_net *dev = *pdev;
1026 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
1029 if (vq_is_packed(dev)) {
1050 VHOST_CONFIG_LOG(dev->ifname, INFO,
1058 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
1064 if (dev->nr_guest_pages == dev->max_guest_pages) {
1065 dev->max_guest_pages *= 2;
1066 old_pages = dev->guest_pages;
1067 dev->guest_pages = rte_realloc(dev->guest_pages,
1068 dev->max_guest_pages * sizeof(*page),
1070 if (dev->guest_pages == NULL) {
1071 VHOST_CONFIG_LOG(dev->ifname, ERR, "cannot realloc guest_pages");
1077 if (dev->nr_guest_pages > 0) {
1078 last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
1088 page = &dev->guest_pages[dev->nr_guest_pages++];
1098 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
1111 if (add_one_guest_page(dev, guest_phys_addr, host_iova,
1123 if (add_one_guest_page(dev, guest_phys_addr, host_iova,
1133 if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
1134 qsort((void *)dev->guest_pages, dev->nr_guest_pages,
1144 dump_guest_pages(struct virtio_net *dev)
1149 for (i = 0; i < dev->nr_guest_pages; i++) {
1150 page = &dev->guest_pages[i];
1152 VHOST_CONFIG_LOG(dev->ifname, INFO, "guest physical page region %u", i);
1153 VHOST_CONFIG_LOG(dev->ifname, INFO, "\tguest_phys_addr: %" PRIx64,
1155 VHOST_CONFIG_LOG(dev->ifname, INFO, "\thost_iova : %" PRIx64,
1157 VHOST_CONFIG_LOG(dev->ifname, INFO, "\tsize : %" PRIx64,
1162 #define dump_guest_pages(dev)
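
The qsort() at line 1134 exists so guest-physical lookups can switch from a linear scan to a binary search once the page array passes VHOST_BINARY_SEARCH_THRESH. A sketch of that lookup over pages sorted by guest_phys_addr (struct and names are illustrative, mirroring the fields printed by dump_guest_pages()):

    #include <stdint.h>
    #include <stdlib.h>

    struct guest_page {
        uint64_t guest_phys_addr;
        uint64_t host_iova;
        uint64_t size;
    };

    static int
    guest_page_cmp(const void *p1, const void *p2)
    {
        const struct guest_page *a = p1, *b = p2;

        return (a->guest_phys_addr > b->guest_phys_addr) -
               (a->guest_phys_addr < b->guest_phys_addr);
    }

    /* Illustrative: gpa -> host IOVA via binary search, 0 if unmapped.
     * Assumes qsort(pages, nr, sizeof(*pages), guest_page_cmp) ran first. */
    static uint64_t
    gpa_to_iova(const struct guest_page *pages, uint32_t nr, uint64_t gpa)
    {
        uint32_t lo = 0, hi = nr;

        while (lo < hi) {
            uint32_t mid = lo + (hi - lo) / 2;
            const struct guest_page *pg = &pages[mid];

            if (gpa < pg->guest_phys_addr)
                hi = mid;
            else if (gpa >= pg->guest_phys_addr + pg->size)
                lo = mid + 1;
            else
                return gpa - pg->guest_phys_addr + pg->host_iova;
        }
        return 0;
    }
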
1191 vhost_user_postcopy_region_register(struct virtio_net *dev,
1204 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
1206 VHOST_CONFIG_LOG(dev->ifname, ERR,
1212 dev->postcopy_ufd,
1217 VHOST_CONFIG_LOG(dev->ifname, INFO,
1227 vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused,
1235 vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
1243 if (!dev->postcopy_listening)
1253 reg = &dev->mem->regions[i];
1259 send_vhost_reply(dev, main_fd, ctx);
1264 if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) {
1265 VHOST_CONFIG_LOG(dev->ifname, ERR,
1270 if (validate_msg_fds(dev, &ack_ctx, 0) != 0)
1274 VHOST_CONFIG_LOG(dev->ifname, ERR,
1282 reg = &dev->mem->regions[i];
1283 if (vhost_user_postcopy_region_register(dev, reg) < 0)
1291 vhost_user_mmap_region(struct virtio_net *dev,
1302 VHOST_CONFIG_LOG(dev->ifname, ERR,
1318 VHOST_CONFIG_LOG(dev->ifname, ERR, "couldn't get hugepage size through fstat");
1331 VHOST_CONFIG_LOG(dev->ifname, ERR,
1337 populate = dev->async_copy ? MAP_POPULATE : 0;
1342 VHOST_CONFIG_LOG(dev->ifname, ERR, "mmap failed (%s).", strerror(errno));
1349 mem_set_dump(dev, mmap_addr, mmap_size, false, alignment);
1351 if (dev->async_copy) {
1352 if (add_guest_pages(dev, region, alignment) < 0) {
1353 VHOST_CONFIG_LOG(dev->ifname, ERR,
1359 VHOST_CONFIG_LOG(dev->ifname, INFO,
1362 VHOST_CONFIG_LOG(dev->ifname, INFO,
1365 VHOST_CONFIG_LOG(dev->ifname, INFO,
1368 VHOST_CONFIG_LOG(dev->ifname, INFO,
1371 VHOST_CONFIG_LOG(dev->ifname, INFO,
1374 VHOST_CONFIG_LOG(dev->ifname, INFO,
1377 VHOST_CONFIG_LOG(dev->ifname, INFO,
1380 VHOST_CONFIG_LOG(dev->ifname, INFO,
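
The mapping step itself reduces to an mmap() of the region fd sent by the frontend; MAP_POPULATE (line 1337) pre-faults the pages when async copy is enabled so DMA never stalls on a minor fault. A sketch (helper name is illustrative):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sys/mman.h>

    /* Illustrative: map a frontend-provided memory region fd, optionally
     * pre-populated for async (DMA-assisted) copies; MAP_FAILED on error. */
    static void *
    map_region(int fd, uint64_t mmap_size, int async_copy)
    {
        int populate = async_copy ? MAP_POPULATE : 0;

        return mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | populate, fd, 0);
    }
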
1392 struct virtio_net *dev = *pdev;
1400 if (validate_msg_fds(dev, ctx, memory->nregions) != 0)
1404 VHOST_CONFIG_LOG(dev->ifname, ERR,
1410 if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
1411 VHOST_CONFIG_LOG(dev->ifname, INFO, "memory regions not changed");
1418 if (dev->mem) {
1419 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
1420 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
1423 vdpa_dev->ops->dev_close(dev->vid);
1424 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1428 if (dev->async_copy && dev->notify_ops->vring_state_changed) {
1429 for (i = 0; i < dev->nr_vring; i++) {
1430 dev->notify_ops->vring_state_changed(dev->vid,
1437 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1438 vhost_user_iotlb_flush_all(dev);
1440 free_mem_region(dev);
1441 rte_free(dev->mem);
1442 dev->mem = NULL;
1449 if (dev->nr_vring > 0)
1450 numa_node = dev->virtqueue[0]->numa_node;
1452 dev->nr_guest_pages = 0;
1453 if (dev->guest_pages == NULL) {
1454 dev->max_guest_pages = 8;
1455 dev->guest_pages = rte_zmalloc_socket(NULL,
1456 dev->max_guest_pages *
1460 if (dev->guest_pages == NULL) {
1461 VHOST_CONFIG_LOG(dev->ifname, ERR,
1462 "failed to allocate memory for dev->guest_pages");
1467 dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
1469 if (dev->mem == NULL) {
1470 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
1475 reg = &dev->mem->regions[i];
1490 if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
1491 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region %u", i);
1495 dev->mem->nregions++;
1498 if (dev->async_copy && rte_vfio_is_enabled("vfio"))
1499 async_dma_map(dev, true);
1501 if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)
1504 for (i = 0; i < dev->nr_vring; i++) {
1505 struct vhost_virtqueue *vq = dev->virtqueue[i];
1512 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
1519 vring_invalidate(dev, vq);
1521 translate_ring_addresses(&dev, &vq);
1522 *pdev = dev;
1526 dump_guest_pages(dev);
1529 for (i = 0; i < dev->nr_vring; i++)
1530 dev->notify_ops->vring_state_changed(dev->vid, i, 1);
1536 free_mem_region(dev);
1537 rte_free(dev->mem);
1538 dev->mem = NULL;
1541 rte_free(dev->guest_pages);
1542 dev->guest_pages = NULL;
1549 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
1556 if (vq_is_packed(dev))
1572 virtio_is_ready(struct virtio_net *dev)
1577 uint32_t i, nr_vring = dev->nr_vring;
1579 if (dev->flags & VIRTIO_DEV_READY)
1582 if (!dev->nr_vring)
1585 vdpa_dev = dev->vdpa_dev;
1594 if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET)
1598 if (dev->nr_vring < nr_vring)
1602 vq = dev->virtqueue[i];
1604 if (!vq_is_ready(dev, vq))
1609 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
1610 if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK))
1613 dev->flags |= VIRTIO_DEV_READY;
1615 if (!(dev->flags & VIRTIO_DEV_RUNNING))
1616 VHOST_CONFIG_LOG(dev->ifname, INFO, "virtio is now ready for processing.");
1621 inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *fd)
1637 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to get inflight buffer fd");
1645 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc inflight buffer");
1652 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap inflight buffer");
1658 mem_set_dump(dev, ptr, size, false, alignment);
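
inflight_mem_alloc() needs memory that is both locally mapped and shareable with the frontend over the unix socket, which on Linux points at an anonymous fd. A sketch assuming the memfd route (glibc >= 2.27; helper name is illustrative, and the real code may fall back to a temp file):

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/mman.h>  /* memfd_create(), mmap() */
    #include <unistd.h>    /* ftruncate(), close() */

    /* Illustrative: allocate 'size' bytes of fd-backed shared memory,
     * returning the mapping and the fd to pass to the peer. */
    static void *
    alloc_shared(const char *name, size_t size, int *fd)
    {
        void *ptr;

        *fd = memfd_create(name, MFD_CLOEXEC);
        if (*fd < 0)
            return NULL;
        if (ftruncate(*fd, size) < 0)
            goto err;
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
        if (ptr != MAP_FAILED)
            return ptr;
    err:
        close(*fd);
        *fd = -1;
        return NULL;
    }
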
1688 struct virtio_net *dev = *pdev;
1694 VHOST_CONFIG_LOG(dev->ifname, ERR,
1704 if (dev->nr_vring > 0)
1705 numa_node = dev->virtqueue[0]->numa_node;
1707 if (dev->inflight_info == NULL) {
1708 dev->inflight_info = rte_zmalloc_socket("inflight_info",
1710 if (!dev->inflight_info) {
1711 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc dev inflight area");
1714 dev->inflight_info->fd = -1;
1720 VHOST_CONFIG_LOG(dev->ifname, INFO,
1723 VHOST_CONFIG_LOG(dev->ifname, INFO,
1727 if (vq_is_packed(dev))
1733 addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd);
1735 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc vhost inflight area");
1741 if (dev->inflight_info->addr) {
1742 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1743 dev->inflight_info->addr = NULL;
1746 if (dev->inflight_info->fd >= 0) {
1747 close(dev->inflight_info->fd);
1748 dev->inflight_info->fd = -1;
1751 dev->inflight_info->addr = addr;
1752 dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size;
1753 dev->inflight_info->fd = ctx->fds[0] = fd;
1757 if (vq_is_packed(dev)) {
1769 VHOST_CONFIG_LOG(dev->ifname, INFO,
1772 VHOST_CONFIG_LOG(dev->ifname, INFO,
1775 VHOST_CONFIG_LOG(dev->ifname, INFO,
1788 struct virtio_net *dev = *pdev;
1795 if (validate_msg_fds(dev, ctx, 1) != 0)
1800 VHOST_CONFIG_LOG(dev->ifname, ERR,
1811 if (vq_is_packed(dev))
1816 VHOST_CONFIG_LOG(dev->ifname, INFO, "set_inflight_fd mmap_size: %"PRIu64, mmap_size);
1817 VHOST_CONFIG_LOG(dev->ifname, INFO,
1820 VHOST_CONFIG_LOG(dev->ifname, INFO,
1823 VHOST_CONFIG_LOG(dev->ifname, INFO,
1826 VHOST_CONFIG_LOG(dev->ifname, INFO,
1829 VHOST_CONFIG_LOG(dev->ifname, INFO,
1837 if (dev->nr_vring > 0)
1838 numa_node = dev->virtqueue[0]->numa_node;
1840 if (!dev->inflight_info) {
1841 dev->inflight_info = rte_zmalloc_socket("inflight_info",
1843 if (dev->inflight_info == NULL) {
1844 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc dev inflight area");
1847 dev->inflight_info->fd = -1;
1850 if (dev->inflight_info->addr) {
1851 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1852 dev->inflight_info->addr = NULL;
1858 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap share memory.");
1862 if (dev->inflight_info->fd >= 0) {
1863 close(dev->inflight_info->fd);
1864 dev->inflight_info->fd = -1;
1867 mem_set_dump(dev, addr, mmap_size, false, get_blk_size(fd));
1868 dev->inflight_info->fd = fd;
1869 dev->inflight_info->addr = addr;
1870 dev->inflight_info->size = mmap_size;
1873 vq = dev->virtqueue[i];
1877 cleanup_vq_inflight(dev, vq);
1878 if (vq_is_packed(dev)) {
1896 struct virtio_net *dev = *pdev;
1902 if (validate_msg_fds(dev, ctx, expected_fds) != 0)
1910 VHOST_CONFIG_LOG(dev->ifname, INFO,
1914 vq = dev->virtqueue[file.index];
1918 vhost_user_notify_queue_state(dev, vq, 0);
1933 struct virtio_net *dev = *pdev;
1937 if (validate_msg_fds(dev, ctx, expected_fds) != 0)
1942 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "not implemented");
1960 vhost_check_queue_inflights_split(struct virtio_net *dev,
1969 if (!(dev->protocol_features &
2009 VHOST_CONFIG_LOG(dev->ifname, ERR,
2018 VHOST_CONFIG_LOG(dev->ifname, ERR,
2048 vhost_check_queue_inflights_packed(struct virtio_net *dev,
2056 if (!(dev->protocol_features &
2105 VHOST_CONFIG_LOG(dev->ifname, ERR,
2114 VHOST_CONFIG_LOG(dev->ifname, ERR,
2148 struct virtio_net *dev = *pdev;
2154 if (validate_msg_fds(dev, ctx, expected_fds) != 0)
2162 VHOST_CONFIG_LOG(dev->ifname, INFO,
2167 vq = dev->virtqueue[file.index];
2168 translate_ring_addresses(&dev, &vq);
2169 *pdev = dev;
2176 if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
2182 vhost_user_notify_queue_state(dev, vq, 0);
2189 if (vq_is_packed(dev)) {
2190 if (vhost_check_queue_inflights_packed(dev, vq)) {
2191 VHOST_CONFIG_LOG(dev->ifname, ERR,
2197 if (vhost_check_queue_inflights_split(dev, vq)) {
2198 VHOST_CONFIG_LOG(dev->ifname, ERR,
2216 struct virtio_net *dev = *pdev;
2217 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
2221 vhost_destroy_device_notify(dev);
2223 dev->flags &= ~VIRTIO_DEV_READY;
2224 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
2227 if (vq_is_packed(dev)) {
2239 VHOST_CONFIG_LOG(dev->ifname, INFO,
2259 if (vq_is_packed(dev)) {
2276 vhost_user_iotlb_flush_all(dev);
2279 vring_invalidate(dev, vq);
2295 struct virtio_net *dev = *pdev;
2300 VHOST_CONFIG_LOG(dev->ifname, INFO,
2304 vq = dev->virtqueue[index];
2305 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2307 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
2309 VHOST_CONFIG_LOG(dev->ifname, ERR,
2325 struct virtio_net *dev = *pdev;
2328 rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2342 struct virtio_net *dev = *pdev;
2346 rte_vhost_driver_get_protocol_features(dev->ifname,
2349 VHOST_CONFIG_LOG(dev->ifname, ERR, "received invalid protocol features.");
2353 dev->protocol_features = protocol_features;
2354 VHOST_CONFIG_LOG(dev->ifname, INFO,
2356 dev->protocol_features);
2366 struct virtio_net *dev = *pdev;
2373 if (validate_msg_fds(dev, ctx, 1) != 0)
2377 VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid log fd: %d", fd);
2382 VHOST_CONFIG_LOG(dev->ifname, ERR,
2393 VHOST_CONFIG_LOG(dev->ifname, ERR,
2399 VHOST_CONFIG_LOG(dev->ifname, INFO,
2411 VHOST_CONFIG_LOG(dev->ifname, ERR, "mmap log base failed!");
2419 if (dev->log_addr) {
2420 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2422 dev->log_addr = (uint64_t)(uintptr_t)addr;
2423 dev->log_base = dev->log_addr + off;
2424 dev->log_size = size;
2425 mem_set_dump(dev, addr, size + off, false, alignment);
2427 for (i = 0; i < dev->nr_vring; i++) {
2428 struct vhost_virtqueue *vq = dev->virtqueue[i];
2441 VHOST_CONFIG_LOG(dev->ifname, ERR,
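
Once log_base and log_size are set, dirty tracking for live migration is one bit per guest page in that shared area, set with an atomic OR so concurrent datapath threads do not lose updates. A sketch of the marking step (assumes the usual 4 KiB log-page granularity; helper name is illustrative):

    #include <stdint.h>

    #define VHOST_LOG_PAGE 4096

    /* Illustrative: flag the log bit covering guest physical address 'gpa'. */
    static void
    log_dirty_page(uint8_t *log_base, uint64_t gpa)
    {
        uint64_t page = gpa / VHOST_LOG_PAGE;

        __atomic_fetch_or(&log_base[page / 8],
                          (uint8_t)(1u << (page % 8)), __ATOMIC_RELAXED);
    }
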
2463 struct virtio_net *dev = *pdev;
2465 if (validate_msg_fds(dev, ctx, 1) != 0)
2469 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "not implemented.");
2487 struct virtio_net *dev = *pdev;
2491 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
2494 memcpy(dev->mac.addr_bytes, mac, 6);
2503 rte_atomic_store_explicit(&dev->broadcast_rarp, 1, rte_memory_order_release);
2504 vdpa_dev = dev->vdpa_dev;
2506 vdpa_dev->ops->migration_done(dev->vid);
2516 struct virtio_net *dev = *pdev;
2520 VHOST_CONFIG_LOG(dev->ifname, ERR,
2527 dev->mtu = ctx->msg.payload.u64;
2537 struct virtio_net *dev = *pdev;
2540 if (validate_msg_fds(dev, ctx, 1) != 0)
2544 VHOST_CONFIG_LOG(dev->ifname, ERR,
2549 if (dev->backend_req_fd >= 0)
2550 close(dev->backend_req_fd);
2552 dev->backend_req_fd = fd;
2622 static int is_vring_iotlb(struct virtio_net *dev,
2626 if (vq_is_packed(dev))
2637 struct virtio_net *dev = *pdev;
2638 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
2641 if (validate_msg_fds(dev, ctx, 0) != 0)
2645 VHOST_CONFIG_LOG(dev->ifname, ERR, "is not vDPA device!");
2650 ret = vdpa_dev->ops->get_config(dev->vid,
2655 VHOST_CONFIG_LOG(dev->ifname, ERR, "get_config() return error!");
2658 VHOST_CONFIG_LOG(dev->ifname, ERR, "get_config() not supported!");
2669 struct virtio_net *dev = *pdev;
2670 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
2673 if (validate_msg_fds(dev, ctx, 0) != 0)
2677 VHOST_CONFIG_LOG(dev->ifname, ERR,
2684 VHOST_CONFIG_LOG(dev->ifname, ERR, "is not vDPA device!");
2689 ret = vdpa_dev->ops->set_config(dev->vid,
2695 VHOST_CONFIG_LOG(dev->ifname, ERR, "set_config() return error!");
2697 VHOST_CONFIG_LOG(dev->ifname, ERR, "set_config() not supported!");
2711 struct virtio_net *dev = *pdev;
2719 vva = qva_to_vva(dev, imsg->uaddr, &len);
2723 pg_sz = hua_to_alignment(dev->mem, (void *)(uintptr_t)vva);
2725 vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len, pg_sz, imsg->perm);
2727 for (i = 0; i < dev->nr_vring; i++) {
2728 struct vhost_virtqueue *vq = dev->virtqueue[i];
2733 if (is_vring_iotlb(dev, vq, imsg)) {
2735 translate_ring_addresses(&dev, &vq);
2736 *pdev = dev;
2742 vhost_user_iotlb_cache_remove(dev, imsg->iova, imsg->size);
2744 for (i = 0; i < dev->nr_vring; i++) {
2745 struct vhost_virtqueue *vq = dev->virtqueue[i];
2750 if (is_vring_iotlb(dev, vq, imsg)) {
2752 vring_invalidate(dev, vq);
2758 VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid IOTLB message type (%d)",
2771 struct virtio_net *dev = *pdev;
2775 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2777 if (dev->postcopy_ufd == -1) {
2778 VHOST_CONFIG_LOG(dev->ifname, ERR,
2785 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2786 VHOST_CONFIG_LOG(dev->ifname, ERR,
2789 close(dev->postcopy_ufd);
2790 dev->postcopy_ufd = -1;
2793 ctx->fds[0] = dev->postcopy_ufd;
2798 dev->postcopy_ufd = -1;
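
The postcopy-advise path above boils down to opening a userfaultfd, negotiating UFFD_API, and shipping the fd back to the frontend in ctx->fds[0] so QEMU can service the faults. A standalone sketch (helper name is illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    /* Illustrative: open and initialize a userfaultfd, or return -1. */
    static int
    open_userfaultfd(void)
    {
        struct uffdio_api api = { .api = UFFD_API, .features = 0 };
        int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (ufd < 0)
            return -1;
        if (ioctl(ufd, UFFDIO_API, &api) < 0) {
            close(ufd);
            return -1;
        }
        return ufd;
    }
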
2810 struct virtio_net *dev = *pdev;
2812 if (dev->mem && dev->mem->nregions) {
2813 VHOST_CONFIG_LOG(dev->ifname, ERR,
2817 dev->postcopy_listening = 1;
2827 struct virtio_net *dev = *pdev;
2829 dev->postcopy_listening = 0;
2830 if (dev->postcopy_ufd >= 0) {
2831 close(dev->postcopy_ufd);
2832 dev->postcopy_ufd = -1;
2847 struct virtio_net *dev = *pdev;
2849 ctx->msg.payload.u64 = dev->status;
2861 struct virtio_net *dev = *pdev;
2865 VHOST_CONFIG_LOG(dev->ifname, ERR,
2871 dev->status = ctx->msg.payload.u64;
2873 if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
2874 (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
2875 VHOST_CONFIG_LOG(dev->ifname, ERR,
2881 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
2884 VHOST_CONFIG_LOG(dev->ifname, INFO, "new device status(0x%08x):", dev->status);
2885 VHOST_CONFIG_LOG(dev->ifname, INFO,
2887 (dev->status == VIRTIO_DEVICE_STATUS_RESET));
2888 VHOST_CONFIG_LOG(dev->ifname, INFO,
2890 !!(dev->status & VIRTIO_DEVICE_STATUS_ACK));
2891 VHOST_CONFIG_LOG(dev->ifname, INFO,
2893 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER));
2894 VHOST_CONFIG_LOG(dev->ifname, INFO,
2896 !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK));
2897 VHOST_CONFIG_LOG(dev->ifname, INFO,
2899 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK));
2900 VHOST_CONFIG_LOG(dev->ifname, INFO,
2902 !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET));
2903 VHOST_CONFIG_LOG(dev->ifname, INFO,
2905 !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
2919 read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx)
2923 ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE,
2929 VHOST_CONFIG_LOG(dev->ifname, ERR, "Unexpected header size read");
2936 VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid msg size: %d",
2945 VHOST_CONFIG_LOG(dev->ifname, ERR, "read control message failed");
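
read_vhost_message() reads in two steps because every vhost-user message starts with a fixed header announcing the payload size; ancillary fds ride along with the header read. A sketch of the on-wire header per the vhost-user spec (struct name is illustrative):

    #include <stdint.h>

    /* Little-endian, 12 bytes; VHOST_USER_HDR_SIZE covers exactly this. */
    struct vhost_user_hdr {
        uint32_t request;  /* VHOST_USER_* message type */
        uint32_t flags;    /* bits 0-1 version, bit 2 reply, bit 3 need-reply */
        uint32_t size;     /* number of payload bytes that follow */
    };
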
2959 send_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx)
2964 return send_fd_message(dev->ifname, sockfd, (char *)&ctx->msg,
2969 send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx)
2979 return send_vhost_message(dev, sockfd, ctx);
2983 send_vhost_backend_message(struct virtio_net *dev, struct vhu_msg_context *ctx)
2985 return send_vhost_message(dev, dev->backend_req_fd, ctx);
2989 send_vhost_backend_message_process_reply(struct virtio_net *dev, struct vhu_msg_context *ctx)
2994 rte_spinlock_lock(&dev->backend_req_lock);
2995 ret = send_vhost_backend_message(dev, ctx);
2997 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to send config change (%d)", ret);
3001 ret = read_vhost_message(dev, dev->backend_req_fd, &msg_reply);
3004 VHOST_CONFIG_LOG(dev->ifname, ERR,
3007 VHOST_CONFIG_LOG(dev->ifname, INFO, "vhost peer closed");
3013 VHOST_CONFIG_LOG(dev->ifname, ERR,
3022 rte_spinlock_unlock(&dev->backend_req_lock);
3030 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
3058 VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid vring index: %u", vring_idx);
3062 if (dev->virtqueue[vring_idx])
3065 return alloc_vring_queue(dev, vring_idx);
3069 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
3075 while (vq_num < dev->nr_vring) {
3076 struct vhost_virtqueue *vq = dev->virtqueue[i];
3087 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
3093 while (vq_num < dev->nr_vring) {
3094 struct vhost_virtqueue *vq = dev->virtqueue[i];
3107 struct virtio_net *dev;
3119 dev = get_device(vid);
3120 if (dev == NULL)
3123 if (!dev->notify_ops) {
3124 dev->notify_ops = vhost_driver_callback_get(dev->ifname);
3125 if (!dev->notify_ops) {
3126 VHOST_CONFIG_LOG(dev->ifname, ERR,
3133 ret = read_vhost_message(dev, fd, &ctx);
3135 VHOST_CONFIG_LOG(dev->ifname, INFO, "vhost peer closed");
3146 VHOST_CONFIG_LOG(dev->ifname, ERR, "vhost read message %s%s%sfailed",
3155 VHOST_CONFIG_LOG(dev->ifname, INFO,
3159 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
3163 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "external request %d", request);
3166 ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx);
3168 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc queue");
3180 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
3181 vhost_user_lock_all_queue_pairs(dev);
3187 if (dev->extern_ops.pre_msg_handle) {
3189 msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
3192 send_vhost_reply(dev, fd, &ctx);
3207 if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
3210 msg_result = msg_handler->callback(&dev, &ctx, fd);
3215 VHOST_CONFIG_LOG(dev->ifname, ERR,
3221 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
3227 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
3230 send_vhost_reply(dev, fd, &ctx);
3239 dev->extern_ops.post_msg_handle) {
3241 msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
3244 send_vhost_reply(dev, fd, &ctx);
3257 VHOST_CONFIG_LOG(dev->ifname, ERR,
3273 send_vhost_reply(dev, fd, &ctx);
3275 VHOST_CONFIG_LOG(dev->ifname, ERR, "vhost message handling failed.");
3280 for (i = 0; i < dev->nr_vring; i++) {
3281 struct vhost_virtqueue *vq = dev->virtqueue[i];
3282 bool cur_ready = vq_is_ready(dev, vq);
3286 vhost_user_notify_queue_state(dev, vq, cur_ready);
3292 vhost_user_unlock_all_queue_pairs(dev);
3294 if (ret != 0 || !virtio_is_ready(dev))
3303 if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
3304 if (dev->notify_ops->new_device(dev->vid) == 0)
3305 dev->flags |= VIRTIO_DEV_RUNNING;
3308 vdpa_dev = dev->vdpa_dev;
3315 if (blk_call_fd != dev->nr_vring - 1)
3322 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
3323 if (vdpa_dev->ops->dev_conf(dev->vid))
3324 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to configure vDPA device");
3326 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
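
The pre_msg_handle/post_msg_handle hooks dispatched in this handler let an application intercept messages around the built-in processing. A registration sketch using DPDK's external-ops API (handler body is illustrative):

    #include <rte_vhost.h>

    /* Illustrative: observe every request, then defer to the built-in
     * handler by returning NOT_HANDLED; returning RTE_VHOST_MSG_RESULT_REPLY
     * instead asks the library to send the (possibly modified) message back. */
    static enum rte_vhost_msg_result
    pre_handle(int vid, void *msg)
    {
        (void)vid;
        (void)msg;
        return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
    }

    static const struct rte_vhost_user_extern_ops extern_ops = {
        .pre_msg_handle = pre_handle,
    };

    /* Once 'vid' exists (e.g. from the new_device callback):
     * rte_vhost_extern_callback_register(vid, &extern_ops, NULL); */
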
3334 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
3350 ret = send_vhost_message(dev, dev->backend_req_fd, &ctx);
3352 VHOST_CONFIG_LOG(dev->ifname, ERR,
3371 struct virtio_net *dev;
3374 dev = get_device(vid);
3375 if (!dev)
3379 ret = send_vhost_backend_message(dev, &ctx);
3382 ret = send_vhost_backend_message_process_reply(dev, &ctx);
3386 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to send config change (%d)", ret);
3390 static int vhost_user_backend_set_vring_host_notifier(struct virtio_net *dev,
3416 ret = send_vhost_backend_message_process_reply(dev, &ctx);
3418 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to set host notifier (%d)", ret);
3425 struct virtio_net *dev;
3431 dev = get_device(vid);
3432 if (!dev)
3435 vdpa_dev = dev->vdpa_dev;
3439 if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
3440 !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
3441 !(dev->protocol_features &
3443 !(dev->protocol_features &
3445 !(dev->protocol_features &
3451 q_last = dev->nr_vring - 1;
3453 if (qid >= dev->nr_vring)
3476 if (vhost_user_backend_set_vring_host_notifier(dev, i,
3485 vhost_user_backend_set_vring_host_notifier(dev, i, -1,
3494 vhost_user_inject_irq(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq)