Lines Matching defs:dev (DPDK lib/vhost/vhost.c)

64 vhost_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
66 return dev->backend_ops->iotlb_miss(dev, iova, perm);
70 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
80 vva = vhost_user_iotlb_cache_find(dev, iova, &tmp_size, perm);
82 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
87 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
92 if (!vhost_user_iotlb_pending_miss(dev, iova, perm)) {
102 vhost_user_iotlb_pending_insert(dev, iova, perm);
103 if (vhost_iotlb_miss(dev, iova, perm)) {
104 VHOST_DATA_LOG(dev->ifname, ERR,
107 vhost_user_iotlb_pending_remove(dev, iova, 1, perm);
115 vva = vhost_user_iotlb_cache_find(dev, iova, &tmp_size, perm);
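
The cluster above is the slow path of __vhost_iova_to_vva(): a cache lookup, hit/miss counter bumps when stats are enabled, a deduplicated miss request toward the front-end, and a final retry of the lookup (line 115) for back-ends that answer synchronously. A self-contained sketch of that control flow; the iotlb_* helpers are stand-ins invented for the example, only the lookup/miss/retry shape mirrors the listed function:

    #include <stdbool.h>
    #include <stdint.h>

    struct iotlb;   /* opaque: translation cache plus pending-miss list */
    uint64_t iotlb_cache_find(struct iotlb *t, uint64_t iova,
                              uint64_t *size, uint8_t perm);
    bool iotlb_pending_miss(struct iotlb *t, uint64_t iova, uint8_t perm);
    void iotlb_pending_insert(struct iotlb *t, uint64_t iova, uint8_t perm);
    void iotlb_pending_remove(struct iotlb *t, uint64_t iova, uint8_t perm);
    int  iotlb_miss_request(struct iotlb *t, uint64_t iova, uint8_t perm);

    uint64_t
    iova_to_vva(struct iotlb *t, uint64_t iova, uint64_t *size, uint8_t perm)
    {
        uint64_t tmp_size = *size;
        uint64_t vva = iotlb_cache_find(t, iova, &tmp_size, perm);

        if (tmp_size == *size)
            return vva;                        /* full hit */

        uint64_t miss = iova + tmp_size;       /* first unmapped byte */

        /* Send at most one miss request per (iova, perm). */
        if (!iotlb_pending_miss(t, miss, perm)) {
            iotlb_pending_insert(t, miss, perm);
            if (iotlb_miss_request(t, miss, perm))
                iotlb_pending_remove(t, miss, perm);  /* request failed */
        }

        /* Retry: a synchronous back-end may have filled the cache already. */
        tmp_size = *size;
        return iotlb_cache_find(t, iova, &tmp_size, perm);
    }
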
149 __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
153 if (unlikely(!dev->log_base || !len))
156 if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
164 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
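
__vhost_log_write() feeds the live-migration dirty log: one bit per VHOST_LOG_PAGE (4 KiB) page of guest-physical memory, set atomically in a bitmap shared with the front-end; the bounds check on line 156 rejects any range the bitmap cannot cover. A self-contained sketch, assuming the 4 KiB page size and the GCC atomic built-in DPDK itself relies on:

    #include <stdint.h>

    #define LOG_PAGE 4096   /* VHOST_LOG_PAGE in DPDK */

    static void
    log_page(uint8_t *log_base, uint64_t page)
    {
        /* one bit per page: byte page/8, bit page%8, set atomically */
        __atomic_fetch_or(&log_base[page / 8], 1 << (page % 8),
                          __ATOMIC_RELAXED);
    }

    static void
    log_write(uint8_t *log_base, uint64_t log_size,
              uint64_t addr, uint64_t len)
    {
        if (!log_base || !len)
            return;
        if (log_size <= ((addr + len - 1) / LOG_PAGE / 8))
            return;     /* range ends beyond the bitmap */

        for (uint64_t page = addr / LOG_PAGE;
             page <= (addr + len - 1) / LOG_PAGE; page++)
            log_page(log_base, page);
    }
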
170 __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
176 hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
178 VHOST_DATA_LOG(dev->ifname, ERR,
184 gpa = hva_to_gpa(dev, hva, len);
186 __vhost_log_write(dev, gpa, len);
190 __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
195 if (unlikely(!dev->log_base))
204 log_base = (unsigned long *)(uintptr_t)dev->log_base;
228 vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
238 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
258 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
269 __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
274 if (unlikely(!dev->log_base || !len))
277 if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
282 vhost_log_cache_page(dev, vq, page);
288 __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
294 hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
296 VHOST_DATA_LOG(dev->ifname, ERR,
302 gpa = hva_to_gpa(dev, hva, len);
304 __vhost_log_cache_write(dev, vq, gpa, len);
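
The _cache variants batch dirty bits in a small per-virtqueue array instead of touching the shared bitmap on every write; __vhost_log_cache_sync() then ORs whole words into the shared log at once. Both *_iova variants first translate IOVA to host VA and back to guest-physical via hva_to_gpa(), since the log is indexed by guest-physical address. A sketch of the flush step, with field names assumed from the listing:

    /* Per-queue log cache flush: fewer, word-sized atomic ORs. */
    struct log_cache_entry {
        uint32_t offset;        /* word index into the shared bitmap */
        unsigned long val;      /* accumulated dirty bits for that word */
    };

    static void
    log_cache_sync(unsigned long *log_base,
                   struct log_cache_entry *cache, unsigned int nb_elem)
    {
        for (unsigned int i = 0; i < nb_elem; i++)
            __atomic_fetch_or(&log_base[cache[i].offset],
                              cache[i].val, __ATOMIC_RELAXED);
    }
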
308 vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
323 src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
350 cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
352 if (!(dev->protocol_features &
356 if (vq_is_packed(dev)) {
379 cleanup_device(struct virtio_net *dev, int destroy)
383 vhost_backend_cleanup(dev);
385 for (i = 0; i < dev->nr_vring; i++) {
386 cleanup_vq(dev->virtqueue[i], destroy);
387 cleanup_vq_inflight(dev, dev->virtqueue[i]);
411 free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
413 if (vq_is_packed(dev))
430 free_device(struct virtio_net *dev)
434 for (i = 0; i < dev->nr_vring; i++)
435 free_vq(dev, dev->virtqueue[i]);
437 rte_free(dev);
441 log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
447 vq->log_guest_addr = translate_log_addr(dev, vq,
461 translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
464 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
469 hva = vhost_iova_to_vva(dev, vq, log_addr,
475 gpa = hva_to_gpa(dev, hva, exp_size);
477 VHOST_DATA_LOG(dev->ifname, ERR,
490 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
497 vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
505 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
508 vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
516 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
519 vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
529 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
537 vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
545 vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
553 vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
562 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
565 if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
568 if (vq_is_packed(dev)) {
569 if (vring_translate_packed(dev, vq) < 0)
572 if (vring_translate_split(dev, vq) < 0)
576 if (log_translate(dev, vq) < 0)
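
vring_translate() only has work to do when VIRTIO_F_IOMMU_PLATFORM was negotiated: the desc/avail/used rings (split) or the descriptor array plus event structures (packed) are then addressed by IOVA and must be mapped before use, with the dirty-log address translated last. Each ring has to come back as a single contiguous host-VA mapping of the full expected size. A sketch of that per-ring check; it reuses the internal types and vhost_iova_to_vva(), so it only compiles in the context of lib/vhost:

    /* Translate one ring under an IOMMU: a shortened mapping means the
     * ring is not contiguous in host VA, which the datapath cannot use. */
    static int
    translate_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                   uint64_t iova, uint64_t expected_size, void **out)
    {
        uint64_t size = expected_size;

        *out = (void *)(uintptr_t)vhost_iova_to_vva(dev, vq, iova,
                                                    &size, VHOST_ACCESS_RW);
        if (*out == NULL || size != expected_size)
            return -1;
        return 0;
    }
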
585 vring_invalidate(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq)
599 init_vring_queue(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq,
613 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to query numa node: %s",
622 reset_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)
627 init_vring_queue(dev, vq, vq->index);
632 alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
639 if (dev->virtqueue[i])
644 VHOST_CONFIG_LOG(dev->ifname, ERR,
650 dev->virtqueue[i] = vq;
651 init_vring_queue(dev, vq, i);
659 dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);
670 reset_device(struct virtio_net *dev)
674 dev->features = 0;
675 dev->protocol_features = 0;
676 dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
678 for (i = 0; i < dev->nr_vring; i++) {
679 struct vhost_virtqueue *vq = dev->virtqueue[i];
682 VHOST_CONFIG_LOG(dev->ifname, ERR,
686 reset_vring_queue(dev, vq);
697 struct virtio_net *dev;
727 dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
728 if (dev == NULL) {
734 vhost_devices[i] = dev;
737 dev->vid = i;
738 dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
739 dev->backend_req_fd = -1;
740 dev->postcopy_ufd = -1;
741 rte_spinlock_init(&dev->backend_req_lock);
742 dev->backend_ops = ops;
748 vhost_destroy_device_notify(struct virtio_net *dev)
752 if (dev->flags & VIRTIO_DEV_RUNNING) {
753 vdpa_dev = dev->vdpa_dev;
755 vdpa_dev->ops->dev_close(dev->vid);
756 dev->flags &= ~VIRTIO_DEV_RUNNING;
757 dev->notify_ops->destroy_device(dev->vid);
768 struct virtio_net *dev = get_device(vid);
770 if (dev == NULL)
773 vhost_destroy_device_notify(dev);
775 cleanup_device(dev, 1);
776 free_device(dev);
784 struct virtio_net *dev = get_device(vid);
786 if (dev == NULL)
789 dev->vdpa_dev = vdpa_dev;
795 struct virtio_net *dev;
798 dev = get_device(vid);
799 if (dev == NULL)
802 len = if_len > sizeof(dev->ifname) ?
803 sizeof(dev->ifname) : if_len;
805 strncpy(dev->ifname, if_name, len);
806 dev->ifname[sizeof(dev->ifname) - 1] = '\0';
813 struct virtio_net *dev = get_device(vid);
815 if (dev == NULL)
819 dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
821 dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
823 dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
825 dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
827 dev->flags |= VIRTIO_DEV_STATS_ENABLED;
829 dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
831 dev->flags |= VIRTIO_DEV_SUPPORT_IOMMU;
833 dev->flags &= ~VIRTIO_DEV_SUPPORT_IOMMU;
835 if (vhost_user_iotlb_init(dev) < 0)
843 struct virtio_net *dev = get_device(vid);
845 if (dev == NULL)
848 dev->extbuf = 1;
854 struct virtio_net *dev = get_device(vid);
856 if (dev == NULL)
859 dev->linearbuf = 1;
865 struct virtio_net *dev = get_device(vid);
867 if (dev == NULL || mtu == NULL)
870 if (!(dev->flags & VIRTIO_DEV_READY))
873 if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
876 *mtu = dev->mtu;
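
rte_vhost_get_mtu() is the public accessor behind the lines above: it fails until the device is READY and requires VIRTIO_NET_F_MTU to have been negotiated. A usage sketch from an application's new_device callback:

    #include <stdio.h>
    #include <rte_vhost.h>

    static int
    new_device(int vid)
    {
        uint16_t mtu = 0;

        /* Returns 0 on success; fails if the device is not ready yet
         * or VIRTIO_NET_F_MTU was not negotiated. */
        if (rte_vhost_get_mtu(vid, &mtu) == 0)
            printf("vid %d: negotiated MTU %u\n", vid, mtu);

        return 0;
    }
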
885 struct virtio_net *dev = get_device(vid);
889 if (dev == NULL || numa_available() != 0)
892 ret = get_mempolicy(&numa_node, NULL, 0, dev,
895 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to query numa node: %s",
910 struct virtio_net *dev = get_device(vid);
912 if (dev == NULL)
915 return dev->nr_vring;
921 struct virtio_net *dev = get_device(vid);
923 if (dev == NULL || buf == NULL)
926 len = RTE_MIN(len, sizeof(dev->ifname));
928 strncpy(buf, dev->ifname, len);
937 struct virtio_net *dev;
939 dev = get_device(vid);
940 if (dev == NULL || features == NULL)
943 *features = dev->features;
951 struct virtio_net *dev;
953 dev = get_device(vid);
954 if (dev == NULL || protocol_features == NULL)
957 *protocol_features = dev->protocol_features;
964 struct virtio_net *dev;
968 dev = get_device(vid);
969 if (dev == NULL || mem == NULL)
972 size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
977 m->nregions = dev->mem->nregions;
978 memcpy(m->regions, dev->mem->regions, size);
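
rte_vhost_get_mem_table() hands the application a snapshot of the guest memory map: it allocates a buffer, copies nregions region descriptors into it, and leaves the copy for the caller to release with free(). A usage sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_vhost.h>

    static void
    dump_guest_mem(int vid)
    {
        struct rte_vhost_memory *mem = NULL;

        if (rte_vhost_get_mem_table(vid, &mem) != 0)
            return;

        for (uint32_t i = 0; i < mem->nregions; i++)
            printf("region %u: gpa 0x%" PRIx64 " hva 0x%" PRIx64
                   " size 0x%" PRIx64 "\n", i,
                   mem->regions[i].guest_phys_addr,
                   mem->regions[i].host_user_addr,
                   mem->regions[i].size);

        free(mem);      /* the snapshot is malloc()-allocated */
    }
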
988 struct virtio_net *dev;
991 dev = get_device(vid);
992 if (dev == NULL || vring == NULL)
998 vq = dev->virtqueue[vring_idx];
1002 if (vq_is_packed(dev)) {
1024 struct virtio_net *dev;
1027 dev = get_device(vid);
1028 if (unlikely(!dev))
1034 vq = dev->virtqueue[vring_idx];
1038 if (vq_is_packed(dev)) {
1060 struct virtio_net *dev;
1062 dev = get_device(vid);
1063 if (unlikely(!dev))
1066 if (unlikely(!(dev->protocol_features &
1070 if (unlikely(vq_is_packed(dev)))
1076 vq = dev->virtqueue[vring_idx];
1097 struct virtio_net *dev;
1102 dev = get_device(vid);
1103 if (unlikely(!dev))
1106 if (unlikely(!(dev->protocol_features &
1110 if (unlikely(!vq_is_packed(dev)))
1116 vq = dev->virtqueue[vring_idx];
1163 struct virtio_net *dev;
1166 dev = get_device(vid);
1167 if (unlikely(!dev))
1170 if (unlikely(!(dev->protocol_features &
1174 if (unlikely(vq_is_packed(dev)))
1180 vq = dev->virtqueue[vring_idx];
1205 struct virtio_net *dev;
1208 dev = get_device(vid);
1209 if (unlikely(!dev))
1212 if (unlikely(!(dev->protocol_features &
1216 if (unlikely(!vq_is_packed(dev)))
1222 vq = dev->virtqueue[vring_idx];
1250 struct virtio_net *dev;
1253 dev = get_device(vid);
1254 if (unlikely(!dev))
1257 if (unlikely(!(dev->protocol_features &
1261 if (unlikely(vq_is_packed(dev)))
1267 vq = dev->virtqueue[vring_idx];
1286 struct virtio_net *dev;
1290 dev = get_device(vid);
1291 if (unlikely(!dev))
1294 if (unlikely(!(dev->protocol_features &
1298 if (unlikely(!vq_is_packed(dev)))
1304 vq = dev->virtqueue[vring_idx];
1334 struct virtio_net *dev;
1338 dev = get_device(vid);
1339 if (!dev)
1345 vq = dev->virtqueue[vring_idx];
1356 if (vq_is_packed(dev))
1357 vhost_vring_call_packed(dev, vq);
1359 vhost_vring_call_split(dev, vq);
1370 struct virtio_net *dev;
1374 dev = get_device(vid);
1375 if (!dev)
1381 vq = dev->virtqueue[vring_idx];
1393 if (vq_is_packed(dev))
1394 vhost_vring_call_packed(dev, vq);
1396 vhost_vring_call_split(dev, vq);
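
Both public kick entry points end in the same split/packed dispatch shown above; the second block (from line 1370) is the nonblocking variant, which gives up instead of waiting when the ring lock is contended (rte_vhost_vring_call_nonblock() in recent DPDK releases). A usage sketch for a datapath that fills used descriptors itself:

    #include <stdio.h>
    #include <rte_vhost.h>

    /* After placing used descriptors, notify the guest. */
    static void
    kick_guest(int vid, uint16_t vring_idx)
    {
        if (rte_vhost_vring_call(vid, vring_idx) != 0)
            fprintf(stderr, "vring call failed: vid %d ring %u\n",
                    vid, vring_idx);
    }
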
1407 struct virtio_net *dev;
1411 dev = get_device(vid);
1412 if (!dev)
1418 vq = dev->virtqueue[queue_id];
1438 vhost_enable_notify_split(struct virtio_net *dev,
1444 if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
1457 vhost_enable_notify_packed(struct virtio_net *dev,
1471 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
1484 vhost_enable_guest_notification(struct virtio_net *dev,
1494 if (vq_is_packed(dev))
1495 return vhost_enable_notify_packed(dev, vq, enable);
1497 return vhost_enable_notify_split(dev, vq, enable);
1503 struct virtio_net *dev = get_device(vid);
1507 if (!dev)
1513 vq = dev->virtqueue[queue_id];
1525 ret = vhost_enable_guest_notification(dev, vq, enable);
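
The split path above either toggles VRING_USED_F_NO_NOTIFY or, when VIRTIO_RING_F_EVENT_IDX is negotiated, programs the avail event index; packed rings use event descriptors instead. A backend that polls the avail ring does not need kicks from the guest, so it typically disables them per queue, e.g. from the vring_state_changed callback:

    #include <rte_vhost.h>

    static int
    vring_state_changed(int vid, uint16_t queue_id, int enable)
    {
        /* Poll-mode datapath: suppress guest-to-backend notifications. */
        if (enable)
            rte_vhost_enable_guest_notification(vid, queue_id, 0);
        return 0;
    }
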
1536 struct virtio_net *dev = get_device(vid);
1539 if (!dev || queue_id >= VHOST_MAX_VRING)
1542 vq = dev->virtqueue[queue_id];
1553 if (dev->backend_ops->inject_irq(dev, vq)) {
1554 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
1558 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
1561 if (dev->notify_ops->guest_notified)
1562 dev->notify_ops->guest_notified(dev->vid);
1572 struct virtio_net *dev = get_device(vid);
1574 if (dev == NULL)
1577 vhost_log_write(dev, addr, len);
1584 struct virtio_net *dev;
1587 dev = get_device(vid);
1588 if (dev == NULL)
1593 vq = dev->virtqueue[vring_idx];
1597 vhost_log_used_vring(dev, vq, offset, len);
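
These two exports let an external datapath (typically a vDPA driver without on-device dirty tracking) feed the migration log: rte_vhost_log_write() takes a guest-physical range, rte_vhost_log_used_vring() a byte range relative to a queue's used ring. A usage sketch:

    #include <rte_vhost.h>

    /* Mark guest memory dirtied by an out-of-band copy so live migration
     * re-sends those pages; the used-ring update must be logged too. */
    static void
    mark_dirty(int vid, uint64_t gpa, uint64_t len,
               uint16_t vring_idx, uint64_t used_off, uint64_t used_len)
    {
        rte_vhost_log_write(vid, gpa, len);
        rte_vhost_log_used_vring(vid, vring_idx, used_off, used_len);
    }
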
1603 struct virtio_net *dev;
1607 dev = get_device(vid);
1608 if (dev == NULL)
1611 if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
1612 VHOST_DATA_LOG(dev->ifname, ERR,
1618 vq = dev->virtqueue[qid];
1640 struct virtio_net *dev = get_device(vid);
1642 if (dev == NULL)
1645 return dev->vdpa_dev;
1652 struct virtio_net *dev = get_device(vid);
1654 if (dev == NULL || log_base == NULL || log_size == NULL)
1657 *log_base = dev->log_base;
1658 *log_size = dev->log_size;
1668 struct virtio_net *dev = get_device(vid);
1670 if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
1676 vq = dev->virtqueue[queue_id];
1680 if (vq_is_packed(dev)) {
1698 struct virtio_net *dev = get_device(vid);
1700 if (!dev)
1706 vq = dev->virtqueue[queue_id];
1710 if (vq_is_packed(dev)) {
1733 struct virtio_net *dev = get_device(vid);
1735 if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
1741 vq = dev->virtqueue[queue_id];
1745 if (!vq_is_packed(dev))
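
The get/set pair above snapshots and restores a queue's last_avail/last_used indexes, which is how an application or vDPA driver preserves ring state across stop/start or live migration; the _from_inflight variant (from line 1733) recovers the same state from the inflight shared memory and, per the vq_is_packed() check, applies only to packed rings. Usage sketch of the plain pair:

    #include <rte_vhost.h>

    static uint16_t saved_avail, saved_used;

    static void
    save_ring_state(int vid, uint16_t qid)
    {
        rte_vhost_get_vring_base(vid, qid, &saved_avail, &saved_used);
    }

    static void
    restore_ring_state(int vid, uint16_t qid)
    {
        rte_vhost_set_vring_base(vid, qid, saved_avail, saved_used);
    }
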
1763 struct virtio_net *dev = get_device(vid);
1765 if (dev == NULL || ops == NULL)
1768 dev->extern_ops = *ops;
1769 dev->extern_data = ctx;
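
rte_vhost_extern_callback_register() stores an ops table plus an opaque context on the device, letting an external backend intercept vhost-user messages before and after built-in handling. A usage sketch; the handler signature follows rte_vhost.h as I recall it, so verify it against your DPDK version:

    #include <rte_vhost.h>

    static enum rte_vhost_msg_result
    pre_msg(int vid, void *msg)
    {
        /* Peek at the raw message; let the built-in handler proceed. */
        (void)vid; (void)msg;
        return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
    }

    static const struct rte_vhost_user_extern_ops extern_ops = {
        .pre_msg_handle = pre_msg,
    };

    /* once a vid exists, e.g. in new_device():
     *     rte_vhost_extern_callback_register(vid, &extern_ops, my_ctx);
     */
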
1774 async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
1781 VHOST_CONFIG_LOG(dev->ifname, ERR,
1789 VHOST_CONFIG_LOG(dev->ifname, ERR,
1798 VHOST_CONFIG_LOG(dev->ifname, ERR,
1807 VHOST_CONFIG_LOG(dev->ifname, ERR,
1813 if (vq_is_packed(dev)) {
1818 VHOST_CONFIG_LOG(dev->ifname, ERR,
1828 VHOST_CONFIG_LOG(dev->ifname, ERR,
1850 struct virtio_net *dev = get_device(vid);
1853 if (dev == NULL)
1859 vq = dev->virtqueue[queue_id];
1861 if (unlikely(vq == NULL || !dev->async_copy || dev->vdpa_dev != NULL))
1871 ret = async_channel_register(dev, vq);
1883 struct virtio_net *dev = get_device(vid);
1885 if (dev == NULL)
1891 vq = dev->virtqueue[queue_id];
1893 if (unlikely(vq == NULL || !dev->async_copy || dev->vdpa_dev != NULL))
1896 vq_assert_lock(dev, vq);
1898 return async_channel_register(dev, vq);
1905 struct virtio_net *dev = get_device(vid);
1908 if (dev == NULL)
1914 vq = dev->virtqueue[queue_id];
1920 VHOST_CONFIG_LOG(dev->ifname, ERR,
1933 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to unregister async channel.");
1934 VHOST_CONFIG_LOG(dev->ifname, ERR,
1951 struct virtio_net *dev = get_device(vid);
1953 if (dev == NULL)
1959 vq = dev->virtqueue[queue_id];
1964 vq_assert_lock(dev, vq);
1970 VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to unregister async channel.");
1971 VHOST_CONFIG_LOG(dev->ifname, ERR,
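
async_channel_register() allocates the per-virtqueue metadata for DMA-accelerated copies; registration is refused when async copy was not requested or a vDPA device is attached, and unregistration fails while in-flight packets remain (hence the error logs above). With the DMA-device based API in recent DPDK the flow looks roughly like the sketch below; the function names are given from memory, so check rte_vhost_async.h:

    #include <rte_vhost_async.h>

    static int
    setup_async(int vid, uint16_t queue_id, int16_t dma_id)
    {
        /* Bind a DMA device vchan for vhost use (one-time per dma_id). */
        if (rte_vhost_async_dma_configure(dma_id, 0) != 0)
            return -1;

        /* Attach the async channel to this virtqueue. */
        return rte_vhost_async_channel_register(vid, queue_id);
    }

    /* Teardown, only after draining in-flight copies:
     *     rte_vhost_async_channel_unregister(vid, queue_id);
     */
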
2061 struct virtio_net *dev = get_device(vid);
2064 if (dev == NULL)
2070 vq = dev->virtqueue[queue_id];
2076 VHOST_CONFIG_LOG(dev->ifname, DEBUG,
2099 struct virtio_net *dev = get_device(vid);
2102 if (dev == NULL)
2108 vq = dev->virtqueue[queue_id];
2113 vq_assert_lock(dev, vq);
2127 struct virtio_net *dev = get_device(vid);
2131 if (dev == NULL)
2136 vq = dev->virtqueue[queue_id];
2147 if (vq_is_packed(dev)) {
2177 struct virtio_net *dev = get_device(vid);
2180 if (dev == NULL)
2183 if (queue_id >= dev->nr_vring)
2186 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
2204 struct virtio_net *dev = get_device(vid);
2209 if (dev == NULL)
2212 if (queue_id >= dev->nr_vring)
2215 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
2221 vq = dev->virtqueue[queue_id];
2248 struct virtio_net *dev = get_device(vid);
2252 if (dev == NULL)
2255 if (queue_id >= dev->nr_vring)
2258 if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
2261 vq = dev->virtqueue[queue_id];
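
The per-virtqueue statistics exports all pass through the same gate: valid device, queue index below nr_vring, and stats enabled when the socket was registered (the VIRTIO_DEV_STATS_ENABLED flag seen throughout this listing). A usage sketch, assuming the xstats-style convention that the _get_names call returns the required entry count when passed a too-small buffer; names per rte_vhost.h as I recall them:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_vhost.h>

    static void
    dump_vq_stats(int vid, uint16_t qid)
    {
        int n = rte_vhost_vring_stats_get_names(vid, qid, NULL, 0);
        if (n <= 0)
            return;     /* stats disabled or bad queue */

        struct rte_vhost_stat_name *names = calloc(n, sizeof(*names));
        struct rte_vhost_stat *stats = calloc(n, sizeof(*stats));

        if (names && stats &&
            rte_vhost_vring_stats_get_names(vid, qid, names, n) == n &&
            rte_vhost_vring_stats_get(vid, qid, stats, n) == n) {
            for (int i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", names[i].name, stats[i].value);
        }

        free(names);
        free(stats);
    }
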