Lines Matching defs:vq (DPDK vhost library; each entry is the source line number followed by the matching line)

70 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
83 vq->stats.iotlb_hits++;
88 vq->stats.iotlb_misses++;
100 vhost_user_iotlb_rd_unlock(vq);
110 vhost_user_iotlb_rd_lock(vq);
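
The block above (lines 70-110) is the IOVA-to-VVA translation path: a cache lookup bumps iotlb_hits or iotlb_misses, and the IOTLB read lock is dropped and retaken around the miss handling. Below is a minimal stand-alone sketch of that pattern, using simplified stand-in types (a flat entry array and a pthread rwlock), not DPDK's actual IOTLB structures:

    /* Illustrative sketch only: simplified stand-ins, not DPDK's real types. */
    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    struct iotlb_entry { uint64_t iova; uint64_t size; uint8_t *vva; };

    struct vq_sketch {
        pthread_rwlock_t iotlb_lock;        /* protects the cache             */
        struct iotlb_entry cache[64];       /* hypothetical fixed-size cache  */
        int nb_entries;
        uint64_t iotlb_hits, iotlb_misses;  /* mirrors the vq->stats counters */
    };

    /* Caller is expected to hold iotlb_lock for reading, as in the listing. */
    static uint8_t *
    iova_to_vva_sketch(struct vq_sketch *vq, uint64_t iova, uint64_t len)
    {
        for (int i = 0; i < vq->nb_entries; i++) {
            struct iotlb_entry *e = &vq->cache[i];

            if (iova >= e->iova && iova + len <= e->iova + e->size) {
                vq->iotlb_hits++;
                return e->vva + (iova - e->iova);
            }
        }

        vq->iotlb_misses++;

        /* On a miss the read lock is released while the translation is
         * requested from the front-end, then re-taken before returning,
         * which is the unlock/lock pair visible at lines 100/110 above. */
        pthread_rwlock_unlock(&vq->iotlb_lock);
        /* ... send IOTLB miss request here ... */
        pthread_rwlock_rdlock(&vq->iotlb_lock);

        return NULL;
    }
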
170 __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
176 hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
190 __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
199 if (unlikely(!vq->log_cache))
206 for (i = 0; i < vq->log_cache_nb_elem; i++) {
207 struct log_cache_entry *elem = vq->log_cache + i;
224 vq->log_cache_nb_elem = 0;
228 vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
235 if (unlikely(!vq->log_cache)) {
243 for (i = 0; i < vq->log_cache_nb_elem; i++) {
244 struct log_cache_entry *elem = vq->log_cache + i;
263 vq->log_cache[i].offset = offset;
264 vq->log_cache[i].val = (1UL << bit_nr);
265 vq->log_cache_nb_elem++;
269 __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
282 vhost_log_cache_page(dev, vq, page);
288 __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
294 hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
304 __vhost_log_cache_write(dev, vq, gpa, len);
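
Lines 190-304 are the dirty-page logging cache used for live migration: each cache entry holds a log-word offset plus a bitmask of dirty pages, and __vhost_log_cache_sync flushes the cached bits into the shared log. A minimal sketch of the same idea follows, with a plain array standing in for the shared log and without the atomics the real code needs:

    /* Illustrative sketch only: a simplified per-queue dirty-page log cache. */
    #include <stdint.h>

    #define LOG_CACHE_MAX 32                     /* hypothetical cache depth */
    #define BITS_PER_ENTRY (8 * sizeof(unsigned long))

    struct log_cache_entry_sketch { uint32_t offset; unsigned long val; };

    struct log_sketch {
        unsigned long shared_log[1024];                    /* stands in for the shared log */
        struct log_cache_entry_sketch cache[LOG_CACHE_MAX];
        uint16_t nb_elem;                                  /* mirrors log_cache_nb_elem    */
    };

    /* Mark one guest page dirty in the local cache (lines 228-265 above). */
    static void
    log_cache_page_sketch(struct log_sketch *log, uint64_t page)
    {
        uint32_t offset = page / BITS_PER_ENTRY;
        unsigned long bit = 1UL << (page % BITS_PER_ENTRY);

        for (uint16_t i = 0; i < log->nb_elem; i++) {
            if (log->cache[i].offset == offset) {
                log->cache[i].val |= bit;     /* page's word already cached: OR the bit in */
                return;
            }
        }

        if (log->nb_elem == LOG_CACHE_MAX)
            return;                           /* cache full: the real code logs the page directly */

        log->cache[log->nb_elem].offset = offset;
        log->cache[log->nb_elem].val = bit;
        log->nb_elem++;
    }

    /* Flush the cache into the shared log and reset it (lines 190-224 above). */
    static void
    log_cache_sync_sketch(struct log_sketch *log)
    {
        for (uint16_t i = 0; i < log->nb_elem; i++)
            log->shared_log[log->cache[i].offset] |= log->cache[i].val;
            /* DPDK uses an atomic fetch-or here; a plain OR keeps the sketch short. */

        log->nb_elem = 0;
    }
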
308 vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
315 idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
323 src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
341 cleanup_vq(struct vhost_virtqueue *vq, int destroy)
343 if ((vq->callfd >= 0) && (destroy != 0))
344 close(vq->callfd);
345 if (vq->kickfd >= 0)
346 close(vq->kickfd);
350 cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
357 if (vq->inflight_packed)
358 vq->inflight_packed = NULL;
360 if (vq->inflight_split)
361 vq->inflight_split = NULL;
364 if (vq->resubmit_inflight) {
365 if (vq->resubmit_inflight->resubmit_list) {
366 rte_free(vq->resubmit_inflight->resubmit_list);
367 vq->resubmit_inflight->resubmit_list = NULL;
369 rte_free(vq->resubmit_inflight);
370 vq->resubmit_inflight = NULL;
392 vhost_free_async_mem(struct vhost_virtqueue *vq)
393 __rte_exclusive_locks_required(&vq->access_lock)
395 if (!vq->async)
398 rte_free(vq->async->pkts_info);
399 rte_free(vq->async->pkts_cmpl_flag);
401 rte_free(vq->async->buffers_packed);
402 vq->async->buffers_packed = NULL;
403 rte_free(vq->async->descs_split);
404 vq->async->descs_split = NULL;
406 rte_free(vq->async);
407 vq->async = NULL;
411 free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
414 rte_free(vq->shadow_used_packed);
416 rte_free(vq->shadow_used_split);
418 rte_rwlock_write_lock(&vq->access_lock);
419 vhost_free_async_mem(vq);
420 rte_rwlock_write_unlock(&vq->access_lock);
421 rte_free(vq->batch_copy_elems);
422 rte_free(vq->log_cache);
423 rte_free(vq);
441 log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
442 __rte_shared_locks_required(&vq->iotlb_lock)
444 if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
447 vq->log_guest_addr = translate_log_addr(dev, vq,
448 vq->ring_addrs.log_guest_addr);
449 if (vq->log_guest_addr == 0)
461 translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
469 hva = vhost_iova_to_vva(dev, vq, log_addr,
490 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
491 __rte_shared_locks_required(&vq->iotlb_lock)
495 req_size = sizeof(struct vring_desc) * vq->size;
497 vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
498 vq->ring_addrs.desc_user_addr,
500 if (!vq->desc || size != req_size)
504 req_size += sizeof(uint16_t) * vq->size;
508 vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
509 vq->ring_addrs.avail_user_addr,
511 if (!vq->avail || size != req_size)
515 req_size += sizeof(struct vring_used_elem) * vq->size;
519 vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
520 vq->ring_addrs.used_user_addr,
522 if (!vq->used || size != req_size)
529 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
530 __rte_shared_locks_required(&vq->iotlb_lock)
534 req_size = sizeof(struct vring_packed_desc) * vq->size;
536 vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
537 vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
539 if (!vq->desc_packed || size != req_size)
544 vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
545 vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
547 if (!vq->driver_event || size != req_size)
552 vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
553 vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
555 if (!vq->device_event || size != req_size)
562 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
569 if (vring_translate_packed(dev, vq) < 0)
572 if (vring_translate_split(dev, vq) < 0)
576 if (log_translate(dev, vq) < 0)
579 vq->access_ok = true;
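
Lines 490-555 translate the ring addresses and verify that each region maps contiguously for its required size. For a split ring those sizes follow directly from the virtio layout; the sketch below computes them for a hypothetical 256-entry queue (struct names are local stand-ins that follow the spec layout):

    /* Illustrative sketch: the region sizes a split ring of N entries must map,
     * mirroring the req_size accumulation at lines 490-527 above. */
    #include <stdint.h>
    #include <stdio.h>

    struct vring_desc_s  { uint64_t addr; uint32_t len; uint16_t flags, next; }; /* 16 B/entry  */
    struct vring_avail_s { uint16_t flags, idx, ring[]; };                       /* 4 B + 2 B/entry */
    struct vring_used_elem_s { uint32_t id, len; };                              /* 8 B/entry   */
    struct vring_used_s  { uint16_t flags, idx; struct vring_used_elem_s ring[]; };

    int main(void)
    {
        unsigned int n = 256;   /* hypothetical queue size */

        size_t desc_sz  = sizeof(struct vring_desc_s) * n;
        size_t avail_sz = sizeof(struct vring_avail_s) + sizeof(uint16_t) * n;
        size_t used_sz  = sizeof(struct vring_used_s) + sizeof(struct vring_used_elem_s) * n;

        /* With VIRTIO_RING_F_EVENT_IDX negotiated, avail and used each grow by
         * one extra uint16_t (used_event / avail_event). */
        printf("desc: %zu, avail: %zu, used: %zu bytes\n", desc_sz, avail_sz, used_sz);
        return 0;
    }
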
585 vring_invalidate(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq)
587 vhost_user_iotlb_wr_lock(vq);
589 vq->access_ok = false;
590 vq->desc = NULL;
591 vq->avail = NULL;
592 vq->used = NULL;
593 vq->log_guest_addr = 0;
595 vhost_user_iotlb_wr_unlock(vq);
599 init_vring_queue(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq,
604 memset(vq, 0, sizeof(struct vhost_virtqueue));
606 vq->index = vring_idx;
607 vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
608 vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
609 vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
612 if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
618 vq->numa_node = numa_node;
622 reset_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)
626 callfd = vq->callfd;
627 init_vring_queue(dev, vq, vq->index);
628 vq->callfd = callfd;
634 struct vhost_virtqueue *vq;
642 vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
643 if (vq == NULL) {
650 dev->virtqueue[i] = vq;
651 init_vring_queue(dev, vq, i);
652 rte_rwlock_init(&vq->access_lock);
653 rte_rwlock_init(&vq->iotlb_lock);
654 vq->avail_wrap_counter = 1;
655 vq->used_wrap_counter = 1;
656 vq->signalled_used_valid = false;
679 struct vhost_virtqueue *vq = dev->virtqueue[i];
681 if (!vq) {
686 reset_vring_queue(dev, vq);
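
Line 612 queries the NUMA node backing the freshly allocated virtqueue with get_mempolicy(MPOL_F_NODE | MPOL_F_ADDR), so later per-queue allocations can land on the same node. A small Linux-only sketch of that query (requires <numaif.h> and linking with -lnuma; the allocated object and fallback handling here are illustrative):

    /* Illustrative sketch: which NUMA node does this allocation live on? */
    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *obj = calloc(1, 4096);   /* stands in for the freshly allocated vq */
        int numa_node = 0;

        if (obj == NULL)
            return 1;

        /* MPOL_F_NODE | MPOL_F_ADDR: return the node backing the page at 'obj'. */
        if (get_mempolicy(&numa_node, NULL, 0, obj, MPOL_F_NODE | MPOL_F_ADDR)) {
            perror("get_mempolicy");
            numa_node = 0;             /* fall back to a default node on failure */
        }

        printf("allocation lives on NUMA node %d\n", numa_node);
        free(obj);
        return 0;
    }
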
989 struct vhost_virtqueue *vq;
998 vq = dev->virtqueue[vring_idx];
999 if (!vq)
1003 vring->desc_packed = vq->desc_packed;
1004 vring->driver_event = vq->driver_event;
1005 vring->device_event = vq->device_event;
1007 vring->desc = vq->desc;
1008 vring->avail = vq->avail;
1009 vring->used = vq->used;
1011 vring->log_guest_addr = vq->log_guest_addr;
1013 vring->callfd = vq->callfd;
1014 vring->kickfd = vq->kickfd;
1015 vring->size = vq->size;
1025 struct vhost_virtqueue *vq;
1034 vq = dev->virtqueue[vring_idx];
1035 if (unlikely(!vq))
1039 if (unlikely(!vq->inflight_packed))
1042 vring->inflight_packed = vq->inflight_packed;
1044 if (unlikely(!vq->inflight_split))
1047 vring->inflight_split = vq->inflight_split;
1050 vring->resubmit_inflight = vq->resubmit_inflight;
1059 struct vhost_virtqueue *vq;
1076 vq = dev->virtqueue[vring_idx];
1077 if (unlikely(!vq))
1080 if (unlikely(!vq->inflight_split))
1083 if (unlikely(idx >= vq->size))
1086 vq->inflight_split->desc[idx].counter = vq->global_counter++;
1087 vq->inflight_split->desc[idx].inflight = 1;
1098 struct vhost_virtqueue *vq;
1116 vq = dev->virtqueue[vring_idx];
1117 if (unlikely(!vq))
1120 inflight_info = vq->inflight_packed;
1124 if (unlikely(head >= vq->size))
1127 desc = vq->desc_packed;
1129 if (unlikely(old_free_head >= vq->size))
1136 inflight_info->desc[old_free_head].counter = vq->global_counter++;
1140 while (head != ((last + 1) % vq->size)) {
1150 head = (head + 1) % vq->size;
1164 struct vhost_virtqueue *vq;
1180 vq = dev->virtqueue[vring_idx];
1181 if (unlikely(!vq))
1184 if (unlikely(!vq->inflight_split))
1187 if (unlikely(idx >= vq->size))
1192 vq->inflight_split->desc[idx].inflight = 0;
1196 vq->inflight_split->used_idx = last_used_idx;
1206 struct vhost_virtqueue *vq;
1222 vq = dev->virtqueue[vring_idx];
1223 if (unlikely(!vq))
1226 inflight_info = vq->inflight_packed;
1230 if (unlikely(head >= vq->size))
1251 struct vhost_virtqueue *vq;
1267 vq = dev->virtqueue[vring_idx];
1268 if (unlikely(!vq))
1271 if (unlikely(!vq->inflight_split))
1274 if (unlikely(idx >= vq->size))
1277 vq->inflight_split->last_inflight_io = idx;
1287 struct vhost_virtqueue *vq;
1304 vq = dev->virtqueue[vring_idx];
1305 if (unlikely(!vq))
1308 inflight_info = vq->inflight_packed;
1312 if (unlikely(head >= vq->size))
1316 if (unlikely(last >= vq->size))
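
Lines 1059-1196 maintain the inflight descriptor table that lets a reconnecting backend resubmit requests that were still pending: the set helper stamps the descriptor with a global counter and marks it inflight, the clr helper clears the flag and records the published used index. A simplified split-ring sketch with local stand-in types:

    /* Illustrative sketch: split-ring inflight bookkeeping, as suggested by
     * lines 1059-1196 above. Types are simplified local stand-ins. */
    #include <stdint.h>

    struct inflight_desc_sketch { uint64_t counter; uint8_t inflight; };

    struct inflight_split_sketch {
        uint16_t used_idx;
        uint16_t last_inflight_io;
        struct inflight_desc_sketch desc[256];   /* hypothetical fixed queue size */
    };

    static uint64_t global_counter;              /* mirrors vq->global_counter */

    /* Descriptor idx was handed to the datapath: mark it inflight and stamp it
     * with a monotonically increasing counter so ordering survives a reconnect. */
    static void set_inflight_split(struct inflight_split_sketch *inf, uint16_t idx)
    {
        inf->desc[idx].counter = global_counter++;
        inf->desc[idx].inflight = 1;
    }

    /* Descriptor idx completed: clear the flag and record the used index that
     * was published for it, so a restarted backend can resume consistently.
     * The real code also orders these two stores with a memory barrier. */
    static void clr_inflight_split(struct inflight_split_sketch *inf,
                                   uint16_t idx, uint16_t last_used_idx)
    {
        inf->desc[idx].inflight = 0;
        inf->used_idx = last_used_idx;
    }
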
1335 struct vhost_virtqueue *vq;
1345 vq = dev->virtqueue[vring_idx];
1346 if (!vq)
1349 rte_rwlock_read_lock(&vq->access_lock);
1351 if (unlikely(!vq->access_ok)) {
1357 vhost_vring_call_packed(dev, vq);
1359 vhost_vring_call_split(dev, vq);
1362 rte_rwlock_read_unlock(&vq->access_lock);
1371 struct vhost_virtqueue *vq;
1381 vq = dev->virtqueue[vring_idx];
1382 if (!vq)
1385 if (rte_rwlock_read_trylock(&vq->access_lock))
1388 if (unlikely(!vq->access_ok)) {
1394 vhost_vring_call_packed(dev, vq);
1396 vhost_vring_call_split(dev, vq);
1399 rte_rwlock_read_unlock(&vq->access_lock);
1408 struct vhost_virtqueue *vq;
1418 vq = dev->virtqueue[queue_id];
1419 if (!vq)
1422 rte_rwlock_write_lock(&vq->access_lock);
1424 if (unlikely(!vq->access_ok))
1427 if (unlikely(!vq->enabled))
1430 ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
1433 rte_rwlock_write_unlock(&vq->access_lock);
1439 struct vhost_virtqueue *vq, int enable)
1441 if (vq->used == NULL)
1446 vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
1448 vq->used->flags |= VRING_USED_F_NO_NOTIFY;
1451 vhost_avail_event(vq) = vq->last_avail_idx;
1458 struct vhost_virtqueue *vq, int enable)
1462 if (vq->device_event == NULL)
1466 vq->device_event->flags = VRING_EVENT_F_DISABLE;
1473 vq->device_event->off_wrap = vq->last_avail_idx |
1474 vq->avail_wrap_counter << 15;
1479 vq->device_event->flags = flags;
1485 struct vhost_virtqueue *vq, int enable)
1491 if (!vq->ready)
1495 return vhost_enable_notify_packed(dev, vq, enable);
1497 return vhost_enable_notify_split(dev, vq, enable);
1504 struct vhost_virtqueue *vq;
1513 vq = dev->virtqueue[queue_id];
1514 if (!vq)
1517 rte_rwlock_write_lock(&vq->access_lock);
1519 if (unlikely(!vq->access_ok)) {
1524 vq->notif_enable = enable;
1525 ret = vhost_enable_guest_notification(dev, vq, enable);
1528 rte_rwlock_write_unlock(&vq->access_lock);
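
Lines 1439-1497 toggle guest notifications: without VIRTIO_RING_F_EVENT_IDX the split ring flips VRING_USED_F_NO_NOTIFY in used->flags, with it the avail-event field is set to last_avail_idx; the packed ring drives device_event->flags and off_wrap instead. A sketch of the split-ring half, with stand-in types and only the behaviour visible in the listing:

    /* Illustrative sketch: split-ring notification enable/disable, per
     * lines 1439-1451 above. Types and names are local stand-ins. */
    #include <stdbool.h>
    #include <stdint.h>

    #define USED_F_NO_NOTIFY_SKETCH 1     /* matches the virtio split-ring flag value */

    struct vring_used_sketch { uint16_t flags; uint16_t idx; };

    static void
    enable_notify_split_sketch(struct vring_used_sketch *used, bool event_idx,
                               uint16_t *avail_event, uint16_t last_avail_idx,
                               bool enable)
    {
        if (!event_idx) {
            /* Legacy scheme: the driver checks this flag before kicking. */
            if (enable)
                used->flags &= ~USED_F_NO_NOTIFY_SKETCH;
            else
                used->flags |= USED_F_NO_NOTIFY_SKETCH;
        } else if (enable) {
            /* Event-idx scheme: ask for a kick once the driver reaches
             * last_avail_idx. The listing only updates this field on enable. */
            *avail_event = last_avail_idx;
        }
    }
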
1537 struct vhost_virtqueue *vq;
1542 vq = dev->virtqueue[queue_id];
1543 if (!vq)
1546 rte_rwlock_read_lock(&vq->access_lock);
1548 if (unlikely(!vq->access_ok))
1551 rte_atomic_store_explicit(&vq->irq_pending, false, rte_memory_order_release);
1553 if (dev->backend_ops->inject_irq(dev, vq)) {
1555 rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications_error,
1559 rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications,
1566 rte_rwlock_read_unlock(&vq->access_lock);
1585 struct vhost_virtqueue *vq;
1593 vq = dev->virtqueue[vring_idx];
1594 if (!vq)
1597 vhost_log_used_vring(dev, vq, offset, len);
1604 struct vhost_virtqueue *vq;
1618 vq = dev->virtqueue[qid];
1619 if (vq == NULL)
1622 rte_rwlock_write_lock(&vq->access_lock);
1624 if (unlikely(!vq->access_ok))
1627 if (unlikely(!vq->enabled))
1630 ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
1633 rte_rwlock_write_unlock(&vq->access_lock);
1667 struct vhost_virtqueue *vq;
1676 vq = dev->virtqueue[queue_id];
1677 if (!vq)
1681 *last_avail_idx = (vq->avail_wrap_counter << 15) |
1682 vq->last_avail_idx;
1683 *last_used_idx = (vq->used_wrap_counter << 15) |
1684 vq->last_used_idx;
1686 *last_avail_idx = vq->last_avail_idx;
1687 *last_used_idx = vq->last_used_idx;
1697 struct vhost_virtqueue *vq;
1706 vq = dev->virtqueue[queue_id];
1707 if (!vq)
1711 vq->last_avail_idx = last_avail_idx & 0x7fff;
1712 vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
1713 vq->last_used_idx = last_used_idx & 0x7fff;
1714 vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
1715 vhost_virtqueue_reconnect_log_packed(vq);
1717 vq->last_avail_idx = last_avail_idx;
1718 vq->last_used_idx = last_used_idx;
1719 vhost_virtqueue_reconnect_log_split(vq);
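
Lines 1681-1719 save and restore the packed-ring position through the 16-bit vring-base interface by folding the wrap counter into bit 15 and keeping the index in bits 0-14. The helpers below show just that packing, with a small self-check:

    /* Illustrative sketch: packed-ring index/wrap-counter packing as used in
     * the get/set vring base paths (lines 1681-1719 above). */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint16_t pack_idx(uint16_t idx, bool wrap)
    {
        return (uint16_t)((wrap << 15) | (idx & 0x7fff));
    }

    static void unpack_idx(uint16_t packed, uint16_t *idx, bool *wrap)
    {
        *idx = packed & 0x7fff;
        *wrap = !!(packed & (1 << 15));
    }

    int main(void)
    {
        uint16_t idx;
        bool wrap;

        unpack_idx(pack_idx(42, true), &idx, &wrap);
        assert(idx == 42 && wrap == true);
        return 0;
    }
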
1732 struct vhost_virtqueue *vq;
1741 vq = dev->virtqueue[queue_id];
1742 if (!vq)
1748 inflight_info = vq->inflight_packed;
1774 async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
1775 __rte_exclusive_locks_required(&vq->access_lock)
1778 int node = vq->numa_node;
1780 if (unlikely(vq->async)) {
1783 vq->index);
1791 vq->index);
1795 async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
1800 vq->index);
1804 async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
1809 vq->index);
1815 vq->size * sizeof(struct vring_used_elem_packed),
1820 vq->index);
1825 vq->size * sizeof(struct vring_used_elem),
1830 vq->index);
1835 vq->async = async;
1849 struct vhost_virtqueue *vq;
1859 vq = dev->virtqueue[queue_id];
1861 if (unlikely(vq == NULL || !dev->async_copy || dev->vdpa_dev != NULL))
1864 rte_rwlock_write_lock(&vq->access_lock);
1866 if (unlikely(!vq->access_ok)) {
1871 ret = async_channel_register(dev, vq);
1874 rte_rwlock_write_unlock(&vq->access_lock);
1882 struct vhost_virtqueue *vq;
1891 vq = dev->virtqueue[queue_id];
1893 if (unlikely(vq == NULL || !dev->async_copy || dev->vdpa_dev != NULL))
1896 vq_assert_lock(dev, vq);
1898 return async_channel_register(dev, vq);
1904 struct vhost_virtqueue *vq;
1914 vq = dev->virtqueue[queue_id];
1916 if (vq == NULL)
1919 if (rte_rwlock_write_trylock(&vq->access_lock)) {
1925 if (unlikely(!vq->access_ok)) {
1930 if (!vq->async) {
1932 } else if (vq->async->pkts_inflight_n) {
1937 vhost_free_async_mem(vq);
1942 rte_rwlock_write_unlock(&vq->access_lock);
1950 struct vhost_virtqueue *vq;
1959 vq = dev->virtqueue[queue_id];
1961 if (vq == NULL)
1964 vq_assert_lock(dev, vq);
1966 if (!vq->async)
1969 if (vq->async->pkts_inflight_n) {
1976 vhost_free_async_mem(vq);
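
Lines 1774-1835 register the async (DMA-offload) context: every per-queue array is sized by vq->size, allocated on vq->numa_node, and vq->async is set only once everything succeeded. A sketch of that allocate-then-publish shape, with plain calloc standing in for the NUMA-aware allocator and local stand-in types:

    /* Illustrative sketch: allocate all per-queue async metadata, publish it
     * only if every allocation succeeded (cf. lines 1774-1835 above). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct pkt_info_sketch { void *mbuf; uint16_t descs; };

    struct async_sketch {
        struct pkt_info_sketch *pkts_info;
        bool *pkts_cmpl_flag;
        uint16_t pkts_inflight_n;
    };

    /* Returns the new context, or NULL with nothing left allocated. */
    static struct async_sketch *
    async_register_sketch(uint16_t vq_size)
    {
        struct async_sketch *async = calloc(1, sizeof(*async));

        if (async == NULL)
            return NULL;

        async->pkts_info = calloc(vq_size, sizeof(*async->pkts_info));
        async->pkts_cmpl_flag = calloc(vq_size, sizeof(*async->pkts_cmpl_flag));
        if (async->pkts_info == NULL || async->pkts_cmpl_flag == NULL) {
            free(async->pkts_info);
            free(async->pkts_cmpl_flag);
            free(async);
            return NULL;               /* nothing is published on failure */
        }

        return async;                  /* caller stores it into vq->async under the lock */
    }
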
2060 struct vhost_virtqueue *vq;
2070 vq = dev->virtqueue[queue_id];
2072 if (vq == NULL)
2075 if (rte_rwlock_write_trylock(&vq->access_lock)) {
2081 if (unlikely(!vq->access_ok)) {
2086 if (vq->async)
2087 ret = vq->async->pkts_inflight_n;
2090 rte_rwlock_write_unlock(&vq->access_lock);
2098 struct vhost_virtqueue *vq;
2108 vq = dev->virtqueue[queue_id];
2110 if (vq == NULL)
2113 vq_assert_lock(dev, vq);
2115 if (!vq->async)
2118 ret = vq->async->pkts_inflight_n;
2128 struct vhost_virtqueue *vq;
2136 vq = dev->virtqueue[queue_id];
2137 if (vq == NULL)
2140 rte_rwlock_read_lock(&vq->access_lock);
2142 if (unlikely(!vq->access_ok)) {
2149 desc = vq->desc_packed;
2150 pmc->addr = &desc[vq->last_avail_idx].flags;
2151 if (vq->avail_wrap_counter)
2156 pmc->size = sizeof(desc[vq->last_avail_idx].flags);
2159 pmc->addr = &vq->avail->idx;
2160 pmc->val = vq->last_avail_idx & (vq->size - 1);
2161 pmc->mask = vq->size - 1;
2162 pmc->size = sizeof(vq->avail->idx);
2167 rte_rwlock_read_unlock(&vq->access_lock);
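
Lines 2149-2162 build the power-monitor wake-up condition: for a packed ring the flags of the next descriptor are watched and compared against the AVAIL/USED bits implied by the wrap counter, for a split ring avail->idx is watched relative to last_avail_idx. A sketch with a local stand-in for the condition struct (not DPDK's rte_vhost type), flag values per the virtio spec:

    /* Illustrative sketch: filling a power-monitor condition, cf. lines 2149-2162. */
    #include <stdbool.h>
    #include <stdint.h>

    #define DESC_F_AVAIL_SKETCH (1u << 7)      /* VIRTQ_DESC_F_AVAIL */
    #define DESC_F_USED_SKETCH  (1u << 15)     /* VIRTQ_DESC_F_USED  */

    struct monitor_cond_sketch {
        volatile void *addr;   /* address the core should monitor         */
        uint64_t val;          /* value to compare the masked field with  */
        uint64_t mask;         /* bits of *addr that take part             */
        uint8_t size;          /* width of the monitored field in bytes    */
        bool match;            /* work pending when (*addr & mask) == val? */
    };

    /* Packed ring: watch the flags of the next descriptor; its AVAIL/USED bits
     * line up with the wrap counter once the driver makes it available. */
    static void
    monitor_packed_sketch(struct monitor_cond_sketch *pmc,
                          volatile uint16_t *desc_flags, bool avail_wrap_counter)
    {
        pmc->addr = desc_flags;
        pmc->val = avail_wrap_counter ? DESC_F_AVAIL_SKETCH : DESC_F_USED_SKETCH;
        pmc->mask = DESC_F_AVAIL_SKETCH | DESC_F_USED_SKETCH;
        pmc->size = sizeof(*desc_flags);
        pmc->match = true;
    }

    /* Split ring: watch avail->idx; work is pending once it moves past
     * last_avail_idx, i.e. once the masked value no longer matches. */
    static void
    monitor_split_sketch(struct monitor_cond_sketch *pmc, volatile uint16_t *avail_idx,
                         uint16_t last_avail_idx, uint16_t vq_size)
    {
        pmc->addr = avail_idx;
        pmc->val = last_avail_idx & (vq_size - 1);
        pmc->mask = vq_size - 1;
        pmc->size = sizeof(*avail_idx);
        pmc->match = false;
    }
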
2205 struct vhost_virtqueue *vq;
2221 vq = dev->virtqueue[queue_id];
2223 rte_rwlock_write_lock(&vq->access_lock);
2225 if (unlikely(!vq->access_ok)) {
2236 *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
2241 rte_rwlock_write_unlock(&vq->access_lock);
2249 struct vhost_virtqueue *vq;
2261 vq = dev->virtqueue[queue_id];
2263 rte_rwlock_write_lock(&vq->access_lock);
2265 if (unlikely(!vq->access_ok)) {
2273 memset(&vq->stats, 0, sizeof(vq->stats));
2276 rte_rwlock_write_unlock(&vq->access_lock);
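
Line 2236 reads each per-queue counter generically through a table of names and byte offsets into the virtqueue structure. The same pattern in miniature, with hypothetical stat names and a local stand-in struct:

    /* Illustrative sketch: reading counters through a name/offset table. */
    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vq_stats_sketch { uint64_t packets; uint64_t bytes; uint64_t iotlb_hits; };
    struct vq_sketch { int index; struct vq_stats_sketch stats; };

    static const struct { const char *name; size_t offset; } stat_strings[] = {
        { "packets",    offsetof(struct vq_sketch, stats.packets)    },
        { "bytes",      offsetof(struct vq_sketch, stats.bytes)      },
        { "iotlb_hits", offsetof(struct vq_sketch, stats.iotlb_hits) },
    };

    int main(void)
    {
        struct vq_sketch vq = { .index = 0, .stats = { 12, 3456, 7 } };

        for (size_t i = 0; i < sizeof(stat_strings) / sizeof(stat_strings[0]); i++) {
            uint64_t val = *(uint64_t *)((char *)&vq + stat_strings[i].offset);
            printf("%s: %" PRIu64 "\n", stat_strings[i].name, val);
        }
        return 0;
    }
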