Lines Matching defs:vq
30 async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
56 vhost_queue_stats_update(const struct virtio_net *dev, struct vhost_virtqueue *vq,
58 __rte_shared_locks_required(&vq->access_lock)
60 struct virtqueue_stats *stats = &vq->stats;
90 vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
93 __rte_shared_locks_required(&vq->access_lock)
134 dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = &vq->async->pkts_cmpl_flag[flag_idx];
140 vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
143 __rte_shared_locks_required(&vq->access_lock)
152 ret = vhost_async_dma_transfer_one(dev, vq, dma_id, vchan_id, head_idx,
159 if (head_idx >= vq->size)
160 head_idx -= vq->size;
223 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
224 __rte_shared_locks_required(&vq->iotlb_lock)
226 struct batch_copy_elem *elem = vq->batch_copy_elems;
227 uint16_t count = vq->batch_copy_nb_elems;
232 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
237 vq->batch_copy_nb_elems = 0;
241 do_data_copy_dequeue(struct vhost_virtqueue *vq)
243 struct batch_copy_elem *elem = vq->batch_copy_elems;
244 uint16_t count = vq->batch_copy_nb_elems;
250 vq->batch_copy_nb_elems = 0;
255 struct vhost_virtqueue *vq,
258 rte_memcpy(&vq->used->ring[to],
259 &vq->shadow_used_split[from],
261 vhost_log_cache_used_vring(dev, vq,
267 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
269 uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
271 if (used_idx + vq->shadow_used_idx <= vq->size) {
272 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
273 vq->shadow_used_idx);
277 /* update used ring interval [used_idx, vq->size] */
278 size = vq->size - used_idx;
279 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
282 do_flush_shadow_used_ring_split(dev, vq, 0, size,
283 vq->shadow_used_idx - size);
285 vq->last_used_idx += vq->shadow_used_idx;
287 vhost_log_cache_sync(dev, vq);
289 rte_atomic_fetch_add_explicit((unsigned short __rte_atomic *)&vq->used->idx,
290 vq->shadow_used_idx, rte_memory_order_release);
291 vq->shadow_used_idx = 0;
292 vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
293 sizeof(vq->used->idx));
297 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
300 uint16_t i = vq->shadow_used_idx++;
302 vq->shadow_used_split[i].id = desc_idx;
303 vq->shadow_used_split[i].len = len;
308 struct vhost_virtqueue *vq)
311 uint16_t used_idx = vq->last_used_idx;
312 uint16_t head_idx = vq->last_used_idx;
316 for (i = 0; i < vq->shadow_used_idx; i++) {
317 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
318 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
320 used_idx += vq->shadow_used_packed[i].count;
321 if (used_idx >= vq->size)
322 used_idx -= vq->size;
328 for (i = 0; i < vq->shadow_used_idx; i++) {
331 if (vq->shadow_used_packed[i].len)
336 if (vq->used_wrap_counter) {
345 vq->desc_packed[vq->last_used_idx].flags = flags;
347 vhost_log_cache_used_vring(dev, vq,
348 vq->last_used_idx *
352 head_idx = vq->last_used_idx;
356 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
359 vq->desc_packed[head_idx].flags = head_flags;
361 vhost_log_cache_used_vring(dev, vq,
366 vq->shadow_used_idx = 0;
367 vhost_log_cache_sync(dev, vq);
372 struct vhost_virtqueue *vq)
374 struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
376 vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
379 (unsigned short __rte_atomic *)&vq->desc_packed[vq->shadow_last_used_idx].flags,
382 vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
385 vq->shadow_used_idx = 0;
386 vhost_log_cache_sync(dev, vq);
391 struct vhost_virtqueue *vq,
400 last_used_idx = vq->last_used_idx;
401 desc_base = &vq->desc_packed[last_used_idx];
403 flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
416 vhost_log_cache_used_vring(dev, vq, last_used_idx *
420 vhost_log_cache_sync(dev, vq);
422 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
426 vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
429 __rte_exclusive_locks_required(&vq->access_lock)
432 struct vhost_async *async = vq->async;
439 if (async->buffer_idx_packed >= vq->size)
440 async->buffer_idx_packed -= vq->size;
445 vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *ids)
446 __rte_shared_locks_required(&vq->access_lock)
449 struct vhost_async *async = vq->async;
457 if (async->buffer_idx_packed >= vq->size)
458 async->buffer_idx_packed -= vq->size;
463 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
466 vq->shadow_used_packed[0].id = id;
468 if (!vq->shadow_used_idx) {
469 vq->shadow_last_used_idx = vq->last_used_idx;
470 vq->shadow_used_packed[0].flags =
471 PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
472 vq->shadow_used_packed[0].len = 0;
473 vq->shadow_used_packed[0].count = 1;
474 vq->shadow_used_idx++;
477 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
482 struct vhost_virtqueue *vq,
489 flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
491 if (!vq->shadow_used_idx) {
492 vq->shadow_last_used_idx = vq->last_used_idx;
493 vq->shadow_used_packed[0].id = ids[0];
494 vq->shadow_used_packed[0].len = 0;
495 vq->shadow_used_packed[0].count = 1;
496 vq->shadow_used_packed[0].flags = flags;
497 vq->shadow_used_idx++;
503 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
504 vq->desc_packed[vq->last_used_idx + i].len = 0;
509 vq->desc_packed[vq->last_used_idx + i].flags = flags;
511 vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
515 vhost_log_cache_sync(dev, vq);
517 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
521 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
527 flags = vq->desc_packed[vq->last_used_idx].flags;
528 if (vq->used_wrap_counter) {
536 if (!vq->shadow_used_idx) {
537 vq->shadow_last_used_idx = vq->last_used_idx;
539 vq->shadow_used_packed[0].id = buf_id;
540 vq->shadow_used_packed[0].len = 0;
541 vq->shadow_used_packed[0].flags = flags;
542 vq->shadow_used_idx++;
544 vq->desc_packed[vq->last_used_idx].id = buf_id;
545 vq->desc_packed[vq->last_used_idx].len = 0;
546 vq->desc_packed[vq->last_used_idx].flags = flags;
549 vq_inc_last_used_packed(vq, count);
553 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
559 vq->shadow_used_packed[0].id = buf_id;
561 flags = vq->desc_packed[vq->last_used_idx].flags;
562 if (vq->used_wrap_counter) {
570 if (!vq->shadow_used_idx) {
571 vq->shadow_last_used_idx = vq->last_used_idx;
572 vq->shadow_used_packed[0].len = 0;
573 vq->shadow_used_packed[0].flags = flags;
574 vq->shadow_used_idx++;
577 vq_inc_last_used_packed(vq, count);
581 vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
591 if (!vq->shadow_used_idx)
592 vq->shadow_aligned_idx = vq->last_used_idx &
594 vq->shadow_used_packed[vq->shadow_used_idx].id = id[i];
595 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
596 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
597 vq->shadow_aligned_idx += count[i];
598 vq->shadow_used_idx++;
603 vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
608 __rte_exclusive_locks_required(&vq->access_lock)
611 struct vhost_async *async = vq->async;
618 if (async->buffer_idx_packed >= vq->size)
619 async->buffer_idx_packed -= vq->size;
625 struct vhost_virtqueue *vq,
630 __rte_shared_locks_required(&vq->iotlb_lock)
632 vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
634 if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
635 do_data_copy_enqueue(dev, vq);
636 vhost_flush_enqueue_shadow_packed(dev, vq);
719 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
722 __rte_shared_locks_required(&vq->iotlb_lock)
733 desc_addr = vhost_iova_to_vva(dev, vq,
756 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
760 __rte_shared_locks_required(&vq->iotlb_lock)
762 uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
766 uint32_t nr_descs = vq->size;
768 struct vring_desc *descs = vq->desc;
771 if (unlikely(idx >= vq->size))
776 if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
777 dlen = vq->desc[idx].len;
779 if (unlikely(nr_descs > vq->size))
783 vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
789 if (unlikely(dlen < vq->desc[idx].len)) {
794 idesc = vhost_alloc_copy_ind_table(dev, vq,
795 vq->desc[idx].addr, vq->desc[idx].len);
814 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
840 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
844 __rte_shared_locks_required(&vq->iotlb_lock)
854 cur_idx = vq->last_avail_idx;
857 max_tries = vq->size - 1;
872 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
878 update_shadow_used_ring_split(vq, head_idx, len);
892 struct vhost_virtqueue *vq,
895 __rte_shared_locks_required(&vq->iotlb_lock)
905 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
915 vq, desc->addr, desc->len);
923 if (unlikely(nr_descs >= vq->size)) {
936 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
950 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
954 __rte_shared_locks_required(&vq->iotlb_lock)
956 bool wrap_counter = vq->avail_wrap_counter;
957 struct vring_packed_desc *descs = vq->desc_packed;
961 if (avail_idx < vq->last_avail_idx)
979 if (unlikely(*desc_count >= vq->size))
986 if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
995 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
1005 if (++avail_idx >= vq->size) {
1006 avail_idx -= vq->size;
1017 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
1020 __rte_shared_locks_required(&vq->iotlb_lock)
1037 vhost_log_cache_write_iova(dev, vq,
1120 async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
1123 __rte_shared_locks_required(&vq->access_lock)
1124 __rte_shared_locks_required(&vq->iotlb_lock)
1126 struct vhost_async *async = vq->async;
1162 sync_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
1165 __rte_shared_locks_required(&vq->iotlb_lock)
1167 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1169 if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
1174 vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
1183 batch_copy[vq->batch_copy_nb_elems].dst =
1185 batch_copy[vq->batch_copy_nb_elems].src =
1187 batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
1189 batch_copy[vq->batch_copy_nb_elems].dst =
1191 batch_copy[vq->batch_copy_nb_elems].src =
1194 batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
1195 vq->batch_copy_nb_elems++;
1200 mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
1203 __rte_shared_locks_required(&vq->access_lock)
1204 __rte_shared_locks_required(&vq->iotlb_lock)
1214 struct vhost_async *async = vq->async;
1286 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1290 vhost_log_cache_write_iova(dev, vq,
1301 if (async_fill_seg(dev, vq, m, mbuf_offset,
1305 sync_fill_seg(dev, vq, m, mbuf_offset,
1329 struct vhost_virtqueue *vq,
1333 __rte_shared_locks_required(&vq->access_lock)
1334 __rte_shared_locks_required(&vq->iotlb_lock)
1337 uint16_t avail_idx = vq->last_avail_idx;
1344 uint32_t buffer_len[vq->size];
1345 uint16_t buffer_buf_id[vq->size];
1346 uint16_t buffer_desc_count[vq->size];
1349 max_tries = vq->size - 1;
1362 if (unlikely(fill_vec_buf_packed(dev, vq,
1379 if (avail_idx >= vq->size)
1380 avail_idx -= vq->size;
1383 if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
1386 vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1393 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1395 __rte_shared_locks_required(&vq->access_lock)
1396 __rte_shared_locks_required(&vq->iotlb_lock)
1407 avail_head = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
1410 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1416 if (unlikely(reserve_avail_buf_split(dev, vq,
1421 vq->shadow_used_idx -= num_buffers;
1427 vq->last_avail_idx, vq->last_avail_idx + num_buffers);
1429 if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
1431 vq->shadow_used_idx -= num_buffers;
1435 vq->last_avail_idx += num_buffers;
1436 vhost_virtqueue_reconnect_log_split(vq);
1439 do_data_copy_enqueue(dev, vq);
1441 if (likely(vq->shadow_used_idx)) {
1442 flush_shadow_used_ring_split(dev, vq);
1443 vhost_vring_call_split(dev, vq);
1451 struct vhost_virtqueue *vq,
1455 __rte_shared_locks_required(&vq->iotlb_lock)
1457 bool wrap_counter = vq->avail_wrap_counter;
1458 struct vring_packed_desc *descs = vq->desc_packed;
1459 uint16_t avail_idx = vq->last_avail_idx;
1466 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1486 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1502 virtio_dev_rx_async_batch_check(struct vhost_virtqueue *vq,
1509 bool wrap_counter = vq->avail_wrap_counter;
1510 struct vring_packed_desc *descs = vq->desc_packed;
1511 uint16_t avail_idx = vq->last_avail_idx;
1518 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1555 struct vhost_virtqueue *vq,
1559 __rte_shared_locks_required(&vq->iotlb_lock)
1563 struct vring_packed_desc *descs = vq->desc_packed;
1564 uint16_t avail_idx = vq->last_avail_idx;
1585 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1594 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1600 vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1605 struct vhost_virtqueue *vq,
1607 __rte_shared_locks_required(&vq->iotlb_lock)
1612 if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
1615 if (vq->shadow_used_idx) {
1616 do_data_copy_enqueue(dev, vq);
1617 vhost_flush_enqueue_shadow_packed(dev, vq);
1620 virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
1627 struct vhost_virtqueue *vq,
1629 __rte_shared_locks_required(&vq->access_lock)
1630 __rte_shared_locks_required(&vq->iotlb_lock)
1635 if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1643 vq->last_avail_idx, vq->last_avail_idx + nr_descs);
1645 vq_inc_last_avail_packed(vq, nr_descs);
1652 struct vhost_virtqueue *__rte_restrict vq,
1655 __rte_shared_locks_required(&vq->access_lock)
1656 __rte_shared_locks_required(&vq->iotlb_lock)
1661 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1664 if (!virtio_dev_rx_sync_batch_packed(dev, vq,
1671 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1677 if (vq->shadow_used_idx) {
1678 do_data_copy_enqueue(dev, vq);
1679 vhost_flush_enqueue_shadow_packed(dev, vq);
1683 vhost_vring_call_packed(dev, vq);
1689 virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
1691 rte_rwlock_write_lock(&vq->access_lock);
1692 vhost_user_iotlb_rd_lock(vq);
1693 if (!vq->access_ok)
1694 vring_translate(dev, vq);
1695 vhost_user_iotlb_rd_unlock(vq);
1696 rte_rwlock_write_unlock(&vq->access_lock);
1700 virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
1706 rte_rwlock_read_lock(&vq->access_lock);
1708 if (unlikely(!vq->enabled))
1711 vhost_user_iotlb_rd_lock(vq);
1713 if (unlikely(!vq->access_ok)) {
1714 vhost_user_iotlb_rd_unlock(vq);
1715 rte_rwlock_read_unlock(&vq->access_lock);
1717 virtio_dev_vring_translate(dev, vq);
1726 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1728 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1730 vhost_queue_stats_update(dev, vq, pkts, nb_tx);
1733 vhost_user_iotlb_rd_unlock(vq);
1736 rte_rwlock_read_unlock(&vq->access_lock);
1769 async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
1770 __rte_shared_locks_required(&vq->access_lock)
1772 struct vhost_async *async = vq->async;
1777 return vq->size - async->pkts_inflight_n + async->pkts_idx;
1797 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1799 __rte_exclusive_locks_required(&vq->access_lock)
1800 __rte_shared_locks_required(&vq->iotlb_lock)
1807 struct vhost_async *async = vq->async;
1816 avail_head = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
1819 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1827 if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
1831 vq->shadow_used_idx -= num_buffers;
1837 vq->last_avail_idx, vq->last_avail_idx + num_buffers);
1839 if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
1840 vq->shadow_used_idx -= num_buffers;
1844 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1848 vq->last_avail_idx += num_buffers;
1849 vhost_virtqueue_reconnect_log_split(vq);
1855 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
1864 __func__, pkt_err, vq->index);
1871 num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1876 vq->shadow_used_idx -= num_descs;
1877 vq->last_avail_idx -= num_descs;
1878 vhost_virtqueue_reconnect_log_split(vq);
1882 if (likely(vq->shadow_used_idx)) {
1883 uint16_t to = async->desc_idx_split & (vq->size - 1);
1885 store_dma_desc_info_split(vq->shadow_used_split,
1886 async->descs_split, vq->size, 0, to,
1887 vq->shadow_used_idx);
1889 async->desc_idx_split += vq->shadow_used_idx;
1892 if (async->pkts_idx >= vq->size)
1893 async->pkts_idx -= vq->size;
1896 vq->shadow_used_idx = 0;
1905 struct vhost_virtqueue *vq,
1910 __rte_exclusive_locks_required(&vq->access_lock)
1911 __rte_shared_locks_required(&vq->iotlb_lock)
1914 uint16_t avail_idx = vq->last_avail_idx;
1920 uint32_t buffer_len[vq->size];
1921 uint16_t buffer_buf_id[vq->size];
1922 uint16_t buffer_desc_count[vq->size];
1925 max_tries = vq->size - 1;
1938 if (unlikely(fill_vec_buf_packed(dev, vq,
1954 if (avail_idx >= vq->size)
1955 avail_idx -= vq->size;
1958 if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
1961 vhost_async_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
1968 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1970 __rte_exclusive_locks_required(&vq->access_lock)
1971 __rte_shared_locks_required(&vq->iotlb_lock)
1975 if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
1983 vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1990 struct vhost_virtqueue *vq,
1994 __rte_exclusive_locks_required(&vq->access_lock)
1995 __rte_shared_locks_required(&vq->iotlb_lock)
1999 struct vring_packed_desc *descs = vq->desc_packed;
2000 struct vhost_async *async = vq->async;
2001 uint16_t avail_idx = vq->last_avail_idx;
2011 desc = vhost_iova_to_vva(dev, vq, desc_addrs[i], &lens[i], VHOST_ACCESS_RW);
2026 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2043 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr, lens[i]);
2048 vhost_async_shadow_enqueue_packed_batch(vq, lens, ids);
2053 struct vhost_virtqueue *vq,
2056 __rte_exclusive_locks_required(&vq->access_lock)
2057 __rte_shared_locks_required(&vq->iotlb_lock)
2062 if (virtio_dev_rx_async_batch_check(vq, pkts, desc_addrs, lens, dma_id, vchan_id) == -1)
2065 virtio_dev_rx_async_packed_batch_enqueue(dev, vq, pkts, desc_addrs, lens);
2071 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
2073 __rte_exclusive_locks_required(&vq->access_lock)
2077 struct vhost_async *async = vq->async;
2078 struct async_inflight_info *pkts_info = vq->async->pkts_info;
2083 descs_err += pkts_info[slot_idx % vq->size].descs;
2084 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
2088 if (vq->last_avail_idx >= descs_err) {
2089 vq->last_avail_idx -= descs_err;
2091 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
2092 vq->avail_wrap_counter ^= 1;
2094 vhost_virtqueue_reconnect_log_packed(vq);
2099 async->buffer_idx_packed = async->buffer_idx_packed + vq->size - buffers_err;
2103 virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
2105 __rte_exclusive_locks_required(&vq->access_lock)
2106 __rte_shared_locks_required(&vq->iotlb_lock)
2113 struct vhost_async *async = vq->async;
2120 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2123 if (!virtio_dev_rx_async_packed_batch(dev, vq, &pkts[pkt_idx],
2126 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
2138 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
2142 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
2149 vq_inc_last_avail_packed(vq, num_descs);
2155 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
2164 __func__, pkt_err, vq->index);
2165 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
2169 if (async->pkts_idx >= vq->size)
2170 async->pkts_idx -= vq->size;
2178 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
2179 __rte_shared_locks_required(&vq->access_lock)
2181 struct vhost_async *async = vq->async;
2187 from = async->last_desc_idx_split & (vq->size - 1);
2188 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
2189 to = vq->last_used_idx & (vq->size - 1);
2191 if (to + nr_copy <= vq->size) {
2192 rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
2195 uint16_t size = vq->size - to;
2197 rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
2199 rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
2204 vq->last_used_idx += nr_copy;
2210 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
2212 __rte_shared_locks_required(&vq->access_lock)
2214 struct vhost_async *async = vq->async;
2216 uint16_t used_idx = vq->last_used_idx;
2217 uint16_t head_idx = vq->last_used_idx;
2223 vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
2224 vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
2227 if (used_idx >= vq->size)
2228 used_idx -= vq->size;
2231 if (from >= vq->size)
2248 if (vq->used_wrap_counter) {
2257 vq->desc_packed[vq->last_used_idx].flags = flags;
2259 head_idx = vq->last_used_idx;
2263 vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
2266 if (from == vq->size)
2270 vq->desc_packed[head_idx].flags = head_flags;
2275 vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
2277 __rte_shared_locks_required(&vq->access_lock)
2279 struct vhost_async *async = vq->async;
2288 start_idx = async_get_first_inflight_pkt_idx(vq);
2297 while (vq->async->pkts_cmpl_flag[from] && count--) {
2298 vq->async->pkts_cmpl_flag[from] = false;
2300 if (from >= vq->size)
2301 from -= vq->size;
2309 from = (start_idx + i) % vq->size;
2319 if (likely(vq->enabled && vq->access_ok)) {
2321 write_back_completed_descs_packed(vq, n_buffers);
2322 vhost_vring_call_packed(dev, vq);
2324 write_back_completed_descs_split(vq, n_descs);
2326 (unsigned short __rte_atomic *)&vq->used->idx,
2328 vhost_vring_call_split(dev, vq);
2333 if (async->last_buffer_idx_packed >= vq->size)
2334 async->last_buffer_idx_packed -= vq->size;
2349 struct vhost_virtqueue *vq;
2371 vq = dev->virtqueue[queue_id];
2373 if (rte_rwlock_read_trylock(&vq->access_lock)) {
2380 if (unlikely(!vq->async)) {
2387 n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
2389 vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
2390 vq->stats.inflight_completed += n_pkts_cpl;
2393 rte_rwlock_read_unlock(&vq->access_lock);
2404 struct vhost_virtqueue *vq;
2423 vq = dev->virtqueue[queue_id];
2425 vq_assert_lock(dev, vq);
2427 if (unlikely(!vq->async)) {
2443 n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,
2446 n_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,
2449 vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
2450 vq->stats.inflight_completed += n_pkts_cpl;
2460 struct vhost_virtqueue *vq;
2479 vq = dev->virtqueue[queue_id];
2481 if (rte_rwlock_read_trylock(&vq->access_lock)) {
2487 if (unlikely(!vq->async)) {
2501 n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,
2504 n_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,
2507 vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
2508 vq->stats.inflight_completed += n_pkts_cpl;
2511 rte_rwlock_read_unlock(&vq->access_lock);
2517 virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
2532 rte_rwlock_write_lock(&vq->access_lock);
2534 if (unlikely(!vq->enabled || !vq->async))
2537 vhost_user_iotlb_rd_lock(vq);
2539 if (unlikely(!vq->access_ok)) {
2540 vhost_user_iotlb_rd_unlock(vq);
2541 rte_rwlock_write_unlock(&vq->access_lock);
2543 virtio_dev_vring_translate(dev, vq);
2552 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, pkts, count,
2555 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, pkts, count,
2558 vq->stats.inflight_submitted += nb_tx;
2561 vhost_user_iotlb_rd_unlock(vq);
2564 rte_rwlock_write_unlock(&vq->access_lock);
2889 desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2893 __rte_shared_locks_required(&vq->access_lock)
2894 __rte_shared_locks_required(&vq->iotlb_lock)
2905 struct vhost_async *async = vq->async;
2956 if (async_fill_seg(dev, vq, cur, mbuf_offset,
2964 sync_fill_seg(dev, vq, cur, mbuf_offset,
2997 vq->stats.mbuf_alloc_failed++;
3098 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
3101 __rte_shared_locks_required(&vq->access_lock)
3102 __rte_shared_locks_required(&vq->iotlb_lock)
3112 avail_entries = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
3113 rte_memory_order_acquire) - vq->last_avail_idx;
3117 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
3126 vq->stats.mbuf_alloc_failed += count;
3137 if (unlikely(fill_vec_buf_split(dev, vq,
3138 vq->last_avail_idx + i,
3144 update_shadow_used_ring_split(vq, head_idx, 0);
3167 err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
3181 if (likely(vq->shadow_used_idx)) {
3182 vq->last_avail_idx += vq->shadow_used_idx;
3183 vhost_virtqueue_reconnect_log_split(vq);
3184 do_data_copy_dequeue(vq);
3185 flush_shadow_used_ring_split(dev, vq);
3186 vhost_vring_call_split(dev, vq);
3195 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3197 __rte_shared_locks_required(&vq->access_lock)
3198 __rte_shared_locks_required(&vq->iotlb_lock)
3200 return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
3206 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3208 __rte_shared_locks_required(&vq->access_lock)
3209 __rte_shared_locks_required(&vq->iotlb_lock)
3211 return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
3216 struct vhost_virtqueue *vq,
3221 __rte_shared_locks_required(&vq->iotlb_lock)
3223 bool wrap = vq->avail_wrap_counter;
3224 struct vring_packed_desc *descs = vq->desc_packed;
3232 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
3249 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
3288 struct vhost_virtqueue *vq,
3297 bool wrap = vq->avail_wrap_counter;
3298 struct vring_packed_desc *descs = vq->desc_packed;
3305 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
3362 struct vhost_virtqueue *vq,
3365 __rte_shared_locks_required(&vq->iotlb_lock)
3367 uint16_t avail_idx = vq->last_avail_idx;
3374 if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
3394 vhost_shadow_dequeue_batch_packed_inorder(vq,
3397 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
3399 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
3406 struct vhost_virtqueue *vq,
3412 __rte_shared_locks_required(&vq->access_lock)
3413 __rte_shared_locks_required(&vq->iotlb_lock)
3421 if (unlikely(fill_vec_buf_packed(dev, vq,
3422 vq->last_avail_idx, desc_count,
3443 err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
3458 struct vhost_virtqueue *vq,
3462 __rte_shared_locks_required(&vq->access_lock)
3463 __rte_shared_locks_required(&vq->iotlb_lock)
3469 ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
3474 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
3477 vhost_shadow_dequeue_single_packed(vq, buf_id,
3480 vq_inc_last_avail_packed(vq, desc_count);
3487 get_nb_avail_entries_packed(const struct vhost_virtqueue *__rte_restrict vq,
3490 const struct vring_packed_desc *descs = vq->desc_packed;
3491 bool avail_wrap = vq->avail_wrap_counter;
3492 uint16_t avail_idx = vq->last_avail_idx;
3506 if (unlikely(++avail_idx >= vq->size)) {
3507 avail_idx -= vq->size;
3518 struct vhost_virtqueue *__rte_restrict vq,
3523 __rte_shared_locks_required(&vq->access_lock)
3524 __rte_shared_locks_required(&vq->iotlb_lock)
3528 count = get_nb_avail_entries_packed(vq, count);
3533 vq->stats.mbuf_alloc_failed += count;
3538 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
3541 if (!virtio_dev_tx_batch_packed(dev, vq,
3549 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
3559 if (vq->shadow_used_idx) {
3560 do_data_copy_dequeue(vq);
3562 vhost_flush_dequeue_shadow_packed(dev, vq);
3563 vhost_vring_call_packed(dev, vq);
3572 struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3574 __rte_shared_locks_required(&vq->access_lock)
3575 __rte_shared_locks_required(&vq->iotlb_lock)
3577 return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
3583 struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3585 __rte_shared_locks_required(&vq->access_lock)
3586 __rte_shared_locks_required(&vq->iotlb_lock)
3588 return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
3596 struct vhost_virtqueue *vq;
3618 vq = dev->virtqueue[queue_id];
3620 if (unlikely(rte_rwlock_read_trylock(&vq->access_lock) != 0))
3623 if (unlikely(!vq->enabled))
3626 vhost_user_iotlb_rd_lock(vq);
3628 if (unlikely(!vq->access_ok)) {
3629 vhost_user_iotlb_rd_unlock(vq);
3630 rte_rwlock_read_unlock(&vq->access_lock);
3632 virtio_dev_vring_translate(dev, vq);
3670 nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
3673 nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
3677 nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
3680 nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
3684 vhost_queue_stats_update(dev, vq, pkts, nb_rx);
3687 vhost_user_iotlb_rd_unlock(vq);
3690 rte_rwlock_read_unlock(&vq->access_lock);
3697 async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
3700 __rte_shared_locks_required(&vq->access_lock)
3704 struct async_inflight_info *pkts_info = vq->async->pkts_info;
3708 start_idx = async_get_first_inflight_pkt_idx(vq);
3711 while (vq->async->pkts_cmpl_flag[from] && count--) {
3712 vq->async->pkts_cmpl_flag[from] = false;
3713 from = (from + 1) % vq->size;
3721 from = (start_idx + i) % vq->size;
3731 write_back_completed_descs_packed(vq, nr_cpl_pkts);
3732 vhost_vring_call_packed(dev, vq);
3734 write_back_completed_descs_split(vq, nr_cpl_pkts);
3735 rte_atomic_fetch_add_explicit((unsigned short __rte_atomic *)&vq->used->idx,
3737 vhost_vring_call_split(dev, vq);
3739 vq->async->pkts_inflight_n -= nr_cpl_pkts;
3745 virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
3748 __rte_shared_locks_required(&vq->access_lock)
3749 __rte_shared_locks_required(&vq->iotlb_lock)
3758 struct vhost_async *async = vq->async;
3767 avail_entries = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
3768 rte_memory_order_acquire) - vq->last_avail_idx;
3772 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
3781 vq->stats.mbuf_alloc_failed += count;
3794 if (unlikely(fill_vec_buf_split(dev, vq, vq->last_avail_idx,
3827 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
3828 err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkt, mbuf_pool,
3845 to = async->desc_idx_split & (vq->size - 1);
3850 vq->last_avail_idx++;
3851 vhost_virtqueue_reconnect_log_split(vq);
3857 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
3869 vq->last_avail_idx -= pkt_err;
3870 vhost_virtqueue_reconnect_log_split(vq);
3878 rte_pktmbuf_free(pkts_info[slot_idx & (vq->size - 1)].mbuf);
3884 if (async->pkts_idx >= vq->size)
3885 async->pkts_idx -= vq->size;
3889 nr_done_pkts = async_poll_dequeue_completed(dev, vq, pkts, pkts_size,
3898 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3901 __rte_shared_locks_required(&vq->access_lock)
3902 __rte_shared_locks_required(&vq->iotlb_lock)
3904 return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
3911 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
3914 __rte_shared_locks_required(&vq->access_lock)
3915 __rte_shared_locks_required(&vq->iotlb_lock)
3917 return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
3922 vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
3924 __rte_shared_locks_required(&vq->access_lock)
3926 struct vhost_async *async = vq->async;
3934 if (async->buffer_idx_packed >= vq->size)
3935 async->buffer_idx_packed -= vq->size;
3941 struct vhost_virtqueue *vq,
3946 __rte_shared_locks_required(&vq->access_lock)
3947 __rte_shared_locks_required(&vq->iotlb_lock)
3954 struct vhost_async *async = vq->async;
3958 if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
3973 err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts, mbuf_pool,
3987 vhost_async_shadow_dequeue_single_packed(vq, buf_id, desc_count);
3989 vq_inc_last_avail_packed(vq, desc_count);
3996 struct vhost_virtqueue *vq,
3999 __rte_shared_locks_required(&vq->access_lock)
4000 __rte_shared_locks_required(&vq->iotlb_lock)
4002 uint16_t avail_idx = vq->last_avail_idx;
4004 struct vhost_async *async = vq->async;
4016 if (vhost_async_tx_batch_packed_check(dev, vq, pkts, avail_idx,
4039 desc_vva = vhost_iova_to_vva(dev, vq, desc_addrs[i],
4046 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
4048 vhost_async_shadow_dequeue_packed_batch(vq, ids);
4054 virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
4057 __rte_shared_locks_required(&vq->access_lock)
4058 __rte_shared_locks_required(&vq->iotlb_lock)
4066 struct vhost_async *async = vq->async;
4075 vq->stats.mbuf_alloc_failed += count;
4082 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
4084 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
4086 if (!virtio_dev_tx_async_packed_batch(dev, vq, &pkts_prealloc[pkt_idx],
4089 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
4099 if (unlikely(virtio_dev_tx_async_single_packed(dev, vq, mbuf_pool, pkt,
4104 slot_idx = vq->size - 1;
4115 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
4133 async->buffer_idx_packed += vq->size - pkt_err;
4140 slot_idx = vq->size - 1;
4146 if (vq->last_avail_idx >= descs_err) {
4147 vq->last_avail_idx -= descs_err;
4149 vq->last_avail_idx += vq->size - descs_err;
4150 vq->avail_wrap_counter ^= 1;
4152 vhost_virtqueue_reconnect_log_packed(vq);
4156 if (async->pkts_idx >= vq->size)
4157 async->pkts_idx -= vq->size;
4160 nr_done_pkts = async_poll_dequeue_completed(dev, vq, pkts, count,
4168 virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
4171 __rte_shared_locks_required(&vq->access_lock)
4172 __rte_shared_locks_required(&vq->iotlb_lock)
4174 return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
4180 virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
4183 __rte_shared_locks_required(&vq->access_lock)
4184 __rte_shared_locks_required(&vq->iotlb_lock)
4186 return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
4196 struct vhost_virtqueue *vq;
4231 vq = dev->virtqueue[queue_id];
4233 if (unlikely(rte_rwlock_read_trylock(&vq->access_lock) != 0))
4236 if (unlikely(vq->enabled == 0))
4239 if (unlikely(!vq->async)) {
4245 vhost_user_iotlb_rd_lock(vq);
4247 if (unlikely(vq->access_ok == 0)) {
4248 vhost_user_iotlb_rd_unlock(vq);
4249 rte_rwlock_read_unlock(&vq->access_lock);
4251 virtio_dev_vring_translate(dev, vq);
4288 nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
4291 nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
4295 nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
4298 nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
4302 *nr_inflight = vq->async->pkts_inflight_n;
4303 vhost_queue_stats_update(dev, vq, pkts, nb_rx);
4306 vhost_user_iotlb_rd_unlock(vq);
4309 rte_rwlock_read_unlock(&vq->access_lock);
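
Note: the entries above repeatedly show the same guard sequence around vq access (for example around lines 1706-1717 and 3620-3632): take vq->access_lock for reading, check vq->enabled, take the IOTLB read lock, and if vq->access_ok is not set, drop both locks and call virtio_dev_vring_translate() (which itself takes the write lock, see lines 1691-1696) before touching the ring. The sketch below is only an illustration of that pattern, not a copy of any listed function; example_rx_guarded is a hypothetical name, it assumes the usual vhost-internal declarations (struct virtio_net, struct vhost_virtqueue, vhost_user_iotlb_rd_lock/unlock, virtio_dev_vring_translate) and rte_branch_prediction.h for unlikely(), and it simply returns 0 where the real code may retry after translation.

    /* Hypothetical wrapper showing the vq locking pattern seen in the listing.
     * Assumes vhost.h internals are available; not a verbatim DPDK function. */
    static uint16_t
    example_rx_guarded(struct virtio_net *dev, struct vhost_virtqueue *vq,
            struct rte_mbuf **pkts, uint32_t count)
    {
        uint16_t nb_tx = 0;

        rte_rwlock_read_lock(&vq->access_lock);

        if (unlikely(!vq->enabled))
            goto out_access_unlock;

        vhost_user_iotlb_rd_lock(vq);

        if (unlikely(!vq->access_ok)) {
            /* Release both locks before remapping: the translate helper
             * takes the access_lock for writing (lines 1691-1696). */
            vhost_user_iotlb_rd_unlock(vq);
            rte_rwlock_read_unlock(&vq->access_lock);
            virtio_dev_vring_translate(dev, vq);
            return 0; /* in this sketch the caller retries on the next burst */
        }

        /* ... the actual enqueue/dequeue work (e.g. a split- or packed-ring
         * path from the listing) would run here and set nb_tx ... */
        (void)pkts;
        (void)count;

        vhost_user_iotlb_rd_unlock(vq);

    out_access_unlock:
        rte_rwlock_read_unlock(&vq->access_lock);

        return nb_tx;
    }

The ordering matters: access_ok is only re-established under the write lock, so the read-side paths above must fully back out before calling the translate helper rather than upgrading in place.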