xref: /dpdk/drivers/net/virtio/virtio_rxtx_packed.h (revision 787091b9d5f614ae35ab8cfb7718f6ccb64f1f3c)
10eaf7fc2SJoyce Kong /* SPDX-License-Identifier: BSD-3-Clause
20eaf7fc2SJoyce Kong  * Copyright(c) 2010-2020 Intel Corporation
30eaf7fc2SJoyce Kong  */
40eaf7fc2SJoyce Kong 
50eaf7fc2SJoyce Kong #ifndef _VIRTIO_RXTX_PACKED_H_
60eaf7fc2SJoyce Kong #define _VIRTIO_RXTX_PACKED_H_
70eaf7fc2SJoyce Kong 
80eaf7fc2SJoyce Kong #include <stdint.h>
90eaf7fc2SJoyce Kong #include <stdio.h>
100eaf7fc2SJoyce Kong #include <stdlib.h>
110eaf7fc2SJoyce Kong #include <string.h>
120eaf7fc2SJoyce Kong #include <errno.h>
130eaf7fc2SJoyce Kong 
140eaf7fc2SJoyce Kong #include <rte_net.h>
150eaf7fc2SJoyce Kong 
160eaf7fc2SJoyce Kong #include "virtio_logs.h"
170eaf7fc2SJoyce Kong #include "virtio_ethdev.h"
18b5ba7ee4SMaxime Coquelin #include "virtio.h"
190eaf7fc2SJoyce Kong #include "virtqueue.h"
200eaf7fc2SJoyce Kong 
/* Number of bits per byte, used to turn offsetof() byte offsets into
 * bit offsets for shifting within a 64-bit lane.
 */
#define BYTE_SIZE 8

#ifdef CC_AVX512_SUPPORT
/* flag bits offset in packed ring desc higher 64bits
 * (AVX512 path loads len..flags as one 64-bit lane, so the shift is
 * relative to the 'len' field).
 */
#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
#elif defined(RTE_ARCH_ARM)
/* flag bits offset in packed ring desc from ID */
#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
	offsetof(struct vring_packed_desc, id)) * BYTE_SIZE)
/* flag bits offset in packed ring desc from len (NEON path variant) */
#define FLAGS_LEN_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
#endif

/* AVAIL/USED flag bits of a packed descriptor, shifted into position
 * within the 64-bit lane selected above.
 */
#define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
	FLAGS_BITS_OFFSET)

/* reference count offset in mbuf rearm data */
#define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)

#ifdef CC_AVX512_SUPPORT
/* segment number offset in mbuf rearm data */
#define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
/* default rearm data: refcnt = 1 and nb_segs = 1, written in one
 * 64-bit store over the mbuf's rearm_data area.
 */
#define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
	1ULL << REFCNT_BITS_OFFSET)
#endif

/* id bits offset in packed ring desc higher 64bits */
#define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)

/* net hdr short size mask
 * NOTE(review): 0x3F presumably covers the flags/gso_type byte range of
 * a short virtio net header — confirm against the batch Rx users.
 */
#define NET_HDR_MASK 0x3F

#ifdef RTE_ARCH_ARM
/* The cache line size on different Arm platforms are different, so
 * put a four batch size here to match with the minimum cache line
 * size and accommodate NEON register size.
 */
#define PACKED_BATCH_SIZE 4
#else
/* One batch covers a full cache line of descriptors. */
#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
	sizeof(struct vring_packed_desc))
#endif
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)

/* Per-compiler unroll pragma wrapper; falls back to a plain loop when
 * no unroll pragma is available.
 */
#ifdef VIRTIO_GCC_UNROLL_PRAGMA
#define virtio_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VIRTIO_CLANG_UNROLL_PRAGMA
#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VIRTIO_ICC_UNROLL_PRAGMA
#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef virtio_for_each_try_unroll
#define virtio_for_each_try_unroll(iter, val, size) \
	for (iter = val; iter < size; iter++)
#endif
890eaf7fc2SJoyce Kong 
900eaf7fc2SJoyce Kong static inline void
virtio_update_batch_stats(struct virtnet_stats * stats,uint16_t pkt_len1,uint16_t pkt_len2,uint16_t pkt_len3,uint16_t pkt_len4)910eaf7fc2SJoyce Kong virtio_update_batch_stats(struct virtnet_stats *stats,
920eaf7fc2SJoyce Kong 			  uint16_t pkt_len1,
930eaf7fc2SJoyce Kong 			  uint16_t pkt_len2,
940eaf7fc2SJoyce Kong 			  uint16_t pkt_len3,
950eaf7fc2SJoyce Kong 			  uint16_t pkt_len4)
960eaf7fc2SJoyce Kong {
970eaf7fc2SJoyce Kong 	stats->bytes += pkt_len1;
980eaf7fc2SJoyce Kong 	stats->bytes += pkt_len2;
990eaf7fc2SJoyce Kong 	stats->bytes += pkt_len3;
1000eaf7fc2SJoyce Kong 	stats->bytes += pkt_len4;
1010eaf7fc2SJoyce Kong }
1020eaf7fc2SJoyce Kong 
1030eaf7fc2SJoyce Kong static inline int
virtqueue_enqueue_single_packed_vec(struct virtnet_tx * txvq,struct rte_mbuf * txm)1040eaf7fc2SJoyce Kong virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
1050eaf7fc2SJoyce Kong 				    struct rte_mbuf *txm)
1060eaf7fc2SJoyce Kong {
1073169550fSMaxime Coquelin 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1080eaf7fc2SJoyce Kong 	struct virtio_hw *hw = vq->hw;
1090eaf7fc2SJoyce Kong 	uint16_t hdr_size = hw->vtnet_hdr_size;
1100eaf7fc2SJoyce Kong 	uint16_t slots, can_push = 0, use_indirect = 0;
1110eaf7fc2SJoyce Kong 	int16_t need;
1120eaf7fc2SJoyce Kong 
1130eaf7fc2SJoyce Kong 	/* optimize ring usage */
114b4f9a45aSMaxime Coquelin 	if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
115b4f9a45aSMaxime Coquelin 	     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1160eaf7fc2SJoyce Kong 	     rte_mbuf_refcnt_read(txm) == 1 && RTE_MBUF_DIRECT(txm) &&
1170eaf7fc2SJoyce Kong 	     txm->nb_segs == 1 && rte_pktmbuf_headroom(txm) >= hdr_size)
1180eaf7fc2SJoyce Kong 		can_push = 1;
119b4f9a45aSMaxime Coquelin 	else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1200eaf7fc2SJoyce Kong 		 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1210eaf7fc2SJoyce Kong 		use_indirect = 1;
1220eaf7fc2SJoyce Kong 
1230eaf7fc2SJoyce Kong 	/* How many main ring entries are needed to this Tx?
1240eaf7fc2SJoyce Kong 	 * indirect   => 1
1250eaf7fc2SJoyce Kong 	 * any_layout => number of segments
1260eaf7fc2SJoyce Kong 	 * default    => number of segments + 1
1270eaf7fc2SJoyce Kong 	 */
1280eaf7fc2SJoyce Kong 	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
1290eaf7fc2SJoyce Kong 		   RTE_MBUF_DIRECT(txm) &&
1300eaf7fc2SJoyce Kong 		   txm->nb_segs == 1 &&
1310eaf7fc2SJoyce Kong 		   rte_pktmbuf_headroom(txm) >= hdr_size;
1320eaf7fc2SJoyce Kong 
133*787091b9SMarvin Liu 	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1340eaf7fc2SJoyce Kong 	need = slots - vq->vq_free_cnt;
1350eaf7fc2SJoyce Kong 
1360eaf7fc2SJoyce Kong 	/* Positive value indicates it need free vring descriptors */
1370eaf7fc2SJoyce Kong 	if (unlikely(need > 0)) {
1380eaf7fc2SJoyce Kong 		virtio_xmit_cleanup_inorder_packed(vq, need);
1390eaf7fc2SJoyce Kong 		need = slots - vq->vq_free_cnt;
1400eaf7fc2SJoyce Kong 		if (unlikely(need > 0)) {
1410eaf7fc2SJoyce Kong 			PMD_TX_LOG(ERR,
1420eaf7fc2SJoyce Kong 				   "No free tx descriptors to transmit");
1430eaf7fc2SJoyce Kong 			return -1;
1440eaf7fc2SJoyce Kong 		}
1450eaf7fc2SJoyce Kong 	}
1460eaf7fc2SJoyce Kong 
1470eaf7fc2SJoyce Kong 	/* Enqueue Packet buffers */
1480eaf7fc2SJoyce Kong 	virtqueue_enqueue_xmit_packed(txvq, txm, slots, use_indirect,
1490eaf7fc2SJoyce Kong 				can_push, 1);
1500eaf7fc2SJoyce Kong 
1510eaf7fc2SJoyce Kong 	txvq->stats.bytes += txm->pkt_len;
1520eaf7fc2SJoyce Kong 	return 0;
1530eaf7fc2SJoyce Kong }
1540eaf7fc2SJoyce Kong 
1550eaf7fc2SJoyce Kong /* Optionally fill offload information in structure */
1560eaf7fc2SJoyce Kong static inline int
virtio_vec_rx_offload(struct rte_mbuf * m,struct virtio_net_hdr * hdr)1570eaf7fc2SJoyce Kong virtio_vec_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1580eaf7fc2SJoyce Kong {
1590eaf7fc2SJoyce Kong 	struct rte_net_hdr_lens hdr_lens;
1600eaf7fc2SJoyce Kong 	uint32_t hdrlen, ptype;
1610eaf7fc2SJoyce Kong 	int l4_supported = 0;
1620eaf7fc2SJoyce Kong 
1630eaf7fc2SJoyce Kong 	/* nothing to do */
1640eaf7fc2SJoyce Kong 	if (hdr->flags == 0)
1650eaf7fc2SJoyce Kong 		return 0;
1660eaf7fc2SJoyce Kong 
1670eaf7fc2SJoyce Kong 	/* GSO not support in vec path, skip check */
168daa02b5cSOlivier Matz 	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
1690eaf7fc2SJoyce Kong 
1700eaf7fc2SJoyce Kong 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1710eaf7fc2SJoyce Kong 	m->packet_type = ptype;
1720eaf7fc2SJoyce Kong 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1730eaf7fc2SJoyce Kong 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1740eaf7fc2SJoyce Kong 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1750eaf7fc2SJoyce Kong 		l4_supported = 1;
1760eaf7fc2SJoyce Kong 
1770eaf7fc2SJoyce Kong 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1780eaf7fc2SJoyce Kong 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1790eaf7fc2SJoyce Kong 		if (hdr->csum_start <= hdrlen && l4_supported) {
180daa02b5cSOlivier Matz 			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
1810eaf7fc2SJoyce Kong 		} else {
1820eaf7fc2SJoyce Kong 			/* Unknown proto or tunnel, do sw cksum. We can assume
1830eaf7fc2SJoyce Kong 			 * the cksum field is in the first segment since the
1840eaf7fc2SJoyce Kong 			 * buffers we provided to the host are large enough.
1850eaf7fc2SJoyce Kong 			 * In case of SCTP, this will be wrong since it's a CRC
1860eaf7fc2SJoyce Kong 			 * but there's nothing we can do.
1870eaf7fc2SJoyce Kong 			 */
1880eaf7fc2SJoyce Kong 			uint16_t csum = 0, off;
1890eaf7fc2SJoyce Kong 
1900eaf7fc2SJoyce Kong 			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
1910eaf7fc2SJoyce Kong 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1920eaf7fc2SJoyce Kong 				&csum) < 0)
1930eaf7fc2SJoyce Kong 				return -1;
1940eaf7fc2SJoyce Kong 			if (likely(csum != 0xffff))
1950eaf7fc2SJoyce Kong 				csum = ~csum;
1960eaf7fc2SJoyce Kong 			off = hdr->csum_offset + hdr->csum_start;
1970eaf7fc2SJoyce Kong 			if (rte_pktmbuf_data_len(m) >= off + 1)
1980eaf7fc2SJoyce Kong 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
1990eaf7fc2SJoyce Kong 					off) = csum;
2000eaf7fc2SJoyce Kong 		}
2010eaf7fc2SJoyce Kong 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
202daa02b5cSOlivier Matz 		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
2030eaf7fc2SJoyce Kong 	}
2040eaf7fc2SJoyce Kong 
2050eaf7fc2SJoyce Kong 	return 0;
2060eaf7fc2SJoyce Kong }
2070eaf7fc2SJoyce Kong 
/*
 * Dequeue a single used descriptor from a packed virtqueue (scalar
 * tail of the vectorized Rx path) and hand its mbuf back to the caller.
 *
 * On success: stores the mbuf in *rx_pkts, updates the queue's byte
 * stats, advances the used-consumer index (toggling the wrap counter on
 * ring wrap), and returns 0.
 *
 * NOTE(review): the function returns -1 through a uint16_t return type,
 * which the caller receives as 0xFFFF — presumably tested as "non-zero";
 * confirm against the callers before changing.
 */
static inline uint16_t
virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
				    struct rte_mbuf **rx_pkts)
{
	uint16_t used_idx, id;
	uint32_t len;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	struct vring_packed_desc *desc;
	struct rte_mbuf *cookie;

	desc = vq->vq_packed.ring.desc;
	used_idx = vq->vq_used_cons_idx;
	/* Nothing available: the next descriptor is not yet used. */
	if (!desc_is_used(&desc[used_idx], vq))
		return -1;

	len = desc[used_idx].len;
	id = desc[used_idx].id;
	/* The mbuf that backed this descriptor was stashed at refill time. */
	cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
	if (unlikely(cookie == NULL)) {
		PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
		return -1;
	}
	rte_prefetch0(cookie);
	rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));

	/* Reset mbuf metadata; len from the descriptor includes the net
	 * header, which lives in the headroom and is not packet data.
	 */
	cookie->data_off = RTE_PKTMBUF_HEADROOM;
	cookie->ol_flags = 0;
	cookie->pkt_len = (uint32_t)(len - hdr_size);
	cookie->data_len = (uint32_t)(len - hdr_size);

	/* The net header was placed immediately before the packet data. */
	hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
					RTE_PKTMBUF_HEADROOM - hdr_size);
	if (hw->has_rx_offload)
		virtio_vec_rx_offload(cookie, hdr);

	*rx_pkts = cookie;

	rxvq->stats.bytes += cookie->pkt_len;

	/* Consume the descriptor; toggle the wrap counter on ring wrap. */
	vq->vq_free_cnt++;
	vq->vq_used_cons_idx++;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	return 0;
}
2600eaf7fc2SJoyce Kong 
/*
 * Refill a packed virtqueue with 'num' receive mbufs, writing the
 * descriptors in batches of up to PACKED_BATCH_SIZE.
 *
 * Visibility ordering: every descriptor's flags are stored as they are
 * filled EXCEPT the head descriptor's (the `total_num || i` guard skips
 * iteration 0 of batch 0). The head flags are stored last, after all
 * other descriptors are complete, so the device never observes a
 * partially-filled chain.
 */
static inline void
virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
			      struct rte_mbuf **cookie,
			      uint16_t num)
{
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	uint16_t flags = vq->vq_packed.cached_flags;
	struct virtio_hw *hw = vq->hw;
	struct vq_desc_extra *dxp;
	uint16_t idx, i;
	uint16_t batch_num, total_num = 0;
	/* Remember the head slot and its flags; stored last (see above). */
	uint16_t head_idx = vq->vq_avail_idx;
	uint16_t head_flag = vq->vq_packed.cached_flags;
	uint64_t addr;

	do {
		idx = vq->vq_avail_idx;

		/* Clamp the batch to the ring end and to the request size. */
		batch_num = PACKED_BATCH_SIZE;
		if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
			batch_num = vq->vq_nentries - idx;
		if (unlikely((total_num + batch_num) > num))
			batch_num = num - total_num;

		virtio_for_each_try_unroll(i, 0, batch_num) {
			dxp = &vq->vq_descx[idx + i];
			/* Stash the mbuf so dequeue can recover it by id. */
			dxp->cookie = (void *)cookie[total_num + i];

			/* Point the descriptor at the net-header position,
			 * just before the packet data in the headroom.
			 */
			addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
			start_dp[idx + i].addr = addr;
			start_dp[idx + i].len = cookie[total_num + i]->buf_len
				- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
			/* Skip the head descriptor (total_num == 0 && i == 0);
			 * its flags are published last.
			 */
			if (total_num || i) {
				virtqueue_store_flags_packed(&start_dp[idx + i],
						flags, hw->weak_barriers);
			}
		}

		vq->vq_avail_idx += batch_num;
		if (vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			/* Ring wrapped: descriptors written from here on use
			 * the toggled avail/used flag polarity.
			 */
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
			flags = vq->vq_packed.cached_flags;
		}
		total_num += batch_num;
	} while (total_num < num);

	/* Publish the head descriptor last to make the whole refill visible. */
	virtqueue_store_flags_packed(&start_dp[head_idx], head_flag,
				hw->weak_barriers);
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
}
3150eaf7fc2SJoyce Kong 
3160eaf7fc2SJoyce Kong #endif /* _VIRTIO_RXTX_PACKED_H_ */
317