/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_net.h>

#include "virtio.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"
#include "virtio_cvq.h"

struct rte_mbuf;

#define DEFAULT_TX_FREE_THRESH 32
#define DEFAULT_RX_FREE_THRESH 32

#define VIRTIO_MBUF_BURST_SZ 64
/*
 * Per virtio_ring.h in Linux.
 *     For virtio_pci on SMP, we don't need to order with respect to MMIO
 *     accesses through relaxed memory I/O windows, so thread_fence is
 *     sufficient.
 *
 *     For using virtio to talk to real devices (e.g. vDPA) we do need real
 *     barriers.
 */
static inline void
virtio_mb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_atomic_thread_fence(rte_memory_order_seq_cst);
	else
		rte_mb();
}

static inline void
virtio_rmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_atomic_thread_fence(rte_memory_order_acquire);
	else
		rte_io_rmb();
}

static inline void
virtio_wmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_atomic_thread_fence(rte_memory_order_release);
	else
		rte_io_wmb();
}

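/*
 * Illustrative usage (a sketch, not driver API): a producer fills the
 * descriptor first, then calls virtio_wmb() so its contents are visible
 * before the index that publishes them, as vq_update_avail_idx() does
 * further down in this header:
 *
 *	desc->addr = buf_iova;
 *	desc->len  = buf_len;
 *	virtio_wmb(vq->hw->weak_barriers);
 *	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
 */
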
static inline uint16_t
virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
			      uint8_t weak_barriers)
{
	uint16_t flags;

	if (weak_barriers) {
/* x86 prefers rte_io_rmb over rte_atomic_load_explicit as it reports
 * better perf (~1.5%), which comes from the branch saved by the compiler.
 * The if and else branches are identical on all platforms except Arm.
 */
#ifdef RTE_ARCH_ARM
		flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
		flags = dp->flags;
		rte_io_rmb();
#endif
	} else {
		flags = dp->flags;
		rte_io_rmb();
	}

	return flags;
}

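/*
 * Example usage (sketch): poll a packed descriptor with the ordering
 * handled for you, then decode the AVAIL/USED bits the same way
 * desc_is_used() does later in this header:
 *
 *	uint16_t f = virtqueue_fetch_flags_packed(&vq->vq_packed.ring.desc[i],
 *						  vq->hw->weak_barriers);
 *	uint16_t used = !!(f & VRING_PACKED_DESC_F_USED);
 */
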
static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
			      uint16_t flags, uint8_t weak_barriers)
{
	if (weak_barriers) {
/* x86 prefers rte_io_wmb over rte_atomic_store_explicit as it reports
 * better perf (~1.5%), which comes from the branch saved by the compiler.
 * The if and else branches are identical on all platforms except Arm.
 */
#ifdef RTE_ARCH_ARM
		rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
		rte_io_wmb();
		dp->flags = flags;
#endif
	} else {
		rte_io_wmb();
		dp->flags = flags;
	}
}

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while(0)
#endif

#define VIRTQUEUE_MAX_NAME_SZ 32

#ifdef RTE_ARCH_32
#define VIRTIO_MBUF_ADDR_MASK(vq) ((vq)->mbuf_addr_mask)
#else
#define VIRTIO_MBUF_ADDR_MASK(vq) UINT64_MAX
#endif

/**
 * Return the IOVA (or virtual address in case of virtio-user) of mbuf
 * data buffer.
 *
 * The address is first cast to the word size (sizeof(uintptr_t))
 * before being cast to uint64_t. It is then masked with the expected
 * address length (64 bits for virtio-pci, word size for virtio-user).
 *
 * This makes it work with different combinations of word size (64 bit
 * and 32 bit) and virtio device (virtio-pci and virtio-user).
 */
#define VIRTIO_MBUF_ADDR(mb, vq) \
	((*(uint64_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)) & \
		VIRTIO_MBUF_ADDR_MASK(vq))

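/*
 * Worked example (a sketch of the intent): on a 32-bit virtio-user
 * setup, mbuf_addr_mask holds the word-sized mask, so any stale upper
 * bits picked up by the 64-bit read at mbuf_addr_offset are dropped;
 * on a 64-bit virtio-pci setup the mask is UINT64_MAX and the IOVA
 * passes through unchanged.
 */
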
/**
 * Return the physical address (or virtual address in case of
 * virtio-user) of mbuf data buffer, taking care of mbuf data offset
 */
#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)

#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2

enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
/**
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VIRTIO_NET_OK     0
#define VIRTIO_NET_ERR    1

struct vq_desc_extra {
	void *cookie;
	uint16_t ndescs;
	uint16_t next;
};

#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)

struct virtqueue {
	struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
	union {
		struct {
			/**< vring keeping desc, used and avail */
			struct vring ring;
		} vq_split;

		struct {
			/**< vring keeping descs and events */
			struct vring_packed ring;
			bool used_wrap_counter;
			uint16_t cached_flags; /**< cached flags for descs */
			uint16_t event_flags_shadow;
		} vq_packed;
	};

	uint16_t vq_used_cons_idx; /**< last consumed descriptor */
	uint16_t vq_nentries;  /**< vring desc numbers */
	uint16_t vq_free_cnt;  /**< num of desc available */
	uint16_t vq_avail_idx; /**< sync until needed */
	uint16_t vq_free_thresh; /**< free threshold */

	/**
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t  vq_desc_head_idx;
	uint16_t  vq_desc_tail_idx;
	uint16_t  vq_queue_index;   /**< PCI queue index */

	void *vq_ring_virt_mem;  /**< linear address of vring */
	unsigned int vq_ring_size;
	uint16_t mbuf_addr_offset;
	uint64_t mbuf_addr_mask;

	union {
		struct virtnet_rx rxq;
		struct virtnet_tx txq;
		struct virtnet_ctl cq;
	};

	const struct rte_memzone *mz; /**< mem zone to populate ring. */
	rte_iova_t vq_ring_mem; /**< physical address of vring,
	                         * or virtual address for virtio_user. */

	uint16_t  *notify_addr;
	struct vq_desc_extra vq_descx[];
};

/* If multiqueue is provided by host, then we support it. */
#define VIRTIO_NET_CTRL_MQ   4

#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0
#define VIRTIO_NET_CTRL_MQ_RSS_CONFIG          1

#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000

/**
 * This is the first element of the scatter-gather list.  If you don't
 * specify GSO or CSUM features, you can simply ignore the header.
 */
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1    /**< Use csum_start,csum_offset*/
#define VIRTIO_NET_HDR_F_DATA_VALID 2    /**< Checksum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE     0    /**< Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4    1    /**< GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP      3    /**< GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6    4    /**< GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN      0x80 /**< TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;     /**< Ethernet + IP + tcp/udp hdrs */
	uint16_t gso_size;    /**< Bytes to append to hdr_len per frame */
	uint16_t csum_start;  /**< Position to start checksumming from */
	uint16_t csum_offset; /**< Offset after that to place checksum */
};

/**
 * This is the version of the header to use when the MRG_RXBUF
 * feature has been negotiated.
 */
struct virtio_net_hdr_mrg_rxbuf {
	struct   virtio_net_hdr hdr;
	uint16_t num_buffers; /**< Number of merged rx buffers */
};

/* Region reserved to allow for transmit header and indirect ring */
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
	union __rte_aligned(16) {
		struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
		struct vring_packed_desc
			tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
	};
};

static inline int
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
	uint16_t used, avail, flags;

	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
	used = !!(flags & VRING_PACKED_DESC_F_USED);
	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

	return avail == used && used == vq->vq_packed.used_wrap_counter;
}

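/*
 * Example (sketch): the driver starts with used_wrap_counter set to 1,
 * so a descriptor counts as used once the device has set both its
 * AVAIL and USED flag bits; after the device wraps around the ring,
 * the counter flips and "both bits clear" becomes the used state.
 */
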
static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
	int i;
	for (i = 0; i < n - 1; i++) {
		vq->vq_packed.ring.desc[i].id = i;
		vq->vq_descx[i].next = i + 1;
	}
	vq->vq_packed.ring.desc[i].id = i;
	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}

/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n - 1; i++)
		dp[i].next = (uint16_t)(i + 1);
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}

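/*
 * Example (sketch): for n == 4 the resulting free chain is
 * 0 -> 1 -> 2 -> 3 -> VQ_RING_DESC_CHAIN_END, matching the free-list
 * convention that vq_desc_head_idx and vq_desc_tail_idx walk.
 */
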
static inline void
vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
{
	int i;
	for (i = 0; i < n; i++) {
		dp[i].id = (uint16_t)i;
		dp[i].flags = VRING_DESC_F_WRITE;
	}
}

/**
 * Tell the backend not to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
	if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
		vq->vq_packed.ring.driver->desc_event_flags =
			vq->vq_packed.event_flags_shadow;
	}
}

/**
 * Tell the backend not to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_disable_intr_split(struct virtqueue *vq)
{
	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

/**
 * Tell the backend not to interrupt us.
 */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
	if (virtio_with_packed_queue(vq->hw))
		virtqueue_disable_intr_packed(vq);
	else
		virtqueue_disable_intr_split(vq);
}

/**
 * Tell the backend to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
	if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
		vq->vq_packed.ring.driver->desc_event_flags =
			vq->vq_packed.event_flags_shadow;
	}
}

/**
 * Tell the backend to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_enable_intr_split(struct virtqueue *vq)
{
	vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}

/**
 * Tell the backend to interrupt us.
 */
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
	if (virtio_with_packed_queue(vq->hw))
		virtqueue_enable_intr_packed(vq);
	else
		virtqueue_enable_intr_split(vq);
}

/**
 *  Get all mbufs to be freed.
 */
struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);

/* Flush the elements in the used ring. */
void virtqueue_rxvq_flush(struct virtqueue *vq);

int virtqueue_rxvq_reset_packed(struct virtqueue *vq);

int virtqueue_txvq_reset_packed(struct virtqueue *vq);

void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);

struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
		uint16_t num, int type, int node, const char *name);

void virtqueue_free(struct virtqueue *vq);

static inline int
virtqueue_full(const struct virtqueue *vq)
{
	return vq->vq_free_cnt == 0;
}

static inline int
virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx)
{
	if (vq_idx == hw->max_queue_pairs * 2)
		return VTNET_CQ;
	else if (vq_idx % 2 == 0)
		return VTNET_RQ;
	else
		return VTNET_TQ;
}

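/*
 * Example (sketch): with hw->max_queue_pairs == 2 the vq indexes map as
 * 0 -> VTNET_RQ, 1 -> VTNET_TQ, 2 -> VTNET_RQ, 3 -> VTNET_TQ and
 * 4 -> VTNET_CQ (the control queue always follows the last pair).
 */
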
/* virtqueue_nused has a load-acquire or rte_io_rmb inside */
static inline uint16_t
virtqueue_nused(const struct virtqueue *vq)
{
	uint16_t idx;

	if (vq->hw->weak_barriers) {
	/**
	 * x86 prefers rte_smp_rmb over rte_atomic_load_explicit as it
	 * reports slightly better perf, which comes from the branch
	 * saved by the compiler.
	 * The if and else branches are identical with the smp and io
	 * barriers both defined as compiler barriers on x86.
	 */
#ifdef RTE_ARCH_X86_64
		idx = vq->vq_split.ring.used->idx;
		rte_smp_rmb();
#else
		idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
				rte_memory_order_acquire);
#endif
	} else {
		idx = vq->vq_split.ring.used->idx;
		rte_io_rmb();
	}
	return idx - vq->vq_used_cons_idx;
}

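/*
 * Worked example (sketch): the uint16_t subtraction is wrap-safe, e.g.
 * used->idx == 3 with vq_used_cons_idx == 65534 yields
 * (uint16_t)(3 - 65534) == 5 pending used entries.
 */
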
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
			  uint16_t num);

static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
	if (vq->hw->weak_barriers) {
	/* x86 prefers rte_smp_wmb over rte_atomic_store_explicit as it
	 * reports slightly better perf, which comes from the branch
	 * saved by the compiler.
	 * The if and else branches are identical with the smp and io
	 * barriers both defined as compiler barriers on x86.
	 */
#ifdef RTE_ARCH_X86_64
		rte_smp_wmb();
		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
		rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
				 vq->vq_avail_idx, rte_memory_order_release);
#endif
	} else {
		rte_io_wmb();
		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
	}
}

static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;
	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
		vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
	vq->vq_avail_idx++;
}

static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
	/*
	 * Ensure updated avail->idx is visible to vhost before reading
	 * the used->flags.
	 */
	virtio_mb(vq->hw->weak_barriers);
	return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static inline int
virtqueue_kick_prepare_packed(struct virtqueue *vq)
{
	uint16_t flags;

	/*
	 * Ensure updated data is visible to vhost before reading the flags.
	 */
	virtio_mb(vq->hw->weak_barriers);
	flags = vq->vq_packed.ring.device->desc_event_flags;

	return flags != RING_EVENT_FLAGS_DISABLE;
}

/*
 * virtqueue_kick_prepare*() or virtio_wmb() should be called
 * before this function to be sure that all the data is visible to vhost.
 */
static inline void
virtqueue_notify(struct virtqueue *vq)
{
	VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
}

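/*
 * Typical Tx-path sequence (a sketch, assuming a split queue): publish
 * the descriptors, then only kick if the host has not suppressed
 * notifications:
 *
 *	vq_update_avail_ring(vq, head_idx);
 *	vq_update_avail_idx(vq);
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 */
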
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
				   rte_memory_order_relaxed); \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	if (virtio_with_packed_queue((vq)->hw)) { \
		PMD_INIT_LOG(DEBUG, \
		"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
		" cached_flags=0x%x; used_wrap_counter=%d", \
		(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
		(vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
		(vq)->vq_packed.used_wrap_counter); \
		break; \
	} \
	PMD_INIT_LOG(DEBUG, \
	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
	  " avail.flags=0x%x; used.flags=0x%x", \
	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
	  (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
	  rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
	  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif

/* Avoid the write operation when it is unnecessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	typeof(var) *const var_ = &(var);	\
	typeof(val)  const val_ = (val);	\
	if (*var_ != val_)			\
		*var_ = val_;			\
} while (0)

#define virtqueue_clear_net_hdr(hdr) do {		\
	typeof(hdr) hdr_ = (hdr);			\
	ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);		\
	ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);	\
} while (0)

static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
{
	uint64_t csum_l4 = cookie->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	uint16_t o_l23_len = (cookie->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
			     cookie->outer_l2_len + cookie->outer_l3_len : 0;

	if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
		csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;

	switch (csum_l4) {
	case RTE_MBUF_F_TX_UDP_CKSUM:
		hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
		hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		break;

	case RTE_MBUF_F_TX_TCP_CKSUM:
		hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
		hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		break;

	default:
		ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
		break;
	}

	/* TCP Segmentation Offload */
	if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		hdr->gso_type = (cookie->ol_flags & RTE_MBUF_F_TX_IPV6) ?
			VIRTIO_NET_HDR_GSO_TCPV6 :
			VIRTIO_NET_HDR_GSO_TCPV4;
		hdr->gso_size = cookie->tso_segsz;
		hdr->hdr_len = o_l23_len + cookie->l2_len + cookie->l3_len +
			       cookie->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
	}
}

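/*
 * Worked example (sketch): a non-tunneled TCP mbuf with l2_len == 14
 * and l3_len == 20 requesting RTE_MBUF_F_TX_TCP_CKSUM gets
 * csum_start = 34 and csum_offset = offsetof(struct rte_tcp_hdr, cksum)
 * == 16, i.e. the host writes the final checksum at byte 34 + 16 = 50
 * of the frame.
 */
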
static inline void
virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			      uint16_t needed, int use_indirect, int can_push,
			      int in_order)
{
	struct virtio_tx_region *txr = txvq->hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_packed_desc *start_dp, *head_dp;
	uint16_t idx, id, head_idx, head_flags;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	uint16_t prev;
	bool prepend_header = false;
	uint16_t seg_num = cookie->nb_segs;

	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;

	dxp = &vq->vq_descx[id];
	dxp->ndescs = needed;
	dxp->cookie = cookie;

	head_idx = vq->vq_avail_idx;
	idx = head_idx;
	prev = head_idx;
	start_dp = vq->vq_packed.ring.desc;

	head_dp = &vq->vq_packed.ring.desc[idx];
	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
	head_flags |= vq->vq_packed.cached_flags;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
					      -head_size);
		prepend_header = true;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_packed_desc);
		/* Packed descriptor id needs to be restored when inorder. */
		if (in_order)
			start_dp[idx].id = idx;
		/* reset flags for indirect desc */
		head_flags = VRING_DESC_F_INDIRECT;
		head_flags |= vq->vq_packed.cached_flags;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_packed_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		head_flags |= VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
		idx++;
		if (idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}

	if (vq->hw->has_tx_offload)
		virtqueue_xmit_offload(hdr, cookie);

	do {
		uint16_t flags;

		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len  = cookie->data_len;
		if (prepend_header) {
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		}

		if (likely(idx != head_idx)) {
			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
			flags |= vq->vq_packed.cached_flags;
			start_dp[idx].flags = flags;
		}
		prev = idx;
		idx++;
		if (idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	} while ((cookie = cookie->next) != NULL);

	start_dp[prev].id = id;

	if (use_indirect) {
		idx = head_idx;
		if (++idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq->vq_avail_idx = idx;

	if (!in_order) {
		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}

	virtqueue_store_flags_packed(head_dp, head_flags,
				     vq->hw->weak_barriers);
}

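/*
 * Caller-side sketch (illustrative, simplified from the Tx burst path):
 * reserve the needed slots, then enqueue with the negotiated mode flags:
 *
 *	if (vq->vq_free_cnt >= needed)
 *		virtqueue_enqueue_xmit_packed(txvq, mbuf, needed,
 *					      use_indirect, can_push,
 *					      in_order);
 */
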
static void
vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
{
	struct vq_desc_extra *dxp;

	dxp = &vq->vq_descx[id];
	vq->vq_free_cnt += dxp->ndescs;

	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_head_idx = id;
	else
		vq->vq_descx[vq->vq_desc_tail_idx].next = id;

	vq->vq_desc_tail_idx = id;
	dxp->next = VQ_RING_DESC_CHAIN_END;
}

static void
virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
{
	uint16_t used_idx, id, curr_id, free_cnt = 0;
	uint16_t size = vq->vq_nentries;
	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
	struct vq_desc_extra *dxp;
	int nb = num;

	used_idx = vq->vq_used_cons_idx;
	/* desc_is_used has a load-acquire or rte_io_rmb inside
	 * and waits for used descs in the virtqueue.
	 */
	while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
		id = desc[used_idx].id;
		do {
			curr_id = used_idx;
			dxp = &vq->vq_descx[used_idx];
			used_idx += dxp->ndescs;
			free_cnt += dxp->ndescs;
			nb -= dxp->ndescs;
			if (used_idx >= size) {
				used_idx -= size;
				vq->vq_packed.used_wrap_counter ^= 1;
			}
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
		} while (curr_id != id);
	}
	vq->vq_used_cons_idx = used_idx;
	vq->vq_free_cnt += free_cnt;
}

static void
virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
{
	uint16_t used_idx, id;
	uint16_t size = vq->vq_nentries;
	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
	struct vq_desc_extra *dxp;

	used_idx = vq->vq_used_cons_idx;
	/* desc_is_used has a load-acquire or rte_io_rmb inside
	 * and waits for used descs in the virtqueue.
	 */
	while (num-- && desc_is_used(&desc[used_idx], vq)) {
		id = desc[used_idx].id;
		dxp = &vq->vq_descx[id];
		vq->vq_used_cons_idx += dxp->ndescs;
		if (vq->vq_used_cons_idx >= size) {
			vq->vq_used_cons_idx -= size;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		vq_ring_free_id_packed(vq, id);
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		used_idx = vq->vq_used_cons_idx;
	}
}

/* Cleanup from completed transmits. */
static inline void
virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
{
	if (in_order)
		virtio_xmit_cleanup_inorder_packed(vq, num);
	else
		virtio_xmit_cleanup_normal_packed(vq, num);
}

static inline void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx;
	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;
		struct vq_desc_extra *dxp;

		used_idx = (uint16_t)(vq->vq_used_cons_idx &
				(vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];

		desc_idx = (uint16_t)uep->id;
		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}
}

/* Cleanup from completed inorder transmits. */
static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, idx = vq->vq_used_cons_idx;
	int16_t free_cnt = 0;
	struct vq_desc_extra *dxp = NULL;

	if (unlikely(num == 0))
		return;

	for (i = 0; i < num; i++) {
		dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
		free_cnt += dxp->ndescs;
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vq->vq_free_cnt += free_cnt;
	vq->vq_used_cons_idx = idx;
}
#endif /* _VIRTQUEUE_H_ */