/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include <rte_mbuf_dyn.h>
#include <rte_errno.h>

#include "idpf_common_rxtx.h"

int idpf_timestamp_dynfield_offset = -1;
uint64_t idpf_timestamp_dynflag;

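/* Validate the Rx free threshold supplied at Rx queue setup. */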
int
idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh)
{
	/* The following constraint must be satisfied:
	 * thresh < rxq->nb_rx_desc
	 */
	if (thresh >= nb_desc) {
		DRV_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
			thresh, nb_desc);
		return -EINVAL;
	}

	return 0;
}

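/* Validate the Tx RS and free thresholds supplied at Tx queue setup.
 * As a worked example (illustrative values, not driver defaults):
 * nb_desc = 512 with tx_rs_thresh = 32 and tx_free_thresh = 64
 * satisfies all four constraints checked below.
 */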
int
idpf_qc_tx_thresh_check(uint16_t nb_desc, uint16_t tx_rs_thresh,
			uint16_t tx_free_thresh)
{
	/* TX descriptors will have their RS bit set after tx_rs_thresh
	 * descriptors have been used. The TX descriptor ring will be cleaned
	 * after tx_free_thresh descriptors are used or if the number of
	 * descriptors required to transmit a packet is greater than the
	 * number of free TX descriptors.
	 *
	 * The following constraints must be satisfied:
	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
	 *  - tx_free_thresh must be less than the size of the ring minus 3.
	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
	 *  - tx_rs_thresh must be a divisor of the ring size.
	 *
	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
	 * race condition, hence the maximum threshold constraints. When set
	 * to zero use default values.
	 */
	if (tx_rs_thresh >= (nb_desc - 2)) {
		DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
			"number of TX descriptors (%u) minus 2",
			tx_rs_thresh, nb_desc);
		return -EINVAL;
	}
	if (tx_free_thresh >= (nb_desc - 3)) {
		DRV_LOG(ERR, "tx_free_thresh (%u) must be less than the "
			"number of TX descriptors (%u) minus 3.",
			tx_free_thresh, nb_desc);
		return -EINVAL;
	}
	if (tx_rs_thresh > tx_free_thresh) {
		DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
			"equal to tx_free_thresh (%u).",
			tx_rs_thresh, tx_free_thresh);
		return -EINVAL;
	}
	if ((nb_desc % tx_rs_thresh) != 0) {
		DRV_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
			"number of TX descriptors (%u).",
			tx_rs_thresh, nb_desc);
		return -EINVAL;
	}

	return 0;
}

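/* Free any mbufs still held in the Rx software ring. */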
void
idpf_qc_rxq_mbufs_release(struct idpf_rx_queue *rxq)
{
	uint16_t i;

	if (rxq->sw_ring == NULL)
		return;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i] != NULL) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;
		}
	}
}

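/* Free any mbufs still held in the Tx software ring. The ring length is
 * sw_nb_desc for the split queue model and nb_tx_desc for the single
 * queue model.
 */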
void
idpf_qc_txq_mbufs_release(struct idpf_tx_queue *txq)
{
	uint16_t nb_desc, i;

	if (txq == NULL || txq->sw_ring == NULL) {
		DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
		return;
	}

	if (txq->sw_nb_desc != 0) {
		/* Split queue model: use the SW ring size */
		nb_desc = txq->sw_nb_desc;
	} else {
		/* Single queue model */
		nb_desc = txq->nb_tx_desc;
	}
	for (i = 0; i < nb_desc; i++) {
		if (txq->sw_ring[i].mbuf != NULL) {
			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
			txq->sw_ring[i].mbuf = NULL;
		}
	}
}

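/* Zero the split-queue Rx descriptor ring, including the extra
 * IDPF_RX_MAX_BURST entries, and reset the tail and generation state.
 */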
void
idpf_qc_split_rx_descq_reset(struct idpf_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (rxq == NULL)
		return;

	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
	     i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	rxq->rx_tail = 0;
	rxq->expected_gen_id = 1;
}

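/* Reset a split-queue Rx buffer queue: zero the ring, point the spare
 * sw_ring slots at the fake mbuf, and rewind the refill bookkeeping.
 */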
void
idpf_qc_split_rx_bufq_reset(struct idpf_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (rxq == NULL)
		return;

	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
	     i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < IDPF_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;

	/* The next descriptor id which can be received. */
	rxq->rx_next_avail = 0;

	/* The next descriptor id which can be refilled. */
	rxq->rx_tail = 0;
	/* The number of descriptors which can be refilled. */
	rxq->nb_rx_hold = rxq->nb_rx_desc - 1;

	rxq->rxrearm_nb = 0;
	rxq->rxrearm_start = 0;

	rxq->bufq1 = NULL;
	rxq->bufq2 = NULL;
}

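/* Reset a complete split Rx queue: the descriptor queue plus both of
 * its buffer queues.
 */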
void
idpf_qc_split_rx_queue_reset(struct idpf_rx_queue *rxq)
{
	if (rxq == NULL)
		return;

	idpf_qc_split_rx_descq_reset(rxq);
	idpf_qc_split_rx_bufq_reset(rxq->bufq1);
	idpf_qc_split_rx_bufq_reset(rxq->bufq2);
}

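/* Reset a single-queue-model Rx queue: zero the ring, rearm the fake
 * mbuf slots, and clear the scatter and rearm state.
 */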
void
idpf_qc_single_rx_queue_reset(struct idpf_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (rxq == NULL)
		return;

	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
	     i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < IDPF_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;

	rte_pktmbuf_free(rxq->pkt_first_seg);

	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rxrearm_start = 0;
	rxq->rxrearm_nb = 0;
}

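/* Reset a split-queue Tx descriptor queue: zero the ring, relink the
 * software ring entries into a circular list, and reset the counters.
 */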
void
idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq)
{
	struct idpf_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (txq == NULL) {
		DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	size = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->desc_ring)[i] = 0;

	txe = txq->sw_ring;
	prev = (uint16_t)(txq->sw_nb_desc - 1);
	for (i = 0; i < txq->sw_nb_desc; i++) {
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_used = 0;

	/* Use this as next to clean for split desc queue */
	txq->last_desc_cleaned = 0;
	txq->sw_tail = 0;
	txq->nb_free = txq->nb_tx_desc - 1;

	memset(txq->ctype, 0, sizeof(txq->ctype));
	txq->next_dd = txq->rs_thresh - 1;
	txq->next_rs = txq->rs_thresh - 1;
}

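/* Reset a split-queue Tx completion queue: zero the completion ring and
 * reset the tail and generation state.
 */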
void
idpf_qc_split_tx_complq_reset(struct idpf_tx_queue *cq)
{
	uint32_t i, size;

	if (cq == NULL) {
		DRV_LOG(DEBUG, "Pointer to complq is NULL");
		return;
	}

	size = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)cq->compl_ring)[i] = 0;

	cq->tx_tail = 0;
	cq->expected_gen_id = 1;
}

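/* Reset a single-queue-model Tx queue: mark every descriptor as done,
 * relink the software ring, and reset the DD/RS trackers.
 */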
void
idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
{
	struct idpf_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (txq == NULL) {
		DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].qw1 =
			rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_free = txq->nb_tx_desc - 1;

	txq->next_dd = txq->rs_thresh - 1;
	txq->next_rs = txq->rs_thresh - 1;
}

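/* Release all resources of an Rx queue. For the split queue model this
 * also frees both attached buffer queues.
 */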
void
idpf_qc_rx_queue_release(void *rxq)
{
	struct idpf_rx_queue *q = rxq;

	if (q == NULL)
		return;

	/* Split queue */
	if (!q->adapter->is_rx_singleq) {
		q->bufq1->ops->release_mbufs(q->bufq1);
		rte_free(q->bufq1->sw_ring);
		rte_memzone_free(q->bufq1->mz);
		rte_free(q->bufq1);
		q->bufq2->ops->release_mbufs(q->bufq2);
		rte_free(q->bufq2->sw_ring);
		rte_memzone_free(q->bufq2->mz);
		rte_free(q->bufq2);
		rte_memzone_free(q->mz);
		rte_free(q);
		return;
	}

	/* Single queue */
	q->ops->release_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);
	rte_free(q);
}

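/* Release all resources of a Tx queue, including the attached completion
 * queue when one exists (split queue model).
 */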
void
idpf_qc_tx_queue_release(void *txq)
{
	struct idpf_tx_queue *q = txq;

	if (q == NULL)
		return;

	if (q->complq) {
		rte_memzone_free(q->complq->mz);
		rte_free(q->complq);
	}

	q->ops->release_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);
	rte_free(q);
}

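/* Register the dynamic mbuf field and flag used to carry Rx timestamps
 * when the timestamp offload is enabled on the queue.
 */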
int
idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq)
{
	int err;

	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
		/* Register mbuf field and flag for Rx timestamp */
		err = rte_mbuf_dyn_rx_timestamp_register(&idpf_timestamp_dynfield_offset,
							 &idpf_timestamp_dynflag);
		if (err != 0) {
			DRV_LOG(ERR,
				"Cannot register mbuf field/flag for timestamp");
			return -EINVAL;
		}
	}
	return 0;
}

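/* Allocate and program one mbuf per descriptor of a single-queue-model
 * Rx ring; returns -ENOMEM if the mempool runs dry.
 */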
int
idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
{
	volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(mbuf == NULL)) {
			DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];
		rxd->pkt_addr = dma_addr;
		rxd->hdr_addr = 0;
		rxd->rsvd1 = 0;
		rxd->rsvd2 = 0;
		rxq->sw_ring[i] = mbuf;
	}

	return 0;
}

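/* Allocate and program one mbuf per descriptor of a split-queue-model
 * Rx buffer ring; buf_id mirrors the descriptor index.
 */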
int
idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
{
	volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(mbuf == NULL)) {
			DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];
		rxd->qword0.buf_id = i;
		rxd->qword0.rsvd0 = 0;
		rxd->qword0.rsvd1 = 0;
		rxd->pkt_addr = dma_addr;
		rxd->hdr_addr = 0;
		rxd->rsvd2 = 0;

		rxq->sw_ring[i] = mbuf;
	}

	rxq->nb_rx_hold = 0;
	rxq->rx_tail = rxq->nb_rx_desc - 1;

	return 0;
}

#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
static inline uint64_t
idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
			    uint32_t in_timestamp)
{
#ifdef RTE_ARCH_X86_64
	struct idpf_hw *hw = &ad->hw;
	const uint64_t mask = 0xFFFFFFFF;
	uint32_t hi, lo, lo2, delta;
	uint64_t ns;

	if (flag != 0) {
		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
			       PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
		lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
		hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
		/*
		 * On a typical system, the delta between lo and lo2 is ~1000ns,
		 * so 10000 seems a large-enough but not overly-big guard band.
		 */
		if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
			lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
		else
			lo2 = lo;

		if (lo2 < lo) {
			lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
			hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
		}

		ad->time_hw = ((uint64_t)hi << 32) | lo;
	}

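	/* Extend the 32b hardware timestamp to 64b: apply the delta from the
	 * cached 64b time in whichever wrap direction makes it smaller.
	 */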
	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
	if (delta > (mask / 2)) {
		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
		ns = ad->time_hw - delta;
	} else {
		ns = ad->time_hw + delta;
	}

	return ns;
#else /* !RTE_ARCH_X86_64 */
	RTE_SET_USED(ad);
	RTE_SET_USED(flag);
	RTE_SET_USED(in_timestamp);
	return 0;
#endif /* RTE_ARCH_X86_64 */
}

#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S				\
	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \
	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \
	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \
	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))

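/* Translate split-queue Rx descriptor checksum error bits into mbuf
 * ol_flags; no flags are set unless the descriptor reports L3/L4
 * parsing done (L3L4P).
 */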
static inline uint64_t
idpf_splitq_rx_csum_offload(uint8_t err)
{
	uint64_t flags = 0;

	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
		return flags;

	if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
		return flags;
	}

	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;

	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;

	return flags;
}

#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24

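/* Assemble the 32b RSS hash from the three hash fields of the advanced
 * Rx descriptor and set RTE_MBUF_F_RX_RSS_HASH when the hash is valid.
 */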
static inline uint64_t
idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
			   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
	uint8_t status_err0_qw0;
	uint64_t flags = 0;

	status_err0_qw0 = rx_desc->status_err0_qw0;

	if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
		flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
				IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
			((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
			 IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
			((uint32_t)(rx_desc->hash3) <<
			 IDPF_RX_FLEX_DESC_ADV_HASH3_S);
	}

	return flags;
}

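/* Refill a split-queue Rx buffer queue in bursts of rx_free_thresh
 * buffers, handling the wrap at the end of the ring, then update the
 * hardware tail register.
 */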
static void
idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
{
	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
	uint16_t nb_refill = rx_bufq->rx_free_thresh;
	uint16_t nb_desc = rx_bufq->nb_rx_desc;
	uint16_t next_avail = rx_bufq->rx_tail;
	struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
	uint64_t dma_addr;
	uint16_t delta;
	int i;

	if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
		return;

	rx_buf_ring = rx_bufq->rx_ring;
	delta = nb_desc - next_avail;
	if (unlikely(delta < nb_refill)) {
		if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
			for (i = 0; i < delta; i++) {
				rx_buf_desc = &rx_buf_ring[next_avail + i];
				rx_bufq->sw_ring[next_avail + i] = nmb[i];
				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
				rx_buf_desc->hdr_addr = 0;
				rx_buf_desc->pkt_addr = dma_addr;
			}
			nb_refill -= delta;
			next_avail = 0;
			rx_bufq->nb_rx_hold -= delta;
		} else {
			rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
					   nb_desc - next_avail, rte_memory_order_relaxed);
			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
			       rx_bufq->port_id, rx_bufq->queue_id);
			return;
		}
	}

	if (nb_desc - next_avail >= nb_refill) {
		if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
			for (i = 0; i < nb_refill; i++) {
				rx_buf_desc = &rx_buf_ring[next_avail + i];
				rx_bufq->sw_ring[next_avail + i] = nmb[i];
				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
				rx_buf_desc->hdr_addr = 0;
				rx_buf_desc->pkt_addr = dma_addr;
			}
			next_avail += nb_refill;
			rx_bufq->nb_rx_hold -= nb_refill;
		} else {
			rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
					   nb_desc - next_avail, rte_memory_order_relaxed);
			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
			       rx_bufq->port_id, rx_bufq->queue_id);
		}
	}

	IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);

	rx_bufq->rx_tail = next_avail;
}

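/* Split queue model Rx burst: follow the generation bit around the
 * descriptor ring, pull each completed mbuf from the buffer queue named
 * by the descriptor, fill in offload flags, then refill both buffer
 * queues.
 */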
uint16_t
idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
	uint16_t pktlen_gen_bufq_id;
	struct idpf_rx_queue *rxq;
	const uint32_t *ptype_tbl;
	uint8_t status_err0_qw1;
	struct idpf_adapter *ad;
	struct rte_mbuf *rxm;
	uint16_t rx_id_bufq1;
	uint16_t rx_id_bufq2;
	uint64_t pkt_flags;
	uint16_t pkt_len;
	uint16_t bufq_id;
	uint16_t gen_id;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint64_t ts_ns;

	nb_rx = 0;
	rxq = rx_queue;

	/* Check the queue before dereferencing it any further. */
	if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
		return nb_rx;

	ad = rxq->adapter;
	rx_id = rxq->rx_tail;
	rx_id_bufq1 = rxq->bufq1->rx_next_avail;
	rx_id_bufq2 = rxq->bufq2->rx_next_avail;
	rx_desc_ring = rxq->rx_ring;
	ptype_tbl = rxq->adapter->ptype_tbl;

	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
		rxq->hw_register_set = 1;

	while (nb_rx < nb_pkts) {
		rx_desc = &rx_desc_ring[rx_id];

		pktlen_gen_bufq_id =
			rte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);
		gen_id = (pktlen_gen_bufq_id &
			  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
			VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;
		if (gen_id != rxq->expected_gen_id)
			break;

		pkt_len = (pktlen_gen_bufq_id &
			   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>
			VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;
		if (pkt_len == 0)
			RX_LOG(ERR, "Packet length is 0");

		rx_id++;
		if (unlikely(rx_id == rxq->nb_rx_desc)) {
			rx_id = 0;
			rxq->expected_gen_id ^= 1;
		}

		bufq_id = (pktlen_gen_bufq_id &
			   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>
			VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;
		if (bufq_id == 0) {
			rxm = rxq->bufq1->sw_ring[rx_id_bufq1];
			rx_id_bufq1++;
			if (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))
				rx_id_bufq1 = 0;
			rxq->bufq1->nb_rx_hold++;
		} else {
			rxm = rxq->bufq2->sw_ring[rx_id_bufq2];
			rx_id_bufq2++;
			if (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))
				rx_id_bufq2 = 0;
			rxq->bufq2->nb_rx_hold++;
		}

		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->next = NULL;
		rxm->nb_segs = 1;
		rxm->port = rxq->port_id;
		rxm->ol_flags = 0;
		rxm->packet_type =
			ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
				   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
				  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];

		status_err0_qw1 = rx_desc->status_err0_qw1;
		pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
		pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
		if (idpf_timestamp_dynflag > 0 &&
		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {
			/* timestamp */
			ts_ns = idpf_tstamp_convert_32b_64b(ad,
							    rxq->hw_register_set,
							    rte_le_to_cpu_32(rx_desc->ts_high));
			rxq->hw_register_set = 0;
			*RTE_MBUF_DYNFIELD(rxm,
					   idpf_timestamp_dynfield_offset,
					   rte_mbuf_timestamp_t *) = ts_ns;
			rxm->ol_flags |= idpf_timestamp_dynflag;
		}

		rxm->ol_flags |= pkt_flags;

		rx_pkts[nb_rx++] = rxm;
	}

	if (nb_rx > 0) {
		rxq->rx_tail = rx_id;
		if (rx_id_bufq1 != rxq->bufq1->rx_next_avail)
			rxq->bufq1->rx_next_avail = rx_id_bufq1;
		if (rx_id_bufq2 != rxq->bufq2->rx_next_avail)
			rxq->bufq2->rx_next_avail = rx_id_bufq2;

		idpf_split_rx_bufq_refill(rxq->bufq1);
		idpf_split_rx_bufq_refill(rxq->bufq2);
	}

	return nb_rx;
}

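/* Process one split-queue Tx completion descriptor: an RE completion
 * advances the cleaned-descriptor accounting, an RS completion frees
 * the mbuf recorded at the completion tag.
 */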
static inline void
idpf_split_tx_free(struct idpf_tx_queue *cq)
{
	volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
	volatile struct idpf_splitq_tx_compl_desc *txd;
	uint16_t next = cq->tx_tail;
	struct idpf_tx_entry *txe;
	struct idpf_tx_queue *txq;
	uint16_t gen, qid, q_head;
	uint16_t nb_desc_clean;
	uint8_t ctype;

	txd = &compl_ring[next];
	gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
	       IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
	if (gen != cq->expected_gen_id)
		return;

	ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
		 IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
	qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
	       IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
	q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
	txq = cq->txqs[qid - cq->tx_start_qid];

	switch (ctype) {
	case IDPF_TXD_COMPLT_RE:
		/* Clean up to q_head, which is the last fetched txq desc id + 1.
		 * TODO: refine and remove the if condition below.
		 */
		if (unlikely(q_head % 32)) {
			TX_LOG(ERR, "unexpected desc (head = %u) completion.",
			       q_head);
			return;
		}
		if (txq->last_desc_cleaned > q_head)
			nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
				q_head;
		else
			nb_desc_clean = q_head - txq->last_desc_cleaned;
		txq->nb_free += nb_desc_clean;
		txq->last_desc_cleaned = q_head;
		break;
	case IDPF_TXD_COMPLT_RS:
		/* q_head indicates sw_id when ctype is 2 */
		txe = &txq->sw_ring[q_head];
		if (txe->mbuf != NULL) {
			rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = NULL;
		}
		break;
	default:
		TX_LOG(ERR, "unknown completion type.");
		return;
	}

	if (++next == cq->nb_tx_desc) {
		next = 0;
		cq->expected_gen_id ^= 1;
	}

	cq->tx_tail = next;
}

/* Check if the context descriptor is needed for TX offloading */
static inline uint16_t
idpf_calc_context_desc(uint64_t flags)
{
	if ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
		return 1;

	return 0;
}

/* Set the TSO context descriptor. */
static inline void
idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
			union idpf_tx_offload tx_offload,
			volatile union idpf_flex_tx_ctx_desc *ctx_desc)
{
	uint16_t cmd_dtype;
	uint32_t tso_len;
	uint8_t hdr_len;

	if (tx_offload.l4_len == 0) {
		TX_LOG(DEBUG, "L4 length set to 0");
		return;
	}

	hdr_len = tx_offload.l2_len +
		tx_offload.l3_len +
		tx_offload.l4_len;
	cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
		IDPF_TX_FLEX_CTX_DESC_CMD_TSO;
	tso_len = mbuf->pkt_len - hdr_len;

	ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
	ctx_desc->tso.qw0.hdr_len = hdr_len;
	ctx_desc->tso.qw0.mss_rt =
		rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
				 IDPF_TXD_FLEX_CTX_MSS_RT_M);
	ctx_desc->tso.qw0.flex_tlen =
		rte_cpu_to_le_32(tso_len &
				 IDPF_TXD_FLEX_CTX_MSS_RT_M);
}

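/* Split queue model Tx burst: emit a TSO context descriptor when needed,
 * then one flow-scheduling data descriptor per mbuf segment; completed
 * descriptors are reaped from the completion queue whenever the free
 * count drops to the threshold.
 */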
uint16_t
idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
	volatile struct idpf_flex_tx_sched_desc *txr;
	volatile struct idpf_flex_tx_sched_desc *txd;
	struct idpf_tx_entry *sw_ring;
	union idpf_tx_offload tx_offload = {0};
	struct idpf_tx_entry *txe, *txn;
	uint16_t nb_used, tx_id, sw_id;
	struct rte_mbuf *tx_pkt;
	uint16_t nb_to_clean;
	uint16_t nb_tx = 0;
	uint64_t ol_flags;
	uint8_t cmd_dtype;
	uint16_t nb_ctx;

	if (unlikely(txq == NULL) || unlikely(!txq->q_started))
		return nb_tx;

	txr = txq->desc_ring;
	sw_ring = txq->sw_ring;
	tx_id = txq->tx_tail;
	sw_id = txq->sw_tail;
	txe = &sw_ring[sw_id];

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = tx_pkts[nb_tx];

		if (txq->nb_free <= txq->free_thresh) {
			/* TODO: Refine the cleanup.
			 * 1. free and clean: better to decide a clean destination
			 * instead of looping a fixed number of times, and don't free
			 * an mbuf as soon as its RS completion arrives; free it on
			 * transmit or according to the clean destination. For now,
			 * RE write-backs are ignored and mbufs are freed on RS.
			 * 2. Out-of-order write-back is not supported yet; the SW
			 * head and HW head need to be tracked separately.
			 */
			nb_to_clean = 2 * txq->rs_thresh;
			while (nb_to_clean--)
				idpf_split_tx_free(txq->complq);
		}

		if (txq->nb_free < tx_pkt->nb_segs)
			break;

		cmd_dtype = 0;
		ol_flags = tx_pkt->ol_flags;
		tx_offload.l2_len = tx_pkt->l2_len;
		tx_offload.l3_len = tx_pkt->l3_len;
		tx_offload.l4_len = tx_pkt->l4_len;
		tx_offload.tso_segsz = tx_pkt->tso_segsz;
		/* Calculate the number of context descriptors needed. */
		nb_ctx = idpf_calc_context_desc(ol_flags);
		nb_used = tx_pkt->nb_segs + nb_ctx;

		if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)
			cmd_dtype = IDPF_TXD_FLEX_FLOW_CMD_CS_EN;

		/* context descriptor */
		if (nb_ctx != 0) {
			volatile union idpf_flex_tx_ctx_desc *ctx_desc =
				(volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];

			if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
				idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
							ctx_desc);

			tx_id++;
			if (tx_id == txq->nb_tx_desc)
				tx_id = 0;
		}

		do {
			txd = &txr[tx_id];
			txn = &sw_ring[txe->next_id];
			txe->mbuf = tx_pkt;

			/* Setup TX descriptor */
			txd->buf_addr =
				rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
			cmd_dtype |= IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
			txd->qw1.cmd_dtype = cmd_dtype;
			txd->qw1.rxr_bufsize = tx_pkt->data_len;
			txd->qw1.compl_tag = sw_id;
			tx_id++;
			if (tx_id == txq->nb_tx_desc)
				tx_id = 0;
			sw_id = txe->next_id;
			txe = txn;
			tx_pkt = tx_pkt->next;
		} while (tx_pkt);

		/* fill the last descriptor with End of Packet (EOP) bit */
		txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;

		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);

		if (txq->nb_used >= 32) {
			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
			/* Update txq RE bit counters */
			txq->nb_used = 0;
		}
	}

	/* update the tail pointer if any packets were processed */
	if (likely(nb_tx > 0)) {
		IDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);
		txq->tx_tail = tx_id;
		txq->sw_tail = sw_id;
	}

	return nb_tx;
}

#define IDPF_RX_FLEX_DESC_STATUS0_XSUM_S				\
	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |		\
	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |		\
	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))

/* Translate the Rx descriptor status and error fields to pkt flags */
static inline uint64_t
idpf_rxd_to_pkt_flags(uint16_t status_error)
{
	uint64_t flags = 0;

	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S)) == 0))
		return flags;

	if (likely((status_error & IDPF_RX_FLEX_DESC_STATUS0_XSUM_S) == 0)) {
		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
		return flags;
	}

	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)) != 0))
		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) != 0))
		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)) != 0))
		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;

	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)) != 0))
		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;

	return flags;
}

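/* Accumulate held descriptors and write the Rx tail register once the
 * count crosses the free threshold.
 */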
static inline void
idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,
		    uint16_t rx_id)
{
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);

	if (nb_hold > rxq->rx_free_thresh) {
		RX_LOG(DEBUG,
		       "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
		       rxq->port_id, rxq->queue_id, rx_id, nb_hold);
		rx_id = (uint16_t)((rx_id == 0) ?
				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
		IDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
}

static inline void
idpf_singleq_rx_rss_offload(struct rte_mbuf *mb,
			    volatile struct virtchnl2_rx_flex_desc_nic *rx_desc,
			    uint64_t *pkt_flags)
{
	uint16_t rx_status0 = rte_le_to_cpu_16(rx_desc->status_error0);

	if (rx_status0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_S)) {
		*pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(rx_desc->rss_hash);
	}
}

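/* Single queue model Rx burst: one descriptor per packet, with each
 * consumed ring slot immediately refilled with a fresh mbuf.
 */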
10508c6098afSBeilei Xing uint16_t
idpf_dp_singleq_recv_pkts(void * rx_queue,struct rte_mbuf ** rx_pkts,uint16_t nb_pkts)10519ebf3f6bSBeilei Xing idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
10528c6098afSBeilei Xing 			  uint16_t nb_pkts)
10538c6098afSBeilei Xing {
10548c6098afSBeilei Xing 	volatile union virtchnl2_rx_desc *rx_ring;
10558c6098afSBeilei Xing 	volatile union virtchnl2_rx_desc *rxdp;
10568c6098afSBeilei Xing 	union virtchnl2_rx_desc rxd;
10578c6098afSBeilei Xing 	struct idpf_rx_queue *rxq;
10588c6098afSBeilei Xing 	const uint32_t *ptype_tbl;
10598c6098afSBeilei Xing 	uint16_t rx_id, nb_hold;
10608c6098afSBeilei Xing 	struct idpf_adapter *ad;
10618c6098afSBeilei Xing 	uint16_t rx_packet_len;
10628c6098afSBeilei Xing 	struct rte_mbuf *rxm;
10638c6098afSBeilei Xing 	struct rte_mbuf *nmb;
10648c6098afSBeilei Xing 	uint16_t rx_status0;
10658c6098afSBeilei Xing 	uint64_t pkt_flags;
10668c6098afSBeilei Xing 	uint64_t dma_addr;
10678c6098afSBeilei Xing 	uint64_t ts_ns;
10688c6098afSBeilei Xing 	uint16_t nb_rx;
10698c6098afSBeilei Xing 
10708c6098afSBeilei Xing 	nb_rx = 0;
10718c6098afSBeilei Xing 	nb_hold = 0;
10728c6098afSBeilei Xing 	rxq = rx_queue;
10738c6098afSBeilei Xing 
10748c6098afSBeilei Xing 	ad = rxq->adapter;
10758c6098afSBeilei Xing 
10768c6098afSBeilei Xing 	if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
10778c6098afSBeilei Xing 		return nb_rx;
10788c6098afSBeilei Xing 
10798c6098afSBeilei Xing 	rx_id = rxq->rx_tail;
10808c6098afSBeilei Xing 	rx_ring = rxq->rx_ring;
10818c6098afSBeilei Xing 	ptype_tbl = rxq->adapter->ptype_tbl;
10828c6098afSBeilei Xing 
	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
		rxq->hw_register_set = 1;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		rx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);

		/* Check the DD bit first */
		if ((rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
			break;

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(nmb == NULL)) {
			rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
					rte_memory_order_relaxed);
			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
			       "queue_id=%u", rxq->port_id, rxq->queue_id);
			break;
		}
		rxd = *rxdp; /* copy the descriptor to a temporary variable */

		nb_hold++;
		rxm = rxq->sw_ring[rx_id];
		rxq->sw_ring[rx_id] = nmb;
		rx_id++;
		if (unlikely(rx_id == rxq->nb_rx_desc))
			rx_id = 0;

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When the next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);
		}
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;

		rx_packet_len = (rte_le_to_cpu_16(rxd.flex_nic_wb.pkt_len) &
				 VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M);

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = rx_packet_len;
		rxm->data_len = rx_packet_len;
		rxm->port = rxq->port_id;
		rxm->ol_flags = 0;
		pkt_flags = idpf_rxd_to_pkt_flags(rx_status0);
		idpf_singleq_rx_rss_offload(rxm, &rxd.flex_nic_wb, &pkt_flags);
		rxm->packet_type =
			ptype_tbl[(uint8_t)(rte_le_to_cpu_16(rxd.flex_nic_wb.ptype_flex_flags0) &
					    VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];

		rxm->ol_flags |= pkt_flags;

		if (idpf_timestamp_dynflag > 0 &&
		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
			/* Convert the 32-bit HW timestamp to 64 bits */
			ts_ns = idpf_tstamp_convert_32b_64b(ad,
					    rxq->hw_register_set,
					    rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));
			rxq->hw_register_set = 0;
			*RTE_MBUF_DYNFIELD(rxm,
					   idpf_timestamp_dynfield_offset,
					   rte_mbuf_timestamp_t *) = ts_ns;
			rxm->ol_flags |= idpf_timestamp_dynflag;
		}

		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	idpf_update_rx_tail(rxq, nb_hold, rx_id);

	return nb_rx;
}

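/* Single-queue model receive burst with scatter support: packets larger
 * than one mbuf arrive as several descriptors and are delivered as a
 * segment chain. An unfinished chain is parked in pkt_first_seg /
 * pkt_last_seg across bursts until the EOF descriptor arrives.
 */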
uint16_t
idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct idpf_rx_queue *rxq = rx_queue;
	volatile union virtchnl2_rx_desc *rx_ring;
	volatile union virtchnl2_rx_desc *rxdp;
	union virtchnl2_rx_desc rxd;
	struct idpf_adapter *ad;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	const uint32_t *ptype_tbl;
	uint16_t rx_id;
	uint16_t rx_packet_len;
	uint16_t nb_hold = 0;
	uint16_t rx_status0;
	uint16_t nb_rx = 0;
	uint64_t pkt_flags;
	uint64_t dma_addr;
	uint64_t ts_ns;

	/* Validate the queue before dereferencing it. */
	if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
		return nb_rx;

	ad = rxq->adapter;
	rx_ring = rxq->rx_ring;
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;
	ptype_tbl = ad->ptype_tbl;
	rx_id = rxq->rx_tail;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		rx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);

		/* Check the DD bit first */
		if (!(rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S)))
			break;

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(nmb == NULL)) {
			rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
					rte_memory_order_relaxed);
			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
			       "queue_id=%u", rxq->port_id, rxq->queue_id);
			break;
		}

		rxd = *rxdp;

		nb_hold++;
		rxm = rxq->sw_ring[rx_id];
		rxq->sw_ring[rx_id] = nmb;
		rx_id++;
		if (unlikely(rx_id == rxq->nb_rx_desc))
			rx_id = 0;

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When the next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);
		}
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;
		rx_packet_len = (rte_le_to_cpu_16(rxd.flex_nic_wb.pkt_len) &
				 VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M);
		rxm->data_len = rx_packet_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;

		/* If this is the first buffer of the received packet, set the
		 * pointer to the first mbuf of the packet and initialize its
		 * context. Otherwise, update the total length and the number
		 * of segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = rx_packet_len;
		} else {
			first_seg->pkt_len =
				(uint16_t)(first_seg->pkt_len +
					   rx_packet_len);
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		if (!(rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_S))) {
			last_seg = rxm;
			continue;
		}

		rxm->next = NULL;

		first_seg->port = rxq->port_id;
		first_seg->ol_flags = 0;
		pkt_flags = idpf_rxd_to_pkt_flags(rx_status0);
		idpf_singleq_rx_rss_offload(first_seg, &rxd.flex_nic_wb, &pkt_flags);
		first_seg->packet_type =
			ptype_tbl[(uint8_t)(rte_le_to_cpu_16(rxd.flex_nic_wb.ptype_flex_flags0) &
				VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];

		if (idpf_timestamp_dynflag > 0 &&
		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
			/* Convert the 32-bit HW timestamp to 64 bits and
			 * store it in the head mbuf, where the dynflag is set.
			 */
			ts_ns = idpf_tstamp_convert_32b_64b(ad,
				rxq->hw_register_set,
				rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));
			rxq->hw_register_set = 0;
			*RTE_MBUF_DYNFIELD(first_seg,
					   idpf_timestamp_dynfield_offset,
					   rte_mbuf_timestamp_t *) = ts_ns;
			first_seg->ol_flags |= idpf_timestamp_dynflag;
		}

		first_seg->ol_flags |= pkt_flags;
		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
					  first_seg->data_off));
		rx_pkts[nb_rx++] = first_seg;
		first_seg = NULL;
	}
	rxq->rx_tail = rx_id;
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	idpf_update_rx_tail(rxq, nb_hold, rx_id);

	return nb_rx;
}

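/* Walk the TX ring from the last cleaned position to the next RS
 * boundary and reclaim the descriptors in between, but only once the HW
 * has written DESC_DONE back into the boundary descriptor. For example,
 * with rs_thresh = 32 and last_desc_cleaned = 31, descriptors 32..63
 * are freed once descriptor 63 (or the last descriptor of the packet
 * ending there) reports done.
 */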
static inline int
idpf_xmit_cleanup(struct idpf_tx_queue *txq)
{
	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
	struct idpf_tx_entry *sw_ring = txq->sw_ring;
	uint16_t nb_tx_desc = txq->nb_tx_desc;
	uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;

	volatile struct idpf_base_tx_desc *txd = txq->tx_ring;

	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
	if (desc_to_clean_to >= nb_tx_desc)
		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
	if ((txd[desc_to_clean_to].qw1 &
	     rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
	    rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
		TX_LOG(DEBUG, "TX descriptor %4u is not done "
		       "(port=%d queue=%d)", desc_to_clean_to,
		       txq->port_id, txq->queue_id);
		return -1;
	}

	if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
					    desc_to_clean_to);
	else
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
					    last_desc_cleaned);

	txd[desc_to_clean_to].qw1 = 0;

	txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);

	return 0;
}

/* Single-queue model transmit burst: write one data descriptor per mbuf
 * segment, plus an optional context descriptor for TSO, and set the RS
 * bit every rs_thresh descriptors so that idpf_xmit_cleanup() can
 * reclaim completed slots.
 */
uint16_t
idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	volatile struct idpf_base_tx_desc *txd;
	volatile struct idpf_base_tx_desc *txr;
	union idpf_tx_offload tx_offload = {0};
	struct idpf_tx_entry *txe, *txn;
	struct idpf_tx_entry *sw_ring;
	struct idpf_tx_queue *txq;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint64_t buf_dma_addr;
	uint32_t td_offset;
	uint64_t ol_flags;
	uint16_t tx_last;
	uint16_t nb_used;
	uint16_t nb_ctx;
	uint16_t td_cmd;
	uint16_t tx_id;
	uint16_t nb_tx;
	uint16_t slen;

	nb_tx = 0;
	txq = tx_queue;

	if (unlikely(txq == NULL) || unlikely(!txq->q_started))
		return nb_tx;

	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	/* Check if the descriptor ring needs to be cleaned. */
	if (txq->nb_free < txq->free_thresh)
		(void)idpf_xmit_cleanup(txq);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		td_cmd = 0;
		td_offset = 0;

		tx_pkt = *tx_pkts++;
		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		ol_flags = tx_pkt->ol_flags;
		tx_offload.l2_len = tx_pkt->l2_len;
		tx_offload.l3_len = tx_pkt->l3_len;
		tx_offload.l4_len = tx_pkt->l4_len;
		tx_offload.tso_segsz = tx_pkt->tso_segsz;
		/* Calculate the number of context descriptors needed. */
		nb_ctx = idpf_calc_context_desc(ol_flags);

		/* The number of descriptors that must be allocated for
		 * a packet equals the number of segments of that packet,
		 * plus one context descriptor if needed.
		 */
		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
		tx_last = (uint16_t)(tx_id + nb_used - 1);

		/* Circular ring */
		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);

		TX_LOG(DEBUG, "port_id=%u queue_id=%u"
		       " tx_first=%u tx_last=%u",
		       txq->port_id, txq->queue_id, tx_id, tx_last);

		if (nb_used > txq->nb_free) {
			if (idpf_xmit_cleanup(txq) != 0) {
				if (nb_tx == 0)
					return 0;
				goto end_of_tx;
			}
			if (unlikely(nb_used > txq->rs_thresh)) {
				while (nb_used > txq->nb_free) {
					if (idpf_xmit_cleanup(txq) != 0) {
						if (nb_tx == 0)
							return 0;
						goto end_of_tx;
					}
				}
			}
		}

		if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)
			td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;

		if (nb_ctx != 0) {
			/* Setup TX context descriptor if required */
			volatile union idpf_flex_tx_ctx_desc *ctx_txd =
				(volatile union idpf_flex_tx_ctx_desc *)
				&txr[tx_id];

			txn = &sw_ring[txe->next_id];
			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
			if (txe->mbuf != NULL) {
				rte_pktmbuf_free_seg(txe->mbuf);
				txe->mbuf = NULL;
			}

			/* TSO enabled */
			if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
				idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
							ctx_txd);

			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
		}

		m_seg = tx_pkt;
		do {
			txd = &txr[tx_id];
			txn = &sw_ring[txe->next_id];

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/* Setup TX Descriptor */
			slen = m_seg->data_len;
			buf_dma_addr = rte_mbuf_data_iova(m_seg);
			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
			txd->qw1 = rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DATA |
				((uint64_t)td_cmd << IDPF_TXD_QW1_CMD_S) |
				((uint64_t)td_offset << IDPF_TXD_QW1_OFFSET_S) |
				((uint64_t)slen << IDPF_TXD_QW1_TX_BUF_SZ_S));

			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->next;
		} while (m_seg);

		/* The last packet data descriptor needs End Of Packet (EOP) */
		td_cmd |= IDPF_TX_DESC_CMD_EOP;
		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);

		if (txq->nb_used >= txq->rs_thresh) {
			TX_LOG(DEBUG, "Setting RS bit on TXD id="
			       "%4u (port=%d queue=%d)",
			       tx_last, txq->port_id, txq->queue_id);

			td_cmd |= IDPF_TX_DESC_CMD_RS;

			/* Update txq RS bit counters */
			txq->nb_used = 0;
		}

		/* OR the EOP/RS flags into the last data descriptor; qw1 is
		 * a little-endian 64-bit field, hence the 64-bit conversion.
		 */
		txd->qw1 |= rte_cpu_to_le_64((uint64_t)td_cmd << IDPF_TXD_QW1_CMD_S);
	}

end_of_tx:
	rte_wmb();

	TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
	       txq->port_id, txq->queue_id, tx_id, nb_tx);

	IDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);
	txq->tx_tail = tx_id;

	return nb_tx;
}

/* TX prepare: validate offload flags, segment counts and TSO parameters
 * before the packets are handed to the transmit function.
 */
uint16_t
idpf_dp_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	int ret;
#endif
	int i;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		/* Reject non-TSO packets with more than IDPF_TX_MAX_MTU_SEG segments. */
		if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {
			if (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {
				rte_errno = EINVAL;
				return i;
			}
		} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||
			   (m->tso_segsz > IDPF_MAX_TSO_MSS) ||
			   (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {
			/* An MSS outside this range is considered malicious */
			rte_errno = EINVAL;
			return i;
		}

		if ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {
			rte_errno = ENOTSUP;
			return i;
		}

		if (m->pkt_len < IDPF_MIN_FRAME_SIZE) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
	}

	return i;
}
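
/* Usage sketch (illustrative only, not part of this file): an idpf-based
 * ethdev driver would typically wire up these single-queue paths as
 *
 *	dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
 *	dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
 *	dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 *
 * with dev being the struct rte_eth_dev under configuration.
 */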
static void __rte_cold
release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
{
	const uint16_t mask = rxq->nb_rx_desc - 1;
	uint16_t i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i] != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
		}
	} else {
		for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) {
			if (rxq->sw_ring[i] != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

static const struct idpf_rxq_ops def_rx_ops_vec = {
	.release_mbufs = release_rxq_mbufs_vec,
};

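/* Precompute the 64-bit rearm word for the vector RX path: data_off,
 * refcnt, nb_segs and port of a template mbuf are captured once through
 * the rearm_data alias, so each rearmed mbuf can be initialized with a
 * single 8-byte store.
 */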
static inline int
idpf_rxq_vec_setup_default(struct idpf_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

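/* Vector RX setup entry points: install the vector mbuf-release ops and
 * the mbuf initializer. The single-queue model applies them to the RX
 * queue itself, the split-queue model to its second buffer queue.
 */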
int __rte_cold
idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
{
	rxq->ops = &def_rx_ops_vec;
	return idpf_rxq_vec_setup_default(rxq);
}

int __rte_cold
idpf_qc_splitq_rx_vec_setup(struct idpf_rx_queue *rxq)
{
	rxq->bufq2->ops = &def_rx_ops_vec;
	return idpf_rxq_vec_setup_default(rxq->bufq2);
}