/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2024 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _NGBE_RXTX_VEC_COMMON_H_
#define _NGBE_RXTX_VEC_COMMON_H_
#include <stdint.h>

#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

#define NGBE_RXD_PTID_SHIFT 9

#define RTE_NGBE_RXQ_REARM_THRESH      32
#define RTE_NGBE_MAX_RX_BURST          RTE_NGBE_RXQ_REARM_THRESH

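/*
 * Chain together the mbufs of packets that the vector Rx path received
 * across multiple descriptors. split_flags[i] is non-zero while packet i
 * continues in the next buffer. Completed packets are compacted back into
 * rx_bufs[]; a chain still open at the end of the burst is parked in
 * rxq->pkt_first_seg/pkt_last_seg for the next call. Returns the number
 * of complete packets left in rx_bufs[].
 */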
static inline uint16_t
reassemble_packets(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip CRC for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * sizeof(*pkts));
	return pkt_idx;
}

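/*
 * Free the mbufs of the tx_free_thresh oldest completed descriptors,
 * keyed off the DD bit of the descriptor at tx_next_dd; mbufs from the
 * same mempool are returned with one rte_mempool_put_bulk() call. If the
 * hardware has not caught up yet, trigger a descriptor write-back flush
 * and free nothing. A sketch of the expected call site in a vector Tx
 * burst routine (illustrative, not part of this header):
 *
 *	if (txq->nb_tx_free < txq->tx_free_thresh)
 *		ngbe_tx_free_bufs(txq);
 */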
static __rte_always_inline int
ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
{
	struct ngbe_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].dw3;
	if (!(status & NGBE_TXD_DD)) {
		if ((txq->nb_tx_free >> 1) < txq->tx_free_thresh)
			ngbe_set32_masked(txq->tdc_reg_addr,
				NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
		return 0;
	}

	n = txq->tx_free_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_free_thresh - 1)
	 */
	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);

	return txq->tx_free_thresh;
}

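/* Stash the transmitted mbufs in the S/W ring entries so that
 * ngbe_tx_free_bufs() can release them once the hardware is done.
 */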
static __rte_always_inline void
tx_backlog_entry(struct ngbe_tx_entry_v *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

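/*
 * Free all mbufs still held by the Tx S/W ring, scanning from the oldest
 * in-flight entry up to tx_tail, then clear every entry.
 */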
static inline void
_ngbe_tx_queue_release_mbufs_vec(struct ngbe_tx_queue *txq)
{
	unsigned int i;
	struct ngbe_tx_entry_v *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_free_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) % txq->nb_tx_desc) {
		txe = &txq->sw_ring_v[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_v[i];
		txe->mbuf = NULL;
	}
}

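/*
 * Free all mbufs still held by the Rx S/W ring. Once some descriptors
 * have been consumed (rxrearm_nb != 0), only the window from rx_tail to
 * rxrearm_start contains valid mbufs.
 */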
static inline void
_ngbe_rx_queue_release_mbufs_vec(struct ngbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) % rxq->nb_rx_desc) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

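/*
 * Release the vector Tx S/W ring. The "- 1" undoes the "+ 1" applied in
 * ngbe_txq_vec_setup_default() below, so rte_free() receives the
 * original allocation address.
 */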
static inline void
_ngbe_tx_free_swring_vec(struct ngbe_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free(txq->sw_ring_v - 1);
		txq->sw_ring_v = NULL;
	}
}

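/*
 * Return a Tx queue to its post-initialization state: zero the H/W ring,
 * set the DD bit on every descriptor so the free path treats them as
 * completed, and reset the S/W bookkeeping.
 */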
static inline void
_ngbe_reset_tx_queue_vec(struct ngbe_tx_queue *txq)
{
	static const struct ngbe_tx_desc zeroed_desc = {0};
	struct ngbe_tx_entry_v *txe = txq->sw_ring_v;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries and mark every descriptor as done */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];

		txd->dw3 = NGBE_TXD_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);

	txq->tx_tail = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
}

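/*
 * Precompute the 64-bit value used to rearm Rx mbufs with a single
 * store: the rearm_data marker covers data_off, refcnt, nb_segs and
 * port, so those fields are set on a stack mbuf and read out as one
 * word into rxq->mbuf_initializer.
 */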
static inline int
ngbe_rxq_vec_setup_default(struct ngbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

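/*
 * Switch the queue to the vector S/W ring: entry 0 is reserved as an
 * overflow slot, so sw_ring_v is advanced by one (undone again in
 * _ngbe_tx_free_swring_vec() above).
 */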
static inline int
ngbe_txq_vec_setup_default(struct ngbe_tx_queue *txq,
			    const struct ngbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_v == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_v = txq->sw_ring_v + 1;
	txq->ops = txq_ops;

	return 0;
}

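/*
 * The vector Rx path does not support IEEE 1588 timestamping, so refuse
 * the vector driver when RTE_LIBRTE_IEEE1588 is compiled in.
 */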
static inline int
ngbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
#ifndef RTE_LIBRTE_IEEE1588
	return 0;
#else
	return -1;
#endif
}
#endif /* _NGBE_RXTX_VEC_COMMON_H_ */