xref: /dpdk/drivers/net/ngbe/ngbe_rxtx_vec_common.h (revision e94c20c34cfd7140b4199ab3c1059520c0605318)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2024 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _NGBE_RXTX_VEC_COMMON_H_
#define _NGBE_RXTX_VEC_COMMON_H_
#include <stdint.h>

#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

#define NGBE_RXD_PTID_SHIFT 9

#define RTE_NGBE_RXQ_REARM_THRESH      32
#define RTE_NGBE_MAX_RX_BURST          RTE_NGBE_RXQ_REARM_THRESH

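/*
 * Re-assemble multi-segment packets after a vector Rx burst. Buffers whose
 * split flag is set start or continue the packet held in rxq->pkt_first_seg /
 * rxq->pkt_last_seg; a clear flag marks the final segment (or a standalone
 * packet). CRC bytes are stripped from the re-assembled length. Completed
 * packets are compacted back into rx_bufs[] and their count is returned; a
 * trailing partial packet is kept in the queue for the next burst.
 */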
static inline uint16_t
reassemble_packets(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

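/*
 * Free transmitted mbufs in bulk once the descriptor at tx_next_dd reports
 * Descriptor Done. On success, tx_free_thresh buffers are returned to their
 * mempool (batched per pool via rte_mempool_put_bulk()) and the free/clean
 * counters advance; the return value is the number of descriptors freed.
 * If DD is not set yet, a descriptor write-back flush is requested when the
 * ring is running low on free entries, and 0 is returned.
 */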
static __rte_always_inline int
ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
{
	struct ngbe_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].dw3;
	if (!(status & NGBE_TXD_DD)) {
		if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
			ngbe_set32_masked(txq->tdc_reg_addr,
				NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
		return 0;
	}

	n = txq->tx_free_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_free_thresh - 1)
	 */
	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);

	return txq->tx_free_thresh;
}

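/*
 * Record the mbufs of a Tx burst in the vector software ring so they can be
 * released later by ngbe_tx_free_bufs(). A vector Tx burst would typically
 * use it along these lines (illustrative sketch, not the exact driver code):
 *
 *	if (txq->nb_tx_free < txq->tx_free_thresh)
 *		ngbe_tx_free_bufs(txq);
 *	tx_backlog_entry(&txq->sw_ring_v[txq->tx_tail], tx_pkts, nb_pkts);
 */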
static __rte_always_inline void
tx_backlog_entry(struct ngbe_tx_entry_v *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

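/*
 * Free every mbuf still referenced by the vector Tx software ring, from the
 * oldest in-flight entry up to tx_tail, then clear all entries.
 */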
static inline void
_ngbe_tx_queue_release_mbufs_vec(struct ngbe_tx_queue *txq)
{
	unsigned int i;
	struct ngbe_tx_entry_v *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_free_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) % txq->nb_tx_desc) {
		txe = &txq->sw_ring_v[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_v[i];
		txe->mbuf = NULL;
	}
}

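/*
 * Free the mbufs still owned by the vector Rx software ring. When part of
 * the ring is waiting to be rearmed, only the range from rx_tail up to
 * rxrearm_start holds valid mbufs.
 */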
static inline void
_ngbe_rx_queue_release_mbufs_vec(struct ngbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) % rxq->nb_rx_desc) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

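/*
 * Release the vector Tx software ring. Setup advanced sw_ring_v by one
 * entry (see ngbe_txq_vec_setup_default()), so the original allocation
 * starts at sw_ring_v - 1.
 */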
static inline void
_ngbe_tx_free_swring_vec(struct ngbe_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free(txq->sw_ring_v - 1);
		txq->sw_ring_v = NULL;
	}
}

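/*
 * Reset a vector Tx queue to its initial state: zero the hardware ring,
 * mark every descriptor as done, drop all software ring references and
 * reset the tail, clean and free counters as well as the context cache.
 */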
static inline void
_ngbe_reset_tx_queue_vec(struct ngbe_tx_queue *txq)
{
	static const struct ngbe_tx_desc zeroed_desc = {0};
	struct ngbe_tx_entry_v *txe = txq->sw_ring_v;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];

		txd->dw3 = NGBE_TXD_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);

	txq->tx_tail = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
}

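/*
 * Build the 64-bit mbuf_initializer template used by the vector Rx path:
 * the rearm_data region of an mbuf (data_off, refcnt, nb_segs, port) set
 * up so each received mbuf can be re-initialized with a single store.
 */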
static inline int
ngbe_rxq_vec_setup_default(struct ngbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

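/*
 * Common vector Tx queue setup: keep the first software ring entry as
 * spare room and point sw_ring_v past it, then install the vector queue
 * operations.
 */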
static inline int
ngbe_txq_vec_setup_default(struct ngbe_tx_queue *txq,
			    const struct ngbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_v == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_v = txq->sw_ring_v + 1;
	txq->ops = txq_ops;

	return 0;
}

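/*
 * Check whether the device configuration allows the vector Rx path; the
 * vector path is not used when IEEE 1588 timestamping support is built in.
 */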
static inline int
ngbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
#ifndef RTE_LIBRTE_IEEE1588

	return 0;
#else
	return -1;
#endif
}
#endif