/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>

#include <stdint.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ngbe_logs.h"
#include "base/ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

/**
 * ngbe_free_sc_cluster - free a not-yet-completed scattered cluster
 *
 * The "next" pointer of the last segment of a not-yet-completed RSC cluster
 * in the sw_sc_ring is not set to NULL but rather points to the next
 * mbuf of this RSC aggregation (which has not been completed yet and still
 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
 * just free the first "nb_segs" segments of the cluster explicitly by
 * calling rte_pktmbuf_free_seg().
 *
 * @m scattered cluster head
 */
static void
ngbe_free_sc_cluster(struct rte_mbuf *m)
{
	uint16_t i, nb_segs = m->nb_segs;
	struct rte_mbuf *next_seg;

	for (i = 0; i < nb_segs; i++) {
		next_seg = m->next;
		rte_pktmbuf_free_seg(m);
		m = next_seg;
	}
}
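
/*
 * Illustrative note (not part of the original sources): for a hypothetical,
 * partially assembled 3-segment cluster m0->m1->m2 whose m2->next still
 * points at an mbuf owned by the HW ring, rte_pktmbuf_free(m0) would walk
 * past m2 and release an mbuf the hardware still references. Freeing exactly
 * nb_segs segments, as done above, stops at the software-owned part of the
 * chain.
 */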

static void
ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
		for (i = 0; i < rxq->rx_nb_avail; ++i) {
			struct rte_mbuf *mb;

			mb = rxq->rx_stage[rxq->rx_next_avail + i];
			rte_pktmbuf_free_seg(mb);
		}
		rxq->rx_nb_avail = 0;
	}

	if (rxq->sw_sc_ring != NULL)
		for (i = 0; i < rxq->nb_rx_desc; i++)
			if (rxq->sw_sc_ring[i].fbuf != NULL) {
				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
				rxq->sw_sc_ring[i].fbuf = NULL;
			}
}

static void
ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
{
	if (rxq != NULL) {
		ngbe_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq->sw_sc_ring);
		rte_free(rxq);
	}
}

void
ngbe_dev_rx_queue_release(void *rxq)
{
	ngbe_rx_queue_release(rxq);
}

/*
 * Check whether the Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int
check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
{
	int ret = 0;

	/*
	 * Make sure the following pre-conditions are satisfied:
	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
	 * Scattered packets are not supported. This should be checked
	 * outside of this function.
	 */
	if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
			     rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
		PMD_INIT_LOG(DEBUG,
			     "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}
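
/*
 * Worked example (illustrative only, values are hypothetical): assuming
 * RTE_PMD_NGBE_RX_MAX_BURST is 32, a queue configured with nb_rx_desc = 512
 * and rx_free_thresh = 64 satisfies all three checks above (64 >= 32,
 * 64 < 512, 512 % 64 == 0), so the bulk allocation path may be used.
 * With rx_free_thresh = 48 the divisibility check fails (512 % 48 != 0)
 * and the default Rx burst function is selected instead.
 */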

/* Reset dynamic ngbe_rx_queue fields back to defaults */
static void
ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
{
	static const struct ngbe_rx_desc zeroed_desc = {
						{{0}, {0} }, {{0}, {0} } };
	unsigned int i;
	uint16_t len = rxq->nb_rx_desc;

	/*
	 * By default, the Rx queue setup function allocates enough memory for
	 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
	 * extra memory at the end of the descriptor ring to be zeroed out.
	 */
	if (adapter->rx_bulk_alloc_allowed)
		/* zero out extra memory */
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	/*
	 * Zero out HW ring memory. Zero out extra memory at the end of
	 * the H/W ring so that the look-ahead logic in the Rx Burst bulk
	 * alloc function reads the extra memory as zeros.
	 */
	for (i = 0; i < len; i++)
		rxq->rx_ring[i] = zeroed_desc;

	/*
	 * Initialize the extra software ring entries. Space for these extra
	 * entries is always allocated.
	 */
	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = rxq->nb_rx_desc; i < len; ++i)
		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
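
/*
 * Illustrative note (not from the original sources): with a hypothetical
 * nb_rx_desc = 512 and the bulk allocation path allowed, the loop above
 * points the extra sw_ring entries [512, 512 + RTE_PMD_NGBE_RX_MAX_BURST)
 * at &rxq->fake_mbuf, so the look-ahead in the bulk-alloc receive path never
 * dereferences an uninitialized software ring entry when it scans past the
 * real end of the ring.
 */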

int
ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct ngbe_rx_queue *rxq;
	struct ngbe_hw     *hw;
	uint16_t len;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();
	hw = ngbe_dev_hw(dev);

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct ngbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	/*
	 * Allocate Rx ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      RX_RING_SZ, NGBE_ALIGN, socket_id);
	if (rz == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Zero init all the descriptors in the ring.
	 */
	memset(rz->addr, 0, RX_RING_SZ);

	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));

	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);

	/*
	 * Certain constraints must be met in order to use the bulk buffer
	 * allocation Rx burst function. If any of the Rx queues does not meet
	 * them, the feature should be disabled for the whole port.
	 */
	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
		PMD_INIT_LOG(DEBUG,
			     "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
			     rxq->queue_id, rxq->port_id);
		adapter->rx_bulk_alloc_allowed = false;
	}

	/*
	 * Allocate the software ring. Allow for space at the end of the
	 * S/W ring to make sure the look-ahead logic in the bulk alloc Rx
	 * burst function does not access an invalid memory region.
	 */
	len = nb_desc;
	if (adapter->rx_bulk_alloc_allowed)
		len += RTE_PMD_NGBE_RX_MAX_BURST;

	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
					  sizeof(struct ngbe_rx_entry) * len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Always allocate it, even if it is not going to be needed, in order
	 * to simplify the code.
	 *
	 * This ring is used in Scattered Rx cases and Scattered Rx may
	 * be requested in ngbe_dev_rx_init(), which is called later from
	 * the dev_start() flow.
	 */
	rxq->sw_sc_ring =
		rte_zmalloc_socket("rxq->sw_sc_ring",
				  sizeof(struct ngbe_scattered_rx_entry) * len,
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_sc_ring == NULL) {
		ngbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_INIT_LOG(DEBUG,
		     "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
		     rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;

	ngbe_reset_rx_queue(adapter, rxq);

	return 0;
}
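
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches ngbe_dev_rx_queue_setup() through the generic ethdev API. The
 * port id, descriptor count and rx_free_thresh below are hypothetical
 * example values; rx_free_thresh is chosen so that the bulk allocation
 * preconditions checked above are met.
 *
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_free_thresh = 64,
 *		.rx_drop_en = 0,
 *		.rx_deferred_start = 0,
 *	};
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("mbuf_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "mbuf pool creation failed\n");
 *	if (rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &rxconf, mp) != 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */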