/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++)
				rte_pktmbuf_free(sw_ring[i]);
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * must not exceed what the hardware supports.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC stripping in AXGBE is configured per port, not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	/* Allocate Rx ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}
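
/*
 * Illustrative sketch (not part of the driver): the power-of-2 ring size
 * requirement enforced above lets descriptor indices be derived from the
 * free-running 'cur'/'dirty' counters with a cheap mask instead of a
 * modulo. Assuming AXGBE_GET_DESC_IDX() behaves like the helper below,
 * the 64-bit counters can wrap without corrupting the index.
 */
static inline uint16_t
example_axgbe_desc_idx(uint64_t counter, uint16_t nb_desc)
{
	/* Only valid when nb_desc is a power of two */
	return (uint16_t)(counter & (nb_desc - 1));
}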

static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx FIFO. Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare each Rx DMA channel for stopping */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		axgbe_prepare_rx_stop(pdata, i);
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable the Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable the Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* The frame is forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
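
/*
 * Worked example (illustrative only): MAC_RQC0R carries a 2-bit enable
 * field per Rx queue, and the loop above writes 0x02 into each field
 * (on XGMAC hardware this encoding is commonly documented as "enabled
 * for DCB/generic"). The helper below reproduces the computation.
 */
static inline uint32_t
example_axgbe_rqc0r(unsigned int rx_q_count)
{
	unsigned int i;
	uint32_t reg_val = 0;

	for (i = 0; i < rx_q_count; i++)
		reg_val |= (0x02U << (i << 1));
	/* e.g. rx_q_count = 4: 0x02 | 0x08 | 0x20 | 0x80 = 0xAA */
	return reg_val;
}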

/* Rx function: refresh descriptors one-for-one as packets are received */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free the mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
				RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							RX_NORMAL_DESC0, OVT);
				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Indicate if a context descriptor follows */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
					| RTE_MBUF_F_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Populate the mbuf */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}
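
/*
 * Usage sketch (illustrative, not part of the driver): axgbe_recv_pkts()
 * is reached through rte_eth_rx_burst() once registered as the port's
 * rx_pkt_burst handler. 'port_id' and 'queue_id' below are assumed to
 * name a started axgbe port; rte_ethdev.h provides the prototypes.
 */
static inline void
example_axgbe_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb_rx; i++) {
		/* ... process pkts[i] ... */
		rte_pktmbuf_free(pkts[i]);
	}
}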

uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err = 0, etlt;
	uint32_t error_status = 0;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	uint64_t offloads;
	bool eop = false;

	while (nb_rx < nb_pkts) {
next_desc:
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = false;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = true;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, PL) - rxq->crc_len;
			/* The last segment holds the remainder; it is a
			 * full buffer when pkt_len is an exact multiple
			 * of the buffer size.
			 */
			data_len = pkt_len % rxq->buf_size;
			if (data_len == 0 && pkt_len != 0)
				data_len = rxq->buf_size;
			/* Check for any errors and free the mbuf */
			err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, ES);
			error_status = 0;
			if (unlikely(err)) {
				error_status = desc->write.desc3 &
					AXGBE_ERR_STATUS;
				if (error_status != AXGBE_L3_CSUM_ERR &&
				    error_status != AXGBE_L4_CSUM_ERR) {
					rxq->errors++;
					rte_pktmbuf_free(mbuf);
					rte_pktmbuf_free(first_seg);
					first_seg = NULL;
					eop = false;
					goto err_set;
				}
			}
		}
		/* Populate the mbuf */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;
		mbuf->pkt_len = data_len;

		if (rxq->saved_mbuf) {
			first_seg = rxq->saved_mbuf;
			rxq->saved_mbuf = NULL;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0) {
				rte_pktmbuf_free(first_seg);
				first_seg = NULL;
				rte_pktmbuf_free(mbuf);
				rxq->saved_mbuf = NULL;
				rxq->errors++;
				eop = false;
				break;
			}
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			first_seg->hash.rss =
				rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
				RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN;
				first_seg->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							RX_NORMAL_DESC0, OVT);
				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					first_seg->ol_flags |=
						RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					first_seg->ol_flags &=
						~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				first_seg->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN |
					  RTE_MBUF_F_RX_VLAN_STRIPPED);
				first_seg->vlan_tci = 0;
			}
		}

err_set:
		rxq->cur++;
		rxq->sw_ring[idx] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);

		if (!eop)
			goto next_desc;
		eop = false;

		rxq->bytes += pkt_len;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			first_seg->ol_flags = 0;
			first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_IP_CKSUM_BAD;
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status
						== AXGBE_L4_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up the receive context for a new packet */
		first_seg = NULL;
	}

	/* Save state if a packet is still in flight when we leave */
	if (first_seg != NULL && !eop)
		rxq->saved_mbuf = first_seg;

	rxq->pkts += nb_rx;

	if (rxq->dirty != rxq->cur) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
		rxq->dirty = rxq->cur;
	}
	return nb_rx;
}
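
/*
 * Worked example (illustrative only): segment sizing in the scattered
 * path above. Every non-final descriptor carries exactly buf_size bytes
 * and the final one carries the remainder, e.g. a 5000-byte frame with
 * 2048-byte buffers splits into 2048 + 2048 + 904. The helper mirrors
 * the exact-multiple special case handled above.
 */
static inline uint16_t
example_axgbe_last_seg_len(uint16_t pkt_len, uint16_t buf_size)
{
	uint16_t rem = pkt_len % buf_size;

	/* e.g. 5000 % 2048 = 904; 4096 % 2048 = 0 -> full 2048-byte seg */
	return (rem == 0 && pkt_len != 0) ? buf_size : rem;
}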

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++)
				rte_pktmbuf_free(sw_ring[i]);
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2,
	 * at least AXGBE_MIN_RING_DESC, and must not exceed what the
	 * hardware supports.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the Tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* In the vector Tx path the ring size must be a multiple of the
	 * free threshold; fall back to the scalar path otherwise.
	 */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (offloads != 0)
		txq->vector_disable = 1;

	/* Allocate Tx ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if (txq->vector_disable ||
	    rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
	else
#ifdef RTE_ARCH_X86
		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

	return 0;
}
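
/*
 * Usage sketch (illustrative, not part of the driver): the validation
 * above means callers must request a power-of-2 ring size within the
 * hardware limits. 'port_id' is assumed to be a configured axgbe port;
 * passing NULL for tx_conf selects the driver defaults.
 */
static inline int
example_axgbe_tx_setup(uint16_t port_id)
{
	/* 512 is a power of two and at least AXGBE_MIN_RING_DESC */
	return rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      NULL);
}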

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
		char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
			AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
			AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
			AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* account for the terminating '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
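
/*
 * Usage sketch (illustrative only): the return contract above follows
 * the ethdev convention - 0 on success, or the required buffer size
 * (including the terminating '\0') when fw_size is too small, so a
 * caller can retry with a larger buffer. Assumes stdio.h for printf().
 */
static inline void
example_axgbe_fw_version(uint16_t port_id)
{
	char ver[32];

	if (rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver)) == 0)
		printf("firmware: %s\n", ver);
}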

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx FIFO. Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare each Tx DMA channel for stopping */
	for (i = 0; i < pdata->tx_q_count; i++)
		axgbe_prepare_tx_stop(pdata, i);
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable the Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Free mbufs whose Tx descriptors have been completed by hardware */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free the mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}
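
/*
 * Illustrative sketch (not part of the driver): 'cur' and 'dirty' are
 * free-running counters, so the number of in-flight descriptors is
 * (cur - dirty) even across wraparound, and axgbe_xmit_pkts() below
 * derives the free count as nb_desc minus that difference.
 */
static inline uint16_t
example_axgbe_tx_free_count(uint64_t cur, uint64_t dirty, uint16_t nb_desc)
{
	/* e.g. nb_desc = 512, cur = 1000, dirty = 900 -> 412 free */
	return (uint16_t)(nb_desc - (cur - dirty));
}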

/* Tx descriptor formation.
 * Assumes each mbuf is linear and occupies exactly one descriptor.
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(mbuf);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   mbuf->pkt_len);
	/* Total message length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   mbuf->pkt_len);
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as the First and Last descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}
	rte_wmb();

	/* Set the OWN bit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	/* Save the mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update the current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

/* Tx burst wrapper registered with the ethdev layer */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update the tail register with the next immediate address to
	 * kick the Tx DMA channel.
	 */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}
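
/*
 * Usage sketch (illustrative, not part of the driver): axgbe_xmit_pkts()
 * is reached through rte_eth_tx_burst() and may accept fewer packets
 * than offered when descriptors run short, so callers typically retry
 * the unsent tail. Queue 0 is assumed here.
 */
static inline void
example_axgbe_tx_send(uint16_t port_id, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n)
		sent += rte_eth_tx_burst(port_id, 0, pkts + sent, n - sent);
}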

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* Wrap the offset into the ring */
	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur + offset);
	desc = &rxq->desc[idx];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}
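
/*
 * Usage sketch (illustrative only): applications reach the handler above
 * through the generic ethdev wrapper, which returns
 * RTE_ETH_RX_DESC_AVAIL, RTE_ETH_RX_DESC_DONE, RTE_ETH_RX_DESC_UNAVAIL
 * or a negative errno.
 */
static inline int
example_axgbe_rx_desc_done(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset)
{
	return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
		RTE_ETH_RX_DESC_DONE;
}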

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	/* Wrap the offset into the ring */
	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1 +
				 offset);
	desc = &txq->desc[idx];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}
986