/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++)
				rte_pktmbuf_free(sw_ring[i]);
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * must not exceed the h/w supported maximum.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC stripping in AXGBE is configured per port, not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
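	/* Use the caller's rx_free_thresh when provided, otherwise the
	 * driver default; if it exceeds the ring size, fall back to 1/8
	 * of the ring.
	 */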
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG_LINE(ERR, "rte_eth_dma_zone_reserve for rx_ring failed");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG_LINE(ERR, "rte_zmalloc for sw_ring failed");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx FIFO. Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG_LINE(ERR,
			    "timed out waiting for Rx queue %u to empty",
			    queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		axgbe_prepare_rx_stop(pdata, i);
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	reg_val = 0;
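	/* Each queue has a 2-bit RXQEN field in MAC_RQC0R; 0x2 enables the
	 * queue for generic (DCB) traffic, per the XGMAC register layout
	 * this driver uses.
	 */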
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frames are forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

/* Rx burst function: refreshes one descriptor per packet received */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;

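	/* rxq->cur is a free-running counter; AXGBE_GET_DESC_IDX reduces it
	 * to a ring index (the ring size is validated to be a power of 2 at
	 * queue setup).
	 */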
	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

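		/* A set OWN bit means the DMA engine still owns this
		 * descriptor and has not written a packet back yet; end the
		 * burst here.
		 */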
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[
				rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free the mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
				RX_NORMAL_DESC3, ETLT);
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							RX_NORMAL_DESC0, OVT);
				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* CDA indicates a context descriptor (with the timestamp) follows */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
					| RTE_MBUF_F_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Populate the mbuf */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
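	/* If any descriptors were refilled, publish them to the hardware:
	 * the write barrier orders the OWN-bit updates before the tail
	 * pointer write that tells the DMA engine new buffers are available.
	 */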
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}

uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err = 0, etlt;
	uint32_t error_status = 0;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	bool eop = false;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

	while (nb_rx < nb_pkts) {
next_desc:
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = false;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = true;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, PL) - rxq->crc_len;
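			/* Earlier segments each filled a full buf_size
			 * buffer; the last descriptor holds the remainder
			 * of the packet.
			 */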
			data_len = pkt_len % rxq->buf_size;
			/* Check for any errors and free the mbuf */
			err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, ES);
			error_status = 0;
			if (unlikely(err)) {
				error_status = desc->write.desc3 &
					AXGBE_ERR_STATUS;
				if (error_status != AXGBE_L3_CSUM_ERR &&
						error_status != AXGBE_L4_CSUM_ERR) {
					rxq->errors++;
					rte_pktmbuf_free(mbuf);
					rte_pktmbuf_free(first_seg);
					first_seg = NULL;
					eop = false;
					goto err_set;
				}
			}
		}
		/* Populate the mbuf */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;
		mbuf->pkt_len = data_len;

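		/* Resume a packet whose earlier segments arrived in a
		 * previous burst; it was parked in rxq->saved_mbuf when that
		 * burst ended mid-packet.
		 */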
		if (rxq->saved_mbuf) {
			first_seg = rxq->saved_mbuf;
			rxq->saved_mbuf = NULL;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0) {
				rte_pktmbuf_free(first_seg);
				first_seg = NULL;
				rte_pktmbuf_free(mbuf);
				rxq->saved_mbuf = NULL;
				rxq->errors++;
				eop = false;
				break;
			}
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			first_seg->hash.rss =
				rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
				RX_NORMAL_DESC3, ETLT);
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN;
				first_seg->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							RX_NORMAL_DESC0, OVT);
				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					first_seg->ol_flags |=
						RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					first_seg->ol_flags &=
						~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				first_seg->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN |
							RTE_MBUF_F_RX_VLAN_STRIPPED);
				first_seg->vlan_tci = 0;
			}
		}

err_set:
		rxq->cur++;
		rxq->sw_ring[idx] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);

		if (!eop)
			goto next_desc;
		eop = false;

		rxq->bytes += pkt_len;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			first_seg->ol_flags = 0;
			first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_IP_CKSUM_BAD;
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status
						== AXGBE_L4_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up the receive context for a new packet */
		first_seg = NULL;
	}

	/* Check if we need to save state before leaving */
	if (first_seg != NULL && !eop)
		rxq->saved_mbuf = first_seg;

	/* Save the receive context */
	rxq->pkts += nb_rx;

	if (rxq->dirty != rxq->cur) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
		rxq->dirty = rxq->cur;
	}
	return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++)
				rte_pktmbuf_free(sw_ring[i]);
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;
	struct rte_eth_dev_data *dev_data = dev->data;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2, must
	 * not exceed the h/w supported maximum and must not be smaller
	 * than the minimum ring size.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the Tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* For the vector Tx path, the ring size must be a multiple of the
	 * free threshold; otherwise fall back to the scalar path.
	 */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (offloads != 0)
		txq->vector_disable = 1;

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->offloads = offloads;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if ((dev_data->dev_conf.txmode.offloads &
				RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
		pdata->multi_segs_tx = true;

	if ((dev_data->dev_conf.txmode.offloads &
				RTE_ETH_TX_OFFLOAD_TCP_TSO))
		pdata->tso_tx = true;

	return 0;
}

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
		char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
			AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
			AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
			AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx FIFO. Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG_LINE(ERR,
			    "timed out waiting for Tx queue %u to empty",
			    queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG_LINE(ERR,
			    "timed out waiting for Tx DMA channel %u to stop",
			    queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		axgbe_prepare_tx_stop(pdata, i);
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Free mbuf segments of completed Tx descriptors */
static void
axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free the mbuf segment */
		rte_pktmbuf_free_seg(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Free mbufs of completed Tx descriptors */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free the mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx descriptor formation: assumes the mbuf is linear and, except for
 * TSO, that each mbuf requires a single descriptor.
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;
	int start_index;
	uint64_t l2_len = 0;
	uint64_t l3_len = 0;
	uint64_t l4_len = 0;
	uint64_t tso_segz = 0;
	uint64_t total_hdr_len;
	int tso = 0;

	/* Parameters required for TSO */
	l2_len = mbuf->l2_len;
	l3_len = mbuf->l3_len;
	l4_len = mbuf->l4_len;
	total_hdr_len = l2_len + l3_len + l4_len;
	tso_segz = mbuf->tso_segsz;

	if (txq->pdata->tso_tx)
		tso = 1;
	else
		tso = 0;

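	/* Program the TSO maximum segment size into the per-channel control
	 * register; the value comes from the mbuf and is zero when TSO is
	 * not requested.
	 */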
	AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, tso_segz);

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Save the start index for setting the OWN bit at the end */
	start_index = idx;
	if (tso) {
		/* Update the buffer address and length */
		desc->baddr = rte_mbuf_data_iova(mbuf);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				total_hdr_len);
	} else {
		/* Update the buffer address and length */
		desc->baddr = rte_mbuf_data_iova(mbuf);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				mbuf->pkt_len);
		/* Total msg length to transmit */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
				mbuf->pkt_len);
	}
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as the First Descriptor (LD is set below) */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
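	/* TSO uses two descriptors here: the first carries the packet
	 * headers (HL_B1L = total header length), the second the payload.
	 * TPL is the total payload length and THL the L4 header length in
	 * 32-bit words, per the descriptor layout used by this driver.
	 */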
	if (tso) {
		/* Register settings for TSO */
		/* Enable TSO */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
				((mbuf->pkt_len) - total_hdr_len));
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
				(l4_len / 4));
		rte_wmb();
		txq->cur++;
		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
		desc = &txq->desc[idx];
		desc->baddr = rte_mbuf_data_iova(mbuf);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				(mbuf->pkt_len) - total_hdr_len);
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
		/* Mark it as a NORMAL descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	}
	/* Configure h/w checksum offload */
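	/* CIC = 0x3 asks the MAC to insert both the IP header checksum and
	 * the L4 (TCP/UDP) checksum; CIC = 0x1 inserts the IP header
	 * checksum only.
	 */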
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

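	/* Hardware VLAN insertion: the tag is written into the descriptor
	 * and VTIR tells the MAC to insert it into the outgoing frame.
	 */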
	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				  CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				  VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				  VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}

	if (!tso) {
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();
	} else {
		/* Set the OWN bit for the first descriptor */
		desc = &txq->desc[start_index];
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();
	}

	/* Save the mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update the current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

/* Tx descriptor formation for a segmented mbuf,
 * which requires multiple descriptors (one per segment)
 */
static int
axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
		struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;
	int start_index;
	uint32_t pkt_len = 0;
	int nb_desc_free;
	struct rte_mbuf *tx_pkt;
	uint32_t tso = 0;

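	/* A segmented mbuf needs one descriptor per segment; reclaim
	 * completed descriptors first if the chain would not fit.
	 */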
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (mbuf->nb_segs > nb_desc_free) {
		axgbe_xmit_cleanup_seg(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(mbuf->nb_segs > nb_desc_free))
			return RTE_ETH_TX_DESC_UNAVAIL;
	}

	if (txq->pdata->tso_tx)
		tso = 1;
	else
		tso = 0;

	if (tso) {
		axgbe_xmit_hw(txq, mbuf);
	} else {
		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
		desc = &txq->desc[idx];
		/* Save the start index for setting the OWN bit at the end */
		start_index = idx;
		tx_pkt = mbuf;
		/* Max pkt len = 9018; update as needed for jumbo packet sizes */
		pkt_len = tx_pkt->pkt_len;

		/* Update the buffer address and length */
		desc->baddr = rte_mbuf_data_iova(tx_pkt);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				tx_pkt->data_len);
		/* Total msg length to transmit */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
				tx_pkt->pkt_len);
		/* Timestamp enablement check */
		if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
			AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
		rte_wmb();
		/* Mark it as the First Descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
		/* Mark it as a NORMAL descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
		/* Configure h/w checksum offload */
		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
		if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
		rte_wmb();
		if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
			/* Mark it as a CONTEXT descriptor */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
					CTXT, 1);
			/* Set the VLAN tag */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
					VT, mbuf->vlan_tci);
			/* Indicate this descriptor contains the VLAN tag */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
					VLTV, 1);
			AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
					TX_NORMAL_DESC2_VLAN_INSERT);
		} else {
			AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
		}
		rte_wmb();
		/* Save the mbuf */
		txq->sw_ring[idx] = tx_pkt;
		/* Update the current index */
		txq->cur++;
		tx_pkt = tx_pkt->next;
		while (tx_pkt != NULL) {
			idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
			desc = &txq->desc[idx];
			/* Update the buffer address and length */
			desc->baddr = rte_mbuf_data_iova(tx_pkt);
			AXGMAC_SET_BITS_LE(desc->desc2,
					TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
			rte_wmb();
			/* Mark it as a NORMAL descriptor */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
			/* Configure h/w checksum offload */
			mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
			if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
					mask == RTE_MBUF_F_TX_UDP_CKSUM)
				AXGMAC_SET_BITS_LE(desc->desc3,
						TX_NORMAL_DESC3, CIC, 0x3);
			else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
				AXGMAC_SET_BITS_LE(desc->desc3,
						TX_NORMAL_DESC3, CIC, 0x1);
			rte_wmb();
			/* Set the OWN bit */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			/* Save the mbuf */
			txq->sw_ring[idx] = tx_pkt;
			/* Update the current index */
			txq->cur++;
			tx_pkt = tx_pkt->next;
		}

		/* Set the LD bit for the last descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
		rte_wmb();

		/* Update stats */
		txq->bytes += pkt_len;

		/* Set the OWN bit for the first descriptor */
		desc = &txq->desc[start_index];
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();
	}
	return 0;
}

/* EAL-supported Tx wrapper - segmented */
uint16_t
axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf = NULL;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	txq = (struct axgbe_tx_queue *)tx_queue;

	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup_seg(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}

	while (nb_pkts--) {
		mbuf = *tx_pkts++;

		if (axgbe_xmit_hw_seg(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update the tail reg with the next immediate address to kick the Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

/* EAL-supported Tx wrapper */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
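	/* This path uses one descriptor per packet, so never accept more
	 * packets than there are free descriptors.
	 */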
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update the tail reg with the next immediate address to kick the Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}