1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <sys/queue.h>
7 
8 #include <stdint.h>
9 #include <rte_ethdev.h>
10 #include <ethdev_driver.h>
11 #include <rte_malloc.h>
12 #include <rte_net.h>
13 #include <rte_vect.h>
14 
15 #include "ngbe_logs.h"
16 #include "base/ngbe.h"
17 #include "ngbe_ethdev.h"
18 #include "ngbe_rxtx.h"
19 
20 #ifdef RTE_LIBRTE_IEEE1588
21 #define NGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
22 #else
23 #define NGBE_TX_IEEE1588_TMST 0
24 #endif
25 
26 /* Bit mask indicating which bits are required for building Tx context */
27 static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
28 		RTE_MBUF_F_TX_IPV6 |
29 		RTE_MBUF_F_TX_IPV4 |
30 		RTE_MBUF_F_TX_VLAN |
31 		RTE_MBUF_F_TX_L4_MASK |
32 		RTE_MBUF_F_TX_TCP_SEG |
33 		NGBE_TX_IEEE1588_TMST);
34 
35 #define NGBE_TX_OFFLOAD_NOTSUP_MASK \
36 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ NGBE_TX_OFFLOAD_MASK)
37 
38 /*
39  * Prefetch a cache line into all cache levels.
40  */
41 #define rte_ngbe_prefetch(p)   rte_prefetch0(p)
42 
43 /*********************************************************************
44  *
45  *  Tx functions
46  *
47  **********************************************************************/
48 
49 /*
50  * Check for descriptors with their DD bit set and free mbufs.
51  * Return the total number of buffers freed.
52  */
53 static __rte_always_inline int
54 ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
55 {
56 	struct ngbe_tx_entry *txep;
57 	uint32_t status;
58 	int i, nb_free = 0;
59 	struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];
60 
61 	/* check DD bit on threshold descriptor */
62 	status = txq->tx_ring[txq->tx_next_dd].dw3;
63 	if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
64 		if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
65 			ngbe_set32_masked(txq->tdc_reg_addr,
66 				NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
67 		return 0;
68 	}
69 
70 	/*
71 	 * first buffer to free from S/W ring is at index
72 	 * tx_next_dd - (tx_free_thresh-1)
73 	 */
74 	txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
75 	for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
76 		/* free buffers one at a time */
77 		m = rte_pktmbuf_prefree_seg(txep->mbuf);
78 		txep->mbuf = NULL;
79 
80 		if (unlikely(m == NULL))
81 			continue;
82 
83 		if (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||
84 		    (nb_free > 0 && m->pool != free[0]->pool)) {
85 			rte_mempool_put_bulk(free[0]->pool,
86 					     (void **)free, nb_free);
87 			nb_free = 0;
88 		}
89 
90 		free[nb_free++] = m;
91 	}
92 
93 	if (nb_free > 0)
94 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
95 
96 	/* buffers were freed, update counters */
97 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
98 	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
99 	if (txq->tx_next_dd >= txq->nb_tx_desc)
100 		txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
101 
102 	return txq->tx_free_thresh;
103 }
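
/*
 * Note on the recycling scheme above: buffers are reclaimed only in whole
 * chunks of tx_free_thresh descriptors, keyed off the DD bit of the single
 * "threshold" descriptor at tx_next_dd. Freed mbufs are batched per mempool
 * and returned with rte_mempool_put_bulk() to amortize the mempool cost.
 */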
104 
105 /* Populate 4 descriptors with data from 4 mbufs */
106 static inline void
107 tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
108 {
109 	uint64_t buf_dma_addr;
110 	uint32_t pkt_len;
111 	int i;
112 
113 	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
114 		buf_dma_addr = rte_mbuf_data_iova(*pkts);
115 		pkt_len = (*pkts)->data_len;
116 
117 		/* write data to descriptor */
118 		txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
119 		txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
120 					NGBE_TXD_DATLEN(pkt_len));
121 		txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
122 
123 		rte_prefetch0(&(*pkts)->pool);
124 	}
125 }
126 
127 /* Populate 1 descriptor with data from 1 mbuf */
128 static inline void
129 tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
130 {
131 	uint64_t buf_dma_addr;
132 	uint32_t pkt_len;
133 
134 	buf_dma_addr = rte_mbuf_data_iova(*pkts);
135 	pkt_len = (*pkts)->data_len;
136 
137 	/* write data to descriptor */
138 	txdp->qw0 = cpu_to_le64(buf_dma_addr);
139 	txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
140 				NGBE_TXD_DATLEN(pkt_len));
141 	txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
142 
143 	rte_prefetch0(&(*pkts)->pool);
144 }
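
/*
 * Both tx4() and tx1() fill a data descriptor the same way: qw0 holds the
 * buffer DMA address, dw2 carries NGBE_TXD_FLAGS plus the data length, and
 * dw3 carries the payload length reported to the hardware.
 */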
145 
146 /*
147  * Fill H/W descriptor ring with mbuf data.
148  * Copy mbuf pointers to the S/W ring.
149  */
150 static inline void
151 ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,
152 		      uint16_t nb_pkts)
153 {
154 	volatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
155 	struct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
156 	const int N_PER_LOOP = 4;
157 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
158 	int mainpart, leftover;
159 	int i, j;
160 
161 	/*
162 	 * Process most of the packets in chunks of N pkts.  Any
163 	 * leftover packets will get processed one at a time.
164 	 */
165 	mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
166 	leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
167 	for (i = 0; i < mainpart; i += N_PER_LOOP) {
168 		/* Copy N mbuf pointers to the S/W ring */
169 		for (j = 0; j < N_PER_LOOP; ++j)
170 			(txep + i + j)->mbuf = *(pkts + i + j);
171 		tx4(txdp + i, pkts + i);
172 	}
173 
174 	if (unlikely(leftover > 0)) {
175 		for (i = 0; i < leftover; ++i) {
176 			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
177 			tx1(txdp + mainpart + i, pkts + mainpart + i);
178 		}
179 	}
180 }
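
/*
 * Example of the mainpart/leftover split above: with nb_pkts = 13 and
 * N_PER_LOOP = 4, mainpart = 12 (three unrolled tx4() iterations) and
 * leftover = 1 (a single trailing tx1() call).
 */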
181 
182 static inline uint16_t
183 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
184 	     uint16_t nb_pkts)
185 {
186 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
187 	uint16_t n = 0;
188 
189 	/*
190 	 * Begin scanning the H/W ring for done descriptors when the
191 	 * number of available descriptors drops below tx_free_thresh.
192 	 * For each done descriptor, free the associated buffer.
193 	 */
194 	if (txq->nb_tx_free < txq->tx_free_thresh)
195 		ngbe_tx_free_bufs(txq);
196 
197 	/* Only use descriptors that are available */
198 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
199 	if (unlikely(nb_pkts == 0))
200 		return 0;
201 
202 	/* Use exactly nb_pkts descriptors */
203 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
204 
205 	/*
206 	 * At this point, we know there are enough descriptors in the
207 	 * ring to transmit all the packets.  This assumes that each
208 	 * mbuf contains a single segment, and that no new offloads
209 	 * are expected, which would require a new context descriptor.
210 	 */
211 
212 	/*
213 	 * See if we're going to wrap-around. If so, handle the top
214 	 * of the descriptor ring first, then do the bottom.  If not,
215 	 * the processing looks just like the "bottom" part anyway...
216 	 */
217 	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
218 		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
219 		ngbe_tx_fill_hw_ring(txq, tx_pkts, n);
220 		txq->tx_tail = 0;
221 	}
222 
223 	/* Fill H/W descriptor ring with mbuf data */
224 	ngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
225 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
226 
227 	/*
228 	 * Check for wrap-around. This would only happen if we used
229 	 * up to the last descriptor in the ring, no more, no less.
230 	 */
231 	if (txq->tx_tail >= txq->nb_tx_desc)
232 		txq->tx_tail = 0;
233 
234 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
235 		   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
236 		   (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
237 
238 	/* update tail pointer */
239 	rte_wmb();
240 	ngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
241 
242 	return nb_pkts;
243 }
244 
245 uint16_t
246 ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
247 		       uint16_t nb_pkts)
248 {
249 	uint16_t nb_tx;
250 
251 	/* Transmit the whole burst at once if it does not exceed TX_MAX_BURST pkts */
252 	if (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))
253 		return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
254 
255 	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
256 	nb_tx = 0;
257 	while (nb_pkts != 0) {
258 		uint16_t ret, n;
259 
260 		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);
261 		ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
262 		nb_tx = (uint16_t)(nb_tx + ret);
263 		nb_pkts = (uint16_t)(nb_pkts - ret);
264 		if (ret < n)
265 			break;
266 	}
267 
268 	return nb_tx;
269 }
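
/*
 * Illustrative usage (not part of the driver): applications normally reach
 * this burst function indirectly through the ethdev API once it has been
 * selected by ngbe_set_tx_function(), e.g.
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 *
 * where port_id/queue_id are assumed to identify a Tx queue configured
 * without offloads so that the simple path is in use.
 */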
270 
271 static uint16_t
272 ngbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
273 		   uint16_t nb_pkts)
274 {
275 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
276 	uint16_t nb_tx = 0;
277 
278 	while (nb_pkts) {
279 		uint16_t ret, num;
280 
281 		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_free_thresh);
282 		ret = ngbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
283 		nb_tx += ret;
284 		nb_pkts -= ret;
285 		if (ret < num)
286 			break;
287 	}
288 
289 	return nb_tx;
290 }
291 
292 static inline void
293 ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
294 		volatile struct ngbe_tx_ctx_desc *ctx_txd,
295 		uint64_t ol_flags, union ngbe_tx_offload tx_offload)
296 {
297 	union ngbe_tx_offload tx_offload_mask;
298 	uint32_t type_tucmd_mlhl;
299 	uint32_t mss_l4len_idx;
300 	uint32_t ctx_idx;
301 	uint32_t vlan_macip_lens;
302 	uint32_t tunnel_seed;
303 
304 	ctx_idx = txq->ctx_curr;
305 	tx_offload_mask.data[0] = 0;
306 	tx_offload_mask.data[1] = 0;
307 
308 	/* Specify which HW CTX to upload. */
309 	mss_l4len_idx = NGBE_TXD_IDX(ctx_idx);
310 	type_tucmd_mlhl = NGBE_TXD_CTXT;
311 
312 	tx_offload_mask.ptid |= ~0;
313 	type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid);
314 
315 	/* check if TCP segmentation required for this packet */
316 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
317 		tx_offload_mask.l2_len |= ~0;
318 		tx_offload_mask.l3_len |= ~0;
319 		tx_offload_mask.l4_len |= ~0;
320 		tx_offload_mask.tso_segsz |= ~0;
321 		mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz);
322 		mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len);
323 	} else { /* no TSO, check if hardware checksum is needed */
324 		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
325 			tx_offload_mask.l2_len |= ~0;
326 			tx_offload_mask.l3_len |= ~0;
327 		}
328 
329 		switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
330 		case RTE_MBUF_F_TX_UDP_CKSUM:
331 			mss_l4len_idx |=
332 				NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
333 			tx_offload_mask.l2_len |= ~0;
334 			tx_offload_mask.l3_len |= ~0;
335 			break;
336 		case RTE_MBUF_F_TX_TCP_CKSUM:
337 			mss_l4len_idx |=
338 				NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
339 			tx_offload_mask.l2_len |= ~0;
340 			tx_offload_mask.l3_len |= ~0;
341 			break;
342 		case RTE_MBUF_F_TX_SCTP_CKSUM:
343 			mss_l4len_idx |=
344 				NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
345 			tx_offload_mask.l2_len |= ~0;
346 			tx_offload_mask.l3_len |= ~0;
347 			break;
348 		default:
349 			break;
350 		}
351 	}
352 
353 	vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
354 	vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
355 
356 	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
357 		tx_offload_mask.vlan_tci |= ~0;
358 		vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
359 	}
360 
361 	tunnel_seed = 0;
362 
363 	txq->ctx_cache[ctx_idx].flags = ol_flags;
364 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
365 		tx_offload_mask.data[0] & tx_offload.data[0];
366 	txq->ctx_cache[ctx_idx].tx_offload.data[1] =
367 		tx_offload_mask.data[1] & tx_offload.data[1];
368 	txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
369 
370 	ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
371 	ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
372 	ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
373 	ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
374 }
375 
376 /*
377  * Check which hardware context can be used. Use the existing match
378  * or create a new context descriptor.
379  */
380 static inline uint32_t
381 what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags,
382 		   union ngbe_tx_offload tx_offload)
383 {
384 	/* Check if it matches the currently used context */
385 	if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
386 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
387 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
388 		     & tx_offload.data[0])) &&
389 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
390 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
391 		     & tx_offload.data[1]))))
392 		return txq->ctx_curr;
393 
394 	/* Otherwise, check if it matches the next context */
395 	txq->ctx_curr ^= 1;
396 	if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
397 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
398 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
399 		     & tx_offload.data[0])) &&
400 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
401 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
402 		     & tx_offload.data[1]))))
403 		return txq->ctx_curr;
404 
405 	/* Mismatch: a new context descriptor must be set up */
406 	return NGBE_CTX_NUM;
407 }
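
/*
 * The cache above tracks the NGBE_CTX_NUM (two) contexts that can be
 * programmed per Tx queue: ctx_curr simply toggles between 0 and 1, so two
 * interleaved traffic patterns (for example VLAN-tagged and untagged flows)
 * can keep reusing their respective context descriptors instead of writing
 * a new one for every packet.
 */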
408 
409 static inline uint32_t
410 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
411 {
412 	uint32_t tmp = 0;
413 
414 	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
415 		tmp |= NGBE_TXD_CC;
416 		tmp |= NGBE_TXD_L4CS;
417 	}
418 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
419 		tmp |= NGBE_TXD_CC;
420 		tmp |= NGBE_TXD_IPCS;
421 	}
422 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
423 		tmp |= NGBE_TXD_CC;
424 		tmp |= NGBE_TXD_EIPCS;
425 	}
426 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
427 		tmp |= NGBE_TXD_CC;
428 		/* implies IPv4 cksum */
429 		if (ol_flags & RTE_MBUF_F_TX_IPV4)
430 			tmp |= NGBE_TXD_IPCS;
431 		tmp |= NGBE_TXD_L4CS;
432 	}
433 	if (ol_flags & RTE_MBUF_F_TX_VLAN)
434 		tmp |= NGBE_TXD_CC;
435 
436 	return tmp;
437 }
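
/*
 * Worked example for the translation above: ol_flags requesting
 * RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM yield
 * NGBE_TXD_CC | NGBE_TXD_IPCS | NGBE_TXD_L4CS, i.e. the descriptor
 * references a context and asks hardware to insert both the IPv4 and the
 * L4 checksum.
 */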
438 
439 static inline uint32_t
440 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
441 {
442 	uint32_t cmdtype = 0;
443 
444 	if (ol_flags & RTE_MBUF_F_TX_VLAN)
445 		cmdtype |= NGBE_TXD_VLE;
446 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
447 		cmdtype |= NGBE_TXD_TSE;
448 	return cmdtype;
449 }
450 
451 static inline uint32_t
452 tx_desc_ol_flags_to_ptype(uint64_t oflags)
453 {
454 	uint32_t ptype;
455 
456 	/* L2 level */
457 	ptype = RTE_PTYPE_L2_ETHER;
458 	if (oflags & RTE_MBUF_F_TX_VLAN)
459 		ptype |= RTE_PTYPE_L2_ETHER_VLAN;
460 
461 	/* L3 level */
462 	if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
463 		ptype |= RTE_PTYPE_L3_IPV4;
464 	else if (oflags & (RTE_MBUF_F_TX_IPV6))
465 		ptype |= RTE_PTYPE_L3_IPV6;
466 
467 	/* L4 level */
468 	switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
469 	case RTE_MBUF_F_TX_TCP_CKSUM:
470 		ptype |= RTE_PTYPE_L4_TCP;
471 		break;
472 	case RTE_MBUF_F_TX_UDP_CKSUM:
473 		ptype |= RTE_PTYPE_L4_UDP;
474 		break;
475 	case RTE_MBUF_F_TX_SCTP_CKSUM:
476 		ptype |= RTE_PTYPE_L4_SCTP;
477 		break;
478 	}
479 
480 	if (oflags & RTE_MBUF_F_TX_TCP_SEG)
481 		ptype |= RTE_PTYPE_L4_TCP;
482 
483 	return ptype;
484 }
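
/*
 * Worked example for the translation above: ol_flags of
 * RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM map to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, which
 * tx_desc_ol_flags_to_ptid() then compresses into the hardware packet-type
 * id via ngbe_encode_ptype().
 */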
485 
486 static inline uint8_t
487 tx_desc_ol_flags_to_ptid(uint64_t oflags)
488 {
489 	uint32_t ptype;
490 
491 	ptype = tx_desc_ol_flags_to_ptype(oflags);
492 
493 	return ngbe_encode_ptype(ptype);
494 }
495 
496 /* Reset transmit descriptors after they have been used */
497 static inline int
498 ngbe_xmit_cleanup(struct ngbe_tx_queue *txq)
499 {
500 	struct ngbe_tx_entry *sw_ring = txq->sw_ring;
501 	volatile struct ngbe_tx_desc *txr = txq->tx_ring;
502 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
503 	uint16_t nb_tx_desc = txq->nb_tx_desc;
504 	uint16_t desc_to_clean_to;
505 	uint16_t nb_tx_to_clean;
506 	uint32_t status;
507 
508 	/* Determine the last descriptor needing to be cleaned */
509 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
510 	if (desc_to_clean_to >= nb_tx_desc)
511 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
512 
513 	/* Check to make sure the last descriptor to clean is done */
514 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
515 	status = txr[desc_to_clean_to].dw3;
516 	if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
517 		PMD_TX_LOG(DEBUG,
518 			"Tx descriptor %4u is not done "
519 			"(port=%d queue=%d)",
520 			desc_to_clean_to,
521 			txq->port_id, txq->queue_id);
522 		if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
523 			ngbe_set32_masked(txq->tdc_reg_addr,
524 				NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
525 		/* Failed to clean any descriptors, better luck next time */
526 		return -(1);
527 	}
528 
529 	/* Figure out how many descriptors will be cleaned */
530 	if (last_desc_cleaned > desc_to_clean_to)
531 		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
532 							desc_to_clean_to);
533 	else
534 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
535 						last_desc_cleaned);
536 
537 	PMD_TX_LOG(DEBUG,
538 		"Cleaning %4u Tx descriptors: %4u to %4u (port=%d queue=%d)",
539 		nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
540 		txq->port_id, txq->queue_id);
541 
542 	/*
543 	 * The last descriptor to clean is done, so that means all the
544 	 * descriptors from the last descriptor that was cleaned
545 	 * up to the last descriptor with the RS bit set
546 	 * are done. Only reset the threshold descriptor.
547 	 */
548 	txr[desc_to_clean_to].dw3 = 0;
549 
550 	/* Update the txq to reflect the last descriptor that was cleaned */
551 	txq->last_desc_cleaned = desc_to_clean_to;
552 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
553 
554 	/* No Error */
555 	return 0;
556 }
557 
558 uint16_t
559 ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
560 		uint16_t nb_pkts)
561 {
562 	struct ngbe_tx_queue *txq;
563 	struct ngbe_tx_entry *sw_ring;
564 	struct ngbe_tx_entry *txe, *txn;
565 	volatile struct ngbe_tx_desc *txr;
566 	volatile struct ngbe_tx_desc *txd;
567 	struct rte_mbuf     *tx_pkt;
568 	struct rte_mbuf     *m_seg;
569 	uint64_t buf_dma_addr;
570 	uint32_t olinfo_status;
571 	uint32_t cmd_type_len;
572 	uint32_t pkt_len;
573 	uint16_t slen;
574 	uint64_t ol_flags;
575 	uint16_t tx_id;
576 	uint16_t tx_last;
577 	uint16_t nb_tx;
578 	uint16_t nb_used;
579 	uint64_t tx_ol_req;
580 	uint32_t ctx = 0;
581 	uint32_t new_ctx;
582 	union ngbe_tx_offload tx_offload;
583 
584 	tx_offload.data[0] = 0;
585 	tx_offload.data[1] = 0;
586 	txq = tx_queue;
587 	sw_ring = txq->sw_ring;
588 	txr     = txq->tx_ring;
589 	tx_id   = txq->tx_tail;
590 	txe = &sw_ring[tx_id];
591 
592 	/* Determine if the descriptor ring needs to be cleaned. */
593 	if (txq->nb_tx_free < txq->tx_free_thresh)
594 		ngbe_xmit_cleanup(txq);
595 
596 	rte_prefetch0(&txe->mbuf->pool);
597 
598 	/* Tx loop */
599 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
600 		new_ctx = 0;
601 		tx_pkt = *tx_pkts++;
602 		pkt_len = tx_pkt->pkt_len;
603 
604 		/*
605 		 * Determine how many (if any) context descriptors
606 		 * are needed for offload functionality.
607 		 */
608 		ol_flags = tx_pkt->ol_flags;
609 
610 		/* If hardware offload required */
611 		tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
612 		if (tx_ol_req) {
613 			tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req);
614 			tx_offload.l2_len = tx_pkt->l2_len;
615 			tx_offload.l3_len = tx_pkt->l3_len;
616 			tx_offload.l4_len = tx_pkt->l4_len;
617 			tx_offload.vlan_tci = tx_pkt->vlan_tci;
618 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
619 
620 			/* Build a new context descriptor or reuse the existing one */
621 			ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
622 			/* Only allocate context descriptor if required */
623 			new_ctx = (ctx == NGBE_CTX_NUM);
624 			ctx = txq->ctx_curr;
625 		}
626 
627 		/*
628 		 * Keep track of how many descriptors are used this loop.
629 		 * This will always be the number of segments + the number of
630 		 * context descriptors required to transmit the packet.
631 		 */
632 		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
633 
634 		/*
635 		 * The number of descriptors that must be allocated for a
636 		 * packet is the number of segments of that packet, plus 1
637 		 * Context Descriptor for the hardware offload, if any.
638 		 * Determine the last Tx descriptor to allocate in the Tx ring
639 		 * for the packet, starting from the current position (tx_id)
640 		 * in the ring.
641 		 */
642 		tx_last = (uint16_t)(tx_id + nb_used - 1);
643 
644 		/* Circular ring */
645 		if (tx_last >= txq->nb_tx_desc)
646 			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
647 
648 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
649 			   " tx_first=%u tx_last=%u",
650 			   (uint16_t)txq->port_id,
651 			   (uint16_t)txq->queue_id,
652 			   (uint32_t)pkt_len,
653 			   (uint16_t)tx_id,
654 			   (uint16_t)tx_last);
655 
656 		/*
657 		 * Make sure there are enough Tx descriptors available to
658 		 * transmit the entire packet.
659 		 * nb_used better be less than or equal to txq->tx_free_thresh
660 		 */
661 		if (nb_used > txq->nb_tx_free) {
662 			PMD_TX_LOG(DEBUG,
663 				"Not enough free Tx descriptors "
664 				"nb_used=%4u nb_free=%4u "
665 				"(port=%d queue=%d)",
666 				nb_used, txq->nb_tx_free,
667 				txq->port_id, txq->queue_id);
668 
669 			if (ngbe_xmit_cleanup(txq) != 0) {
670 				/* Could not clean any descriptors */
671 				if (nb_tx == 0)
672 					return 0;
673 				goto end_of_tx;
674 			}
675 
676 			/* nb_used better be <= txq->tx_free_thresh */
677 			if (unlikely(nb_used > txq->tx_free_thresh)) {
678 				PMD_TX_LOG(DEBUG,
679 					"The number of descriptors needed to "
680 					"transmit the packet exceeds the "
681 					"RS bit threshold. This will impact "
682 					"performance. "
683 					"nb_used=%4u nb_free=%4u "
684 					"tx_free_thresh=%4u. "
685 					"(port=%d queue=%d)",
686 					nb_used, txq->nb_tx_free,
687 					txq->tx_free_thresh,
688 					txq->port_id, txq->queue_id);
689 				/*
690 				 * Loop here until there are enough Tx
691 				 * descriptors or until the ring cannot be
692 				 * cleaned.
693 				 */
694 				while (nb_used > txq->nb_tx_free) {
695 					if (ngbe_xmit_cleanup(txq) != 0) {
696 						/*
697 						 * Could not clean any
698 						 * descriptors
699 						 */
700 						if (nb_tx == 0)
701 							return 0;
702 						goto end_of_tx;
703 					}
704 				}
705 			}
706 		}
707 
708 		/*
709 		 * By now there are enough free Tx descriptors to transmit
710 		 * the packet.
711 		 */
712 
713 		/*
714 		 * Set common flags of all Tx Data Descriptors.
715 		 *
716 		 * The following bits must be set in the first Data Descriptor
717 		 * and are ignored in the other ones:
718 		 *   - NGBE_TXD_FCS
719 		 *
720 		 * The following bits must only be set in the last Data
721 		 * Descriptor:
722 		 *   - NGBE_TXD_EOP
723 		 */
724 		cmd_type_len = NGBE_TXD_FCS;
725 
726 #ifdef RTE_LIBRTE_IEEE1588
727 		if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
728 			cmd_type_len |= NGBE_TXD_1588;
729 #endif
730 
731 		olinfo_status = 0;
732 		if (tx_ol_req) {
733 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
734 				/* when TSO is on, the paylen in the descriptor
735 				 * is not the packet len but the TCP payload len
736 				 */
737 				pkt_len -= (tx_offload.l2_len +
738 					tx_offload.l3_len + tx_offload.l4_len);
739 			}
740 
741 			/*
742 			 * Setup the Tx Context Descriptor if required
743 			 */
744 			if (new_ctx) {
745 				volatile struct ngbe_tx_ctx_desc *ctx_txd;
746 
747 				ctx_txd = (volatile struct ngbe_tx_ctx_desc *)
748 				    &txr[tx_id];
749 
750 				txn = &sw_ring[txe->next_id];
751 				rte_prefetch0(&txn->mbuf->pool);
752 
753 				if (txe->mbuf != NULL) {
754 					rte_pktmbuf_free_seg(txe->mbuf);
755 					txe->mbuf = NULL;
756 				}
757 
758 				ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
759 					tx_offload);
760 
761 				txe->last_id = tx_last;
762 				tx_id = txe->next_id;
763 				txe = txn;
764 			}
765 
766 			/*
767 			 * Set up the Tx Data Descriptor.
768 			 * This path is taken whether the context descriptor
769 			 * is newly built or an existing one is reused.
770 			 */
771 			cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
772 			olinfo_status |=
773 				tx_desc_cksum_flags_to_olinfo(ol_flags);
774 			olinfo_status |= NGBE_TXD_IDX(ctx);
775 		}
776 
777 		olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
778 
779 		m_seg = tx_pkt;
780 		do {
781 			txd = &txr[tx_id];
782 			txn = &sw_ring[txe->next_id];
783 			rte_prefetch0(&txn->mbuf->pool);
784 
785 			if (txe->mbuf != NULL)
786 				rte_pktmbuf_free_seg(txe->mbuf);
787 			txe->mbuf = m_seg;
788 
789 			/*
790 			 * Set up Transmit Data Descriptor.
791 			 */
792 			slen = m_seg->data_len;
793 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
794 			txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
795 			txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
796 			txd->dw3 = rte_cpu_to_le_32(olinfo_status);
797 			txe->last_id = tx_last;
798 			tx_id = txe->next_id;
799 			txe = txn;
800 			m_seg = m_seg->next;
801 		} while (m_seg != NULL);
802 
803 		/*
804 		 * The last packet data descriptor needs End Of Packet (EOP)
805 		 */
806 		cmd_type_len |= NGBE_TXD_EOP;
807 		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
808 
809 		txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
810 	}
811 
812 end_of_tx:
813 
814 	rte_wmb();
815 
816 	/*
817 	 * Set the Transmit Descriptor Tail (TDT)
818 	 */
819 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
820 		   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
821 		   (uint16_t)tx_id, (uint16_t)nb_tx);
822 	ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
823 	txq->tx_tail = tx_id;
824 
825 	return nb_tx;
826 }
827 
828 /*********************************************************************
829  *
830  *  Tx prep functions
831  *
832  **********************************************************************/
833 uint16_t
834 ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
835 {
836 	int i, ret;
837 	uint64_t ol_flags;
838 	struct rte_mbuf *m;
839 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
840 
841 	for (i = 0; i < nb_pkts; i++) {
842 		m = tx_pkts[i];
843 		ol_flags = m->ol_flags;
844 
845 		/**
846 		 * Check if packet meets requirements for number of segments
847 		 *
848 		 * NOTE: for ngbe it's always (40 - WTHRESH) for both TSO and
849 		 *       non-TSO
850 		 */
851 
852 		if (m->nb_segs > NGBE_TX_MAX_SEG - txq->wthresh) {
853 			rte_errno = EINVAL;
854 			return i;
855 		}
856 
857 		if (ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) {
858 			rte_errno = ENOTSUP;
859 			return i;
860 		}
861 
862 #ifdef RTE_ETHDEV_DEBUG_TX
863 		ret = rte_validate_tx_offload(m);
864 		if (ret != 0) {
865 			rte_errno = -ret;
866 			return i;
867 		}
868 #endif
869 		ret = rte_net_intel_cksum_prepare(m);
870 		if (ret != 0) {
871 			rte_errno = -ret;
872 			return i;
873 		}
874 	}
875 
876 	return i;
877 }
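
/*
 * Illustrative usage (not part of the driver): when Tx offloads are enabled,
 * applications are expected to run the prepare stage before transmitting,
 * e.g.
 *
 *   uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *   uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * so that unsupported offload requests are rejected here with rte_errno set
 * instead of being silently mishandled by the hardware.
 */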
878 
879 /*********************************************************************
880  *
881  *  Rx functions
882  *
883  **********************************************************************/
884 static inline uint32_t
885 ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
886 {
887 	uint16_t ptid = NGBE_RXD_PTID(pkt_info);
888 
889 	ptid &= ptid_mask;
890 
891 	return ngbe_decode_ptype(ptid);
892 }
893 
894 static inline uint64_t
895 ngbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
896 {
897 	static alignas(RTE_CACHE_LINE_SIZE) uint64_t ip_rss_types_map[16] = {
898 		0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
899 		0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
900 		RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
901 		0, 0, 0,  RTE_MBUF_F_RX_FDIR,
902 	};
903 #ifdef RTE_LIBRTE_IEEE1588
904 	static uint64_t ip_pkt_etqf_map[8] = {
905 		0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
906 		0, 0, 0, 0,
907 	};
908 	int etfid = ngbe_etflt_id(NGBE_RXD_PTID(pkt_info));
909 	if (likely(-1 != etfid))
910 		return ip_pkt_etqf_map[etfid] |
911 		       ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
912 	else
913 		return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
914 #else
915 	return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
916 #endif
917 }
918 
919 static inline uint64_t
920 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
921 {
922 	uint64_t pkt_flags;
923 
924 	/*
925 	 * Check only whether a VLAN is present.
926 	 * Do not check whether the L3/L4 Rx checksum was done by the NIC;
927 	 * that can be found from the rte_eth_rxmode.offloads flag.
928 	 */
929 	pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
930 		     vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
931 		    ? vlan_flags : 0;
932 
933 #ifdef RTE_LIBRTE_IEEE1588
934 	if (rx_status & NGBE_RXD_STAT_1588)
935 		pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
936 #endif
937 	return pkt_flags;
938 }
939 
940 static inline uint64_t
941 rx_desc_error_to_pkt_flags(uint32_t rx_status)
942 {
943 	uint64_t pkt_flags = 0;
944 
945 	/* checksum offload can't be disabled */
946 	if (rx_status & NGBE_RXD_STAT_IPCS)
947 		pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
948 				? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
949 
950 	if (rx_status & NGBE_RXD_STAT_L4CS)
951 		pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
952 				? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
953 
954 	if (rx_status & NGBE_RXD_STAT_EIPCS &&
955 	    rx_status & NGBE_RXD_ERR_EIPCS)
956 		pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
957 
958 	return pkt_flags;
959 }
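
/*
 * Worked example for the checksum translation above: a status word with
 * NGBE_RXD_STAT_IPCS and NGBE_RXD_STAT_L4CS set and neither error bit set
 * yields RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD; if
 * NGBE_RXD_ERR_IPCS is also set, the IP flag becomes
 * RTE_MBUF_F_RX_IP_CKSUM_BAD instead.
 */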
960 
961 /*
962  * LOOK_AHEAD defines how many desc statuses to check beyond the
963  * current descriptor.
964  * It must be a pound define for optimal performance.
965  * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
966  * function only works with LOOK_AHEAD=8.
967  */
968 #define LOOK_AHEAD 8
969 #if (LOOK_AHEAD != 8)
970 #error "PMD NGBE: LOOK_AHEAD must be 8\n"
971 #endif
972 static inline int
973 ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
974 {
975 	volatile struct ngbe_rx_desc *rxdp;
976 	struct ngbe_rx_entry *rxep;
977 	struct rte_mbuf *mb;
978 	uint16_t pkt_len;
979 	uint64_t pkt_flags;
980 	int nb_dd;
981 	uint32_t s[LOOK_AHEAD];
982 	uint32_t pkt_info[LOOK_AHEAD];
983 	int i, j, nb_rx = 0;
984 	uint32_t status;
985 
986 	/* get references to current descriptor and S/W ring entry */
987 	rxdp = &rxq->rx_ring[rxq->rx_tail];
988 	rxep = &rxq->sw_ring[rxq->rx_tail];
989 
990 	status = rxdp->qw1.lo.status;
991 	/* check to make sure there is at least 1 packet to receive */
992 	if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
993 		return 0;
994 
995 	/*
996 	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
997 	 * reference packets that are ready to be received.
998 	 */
999 	for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
1000 	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1001 		/* Read desc statuses backwards to avoid race condition */
1002 		for (j = 0; j < LOOK_AHEAD; j++)
1003 			s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
1004 
1005 		rte_atomic_thread_fence(rte_memory_order_acquire);
1006 
1007 		/* Compute how many status bits were set */
1008 		for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1009 				(s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
1010 			;
1011 
1012 		for (j = 0; j < nb_dd; j++)
1013 			pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
1014 
1015 		nb_rx += nb_dd;
1016 
1017 		/* Translate descriptor info to mbuf format */
1018 		for (j = 0; j < nb_dd; ++j) {
1019 			mb = rxep[j].mbuf;
1020 			pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
1021 				  rxq->crc_len;
1022 			mb->data_len = pkt_len;
1023 			mb->pkt_len = pkt_len;
1024 			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
1025 
1026 			/* convert descriptor fields to rte mbuf flags */
1027 			pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1028 					rxq->vlan_flags);
1029 			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1030 			pkt_flags |=
1031 				ngbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1032 			mb->ol_flags = pkt_flags;
1033 			mb->packet_type =
1034 				ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
1035 				NGBE_PTID_MASK);
1036 
1037 			if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1038 				mb->hash.rss =
1039 					rte_le_to_cpu_32(rxdp[j].qw0.dw1);
1040 		}
1041 
1042 		/* Move mbuf pointers from the S/W ring to the stage */
1043 		for (j = 0; j < LOOK_AHEAD; ++j)
1044 			rxq->rx_stage[i + j] = rxep[j].mbuf;
1045 
1046 		/* stop if all requested packets could not be received */
1047 		if (nb_dd != LOOK_AHEAD)
1048 			break;
1049 	}
1050 
1051 	/* clear software ring entries so we can cleanup correctly */
1052 	for (i = 0; i < nb_rx; ++i)
1053 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1054 
1055 	return nb_rx;
1056 }
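
/*
 * Scan-ahead behaviour in short: descriptors are examined in groups of
 * LOOK_AHEAD (8); the first group whose DD bits are not all set ends the
 * scan, so at most RTE_PMD_NGBE_RX_MAX_BURST completed descriptors are
 * staged per call, and the corresponding sw_ring entries are cleared since
 * ownership of those mbufs moves to the rx_stage array.
 */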
1057 
1058 static inline int
1059 ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
1060 {
1061 	volatile struct ngbe_rx_desc *rxdp;
1062 	struct ngbe_rx_entry *rxep;
1063 	struct rte_mbuf *mb;
1064 	uint16_t alloc_idx;
1065 	__le64 dma_addr;
1066 	int diag, i;
1067 
1068 	/* allocate buffers in bulk directly into the S/W ring */
1069 	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1070 	rxep = &rxq->sw_ring[alloc_idx];
1071 	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1072 				    rxq->rx_free_thresh);
1073 	if (unlikely(diag != 0))
1074 		return -ENOMEM;
1075 
1076 	rxdp = &rxq->rx_ring[alloc_idx];
1077 	for (i = 0; i < rxq->rx_free_thresh; ++i) {
1078 		/* populate the static rte mbuf fields */
1079 		mb = rxep[i].mbuf;
1080 		if (reset_mbuf)
1081 			mb->port = rxq->port_id;
1082 
1083 		rte_mbuf_refcnt_set(mb, 1);
1084 		mb->data_off = RTE_PKTMBUF_HEADROOM;
1085 
1086 		/* populate the descriptors */
1087 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1088 		NGBE_RXD_HDRADDR(&rxdp[i], 0);
1089 		NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1090 	}
1091 
1092 	/* update state of internal queue structure */
1093 	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1094 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1095 		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1096 
1097 	/* no errors */
1098 	return 0;
1099 }
1100 
1101 static inline uint16_t
1102 ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1103 			 uint16_t nb_pkts)
1104 {
1105 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1106 	int i;
1107 
1108 	/* how many packets are ready to return? */
1109 	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1110 
1111 	/* copy mbuf pointers to the application's packet list */
1112 	for (i = 0; i < nb_pkts; ++i)
1113 		rx_pkts[i] = stage[i];
1114 
1115 	/* update internal queue state */
1116 	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1117 	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1118 
1119 	return nb_pkts;
1120 }
1121 
1122 static inline uint16_t
1123 ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1124 	     uint16_t nb_pkts)
1125 {
1126 	struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
1127 	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1128 	uint16_t nb_rx = 0;
1129 
1130 	/* Any previously recv'd pkts will be returned from the Rx stage */
1131 	if (rxq->rx_nb_avail)
1132 		return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1133 
1134 	/* Scan the H/W ring for packets to receive */
1135 	nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
1136 
1137 	/* update internal queue state */
1138 	rxq->rx_next_avail = 0;
1139 	rxq->rx_nb_avail = nb_rx;
1140 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1141 
1142 	/* if required, allocate new buffers to replenish descriptors */
1143 	if (rxq->rx_tail > rxq->rx_free_trigger) {
1144 		uint16_t cur_free_trigger = rxq->rx_free_trigger;
1145 
1146 		if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
1147 			int i, j;
1148 
1149 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1150 				   "queue_id=%u", (uint16_t)rxq->port_id,
1151 				   (uint16_t)rxq->queue_id);
1152 
1153 			dev->data->rx_mbuf_alloc_failed +=
1154 				rxq->rx_free_thresh;
1155 
1156 			/*
1157 			 * Need to rewind any previous receives if we cannot
1158 			 * allocate new buffers to replenish the old ones.
1159 			 */
1160 			rxq->rx_nb_avail = 0;
1161 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1162 			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1163 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1164 
1165 			return 0;
1166 		}
1167 
1168 		/* update tail pointer */
1169 		rte_wmb();
1170 		ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1171 	}
1172 
1173 	if (rxq->rx_tail >= rxq->nb_rx_desc)
1174 		rxq->rx_tail = 0;
1175 
1176 	/* received any packets this loop? */
1177 	if (rxq->rx_nb_avail)
1178 		return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1179 
1180 	return 0;
1181 }
1182 
1183 /* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
1184 uint16_t
1185 ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1186 			   uint16_t nb_pkts)
1187 {
1188 	uint16_t nb_rx;
1189 
1190 	if (unlikely(nb_pkts == 0))
1191 		return 0;
1192 
1193 	if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
1194 		return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1195 
1196 	/* request is relatively large, chunk it up */
1197 	nb_rx = 0;
1198 	while (nb_pkts) {
1199 		uint16_t ret, n;
1200 
1201 		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
1202 		ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1203 		nb_rx = (uint16_t)(nb_rx + ret);
1204 		nb_pkts = (uint16_t)(nb_pkts - ret);
1205 		if (ret < n)
1206 			break;
1207 	}
1208 
1209 	return nb_rx;
1210 }
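
/*
 * Illustrative usage (not part of the driver): as on the Tx side, this
 * handler is normally invoked through the generic ethdev API, e.g.
 *
 *   struct rte_mbuf *pkts[RTE_PMD_NGBE_RX_MAX_BURST];
 *   uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts,
 *                                  RTE_PMD_NGBE_RX_MAX_BURST);
 *
 * assuming the queue was set up so that the bulk-allocation path was chosen.
 */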
1211 
1212 uint16_t
1213 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1214 		uint16_t nb_pkts)
1215 {
1216 	struct ngbe_rx_queue *rxq;
1217 	volatile struct ngbe_rx_desc *rx_ring;
1218 	volatile struct ngbe_rx_desc *rxdp;
1219 	struct ngbe_rx_entry *sw_ring;
1220 	struct ngbe_rx_entry *rxe;
1221 	struct rte_mbuf *rxm;
1222 	struct rte_mbuf *nmb;
1223 	struct ngbe_rx_desc rxd;
1224 	uint64_t dma_addr;
1225 	uint32_t staterr;
1226 	uint32_t pkt_info;
1227 	uint16_t pkt_len;
1228 	uint16_t rx_id;
1229 	uint16_t nb_rx;
1230 	uint16_t nb_hold;
1231 	uint64_t pkt_flags;
1232 
1233 	nb_rx = 0;
1234 	nb_hold = 0;
1235 	rxq = rx_queue;
1236 	rx_id = rxq->rx_tail;
1237 	rx_ring = rxq->rx_ring;
1238 	sw_ring = rxq->sw_ring;
1239 	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1240 	while (nb_rx < nb_pkts) {
1241 		/*
1242 		 * The order of operations here is important as the DD status
1243 		 * bit must not be read after any other descriptor fields.
1244 		 * rx_ring and rxdp are pointing to volatile data so the order
1245 		 * of accesses cannot be reordered by the compiler. If they were
1246 		 * not volatile, they could be reordered which could lead to
1247 		 * using invalid descriptor fields when read from rxd.
1248 		 *
1249 		 * Meanwhile, to prevent the CPU from executing out of order, we
1250 		 * need to use a proper memory barrier to ensure the memory
1251 		 * ordering below.
1252 		 */
1253 		rxdp = &rx_ring[rx_id];
1254 		staterr = rxdp->qw1.lo.status;
1255 		if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
1256 			break;
1257 
1258 		/*
1259 		 * Use acquire fence to ensure that status_error which includes
1260 		 * DD bit is loaded before loading of other descriptor words.
1261 		 */
1262 		rte_atomic_thread_fence(rte_memory_order_acquire);
1263 
1264 		rxd = *rxdp;
1265 
1266 		/*
1267 		 * End of packet.
1268 		 *
1269 		 * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet
1270 		 * is likely to be invalid and to be dropped by the various
1271 		 * validation checks performed by the network stack.
1272 		 *
1273 		 * Allocate a new mbuf to replenish the RX ring descriptor.
1274 		 * If the allocation fails:
1275 		 *    - arrange for that Rx descriptor to be the first one
1276 		 *      being parsed the next time the receive function is
1277 		 *      invoked [on the same queue].
1278 		 *
1279 		 *    - Stop parsing the Rx ring and return immediately.
1280 		 *
1281 		 * This policy do not drop the packet received in the Rx
1282 		 * This policy does not drop the packet received in the Rx
1283 		 * descriptor for which the allocation of a new mbuf failed.
1284 		 * Thus, it allows that packet to be retrieved later if
1285 		 * mbufs have been freed in the meantime.
1286 		 * As a side effect, holding Rx descriptors instead of
1287 		 * systematically giving them back to the NIC may lead to
1288 		 * Rx ring exhaustion situations.
1289 		 * However, the NIC can gracefully prevent such situations
1290 		 * from happening by sending specific "back-pressure" flow
1291 		 * control frames to its peer(s).
1292 		PMD_RX_LOG(DEBUG,
1293 			   "port_id=%u queue_id=%u rx_id=%u ext_err_stat=0x%08x pkt_len=%u",
1294 			   (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1295 			   (uint16_t)rx_id, (uint32_t)staterr,
1296 			   (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1297 
1298 		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1299 		if (nmb == NULL) {
1300 			PMD_RX_LOG(DEBUG,
1301 				   "Rx mbuf alloc failed port_id=%u queue_id=%u",
1302 				   (uint16_t)rxq->port_id,
1303 				   (uint16_t)rxq->queue_id);
1304 			dev->data->rx_mbuf_alloc_failed++;
1305 			break;
1306 		}
1307 
1308 		nb_hold++;
1309 		rxe = &sw_ring[rx_id];
1310 		rx_id++;
1311 		if (rx_id == rxq->nb_rx_desc)
1312 			rx_id = 0;
1313 
1314 		/* Prefetch next mbuf while processing current one. */
1315 		rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
1316 
1317 		/*
1318 		 * When next Rx descriptor is on a cache-line boundary,
1319 		 * prefetch the next 4 Rx descriptors and the next 8 pointers
1320 		 * to mbufs.
1321 		 */
1322 		if ((rx_id & 0x3) == 0) {
1323 			rte_ngbe_prefetch(&rx_ring[rx_id]);
1324 			rte_ngbe_prefetch(&sw_ring[rx_id]);
1325 		}
1326 
1327 		rxm = rxe->mbuf;
1328 		rxe->mbuf = nmb;
1329 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1330 		NGBE_RXD_HDRADDR(rxdp, 0);
1331 		NGBE_RXD_PKTADDR(rxdp, dma_addr);
1332 
1333 		/*
1334 		 * Initialize the returned mbuf.
1335 		 * 1) setup generic mbuf fields:
1336 		 *    - number of segments,
1337 		 *    - next segment,
1338 		 *    - packet length,
1339 		 *    - Rx port identifier.
1340 		 * 2) integrate hardware offload data, if any:
1341 		 *    - RSS flag & hash,
1342 		 *    - IP checksum flag,
1343 		 *    - VLAN TCI, if any,
1344 		 *    - error flags.
1345 		 */
1346 		pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1347 				      rxq->crc_len);
1348 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1349 		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1350 		rxm->nb_segs = 1;
1351 		rxm->next = NULL;
1352 		rxm->pkt_len = pkt_len;
1353 		rxm->data_len = pkt_len;
1354 		rxm->port = rxq->port_id;
1355 
1356 		pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1357 		/* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
1358 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
1359 
1360 		pkt_flags = rx_desc_status_to_pkt_flags(staterr,
1361 					rxq->vlan_flags);
1362 		pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1363 		pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1364 		rxm->ol_flags = pkt_flags;
1365 		rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1366 						       NGBE_PTID_MASK);
1367 
1368 		if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1369 			rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
1370 
1371 		/*
1372 		 * Store the mbuf address into the next entry of the array
1373 		 * of returned packets.
1374 		 */
1375 		rx_pkts[nb_rx++] = rxm;
1376 	}
1377 	rxq->rx_tail = rx_id;
1378 
1379 	/*
1380 	 * If the number of free Rx descriptors is greater than the Rx free
1381 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1382 	 * register.
1383 	 * Update the RDT with the value of the last processed Rx descriptor
1384 	 * minus 1, to guarantee that the RDT register is never equal to the
1385 	 * RDH register, which creates a "full" ring situation from the
1386 	 * hardware point of view...
1387 	 */
1388 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1389 	if (nb_hold > rxq->rx_free_thresh) {
1390 		PMD_RX_LOG(DEBUG,
1391 			   "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
1392 			   (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1393 			   (uint16_t)rx_id, (uint16_t)nb_hold,
1394 			   (uint16_t)nb_rx);
1395 		rx_id = (uint16_t)((rx_id == 0) ?
1396 				(rxq->nb_rx_desc - 1) : (rx_id - 1));
1397 		ngbe_set32(rxq->rdt_reg_addr, rx_id);
1398 		nb_hold = 0;
1399 	}
1400 	rxq->nb_rx_hold = nb_hold;
1401 	return nb_rx;
1402 }
1403 
1404 /**
1405  * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1406  *
1407  * Fill the following info in the HEAD buffer of the Rx cluster:
1408  *    - RX port identifier
1409  *    - hardware offload data, if any:
1410  *      - RSS flag & hash
1411  *      - IP checksum flag
1412  *      - VLAN TCI, if any
1413  *      - error flags
1414  * @head HEAD of the packet cluster
1415  * @desc HW descriptor to get data from
1416  * @rxq Pointer to the Rx queue
 * @staterr Status/error word read from the Rx descriptor
1417  */
1418 static inline void
1419 ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
1420 		struct ngbe_rx_queue *rxq, uint32_t staterr)
1421 {
1422 	uint32_t pkt_info;
1423 	uint64_t pkt_flags;
1424 
1425 	head->port = rxq->port_id;
1426 
1427 	/* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
1428 	 * set in the pkt_flags field.
1429 	 */
1430 	head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
1431 	pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1432 	pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1433 	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1434 	pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1435 	head->ol_flags = pkt_flags;
1436 	head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1437 						NGBE_PTID_MASK);
1438 
1439 	if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1440 		head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
1441 }
1442 
1443 /**
1444  * ngbe_recv_pkts_sc - receive handler for scatter case.
1445  *
1446  * @rx_queue Rx queue handle
1447  * @rx_pkts table of received packets
1448  * @nb_pkts size of rx_pkts table
1449  * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
1450  *
1451  * Returns the number of received packets/clusters (according to the "bulk
1452  * receive" interface).
1453  */
1454 static inline uint16_t
1455 ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1456 		    bool bulk_alloc)
1457 {
1458 	struct ngbe_rx_queue *rxq = rx_queue;
1459 	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1460 	volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
1461 	struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
1462 	struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1463 	uint16_t rx_id = rxq->rx_tail;
1464 	uint16_t nb_rx = 0;
1465 	uint16_t nb_hold = rxq->nb_rx_hold;
1466 	uint16_t prev_id = rxq->rx_tail;
1467 
1468 	while (nb_rx < nb_pkts) {
1469 		bool eop;
1470 		struct ngbe_rx_entry *rxe;
1471 		struct ngbe_scattered_rx_entry *sc_entry;
1472 		struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
1473 		struct ngbe_rx_entry *next_rxe = NULL;
1474 		struct rte_mbuf *first_seg;
1475 		struct rte_mbuf *rxm;
1476 		struct rte_mbuf *nmb = NULL;
1477 		struct ngbe_rx_desc rxd;
1478 		uint16_t data_len;
1479 		uint16_t next_id;
1480 		volatile struct ngbe_rx_desc *rxdp;
1481 		uint32_t staterr;
1482 
1483 next_desc:
1484 		rxdp = &rx_ring[rx_id];
1485 		staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1486 
1487 		if (!(staterr & NGBE_RXD_STAT_DD))
1488 			break;
1489 
1490 		/*
1491 		 * Use acquire fence to ensure that status_error which includes
1492 		 * DD bit is loaded before loading of other descriptor words.
1493 		 */
1494 		rte_atomic_thread_fence(rte_memory_order_acquire);
1495 
1496 		rxd = *rxdp;
1497 
1498 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1499 				  "staterr=0x%x data_len=%u",
1500 			   rxq->port_id, rxq->queue_id, rx_id, staterr,
1501 			   rte_le_to_cpu_16(rxd.qw1.hi.len));
1502 
1503 		if (!bulk_alloc) {
1504 			nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1505 			if (nmb == NULL) {
1506 				PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed "
1507 						  "port_id=%u queue_id=%u",
1508 					   rxq->port_id, rxq->queue_id);
1509 
1510 				dev->data->rx_mbuf_alloc_failed++;
1511 				break;
1512 			}
1513 		} else if (nb_hold > rxq->rx_free_thresh) {
1514 			uint16_t next_rdt = rxq->rx_free_trigger;
1515 
1516 			if (!ngbe_rx_alloc_bufs(rxq, false)) {
1517 				rte_wmb();
1518 				ngbe_set32_relaxed(rxq->rdt_reg_addr,
1519 							    next_rdt);
1520 				nb_hold -= rxq->rx_free_thresh;
1521 			} else {
1522 				PMD_RX_LOG(DEBUG, "Rx bulk alloc failed "
1523 						  "port_id=%u queue_id=%u",
1524 					   rxq->port_id, rxq->queue_id);
1525 
1526 				dev->data->rx_mbuf_alloc_failed++;
1527 				break;
1528 			}
1529 		}
1530 
1531 		nb_hold++;
1532 		rxe = &sw_ring[rx_id];
1533 		eop = staterr & NGBE_RXD_STAT_EOP;
1534 
1535 		next_id = rx_id + 1;
1536 		if (next_id == rxq->nb_rx_desc)
1537 			next_id = 0;
1538 
1539 		/* Prefetch next mbuf while processing current one. */
1540 		rte_ngbe_prefetch(sw_ring[next_id].mbuf);
1541 
1542 		/*
1543 		 * When next Rx descriptor is on a cache-line boundary,
1544 		 * prefetch the next 4 Rx descriptors and the next 8 pointers
1545 		 * to mbufs.
1546 		 */
1547 		if ((next_id & 0x3) == 0) {
1548 			rte_ngbe_prefetch(&rx_ring[next_id]);
1549 			rte_ngbe_prefetch(&sw_ring[next_id]);
1550 		}
1551 
1552 		rxm = rxe->mbuf;
1553 
1554 		if (!bulk_alloc) {
1555 			__le64 dma =
1556 			  rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1557 			/*
1558 			 * Update Rx descriptor with the physical address of the
1559 			 * new data buffer of the newly allocated mbuf.
1560 			 */
1561 			rxe->mbuf = nmb;
1562 
1563 			rxm->data_off = RTE_PKTMBUF_HEADROOM;
1564 			NGBE_RXD_HDRADDR(rxdp, 0);
1565 			NGBE_RXD_PKTADDR(rxdp, dma);
1566 		} else {
1567 			rxe->mbuf = NULL;
1568 		}
1569 
1570 		/*
1571 		 * Set data length & data buffer address of mbuf.
1572 		 */
1573 		data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1574 		rxm->data_len = data_len;
1575 
1576 		if (!eop) {
1577 			uint16_t nextp_id;
1578 
1579 			nextp_id = next_id;
1580 			next_sc_entry = &sw_sc_ring[nextp_id];
1581 			next_rxe = &sw_ring[nextp_id];
1582 			rte_ngbe_prefetch(next_rxe);
1583 		}
1584 
1585 		sc_entry = &sw_sc_ring[rx_id];
1586 		first_seg = sc_entry->fbuf;
1587 		sc_entry->fbuf = NULL;
1588 
1589 		/*
1590 		 * If this is the first buffer of the received packet,
1591 		 * set the pointer to the first mbuf of the packet and
1592 		 * initialize its context.
1593 		 * Otherwise, update the total length and the number of segments
1594 		 * of the current scattered packet, and update the pointer to
1595 		 * the last mbuf of the current packet.
1596 		 */
1597 		if (first_seg == NULL) {
1598 			first_seg = rxm;
1599 			first_seg->pkt_len = data_len;
1600 			first_seg->nb_segs = 1;
1601 		} else {
1602 			first_seg->pkt_len += data_len;
1603 			first_seg->nb_segs++;
1604 		}
1605 
1606 		prev_id = rx_id;
1607 		rx_id = next_id;
1608 
1609 		/*
1610 		 * If this is not the last buffer of the received packet, update
1611 		 * the pointer to the first mbuf at the NEXTP entry in the
1612 		 * sw_sc_ring and continue to parse the Rx ring.
1613 		 */
1614 		if (!eop && next_rxe) {
1615 			rxm->next = next_rxe->mbuf;
1616 			next_sc_entry->fbuf = first_seg;
1617 			goto next_desc;
1618 		}
1619 
1620 		/* Initialize the first mbuf of the returned packet */
1621 		ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1622 
1623 		/* Deal with the case when HW CRC strip is disabled. */
1624 		first_seg->pkt_len -= rxq->crc_len;
1625 		if (unlikely(rxm->data_len <= rxq->crc_len)) {
1626 			struct rte_mbuf *lp;
1627 
1628 			for (lp = first_seg; lp->next != rxm; lp = lp->next)
1629 				;
1630 
1631 			first_seg->nb_segs--;
1632 			lp->data_len -= rxq->crc_len - rxm->data_len;
1633 			lp->next = NULL;
1634 			rte_pktmbuf_free_seg(rxm);
1635 		} else {
1636 			rxm->data_len -= rxq->crc_len;
1637 		}
1638 
1639 		/* Prefetch data of first segment, if configured to do so. */
1640 		rte_packet_prefetch((char *)first_seg->buf_addr +
1641 			first_seg->data_off);
1642 
1643 		/*
1644 		 * Store the mbuf address into the next entry of the array
1645 		 * of returned packets.
1646 		 */
1647 		rx_pkts[nb_rx++] = first_seg;
1648 	}
1649 
1650 	/*
1651 	 * Record index of the next Rx descriptor to probe.
1652 	 */
1653 	rxq->rx_tail = rx_id;
1654 
1655 	/*
1656 	 * If the number of free Rx descriptors is greater than the Rx free
1657 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1658 	 * register.
1659 	 * Update the RDT with the value of the last processed Rx descriptor
1660 	 * minus 1, to guarantee that the RDT register is never equal to the
1661 	 * RDH register, which creates a "full" ring situation from the
1662 	 * hardware point of view...
1663 	 */
1664 	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1665 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1666 			   "nb_hold=%u nb_rx=%u",
1667 			   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1668 
1669 		rte_wmb();
1670 		ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1671 		nb_hold = 0;
1672 	}
1673 
1674 	rxq->nb_rx_hold = nb_hold;
1675 	return nb_rx;
1676 }
1677 
1678 uint16_t
1679 ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1680 				 uint16_t nb_pkts)
1681 {
1682 	return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
1683 }
1684 
1685 uint16_t
1686 ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1687 			       uint16_t nb_pkts)
1688 {
1689 	return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
1690 }
1691 
1692 /*********************************************************************
1693  *
1694  *  Queue management functions
1695  *
1696  **********************************************************************/
1697 
1698 static void
1699 ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
1700 {
1701 	unsigned int i;
1702 
1703 	if (txq->sw_ring != NULL) {
1704 		for (i = 0; i < txq->nb_tx_desc; i++) {
1705 			if (txq->sw_ring[i].mbuf != NULL) {
1706 				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1707 				txq->sw_ring[i].mbuf = NULL;
1708 			}
1709 		}
1710 	}
1711 }
1712 
1713 static int
1714 ngbe_tx_done_cleanup_full(struct ngbe_tx_queue *txq, uint32_t free_cnt)
1715 {
1716 	struct ngbe_tx_entry *swr_ring = txq->sw_ring;
1717 	uint16_t i, tx_last, tx_id;
1718 	uint16_t nb_tx_free_last;
1719 	uint16_t nb_tx_to_clean;
1720 	uint32_t pkt_cnt;
1721 
1722 	/* Start freeing mbufs from the entry following tx_tail */
1723 	tx_last = txq->tx_tail;
1724 	tx_id  = swr_ring[tx_last].next_id;
1725 
1726 	if (txq->nb_tx_free == 0 && ngbe_xmit_cleanup(txq))
1727 		return 0;
1728 
1729 	nb_tx_to_clean = txq->nb_tx_free;
1730 	nb_tx_free_last = txq->nb_tx_free;
1731 	if (!free_cnt)
1732 		free_cnt = txq->nb_tx_desc;
1733 
1734 	/* Loop through swr_ring to count the number of
1735 	 * freeable mbufs and packets.
1736 	 */
1737 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
1738 		for (i = 0; i < nb_tx_to_clean &&
1739 			pkt_cnt < free_cnt &&
1740 			tx_id != tx_last; i++) {
1741 			if (swr_ring[tx_id].mbuf != NULL) {
1742 				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
1743 				swr_ring[tx_id].mbuf = NULL;
1744 
1745 				/*
1746 				 * last segment in the packet,
1747 				 * increment packet count
1748 				 */
1749 				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
1750 			}
1751 
1752 			tx_id = swr_ring[tx_id].next_id;
1753 		}
1754 
1755 		if (pkt_cnt < free_cnt) {
1756 			if (ngbe_xmit_cleanup(txq))
1757 				break;
1758 
1759 			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
1760 			nb_tx_free_last = txq->nb_tx_free;
1761 		}
1762 	}
1763 
1764 	return (int)pkt_cnt;
1765 }
1766 
1767 static int
1768 ngbe_tx_done_cleanup_simple(struct ngbe_tx_queue *txq,
1769 			uint32_t free_cnt)
1770 {
1771 	int i, n, cnt;
1772 
1773 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
1774 		free_cnt = txq->nb_tx_desc;
1775 
1776 	cnt = free_cnt - free_cnt % txq->tx_free_thresh;
1777 
1778 	for (i = 0; i < cnt; i += n) {
1779 		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
1780 			break;
1781 
1782 		n = ngbe_tx_free_bufs(txq);
1783 
1784 		if (n == 0)
1785 			break;
1786 	}
1787 
1788 	return i;
1789 }
1790 
1791 int
1792 ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
1793 {
1794 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
1795 	if (txq->offloads == 0 &&
1796 		txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST)
1797 		return ngbe_tx_done_cleanup_simple(txq, free_cnt);
1798 
1799 	return ngbe_tx_done_cleanup_full(txq, free_cnt);
1800 }
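
/*
 * Illustrative sketch (not part of the driver): ngbe_dev_tx_done_cleanup()
 * above is reached from applications through the generic ethdev API. The
 * helper name ngbe_example_tx_cleanup() is hypothetical.
 */
static __rte_unused int
ngbe_example_tx_cleanup(uint16_t port_id, uint16_t queue_id)
{
	/* Ask the PMD to free up to 32 already-transmitted packets. */
	int nb_cleaned = rte_eth_tx_done_cleanup(port_id, queue_id, 32);

	return nb_cleaned; /* number of packets released, or a negative errno */
}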
1801 
1802 static void
1803 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
1804 {
1805 	if (txq != NULL)
1806 		rte_free(txq->sw_ring);
1807 }
1808 
1809 static void
1810 ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
1811 {
1812 	if (txq != NULL) {
1813 		if (txq->ops != NULL) {
1814 			txq->ops->release_mbufs(txq);
1815 			txq->ops->free_swring(txq);
1816 		}
1817 		rte_free(txq);
1818 	}
1819 }
1820 
1821 void
1822 ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1823 {
1824 	ngbe_tx_queue_release(dev->data->tx_queues[qid]);
1825 }
1826 
1827 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
1828 static void
1829 ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
1830 {
1831 	static const struct ngbe_tx_desc zeroed_desc = {0};
1832 	struct ngbe_tx_entry *txe = txq->sw_ring;
1833 	uint16_t prev, i;
1834 
1835 	/* Zero out HW ring memory */
1836 	for (i = 0; i < txq->nb_tx_desc; i++)
1837 		txq->tx_ring[i] = zeroed_desc;
1838 
1839 	/* Initialize SW ring entries */
1840 	prev = (uint16_t)(txq->nb_tx_desc - 1);
1841 	for (i = 0; i < txq->nb_tx_desc; i++) {
1842 		/* the ring can also be modified by hardware */
1843 		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
1844 
1845 		txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
1846 		txe[i].mbuf = NULL;
1847 		txe[i].last_id = i;
1848 		txe[prev].next_id = i;
1849 		prev = i;
1850 	}
1851 
1852 	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
1853 	txq->tx_tail = 0;
1854 
1855 	/*
1856 	 * Always allow 1 descriptor to be un-allocated to avoid
1857 	 * a H/W race condition
1858 	 */
1859 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1860 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1861 	txq->ctx_curr = 0;
1862 	memset((void *)&txq->ctx_cache, 0,
1863 		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
1864 }
1865 
1866 static const struct ngbe_txq_ops def_txq_ops = {
1867 	.release_mbufs = ngbe_tx_queue_release_mbufs,
1868 	.free_swring = ngbe_tx_free_swring,
1869 	.reset = ngbe_reset_tx_queue,
1870 };
1871 
1872 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1873  * the queue parameters. Used in tx_queue_setup by primary process and then
1874  * in dev_init by secondary process when attaching to an existing ethdev.
1875  */
1876 void
1877 ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
1878 {
1879 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
1880 	if (txq->offloads == 0 &&
1881 			txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
1882 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1883 		dev->tx_pkt_prepare = NULL;
1884 		if (txq->tx_free_thresh <= RTE_NGBE_TX_MAX_FREE_BUF_SZ &&
1885 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
1886 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
1887 					ngbe_txq_vec_setup(txq) == 0)) {
1888 			PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
1889 			dev->tx_pkt_burst = ngbe_xmit_pkts_vec;
1890 		} else {
1891 			dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
1892 		}
1893 	} else {
1894 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1895 		PMD_INIT_LOG(DEBUG,
1896 				" - offloads = 0x%" PRIx64,
1897 				txq->offloads);
1898 		PMD_INIT_LOG(DEBUG,
1899 				" - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]",
1900 				(unsigned long)txq->tx_free_thresh,
1901 				(unsigned long)RTE_PMD_NGBE_TX_MAX_BURST);
1902 		dev->tx_pkt_burst = ngbe_xmit_pkts;
1903 		dev->tx_pkt_prepare = ngbe_prep_pkts;
1904 	}
1905 }
1906 
1907 static const struct {
1908 	eth_tx_burst_t pkt_burst;
1909 	const char *info;
1910 } ngbe_tx_burst_infos[] = {
1911 	{ ngbe_xmit_pkts_simple,   "Scalar Simple"},
1912 	{ ngbe_xmit_pkts,          "Scalar"},
1913 #ifdef RTE_ARCH_X86
1914 	{ ngbe_xmit_pkts_vec,      "Vector SSE" },
1915 #elif defined(RTE_ARCH_ARM)
1916 	{ ngbe_xmit_pkts_vec,      "Vector Neon" },
1917 #endif
1918 };
1919 
1920 int
1921 ngbe_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
1922 		      struct rte_eth_burst_mode *mode)
1923 {
1924 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
1925 	int ret = -EINVAL;
1926 	unsigned int i;
1927 
1928 	for (i = 0; i < RTE_DIM(ngbe_tx_burst_infos); ++i) {
1929 		if (pkt_burst == ngbe_tx_burst_infos[i].pkt_burst) {
1930 			snprintf(mode->info, sizeof(mode->info), "%s",
1931 				 ngbe_tx_burst_infos[i].info);
1932 			ret = 0;
1933 			break;
1934 		}
1935 	}
1936 
1937 	return ret;
1938 }
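
/*
 * Illustrative sketch (not part of the driver): querying the Tx burst mode
 * selected above through the generic ethdev API. The helper name
 * ngbe_example_log_tx_burst_mode() is hypothetical.
 */
static __rte_unused void
ngbe_example_log_tx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	/* For ngbe ports this lands in ngbe_tx_burst_mode_get() above. */
	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		PMD_DRV_LOG(INFO, "Tx burst mode: %s", mode.info);
}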
1939 
1940 uint64_t
1941 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
1942 {
1943 	uint64_t tx_offload_capa;
1944 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1945 
1946 	tx_offload_capa =
1947 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1948 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
1949 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
1950 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
1951 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
1952 		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
1953 		RTE_ETH_TX_OFFLOAD_UDP_TSO	   |
1954 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1955 
1956 	if (hw->is_pf)
1957 		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
1958 
1959 	return tx_offload_capa;
1960 }
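
/*
 * Illustrative sketch (not part of the driver): the capability mask built
 * above is what applications see in dev_info.tx_offload_capa. The helper
 * name ngbe_example_port_has_tso() is hypothetical.
 */
static __rte_unused int
ngbe_example_port_has_tso(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	/* Request TCP TSO only if the port actually advertises it. */
	return !!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO);
}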
1961 
1962 int
1963 ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1964 			 uint16_t queue_idx,
1965 			 uint16_t nb_desc,
1966 			 unsigned int socket_id,
1967 			 const struct rte_eth_txconf *tx_conf)
1968 {
1969 	const struct rte_memzone *tz;
1970 	struct ngbe_tx_queue *txq;
1971 	struct ngbe_hw     *hw;
1972 	uint16_t tx_free_thresh;
1973 	uint64_t offloads;
1974 
1975 	PMD_INIT_FUNC_TRACE();
1976 	hw = ngbe_dev_hw(dev);
1977 
1978 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1979 
1980 	/*
1981 	 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
1982 	 * descriptors are used or if the number of descriptors required
1983 	 * to transmit a packet is greater than the number of free Tx
1984 	 * descriptors.
1985 	 * One descriptor in the Tx ring is used as a sentinel to avoid a
1986 	 * H/W race condition, hence the maximum threshold constraints.
1987 	 * When set to zero, a default is used (see the sketch after this function).
1988 	 */
1989 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1990 			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
1991 	if (tx_free_thresh >= (nb_desc - 3)) {
1992 		PMD_INIT_LOG(ERR,
1993 			     "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
1994 			     (unsigned int)tx_free_thresh,
1995 			     (int)dev->data->port_id, (int)queue_idx);
1996 		return -(EINVAL);
1997 	}
1998 
1999 	if (nb_desc % tx_free_thresh != 0) {
2000 		PMD_INIT_LOG(ERR,
2001 			     "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
2002 			     (unsigned int)tx_free_thresh,
2003 			     (int)dev->data->port_id, (int)queue_idx);
2004 		return -(EINVAL);
2005 	}
2006 
2007 	/* Free memory prior to re-allocation if needed... */
2008 	if (dev->data->tx_queues[queue_idx] != NULL) {
2009 		ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2010 		dev->data->tx_queues[queue_idx] = NULL;
2011 	}
2012 
2013 	/* First allocate the Tx queue data structure */
2014 	txq = rte_zmalloc_socket("ethdev Tx queue",
2015 				 sizeof(struct ngbe_tx_queue),
2016 				 RTE_CACHE_LINE_SIZE, socket_id);
2017 	if (txq == NULL)
2018 		return -ENOMEM;
2019 
2020 	/*
2021 	 * Allocate Tx ring hardware descriptors. A memzone large enough to
2022 	 * handle the maximum ring size is allocated in order to allow for
2023 	 * resizing in later calls to the queue setup function.
2024 	 */
2025 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2026 			sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
2027 			NGBE_ALIGN, socket_id);
2028 	if (tz == NULL) {
2029 		ngbe_tx_queue_release(txq);
2030 		return -ENOMEM;
2031 	}
2032 
2033 	txq->nb_tx_desc = nb_desc;
2034 	txq->tx_free_thresh = tx_free_thresh;
2035 	txq->pthresh = tx_conf->tx_thresh.pthresh;
2036 	txq->hthresh = tx_conf->tx_thresh.hthresh;
2037 	txq->wthresh = tx_conf->tx_thresh.wthresh;
2038 	txq->queue_id = queue_idx;
2039 	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2040 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2041 	txq->port_id = dev->data->port_id;
2042 	txq->offloads = offloads;
2043 	txq->ops = &def_txq_ops;
2044 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
2045 
2046 	txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
2047 	txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
2048 
2049 	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
2050 	txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
2051 
2052 	/* Allocate software ring */
2053 	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2054 				sizeof(struct ngbe_tx_entry) * nb_desc,
2055 				RTE_CACHE_LINE_SIZE, socket_id);
2056 	if (txq->sw_ring == NULL) {
2057 		ngbe_tx_queue_release(txq);
2058 		return -ENOMEM;
2059 	}
2060 	PMD_INIT_LOG(DEBUG,
2061 		     "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2062 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2063 
2064 	/* set up scalar Tx function as appropriate */
2065 	ngbe_set_tx_function(dev, txq);
2066 
2067 	txq->ops->reset(txq);
2068 
2069 	dev->data->tx_queues[queue_idx] = txq;
2070 
2071 	return 0;
2072 }
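
/*
 * Illustrative sketch (not part of the driver): a Tx queue configuration
 * that satisfies the checks above: tx_free_thresh must be smaller than
 * nb_desc - 3 and must divide nb_desc evenly. The helper name and the
 * values are hypothetical.
 */
static __rte_unused int
ngbe_example_tx_queue_setup(uint16_t port_id, uint16_t queue_id,
			    unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.tx_free_thresh = 32,	/* divides 512, and 32 < 512 - 3 */
	};

	/* 512 descriptors with a 32-descriptor free threshold. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      socket_id, &txconf);
}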
2073 
2074 /**
2075  * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
2076  *
2077  * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2078  * in the sw_sc_ring is not set to NULL but rather points to the next
2079  * mbuf of this RSC aggregation (that has not been completed yet and still
2080  * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2081  * just free the first "nb_segs" segments of the cluster explicitly by calling
2082  * rte_pktmbuf_free_seg().
2083  *
2084  * @m scattered cluster head
2085  */
2086 static void
2087 ngbe_free_sc_cluster(struct rte_mbuf *m)
2088 {
2089 	uint16_t i, nb_segs = m->nb_segs;
2090 	struct rte_mbuf *next_seg;
2091 
2092 	for (i = 0; i < nb_segs; i++) {
2093 		next_seg = m->next;
2094 		rte_pktmbuf_free_seg(m);
2095 		m = next_seg;
2096 	}
2097 }
2098 
2099 static void
2100 ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
2101 {
2102 	unsigned int i;
2103 
2104 	/* The vector Rx path has its own way of releasing mbufs. */
2105 	if (rxq->rx_using_sse) {
2106 		ngbe_rx_queue_release_mbufs_vec(rxq);
2107 		return;
2108 	}
2109 
2110 	if (rxq->sw_ring != NULL) {
2111 		for (i = 0; i < rxq->nb_rx_desc; i++) {
2112 			if (rxq->sw_ring[i].mbuf != NULL) {
2113 				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2114 				rxq->sw_ring[i].mbuf = NULL;
2115 			}
2116 		}
2117 		for (i = 0; i < rxq->rx_nb_avail; ++i) {
2118 			struct rte_mbuf *mb;
2119 
2120 			mb = rxq->rx_stage[rxq->rx_next_avail + i];
2121 			rte_pktmbuf_free_seg(mb);
2122 		}
2123 		rxq->rx_nb_avail = 0;
2124 	}
2125 
2126 	if (rxq->sw_sc_ring != NULL)
2127 		for (i = 0; i < rxq->nb_rx_desc; i++)
2128 			if (rxq->sw_sc_ring[i].fbuf != NULL) {
2129 				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2130 				rxq->sw_sc_ring[i].fbuf = NULL;
2131 			}
2132 }
2133 
2134 static void
2135 ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
2136 {
2137 	if (rxq != NULL) {
2138 		ngbe_rx_queue_release_mbufs(rxq);
2139 		rte_free(rxq->sw_ring);
2140 		rte_free(rxq->sw_sc_ring);
2141 		rte_free(rxq);
2142 	}
2143 }
2144 
2145 void
2146 ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
2147 {
2148 	ngbe_rx_queue_release(dev->data->rx_queues[qid]);
2149 }
2150 
2151 /*
2152  * Check if Rx Burst Bulk Alloc function can be used.
2153  * Return
2154  *        0: the preconditions are satisfied and the bulk allocation function
2155  *           can be used.
2156  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2157  *           function must be used.
2158  */
2159 static inline int
2160 check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
2161 {
2162 	int ret = 0;
2163 
2164 	/*
2165 	 * Make sure the following pre-conditions are satisfied:
2166 	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
2167 	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
2168 	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2169 	 * Scattered packets are not supported.  This should be checked
2170 	 * outside of this function.
2171 	 */
2172 	if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
2173 		PMD_INIT_LOG(DEBUG,
2174 			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
2175 			     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
2176 		ret = -EINVAL;
2177 	} else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
2178 		PMD_INIT_LOG(DEBUG,
2179 			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
2180 			     rxq->rx_free_thresh, rxq->nb_rx_desc);
2181 		ret = -EINVAL;
2182 	} else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
2183 		PMD_INIT_LOG(DEBUG,
2184 			     "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
2185 			     rxq->nb_rx_desc, rxq->rx_free_thresh);
2186 		ret = -EINVAL;
2187 	}
2188 
2189 	return ret;
2190 }
2191 
2192 /* Reset dynamic ngbe_rx_queue fields back to defaults */
2193 static void
2194 ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
2195 {
2196 	static const struct ngbe_rx_desc zeroed_desc = {
2197 						{{0}, {0} }, {{0}, {0} } };
2198 	unsigned int i;
2199 	uint16_t len = rxq->nb_rx_desc;
2200 
2201 	/*
2202 	 * By default, the Rx queue setup function allocates enough memory for
2203 	 * NGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
2204 	 * extra memory at the end of the descriptor ring to be zeroed out.
2205 	 */
2206 	if (adapter->rx_bulk_alloc_allowed)
2207 		/* zero out extra memory */
2208 		len += RTE_PMD_NGBE_RX_MAX_BURST;
2209 
2210 	/*
2211 	 * Zero out the HW ring memory, including the extra memory at the
2212 	 * end of the ring, so that the look-ahead logic in the Rx Burst
2213 	 * bulk alloc function reads it as zeros.
2214 	 */
2215 	for (i = 0; i < len; i++)
2216 		rxq->rx_ring[i] = zeroed_desc;
2217 
2218 	/*
2219 	 * Initialize the extra software ring entries. Space for these
2220 	 * extra entries is always allocated.
2221 	 */
2222 	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2223 	for (i = rxq->nb_rx_desc; i < len; ++i)
2224 		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2225 
2226 	rxq->rx_nb_avail = 0;
2227 	rxq->rx_next_avail = 0;
2228 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2229 	rxq->rx_tail = 0;
2230 	rxq->nb_rx_hold = 0;
2231 	rxq->pkt_first_seg = NULL;
2232 	rxq->pkt_last_seg = NULL;
2233 
2234 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
2235 	rxq->rxrearm_start = 0;
2236 	rxq->rxrearm_nb = 0;
2237 #endif
2238 }
2239 
2240 uint64_t
2241 ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
2242 {
2243 	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2244 }
2245 
2246 uint64_t
2247 ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2248 {
2249 	uint64_t offloads;
2250 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2251 
2252 	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
2253 		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
2254 		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
2255 		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
2256 		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
2257 		   RTE_ETH_RX_OFFLOAD_RSS_HASH    |
2258 		   RTE_ETH_RX_OFFLOAD_SCATTER;
2259 
2260 	if (hw->is_pf)
2261 		offloads |= (RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
2262 			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
2263 
2264 	return offloads;
2265 }
2266 
2267 int
2268 ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2269 			 uint16_t queue_idx,
2270 			 uint16_t nb_desc,
2271 			 unsigned int socket_id,
2272 			 const struct rte_eth_rxconf *rx_conf,
2273 			 struct rte_mempool *mp)
2274 {
2275 	const struct rte_memzone *rz;
2276 	struct ngbe_rx_queue *rxq;
2277 	struct ngbe_hw     *hw;
2278 	uint16_t len;
2279 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2280 	uint64_t offloads;
2281 
2282 	PMD_INIT_FUNC_TRACE();
2283 	hw = ngbe_dev_hw(dev);
2284 
2285 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2286 
2287 	/* Free memory prior to re-allocation if needed... */
2288 	if (dev->data->rx_queues[queue_idx] != NULL) {
2289 		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2290 		dev->data->rx_queues[queue_idx] = NULL;
2291 	}
2292 
2293 	/* First allocate the Rx queue data structure */
2294 	rxq = rte_zmalloc_socket("ethdev RX queue",
2295 				 sizeof(struct ngbe_rx_queue),
2296 				 RTE_CACHE_LINE_SIZE, socket_id);
2297 	if (rxq == NULL)
2298 		return -ENOMEM;
2299 	rxq->mb_pool = mp;
2300 	rxq->nb_rx_desc = nb_desc;
2301 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2302 	rxq->queue_id = queue_idx;
2303 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2304 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2305 	rxq->port_id = dev->data->port_id;
2306 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2307 		rxq->crc_len = RTE_ETHER_CRC_LEN;
2308 	else
2309 		rxq->crc_len = 0;
2310 	rxq->drop_en = rx_conf->rx_drop_en;
2311 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2312 	rxq->offloads = offloads;
2313 
2314 	/*
2315 	 * Allocate Rx ring hardware descriptors. A memzone large enough to
2316 	 * handle the maximum ring size is allocated in order to allow for
2317 	 * resizing in later calls to the queue setup function.
2318 	 */
2319 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2320 				      RX_RING_SZ, NGBE_ALIGN, socket_id);
2321 	if (rz == NULL) {
2322 		ngbe_rx_queue_release(rxq);
2323 		return -ENOMEM;
2324 	}
2325 
2326 	/*
2327 	 * Zero init all the descriptors in the ring.
2328 	 */
2329 	memset(rz->addr, 0, RX_RING_SZ);
2330 
2331 	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
2332 	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
2333 
2334 	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2335 	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
2336 
2337 	/*
2338 	 * Certain constraints must be met in order to use the bulk buffer
2339 	 * allocation Rx burst function. If any Rx queue doesn't meet them,
2340 	 * the feature is disabled for the whole port.
2341 	 */
2342 	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2343 		PMD_INIT_LOG(DEBUG,
2344 			     "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
2345 			     rxq->queue_id, rxq->port_id);
2346 		adapter->rx_bulk_alloc_allowed = false;
2347 	}
2348 
2349 	/*
2350 	 * Allocate software ring. Allow for space at the end of the
2351 	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2352 	 * function does not access an invalid memory region.
2353 	 */
2354 	len = nb_desc;
2355 	if (adapter->rx_bulk_alloc_allowed)
2356 		len += RTE_PMD_NGBE_RX_MAX_BURST;
2357 
2358 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2359 					  sizeof(struct ngbe_rx_entry) * len,
2360 					  RTE_CACHE_LINE_SIZE, socket_id);
2361 	if (rxq->sw_ring == NULL) {
2362 		ngbe_rx_queue_release(rxq);
2363 		return -ENOMEM;
2364 	}
2365 
2366 	/*
2367 	 * Always allocate even if it's not going to be needed in order to
2368 	 * simplify the code.
2369 	 *
2370 	 * This ring is used in Scattered Rx cases and Scattered Rx may
2371 	 * be requested in ngbe_dev_rx_init(), which is called later from
2372 	 * dev_start() flow.
2373 	 */
2374 	rxq->sw_sc_ring =
2375 		rte_zmalloc_socket("rxq->sw_sc_ring",
2376 				  sizeof(struct ngbe_scattered_rx_entry) * len,
2377 				  RTE_CACHE_LINE_SIZE, socket_id);
2378 	if (rxq->sw_sc_ring == NULL) {
2379 		ngbe_rx_queue_release(rxq);
2380 		return -ENOMEM;
2381 	}
2382 
2383 	PMD_INIT_LOG(DEBUG,
2384 		     "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2385 		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2386 		     rxq->rx_ring_phys_addr);
2387 
2388 	if (!rte_is_power_of_2(nb_desc)) {
2389 		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
2390 				    "preconditions - canceling the feature for "
2391 				    "the whole port[%d]",
2392 			     rxq->queue_id, rxq->port_id);
2393 		adapter->rx_vec_allowed = false;
2394 	} else {
2395 		ngbe_rxq_vec_setup(rxq);
2396 	}
2397 
2398 	dev->data->rx_queues[queue_idx] = rxq;
2399 
2400 	ngbe_reset_rx_queue(adapter, rxq);
2401 
2402 	return 0;
2403 }
2404 
2405 uint32_t
2406 ngbe_dev_rx_queue_count(void *rx_queue)
2407 {
2408 #define NGBE_RXQ_SCAN_INTERVAL 4
2409 	volatile struct ngbe_rx_desc *rxdp;
2410 	struct ngbe_rx_queue *rxq = rx_queue;
2411 	uint32_t desc = 0;
2412 
2413 	rxdp = &rxq->rx_ring[rxq->rx_tail];
2414 
2415 	while ((desc < rxq->nb_rx_desc) &&
2416 		(rxdp->qw1.lo.status &
2417 			rte_cpu_to_le_32(NGBE_RXD_STAT_DD))) {
2418 		desc += NGBE_RXQ_SCAN_INTERVAL;
2419 		rxdp += NGBE_RXQ_SCAN_INTERVAL;
2420 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2421 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
2422 				desc - rxq->nb_rx_desc]);
2423 	}
2424 
2425 	return desc;
2426 }
2427 
2428 int
2429 ngbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
2430 {
2431 	struct ngbe_rx_queue *rxq = rx_queue;
2432 	volatile uint32_t *status;
2433 	uint32_t nb_hold, desc;
2434 
2435 	if (unlikely(offset >= rxq->nb_rx_desc))
2436 		return -EINVAL;
2437 
2438 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
2439 	if (rxq->rx_using_sse)
2440 		nb_hold = rxq->rxrearm_nb;
2441 	else
2442 #endif
2443 		nb_hold = rxq->nb_rx_hold;
2444 	if (offset >= rxq->nb_rx_desc - nb_hold)
2445 		return RTE_ETH_RX_DESC_UNAVAIL;
2446 
2447 	desc = rxq->rx_tail + offset;
2448 	if (desc >= rxq->nb_rx_desc)
2449 		desc -= rxq->nb_rx_desc;
2450 
2451 	status = &rxq->rx_ring[desc].qw1.lo.status;
2452 	if (*status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD))
2453 		return RTE_ETH_RX_DESC_DONE;
2454 
2455 	return RTE_ETH_RX_DESC_AVAIL;
2456 }
2457 
2458 int
2459 ngbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
2460 {
2461 	struct ngbe_tx_queue *txq = tx_queue;
2462 	volatile uint32_t *status;
2463 	uint32_t desc;
2464 
2465 	if (unlikely(offset >= txq->nb_tx_desc))
2466 		return -EINVAL;
2467 
2468 	desc = txq->tx_tail + offset;
2469 	if (desc >= txq->nb_tx_desc) {
2470 		desc -= txq->nb_tx_desc;
2471 		if (desc >= txq->nb_tx_desc)
2472 			desc -= txq->nb_tx_desc;
2473 	}
2474 
2475 	status = &txq->tx_ring[desc].dw3;
2476 	if (*status & rte_cpu_to_le_32(NGBE_TXD_DD))
2477 		return RTE_ETH_TX_DESC_DONE;
2478 
2479 	return RTE_ETH_TX_DESC_FULL;
2480 }
2481 
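
/*
 * Illustrative sketch (not part of the driver): how an application polls the
 * two descriptor status handlers above through the generic ethdev API,
 * assuming 512-entry rings. The helper name is hypothetical.
 */
static __rte_unused void
ngbe_example_check_queue_pressure(uint16_t port_id, uint16_t queue_id)
{
	/* Has the descriptor half-way round the Rx ring been filled yet? */
	if (rte_eth_rx_descriptor_status(port_id, queue_id, 256) ==
			RTE_ETH_RX_DESC_DONE)
		PMD_DRV_LOG(INFO, "Rx queue %u is at least half full", queue_id);

	/* Is the descriptor 64 slots ahead of the Tx tail still in flight? */
	if (rte_eth_tx_descriptor_status(port_id, queue_id, 64) ==
			RTE_ETH_TX_DESC_FULL)
		PMD_DRV_LOG(INFO, "Tx queue %u is backing up", queue_id);
}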
2482 void
2483 ngbe_dev_clear_queues(struct rte_eth_dev *dev)
2484 {
2485 	unsigned int i;
2486 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2487 
2488 	PMD_INIT_FUNC_TRACE();
2489 
2490 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2491 		struct ngbe_tx_queue *txq = dev->data->tx_queues[i];
2492 
2493 		if (txq != NULL) {
2494 			txq->ops->release_mbufs(txq);
2495 			txq->ops->reset(txq);
2496 			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2497 		}
2498 	}
2499 
2500 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2501 		struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
2502 
2503 		if (rxq != NULL) {
2504 			ngbe_rx_queue_release_mbufs(rxq);
2505 			ngbe_reset_rx_queue(adapter, rxq);
2506 			dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2507 		}
2508 	}
2509 }
2510 
2511 void
2512 ngbe_dev_free_queues(struct rte_eth_dev *dev)
2513 {
2514 	unsigned int i;
2515 
2516 	PMD_INIT_FUNC_TRACE();
2517 
2518 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2519 		ngbe_dev_rx_queue_release(dev, i);
2520 		dev->data->rx_queues[i] = NULL;
2521 	}
2522 	dev->data->nb_rx_queues = 0;
2523 
2524 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2525 		ngbe_dev_tx_queue_release(dev, i);
2526 		dev->data->tx_queues[i] = NULL;
2527 	}
2528 	dev->data->nb_tx_queues = 0;
2529 }
2530 
2531 /**
2532  * Receive Side Scaling (RSS)
2533  *
2534  * Principles:
2535  * The source and destination IP addresses of the IP header and the source
2536  * and destination ports of TCP/UDP headers, if any, of received packets are
2537  * hashed against a configurable random key to compute a 32-bit RSS hash result.
2538  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2539  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
2540  * RSS output index, which is used as the index of the Rx queue in which to
2541  * store the received packet.
2542  * The following output is supplied in the Rx write-back descriptor:
2543  *     - 32-bit result of the Microsoft RSS hash function,
2544  *     - 4-bit RSS type field.
2545  */
2546 
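/*
 * Illustrative sketch (not part of the driver): how the seven hash LSBs
 * select an Rx queue through the redirection table described above,
 * assuming a software copy of the 128-entry RETA. The helper name
 * ngbe_example_rss_queue() is hypothetical.
 */
static __rte_unused uint8_t
ngbe_example_rss_queue(const uint8_t reta[128], uint32_t rss_hash)
{
	/* The seven least significant bits index the 128-entry RETA. */
	return reta[rss_hash & 0x7F];
}
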
2547 /*
2548  * Used as the default key.
2549  */
2550 static uint8_t rss_intel_key[40] = {
2551 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2552 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2553 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2554 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2555 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2556 };
2557 
2558 static void
2559 ngbe_rss_disable(struct rte_eth_dev *dev)
2560 {
2561 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2562 
2563 	wr32m(hw, NGBE_RACTL, NGBE_RACTL_RSSENA, 0);
2564 }
2565 
2566 int
2567 ngbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2568 			  struct rte_eth_rss_conf *rss_conf)
2569 {
2570 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2571 	uint8_t  *hash_key;
2572 	uint32_t mrqc;
2573 	uint32_t rss_key;
2574 	uint64_t rss_hf;
2575 	uint16_t i;
2576 
2577 	if (!hw->is_pf) {
2578 		PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2579 			"NIC.");
2580 		return -ENOTSUP;
2581 	}
2582 
2583 	hash_key = rss_conf->rss_key;
2584 	if (hash_key) {
2585 		/* Fill in RSS hash key */
2586 		for (i = 0; i < 10; i++) {
2587 			rss_key  = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
2588 			rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
2589 			rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
2590 			rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
2591 			wr32a(hw, NGBE_REG_RSSKEY, i, rss_key);
2592 		}
2593 	}
2594 
2595 	/* Set configured hashing protocols */
2596 	rss_hf = rss_conf->rss_hf & NGBE_RSS_OFFLOAD_ALL;
2597 
2598 	mrqc = rd32(hw, NGBE_RACTL);
2599 	mrqc &= ~NGBE_RACTL_RSSMASK;
2600 	if (rss_hf & RTE_ETH_RSS_IPV4)
2601 		mrqc |= NGBE_RACTL_RSSIPV4;
2602 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
2603 		mrqc |= NGBE_RACTL_RSSIPV4TCP;
2604 	if (rss_hf & RTE_ETH_RSS_IPV6 ||
2605 	    rss_hf & RTE_ETH_RSS_IPV6_EX)
2606 		mrqc |= NGBE_RACTL_RSSIPV6;
2607 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
2608 	    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
2609 		mrqc |= NGBE_RACTL_RSSIPV6TCP;
2610 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
2611 		mrqc |= NGBE_RACTL_RSSIPV4UDP;
2612 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
2613 	    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
2614 		mrqc |= NGBE_RACTL_RSSIPV6UDP;
2615 
2616 	if (rss_hf)
2617 		mrqc |= NGBE_RACTL_RSSENA;
2618 	else
2619 		mrqc &= ~NGBE_RACTL_RSSENA;
2620 
2621 	wr32(hw, NGBE_RACTL, mrqc);
2622 
2623 	return 0;
2624 }
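
/*
 * Illustrative sketch (not part of the driver): updating the RSS hash
 * configuration at runtime through the generic ethdev API, which lands in
 * ngbe_dev_rss_hash_update() above. The 40-byte key matches the ten 32-bit
 * RSSKEY registers programmed above; the helper name is hypothetical.
 */
static __rte_unused int
ngbe_example_rss_update(uint16_t port_id, uint8_t key[40])
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = 40,
		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}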
2625 
2626 int
2627 ngbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2628 			    struct rte_eth_rss_conf *rss_conf)
2629 {
2630 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2631 	uint8_t *hash_key;
2632 	uint32_t mrqc;
2633 	uint32_t rss_key;
2634 	uint64_t rss_hf;
2635 	uint16_t i;
2636 
2637 	hash_key = rss_conf->rss_key;
2638 	if (hash_key) {
2639 		/* Return RSS hash key */
2640 		for (i = 0; i < 10; i++) {
2641 			rss_key = rd32a(hw, NGBE_REG_RSSKEY, i);
2642 			hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
2643 			hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
2644 			hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
2645 			hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
2646 		}
2647 	}
2648 
2649 	rss_hf = 0;
2650 
2651 	mrqc = rd32(hw, NGBE_RACTL);
2652 	if (mrqc & NGBE_RACTL_RSSIPV4)
2653 		rss_hf |= RTE_ETH_RSS_IPV4;
2654 	if (mrqc & NGBE_RACTL_RSSIPV4TCP)
2655 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2656 	if (mrqc & NGBE_RACTL_RSSIPV6)
2657 		rss_hf |= RTE_ETH_RSS_IPV6 |
2658 			  RTE_ETH_RSS_IPV6_EX;
2659 	if (mrqc & NGBE_RACTL_RSSIPV6TCP)
2660 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
2661 			  RTE_ETH_RSS_IPV6_TCP_EX;
2662 	if (mrqc & NGBE_RACTL_RSSIPV4UDP)
2663 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2664 	if (mrqc & NGBE_RACTL_RSSIPV6UDP)
2665 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
2666 			  RTE_ETH_RSS_IPV6_UDP_EX;
2667 	if (!(mrqc & NGBE_RACTL_RSSENA))
2668 		rss_hf = 0;
2669 
2670 	rss_hf &= NGBE_RSS_OFFLOAD_ALL;
2671 
2672 	rss_conf->rss_hf = rss_hf;
2673 	return 0;
2674 }
2675 
2676 static void
2677 ngbe_rss_configure(struct rte_eth_dev *dev)
2678 {
2679 	struct rte_eth_rss_conf rss_conf;
2680 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2681 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2682 	uint32_t reta;
2683 	uint16_t i;
2684 	uint16_t j;
2685 
2686 	PMD_INIT_FUNC_TRACE();
2687 
2688 	/*
2689 	 * Fill in the redirection table. Entries are packed four per
2690 	 * 32-bit register in little-endian byte order (see the packing
2691 	 * sketch after this function).
2692 	 */
2693 	if (adapter->rss_reta_updated == 0) {
2694 		reta = 0;
2695 		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
2696 			if (j == dev->data->nb_rx_queues)
2697 				j = 0;
2698 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
2699 			if ((i & 3) == 3)
2700 				wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2701 		}
2702 	}
2703 	/*
2704 	 * Configure the RSS key and the RSS protocols used to compute
2705 	 * the RSS hash of input packets.
2706 	 */
2707 	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2708 	if (rss_conf.rss_key == NULL)
2709 		rss_conf.rss_key = rss_intel_key; /* Default hash key */
2710 	ngbe_dev_rss_hash_update(dev, &rss_conf);
2711 }
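
/*
 * Illustrative sketch (not part of the driver): how four one-byte RETA
 * entries end up packed into one 32-bit RSSTBL register by the loop above:
 * entry 4n lands in the least significant byte and entry 4n+3 in the most
 * significant byte. The helper name is hypothetical.
 */
static __rte_unused uint32_t
ngbe_example_pack_reta(uint8_t e0, uint8_t e1, uint8_t e2, uint8_t e3)
{
	return (uint32_t)e0 | ((uint32_t)e1 << 8) |
	       ((uint32_t)e2 << 16) | ((uint32_t)e3 << 24);
}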
2712 
2713 void ngbe_configure_port(struct rte_eth_dev *dev)
2714 {
2715 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2716 	int i = 0;
2717 	uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
2718 				0x9100, 0x9200,
2719 				0x0000, 0x0000,
2720 				0x0000, 0x0000};
2721 
2722 	PMD_INIT_FUNC_TRACE();
2723 
2724 	/* default outer vlan tpid */
2725 	wr32(hw, NGBE_EXTAG,
2726 		NGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
2727 		NGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
2728 
2729 	/* default inner vlan tpid */
2730 	wr32m(hw, NGBE_VLANCTL,
2731 		NGBE_VLANCTL_TPID_MASK,
2732 		NGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
2733 	wr32m(hw, NGBE_DMATXCTRL,
2734 		NGBE_DMATXCTRL_TPID_MASK,
2735 		NGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
2736 
2737 	/* default vlan tpid filters */
2738 	for (i = 0; i < 8; i++) {
2739 		wr32m(hw, NGBE_TAGTPID(i / 2),
2740 			(i % 2 ? NGBE_TAGTPID_MSB_MASK
2741 			       : NGBE_TAGTPID_LSB_MASK),
2742 			(i % 2 ? NGBE_TAGTPID_MSB(tpids[i])
2743 			       : NGBE_TAGTPID_LSB(tpids[i])));
2744 	}
2745 }
2746 
2747 static int
2748 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
2749 {
2750 	struct ngbe_rx_entry *rxe = rxq->sw_ring;
2751 	uint64_t dma_addr;
2752 	unsigned int i;
2753 
2754 	/* Initialize software ring entries */
2755 	for (i = 0; i < rxq->nb_rx_desc; i++) {
2756 		/* the ring can also be modified by hardware */
2757 		volatile struct ngbe_rx_desc *rxd;
2758 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2759 
2760 		if (mbuf == NULL) {
2761 			PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
2762 				     (unsigned int)rxq->queue_id,
2763 				     (unsigned int)rxq->port_id);
2764 			return -ENOMEM;
2765 		}
2766 
2767 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2768 		mbuf->port = rxq->port_id;
2769 
2770 		dma_addr =
2771 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2772 		rxd = &rxq->rx_ring[i];
2773 		NGBE_RXD_HDRADDR(rxd, 0);
2774 		NGBE_RXD_PKTADDR(rxd, dma_addr);
2775 		rxe[i].mbuf = mbuf;
2776 	}
2777 
2778 	return 0;
2779 }
2780 
2781 static int
2782 ngbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
2783 {
2784 	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2785 		switch (dev->data->dev_conf.rxmode.mq_mode) {
2786 		case RTE_ETH_MQ_RX_RSS:
2787 			ngbe_rss_configure(dev);
2788 			break;
2789 
2790 		case RTE_ETH_MQ_RX_NONE:
2791 		default:
2792 			/* If mq_mode is none, disable RSS. */
2793 			ngbe_rss_disable(dev);
2794 			break;
2795 		}
2796 	}
2797 
2798 	return 0;
2799 }
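
/*
 * Illustrative sketch (not part of the driver): the mq_mode checked above
 * comes from the application's device configuration, for example along
 * these lines (hypothetical helper, error handling omitted).
 */
static __rte_unused int
ngbe_example_configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = { .rss_conf = {
			.rss_key = NULL,	/* fall back to the default key */
			.rss_hf = RTE_ETH_RSS_IPV4 |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		} },
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}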
2800 
2801 void
2802 ngbe_set_rx_function(struct rte_eth_dev *dev)
2803 {
2804 	uint16_t i, rx_using_sse;
2805 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2806 
2807 	/*
2808 	 * Vector Rx can be used only if a few configuration conditions
2809 	 * are met and Rx Bulk Allocation is allowed.
2810 	 */
2811 	if (ngbe_rx_vec_dev_conf_condition_check(dev) ||
2812 	    !adapter->rx_bulk_alloc_allowed ||
2813 			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
2814 		PMD_INIT_LOG(DEBUG,
2815 			     "Port[%d] doesn't meet Vector Rx preconditions",
2816 			     dev->data->port_id);
2817 		adapter->rx_vec_allowed = false;
2818 	}
2819 
2820 	if (dev->data->scattered_rx) {
2821 		/*
2822 		 * Set the scattered callback: there are bulk and
2823 		 * single allocation versions.
2824 		 */
2825 		if (adapter->rx_vec_allowed) {
2826 			PMD_INIT_LOG(DEBUG,
2827 				     "Using Vector Scattered Rx callback (port=%d).",
2828 				     dev->data->port_id);
2829 			dev->rx_pkt_burst = ngbe_recv_scattered_pkts_vec;
2830 		} else if (adapter->rx_bulk_alloc_allowed) {
2831 			PMD_INIT_LOG(DEBUG, "Using Scattered Rx with bulk "
2832 					   "allocation callback (port=%d).",
2833 				     dev->data->port_id);
2834 			dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
2835 		} else {
2836 			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
2837 					    "single allocation) "
2838 					    "Scattered Rx callback "
2839 					    "(port=%d).",
2840 				     dev->data->port_id);
2841 
2842 			dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
2843 		}
2844 	/*
2845 	 * Below we set the "simple" callbacks according to the port/queue
2846 	 * parameters. If the parameters allow it, we choose between the following
2847 	 * callbacks:
2848 	 *    - Vector
2849 	 *    - Bulk Allocation
2850 	 *    - Single buffer allocation (the simplest one)
2851 	 */
2852 	} else if (adapter->rx_vec_allowed) {
2853 		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure Rx "
2854 				    "burst size no less than %d (port=%d).",
2855 			     RTE_NGBE_DESCS_PER_LOOP,
2856 			     dev->data->port_id);
2857 		dev->rx_pkt_burst = ngbe_recv_pkts_vec;
2858 	} else if (adapter->rx_bulk_alloc_allowed) {
2859 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2860 				    "satisfied. Rx Burst Bulk Alloc function "
2861 				    "will be used on port=%d.",
2862 			     dev->data->port_id);
2863 
2864 		dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
2865 	} else {
2866 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
2867 				    "satisfied, or Scattered Rx is requested "
2868 				    "(port=%d).",
2869 			     dev->data->port_id);
2870 
2871 		dev->rx_pkt_burst = ngbe_recv_pkts;
2872 	}
2873 
2874 	rx_using_sse = (dev->rx_pkt_burst == ngbe_recv_scattered_pkts_vec ||
2875 			dev->rx_pkt_burst == ngbe_recv_pkts_vec);
2876 
2877 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2878 		struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
2879 
2880 		rxq->rx_using_sse = rx_using_sse;
2881 	}
2882 }
2883 
2884 static const struct {
2885 	eth_rx_burst_t pkt_burst;
2886 	const char *info;
2887 } ngbe_rx_burst_infos[] = {
2888 	{ ngbe_recv_pkts_sc_single_alloc,    "Scalar Scattered"},
2889 	{ ngbe_recv_pkts_sc_bulk_alloc,      "Scalar Scattered Bulk Alloc"},
2890 	{ ngbe_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc"},
2891 	{ ngbe_recv_pkts,                    "Scalar"},
2892 #ifdef RTE_ARCH_X86
2893 	{ ngbe_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
2894 	{ ngbe_recv_pkts_vec,                "Vector SSE" },
2895 #elif defined(RTE_ARCH_ARM64)
2896 	{ ngbe_recv_scattered_pkts_vec,      "Vector Neon Scattered" },
2897 	{ ngbe_recv_pkts_vec,                "Vector Neon" },
2898 #endif
2899 };
2900 
2901 int
2902 ngbe_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2903 		      struct rte_eth_burst_mode *mode)
2904 {
2905 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2906 	int ret = -EINVAL;
2907 	unsigned int i;
2908 
2909 	for (i = 0; i < RTE_DIM(ngbe_rx_burst_infos); ++i) {
2910 		if (pkt_burst == ngbe_rx_burst_infos[i].pkt_burst) {
2911 			snprintf(mode->info, sizeof(mode->info), "%s",
2912 				 ngbe_rx_burst_infos[i].info);
2913 			ret = 0;
2914 			break;
2915 		}
2916 	}
2917 
2918 	return ret;
2919 }
2920 
2921 /*
2922  * Initializes Receive Unit.
2923  */
2924 int
2925 ngbe_dev_rx_init(struct rte_eth_dev *dev)
2926 {
2927 	struct ngbe_hw *hw;
2928 	struct ngbe_rx_queue *rxq;
2929 	uint64_t bus_addr;
2930 	uint32_t fctrl;
2931 	uint32_t hlreg0;
2932 	uint32_t srrctl;
2933 	uint32_t rdrxctl;
2934 	uint32_t rxcsum;
2935 	uint16_t buf_size;
2936 	uint16_t i;
2937 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
2938 
2939 	PMD_INIT_FUNC_TRACE();
2940 	hw = ngbe_dev_hw(dev);
2941 
2942 	/*
2943 	 * Make sure receives are disabled while setting
2944 	 * up the Rx context (registers, descriptor rings, etc.).
2945 	 */
2946 	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
2947 	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
2948 
2949 	/* Enable receipt of broadcast frames */
2950 	fctrl = rd32(hw, NGBE_PSRCTL);
2951 	fctrl |= NGBE_PSRCTL_BCA;
2952 	wr32(hw, NGBE_PSRCTL, fctrl);
2953 
2954 	/*
2955 	 * Configure CRC stripping, if any.
2956 	 */
2957 	hlreg0 = rd32(hw, NGBE_SECRXCTL);
2958 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2959 		hlreg0 &= ~NGBE_SECRXCTL_CRCSTRIP;
2960 	else
2961 		hlreg0 |= NGBE_SECRXCTL_CRCSTRIP;
2962 	hlreg0 &= ~NGBE_SECRXCTL_XDSA;
2963 	wr32(hw, NGBE_SECRXCTL, hlreg0);
2964 
2965 	/*
2966 	 * Configure jumbo frame support, if any.
2967 	 */
2968 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2969 		NGBE_FRMSZ_MAX(dev->data->mtu + NGBE_ETH_OVERHEAD));
2970 
2971 	/*
2972 	 * If loopback mode is configured, set LPBK bit.
2973 	 */
2974 	hlreg0 = rd32(hw, NGBE_PSRCTL);
2975 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
2976 		hlreg0 |= NGBE_PSRCTL_LBENA;
2977 	else
2978 		hlreg0 &= ~NGBE_PSRCTL_LBENA;
2979 
2980 	wr32(hw, NGBE_PSRCTL, hlreg0);
2981 
2982 	/*
2983 	 * Assume no header split and no VLAN strip support
2984 	 * on any Rx queue first.
2985 	 */
2986 	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2987 
2988 	/* Setup Rx queues */
2989 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2990 		rxq = dev->data->rx_queues[i];
2991 
2992 		/*
2993 		 * Reset crc_len in case it was changed after queue setup by a
2994 		 * call to configure.
2995 		 */
2996 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2997 			rxq->crc_len = RTE_ETHER_CRC_LEN;
2998 		else
2999 			rxq->crc_len = 0;
3000 
3001 		/* Setup the Base and Length of the Rx Descriptor Rings */
3002 		bus_addr = rxq->rx_ring_phys_addr;
3003 		wr32(hw, NGBE_RXBAL(rxq->reg_idx),
3004 				(uint32_t)(bus_addr & BIT_MASK32));
3005 		wr32(hw, NGBE_RXBAH(rxq->reg_idx),
3006 				(uint32_t)(bus_addr >> 32));
3007 		wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
3008 		wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);
3009 
3010 		srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
3011 
3012 		/* Set if packets are dropped when no descriptors available */
3013 		if (rxq->drop_en)
3014 			srrctl |= NGBE_RXCFG_DROP;
3015 
3016 		/*
3017 		 * Configure the Rx buffer size in the PKTLEN field of the
3018 		 * RXCFG register of the queue, in 1 KB resolution (valid
3019 		 * values: 1 KB to 16 KB). E.g. a default mbuf pool leaves
3020 		 * 2048 usable bytes, which programs a 2 KB buffer.
3021 		 */
3022 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
3023 			RTE_PKTMBUF_HEADROOM);
3024 		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
3025 		srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
3026 
3027 		wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
3028 
3029 		/* Account for dual VLAN length when deciding on scattered Rx */
3030 		if (dev->data->mtu + NGBE_ETH_OVERHEAD +
3031 				2 * RTE_VLAN_HLEN > buf_size)
3032 			dev->data->scattered_rx = 1;
3033 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3034 			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3035 	}
3036 
3037 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
3038 		dev->data->scattered_rx = 1;
3039 
3040 	/*
3041 	 * Device configured with multiple RX queues.
3042 	 */
3043 	ngbe_dev_mq_rx_configure(dev);
3044 
3045 	/*
3046 	 * Setup the Checksum Register.
3047 	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
3048 	 * Enable IP/L4 checksum computation by hardware if requested to do so.
3049 	 */
3050 	rxcsum = rd32(hw, NGBE_PSRCTL);
3051 	rxcsum |= NGBE_PSRCTL_PCSD;
3052 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
3053 		rxcsum |= NGBE_PSRCTL_L4CSUM;
3054 	else
3055 		rxcsum &= ~NGBE_PSRCTL_L4CSUM;
3056 
3057 	wr32(hw, NGBE_PSRCTL, rxcsum);
3058 
3059 	if (hw->is_pf) {
3060 		rdrxctl = rd32(hw, NGBE_SECRXCTL);
3061 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
3062 			rdrxctl &= ~NGBE_SECRXCTL_CRCSTRIP;
3063 		else
3064 			rdrxctl |= NGBE_SECRXCTL_CRCSTRIP;
3065 		wr32(hw, NGBE_SECRXCTL, rdrxctl);
3066 	}
3067 
3068 	ngbe_set_rx_function(dev);
3069 
3070 	return 0;
3071 }
3072 
3073 /*
3074  * Initializes Transmit Unit.
3075  */
3076 void
3077 ngbe_dev_tx_init(struct rte_eth_dev *dev)
3078 {
3079 	struct ngbe_hw     *hw;
3080 	struct ngbe_tx_queue *txq;
3081 	uint64_t bus_addr;
3082 	uint16_t i;
3083 
3084 	PMD_INIT_FUNC_TRACE();
3085 	hw = ngbe_dev_hw(dev);
3086 
3087 	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
3088 	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
3089 
3090 	/* Setup the Base and Length of the Tx Descriptor Rings */
3091 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3092 		txq = dev->data->tx_queues[i];
3093 
3094 		bus_addr = txq->tx_ring_phys_addr;
3095 		wr32(hw, NGBE_TXBAL(txq->reg_idx),
3096 				(uint32_t)(bus_addr & BIT_MASK32));
3097 		wr32(hw, NGBE_TXBAH(txq->reg_idx),
3098 				(uint32_t)(bus_addr >> 32));
3099 		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
3100 			NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
3101 		/* Setup the HW Tx Head and TX Tail descriptor pointers */
3102 		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
3103 		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
3104 	}
3105 }
3106 
3107 /*
3108  * Set up link loopback mode Tx->Rx.
3109  */
3110 static inline void
3111 ngbe_setup_loopback_link(struct ngbe_hw *hw)
3112 {
3113 	PMD_INIT_FUNC_TRACE();
3114 
3115 	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_LB, NGBE_MACRXCFG_LB);
3116 
3117 	msec_delay(50);
3118 }
3119 
3120 /*
3121  * Start Transmit and Receive Units.
3122  */
3123 int
3124 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
3125 {
3126 	struct ngbe_hw     *hw;
3127 	struct ngbe_tx_queue *txq;
3128 	struct ngbe_rx_queue *rxq;
3129 	uint32_t dmatxctl;
3130 	uint32_t rxctrl;
3131 	uint16_t i;
3132 	int ret = 0;
3133 
3134 	PMD_INIT_FUNC_TRACE();
3135 	hw = ngbe_dev_hw(dev);
3136 
3137 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3138 		txq = dev->data->tx_queues[i];
3139 		/* Setup Transmit Threshold Registers */
3140 		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
3141 		      NGBE_TXCFG_HTHRESH_MASK |
3142 		      NGBE_TXCFG_WTHRESH_MASK,
3143 		      NGBE_TXCFG_HTHRESH(txq->hthresh) |
3144 		      NGBE_TXCFG_WTHRESH(txq->wthresh));
3145 	}
3146 
3147 	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
3148 	dmatxctl |= NGBE_DMATXCTRL_ENA;
3149 	wr32(hw, NGBE_DMATXCTRL, dmatxctl);
3150 
3151 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3152 		txq = dev->data->tx_queues[i];
3153 		if (txq->tx_deferred_start == 0) {
3154 			ret = ngbe_dev_tx_queue_start(dev, i);
3155 			if (ret < 0)
3156 				return ret;
3157 		}
3158 	}
3159 
3160 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
3161 		rxq = dev->data->rx_queues[i];
3162 		if (rxq->rx_deferred_start == 0) {
3163 			ret = ngbe_dev_rx_queue_start(dev, i);
3164 			if (ret < 0)
3165 				return ret;
3166 		}
3167 	}
3168 
3169 	/* Enable Receive engine */
3170 	rxctrl = rd32(hw, NGBE_PBRXCTL);
3171 	rxctrl |= NGBE_PBRXCTL_ENA;
3172 	hw->mac.enable_rx_dma(hw, rxctrl);
3173 
3174 	/* If loopback mode is enabled, set up the link accordingly */
3175 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
3176 		ngbe_setup_loopback_link(hw);
3177 
3178 	return 0;
3179 }
3180 
3181 void
3182 ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
3183 {
3184 	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
3185 	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
3186 	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
3187 	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
3188 }
3189 
3190 void
3191 ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
3192 {
3193 	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
3194 	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
3195 	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
3196 	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
3197 }
3198 
3199 void
3200 ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
3201 {
3202 	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
3203 	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
3204 	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
3205 	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
3206 }
3207 
3208 void
3209 ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
3210 {
3211 	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
3212 	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
3213 	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
3214 	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
3215 }
3216 
3217 /*
3218  * Start Receive Units for specified queue.
3219  */
3220 int
3221 ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3222 {
3223 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3224 	struct ngbe_rx_queue *rxq;
3225 	uint32_t rxdctl;
3226 	int poll_ms;
3227 
3228 	PMD_INIT_FUNC_TRACE();
3229 
3230 	rxq = dev->data->rx_queues[rx_queue_id];
3231 
3232 	/* Allocate buffers for descriptor rings */
3233 	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
3234 		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
3235 			     rx_queue_id);
3236 		return -1;
3237 	}
3238 	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
3239 	rxdctl |= NGBE_RXCFG_ENA;
3240 	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
3241 
3242 	/* Wait until Rx Enable ready */
3243 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3244 	do {
3245 		rte_delay_ms(1);
3246 		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
3247 	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
3248 	if (poll_ms == 0)
3249 		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
3250 	rte_wmb();
3251 	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
3252 	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
3253 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3254 
3255 	return 0;
3256 }
3257 
3258 /*
3259  * Stop Receive Units for specified queue.
3260  */
3261 int
3262 ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3263 {
3264 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3265 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3266 	struct ngbe_rx_queue *rxq;
3267 	uint32_t rxdctl;
3268 	int poll_ms;
3269 
3270 	PMD_INIT_FUNC_TRACE();
3271 
3272 	rxq = dev->data->rx_queues[rx_queue_id];
3273 
3274 	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
3275 	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
3276 
3277 	/* Wait until Rx Enable bit clear */
3278 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3279 	do {
3280 		rte_delay_ms(1);
3281 		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
3282 	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
3283 	if (poll_ms == 0)
3284 		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
3285 
3286 	rte_delay_us(RTE_NGBE_WAIT_100_US);
3287 	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
3288 
3289 	ngbe_rx_queue_release_mbufs(rxq);
3290 	ngbe_reset_rx_queue(adapter, rxq);
3291 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3292 
3293 	return 0;
3294 }
3295 
3296 /*
3297  * Start Transmit Units for specified queue.
3298  */
3299 int
3300 ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3301 {
3302 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3303 	struct ngbe_tx_queue *txq;
3304 	uint32_t txdctl;
3305 	int poll_ms;
3306 
3307 	PMD_INIT_FUNC_TRACE();
3308 
3309 	txq = dev->data->tx_queues[tx_queue_id];
3310 	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
3311 
3312 	/* Wait until Tx Enable ready */
3313 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3314 	do {
3315 		rte_delay_ms(1);
3316 		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
3317 	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
3318 	if (poll_ms == 0)
3319 		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
3320 			     tx_queue_id);
3321 
3322 	rte_wmb();
3323 	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
3324 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3325 
3326 	return 0;
3327 }
3328 
3329 /*
3330  * Stop Transmit Units for specified queue.
3331  */
3332 int
3333 ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3334 {
3335 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3336 	struct ngbe_tx_queue *txq;
3337 	uint32_t txdctl;
3338 	uint32_t txtdh, txtdt;
3339 	int poll_ms;
3340 
3341 	PMD_INIT_FUNC_TRACE();
3342 
3343 	txq = dev->data->tx_queues[tx_queue_id];
3344 
3345 	/* Wait until Tx queue is empty */
3346 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3347 	do {
3348 		rte_delay_us(RTE_NGBE_WAIT_100_US);
3349 		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
3350 		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
3351 	} while (--poll_ms && (txtdh != txtdt));
3352 	if (poll_ms == 0)
3353 		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
3354 			     tx_queue_id);
3355 
3356 	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
3357 	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
3358 
3359 	/* Wait until Tx Enable bit clear */
3360 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3361 	do {
3362 		rte_delay_ms(1);
3363 		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
3364 	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
3365 	if (poll_ms == 0)
3366 		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
3367 			     tx_queue_id);
3368 
3369 	rte_delay_us(RTE_NGBE_WAIT_100_US);
3370 	ngbe_dev_store_tx_queue(hw, txq->reg_idx);
3371 
3372 	if (txq->ops != NULL) {
3373 		txq->ops->release_mbufs(txq);
3374 		txq->ops->reset(txq);
3375 	}
3376 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3377 
3378 	return 0;
3379 }
3380 
3381 void
3382 ngbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3383 	struct rte_eth_rxq_info *qinfo)
3384 {
3385 	struct ngbe_rx_queue *rxq;
3386 
3387 	rxq = dev->data->rx_queues[queue_id];
3388 
3389 	qinfo->mp = rxq->mb_pool;
3390 	qinfo->scattered_rx = dev->data->scattered_rx;
3391 	qinfo->nb_desc = rxq->nb_rx_desc;
3392 
3393 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3394 	qinfo->conf.rx_drop_en = rxq->drop_en;
3395 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3396 	qinfo->conf.offloads = rxq->offloads;
3397 }
3398 
3399 void
3400 ngbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3401 	struct rte_eth_txq_info *qinfo)
3402 {
3403 	struct ngbe_tx_queue *txq;
3404 
3405 	txq = dev->data->tx_queues[queue_id];
3406 
3407 	qinfo->nb_desc = txq->nb_tx_desc;
3408 
3409 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
3410 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
3411 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
3412 
3413 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
3414 	qinfo->conf.offloads = txq->offloads;
3415 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3416 }
3417 
3418 /* Stubs needed for linkage when RTE_ARCH_PPC_64, RTE_ARCH_RISCV or
3419  * RTE_ARCH_LOONGARCH is set.
3420  */
3421 #if defined(RTE_ARCH_PPC_64) || defined(RTE_ARCH_RISCV) || \
3422 	defined(RTE_ARCH_LOONGARCH)
3423 int
3424 ngbe_rx_vec_dev_conf_condition_check(__rte_unused struct rte_eth_dev *dev)
3425 {
3426 	return -1;
3427 }
3428 
3429 uint16_t
3430 ngbe_recv_pkts_vec(__rte_unused void *rx_queue,
3431 		   __rte_unused struct rte_mbuf **rx_pkts,
3432 		   __rte_unused uint16_t nb_pkts)
3433 {
3434 	return 0;
3435 }
3436 
3437 uint16_t
3438 ngbe_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
3439 			     __rte_unused struct rte_mbuf **rx_pkts,
3440 			     __rte_unused uint16_t nb_pkts)
3441 {
3442 	return 0;
3443 }
3444 
3445 int
3446 ngbe_rxq_vec_setup(__rte_unused struct ngbe_rx_queue *rxq)
3447 {
3448 	return -1;
3449 }
3450 
3451 uint16_t
3452 ngbe_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
3453 			  __rte_unused struct rte_mbuf **tx_pkts,
3454 			  __rte_unused uint16_t nb_pkts)
3455 {
3456 	return 0;
3457 }
3458 
3459 int
3460 ngbe_txq_vec_setup(__rte_unused struct ngbe_tx_queue *txq)
3461 {
3462 	return -1;
3463 }
3464 
3465 void
3466 ngbe_rx_queue_release_mbufs_vec(__rte_unused struct ngbe_rx_queue *rxq)
3467 {
3468 }
3469 #endif
3470