1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <sys/queue.h>
7 
8 #include <stdint.h>
9 #include <rte_ethdev.h>
10 #include <ethdev_driver.h>
11 #include <rte_malloc.h>
12 #include <rte_net.h>
13 
14 #include "ngbe_logs.h"
15 #include "base/ngbe.h"
16 #include "ngbe_ethdev.h"
17 #include "ngbe_rxtx.h"
18 
19 #ifdef RTE_LIBRTE_IEEE1588
20 #define NGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
21 #else
22 #define NGBE_TX_IEEE1588_TMST 0
23 #endif
24 
25 /* Bit Mask to indicate what bits required for building Tx context */
26 static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
27 		RTE_MBUF_F_TX_IPV6 |
28 		RTE_MBUF_F_TX_IPV4 |
29 		RTE_MBUF_F_TX_VLAN |
30 		RTE_MBUF_F_TX_L4_MASK |
31 		RTE_MBUF_F_TX_TCP_SEG |
32 		NGBE_TX_IEEE1588_TMST);
33 
34 #define NGBE_TX_OFFLOAD_NOTSUP_MASK \
35 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ NGBE_TX_OFFLOAD_MASK)
36 
37 /*
38  * Prefetch a cache line into all cache levels.
39  */
40 #define rte_ngbe_prefetch(p)   rte_prefetch0(p)
41 
42 /*********************************************************************
43  *
44  *  Tx functions
45  *
46  **********************************************************************/
47 
48 /*
49  * Check for descriptors with their DD bit set and free mbufs.
50  * Return the total number of buffers freed.
51  */
52 static __rte_always_inline int
53 ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
54 {
55 	struct ngbe_tx_entry *txep;
56 	uint32_t status;
57 	int i, nb_free = 0;
58 	struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];
59 
60 	/* check DD bit on threshold descriptor */
61 	status = txq->tx_ring[txq->tx_next_dd].dw3;
62 	if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
63 		if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
64 			ngbe_set32_masked(txq->tdc_reg_addr,
65 				NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
66 		return 0;
67 	}
68 
69 	/*
70 	 * first buffer to free from S/W ring is at index
71 	 * tx_next_dd - (tx_free_thresh-1)
72 	 */
73 	txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
74 	for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
75 		/* free buffers one at a time */
76 		m = rte_pktmbuf_prefree_seg(txep->mbuf);
77 		txep->mbuf = NULL;
78 
79 		if (unlikely(m == NULL))
80 			continue;
81 
82 		if (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||
83 		    (nb_free > 0 && m->pool != free[0]->pool)) {
84 			rte_mempool_put_bulk(free[0]->pool,
85 					     (void **)free, nb_free);
86 			nb_free = 0;
87 		}
88 
89 		free[nb_free++] = m;
90 	}
91 
92 	if (nb_free > 0)
93 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
94 
95 	/* buffers were freed, update counters */
96 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
97 	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
98 	if (txq->tx_next_dd >= txq->nb_tx_desc)
99 		txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
100 
101 	return txq->tx_free_thresh;
102 }
103 
104 /* Populate 4 descriptors with data from 4 mbufs */
105 static inline void
106 tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
107 {
108 	uint64_t buf_dma_addr;
109 	uint32_t pkt_len;
110 	int i;
111 
112 	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
113 		buf_dma_addr = rte_mbuf_data_iova(*pkts);
114 		pkt_len = (*pkts)->data_len;
115 
116 		/* write data to descriptor */
117 		txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
118 		txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
119 					NGBE_TXD_DATLEN(pkt_len));
120 		txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
121 
122 		rte_prefetch0(&(*pkts)->pool);
123 	}
124 }
125 
126 /* Populate 1 descriptor with data from 1 mbuf */
127 static inline void
128 tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
129 {
130 	uint64_t buf_dma_addr;
131 	uint32_t pkt_len;
132 
133 	buf_dma_addr = rte_mbuf_data_iova(*pkts);
134 	pkt_len = (*pkts)->data_len;
135 
136 	/* write data to descriptor */
137 	txdp->qw0 = cpu_to_le64(buf_dma_addr);
138 	txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
139 				NGBE_TXD_DATLEN(pkt_len));
140 	txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
141 
142 	rte_prefetch0(&(*pkts)->pool);
143 }
144 
145 /*
146  * Fill H/W descriptor ring with mbuf data.
147  * Copy mbuf pointers to the S/W ring.
148  */
149 static inline void
150 ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,
151 		      uint16_t nb_pkts)
152 {
153 	volatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
154 	struct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
155 	const int N_PER_LOOP = 4;
156 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
157 	int mainpart, leftover;
158 	int i, j;
159 
160 	/*
161 	 * Process most of the packets in chunks of N pkts.  Any
162 	 * leftover packets will get processed one at a time.
163 	 */
164 	mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
165 	leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
166 	for (i = 0; i < mainpart; i += N_PER_LOOP) {
167 		/* Copy N mbuf pointers to the S/W ring */
168 		for (j = 0; j < N_PER_LOOP; ++j)
169 			(txep + i + j)->mbuf = *(pkts + i + j);
170 		tx4(txdp + i, pkts + i);
171 	}
172 
173 	if (unlikely(leftover > 0)) {
174 		for (i = 0; i < leftover; ++i) {
175 			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
176 			tx1(txdp + mainpart + i, pkts + mainpart + i);
177 		}
178 	}
179 }
180 
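/*
 * Simple Tx path used by ngbe_xmit_pkts_simple(): free completed
 * buffers if needed, fill the descriptor ring with single-segment
 * packets and update the tail register.
 */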
181 static inline uint16_t
182 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
183 	     uint16_t nb_pkts)
184 {
185 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
186 	uint16_t n = 0;
187 
188 	/*
189 	 * Begin scanning the H/W ring for done descriptors when the
190 	 * number of available descriptors drops below tx_free_thresh.
191 	 * For each done descriptor, free the associated buffer.
192 	 */
193 	if (txq->nb_tx_free < txq->tx_free_thresh)
194 		ngbe_tx_free_bufs(txq);
195 
196 	/* Only use descriptors that are available */
197 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
198 	if (unlikely(nb_pkts == 0))
199 		return 0;
200 
201 	/* Use exactly nb_pkts descriptors */
202 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
203 
204 	/*
205 	 * At this point, we know there are enough descriptors in the
206 	 * ring to transmit all the packets.  This assumes that each
207 	 * mbuf contains a single segment, and that no new offloads
208 	 * are expected, which would require a new context descriptor.
209 	 */
210 
211 	/*
212 	 * See if we're going to wrap-around. If so, handle the top
213 	 * of the descriptor ring first, then do the bottom.  If not,
214 	 * the processing looks just like the "bottom" part anyway...
215 	 */
216 	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
217 		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
218 		ngbe_tx_fill_hw_ring(txq, tx_pkts, n);
219 		txq->tx_tail = 0;
220 	}
221 
222 	/* Fill H/W descriptor ring with mbuf data */
223 	ngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
224 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
225 
226 	/*
227 	 * Check for wrap-around. This would only happen if we used
228 	 * up to the last descriptor in the ring, no more, no less.
229 	 */
230 	if (txq->tx_tail >= txq->nb_tx_desc)
231 		txq->tx_tail = 0;
232 
233 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
234 		   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
235 		   (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
236 
237 	/* update tail pointer */
238 	rte_wmb();
239 	ngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
240 
241 	return nb_pkts;
242 }
243 
244 uint16_t
245 ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
246 		       uint16_t nb_pkts)
247 {
248 	uint16_t nb_tx;
249 
250 	/* Try to transmit at least chunks of TX_MAX_BURST pkts */
251 	if (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))
252 		return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
253 
254 	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
255 	nb_tx = 0;
256 	while (nb_pkts != 0) {
257 		uint16_t ret, n;
258 
259 		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);
260 		ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
261 		nb_tx = (uint16_t)(nb_tx + ret);
262 		nb_pkts = (uint16_t)(nb_pkts - ret);
263 		if (ret < n)
264 			break;
265 	}
266 
267 	return nb_tx;
268 }
269 
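/*
 * Build a Tx context descriptor from the packet's offload flags and
 * header lengths, and cache the result in the queue so that subsequent
 * packets with the same offload requirements can reuse it.
 */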
270 static inline void
271 ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
272 		volatile struct ngbe_tx_ctx_desc *ctx_txd,
273 		uint64_t ol_flags, union ngbe_tx_offload tx_offload)
274 {
275 	union ngbe_tx_offload tx_offload_mask;
276 	uint32_t type_tucmd_mlhl;
277 	uint32_t mss_l4len_idx;
278 	uint32_t ctx_idx;
279 	uint32_t vlan_macip_lens;
280 	uint32_t tunnel_seed;
281 
282 	ctx_idx = txq->ctx_curr;
283 	tx_offload_mask.data[0] = 0;
284 	tx_offload_mask.data[1] = 0;
285 
286 	/* Specify which HW CTX to upload. */
287 	mss_l4len_idx = NGBE_TXD_IDX(ctx_idx);
288 	type_tucmd_mlhl = NGBE_TXD_CTXT;
289 
290 	tx_offload_mask.ptid |= ~0;
291 	type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid);
292 
293 	/* check if TCP segmentation required for this packet */
294 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
295 		tx_offload_mask.l2_len |= ~0;
296 		tx_offload_mask.l3_len |= ~0;
297 		tx_offload_mask.l4_len |= ~0;
298 		tx_offload_mask.tso_segsz |= ~0;
299 		mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz);
300 		mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len);
301 	} else { /* no TSO, check if hardware checksum is needed */
302 		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
303 			tx_offload_mask.l2_len |= ~0;
304 			tx_offload_mask.l3_len |= ~0;
305 		}
306 
307 		switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
308 		case RTE_MBUF_F_TX_UDP_CKSUM:
309 			mss_l4len_idx |=
310 				NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
311 			tx_offload_mask.l2_len |= ~0;
312 			tx_offload_mask.l3_len |= ~0;
313 			break;
314 		case RTE_MBUF_F_TX_TCP_CKSUM:
315 			mss_l4len_idx |=
316 				NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
317 			tx_offload_mask.l2_len |= ~0;
318 			tx_offload_mask.l3_len |= ~0;
319 			break;
320 		case RTE_MBUF_F_TX_SCTP_CKSUM:
321 			mss_l4len_idx |=
322 				NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
323 			tx_offload_mask.l2_len |= ~0;
324 			tx_offload_mask.l3_len |= ~0;
325 			break;
326 		default:
327 			break;
328 		}
329 	}
330 
331 	vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
332 	vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
333 
334 	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
335 		tx_offload_mask.vlan_tci |= ~0;
336 		vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
337 	}
338 
339 	tunnel_seed = 0;
340 
341 	txq->ctx_cache[ctx_idx].flags = ol_flags;
342 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
343 		tx_offload_mask.data[0] & tx_offload.data[0];
344 	txq->ctx_cache[ctx_idx].tx_offload.data[1] =
345 		tx_offload_mask.data[1] & tx_offload.data[1];
346 	txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
347 
348 	ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
349 	ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
350 	ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
351 	ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
352 }
353 
354 /*
355  * Check which hardware context can be used. Use the existing match
356  * or create a new context descriptor.
357  */
358 static inline uint32_t
359 what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags,
360 		   union ngbe_tx_offload tx_offload)
361 {
362 	/* If it matches the currently used context */
363 	if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
364 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
365 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
366 		     & tx_offload.data[0])) &&
367 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
368 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
369 		     & tx_offload.data[1]))))
370 		return txq->ctx_curr;
371 
372 	/* Otherwise, check whether it matches the other (next) context */
373 	txq->ctx_curr ^= 1;
374 	if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
375 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
376 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
377 		     & tx_offload.data[0])) &&
378 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
379 		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
380 		     & tx_offload.data[1]))))
381 		return txq->ctx_curr;
382 
383 	/* Mismatch: a new context descriptor must be built */
384 	return NGBE_CTX_NUM;
385 }
386 
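/* Translate Tx offload flags into checksum/olinfo bits for the data descriptor */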
387 static inline uint32_t
388 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
389 {
390 	uint32_t tmp = 0;
391 
392 	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
393 		tmp |= NGBE_TXD_CC;
394 		tmp |= NGBE_TXD_L4CS;
395 	}
396 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
397 		tmp |= NGBE_TXD_CC;
398 		tmp |= NGBE_TXD_IPCS;
399 	}
400 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
401 		tmp |= NGBE_TXD_CC;
402 		tmp |= NGBE_TXD_EIPCS;
403 	}
404 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
405 		tmp |= NGBE_TXD_CC;
406 		/* implies IPv4 cksum */
407 		if (ol_flags & RTE_MBUF_F_TX_IPV4)
408 			tmp |= NGBE_TXD_IPCS;
409 		tmp |= NGBE_TXD_L4CS;
410 	}
411 	if (ol_flags & RTE_MBUF_F_TX_VLAN)
412 		tmp |= NGBE_TXD_CC;
413 
414 	return tmp;
415 }
416 
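/* Translate Tx offload flags into command-type bits (VLAN insertion, TSO) */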
417 static inline uint32_t
418 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
419 {
420 	uint32_t cmdtype = 0;
421 
422 	if (ol_flags & RTE_MBUF_F_TX_VLAN)
423 		cmdtype |= NGBE_TXD_VLE;
424 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
425 		cmdtype |= NGBE_TXD_TSE;
426 	return cmdtype;
427 }
428 
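/* Derive an RTE_PTYPE_* packet type from the Tx offload flags */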
429 static inline uint32_t
430 tx_desc_ol_flags_to_ptype(uint64_t oflags)
431 {
432 	uint32_t ptype;
433 
434 	/* L2 level */
435 	ptype = RTE_PTYPE_L2_ETHER;
436 	if (oflags & RTE_MBUF_F_TX_VLAN)
437 		ptype |= RTE_PTYPE_L2_ETHER_VLAN;
438 
439 	/* L3 level */
440 	if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
441 		ptype |= RTE_PTYPE_L3_IPV4;
442 	else if (oflags & (RTE_MBUF_F_TX_IPV6))
443 		ptype |= RTE_PTYPE_L3_IPV6;
444 
445 	/* L4 level */
446 	switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
447 	case RTE_MBUF_F_TX_TCP_CKSUM:
448 		ptype |= RTE_PTYPE_L4_TCP;
449 		break;
450 	case RTE_MBUF_F_TX_UDP_CKSUM:
451 		ptype |= RTE_PTYPE_L4_UDP;
452 		break;
453 	case RTE_MBUF_F_TX_SCTP_CKSUM:
454 		ptype |= RTE_PTYPE_L4_SCTP;
455 		break;
456 	}
457 
458 	if (oflags & RTE_MBUF_F_TX_TCP_SEG)
459 		ptype |= RTE_PTYPE_L4_TCP;
460 
461 	return ptype;
462 }
463 
464 static inline uint8_t
465 tx_desc_ol_flags_to_ptid(uint64_t oflags)
466 {
467 	uint32_t ptype;
468 
469 	ptype = tx_desc_ol_flags_to_ptype(oflags);
470 
471 	return ngbe_encode_ptype(ptype);
472 }
473 
474 /* Reset transmit descriptors after they have been used */
475 static inline int
476 ngbe_xmit_cleanup(struct ngbe_tx_queue *txq)
477 {
478 	struct ngbe_tx_entry *sw_ring = txq->sw_ring;
479 	volatile struct ngbe_tx_desc *txr = txq->tx_ring;
480 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
481 	uint16_t nb_tx_desc = txq->nb_tx_desc;
482 	uint16_t desc_to_clean_to;
483 	uint16_t nb_tx_to_clean;
484 	uint32_t status;
485 
486 	/* Determine the last descriptor needing to be cleaned */
487 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
488 	if (desc_to_clean_to >= nb_tx_desc)
489 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
490 
491 	/* Check to make sure the last descriptor to clean is done */
492 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
493 	status = txr[desc_to_clean_to].dw3;
494 	if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
495 		PMD_TX_LOG(DEBUG,
496 			"Tx descriptor %4u is not done "
497 			"(port=%d queue=%d)",
498 			desc_to_clean_to,
499 			txq->port_id, txq->queue_id);
500 		if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
501 			ngbe_set32_masked(txq->tdc_reg_addr,
502 				NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
503 		/* Failed to clean any descriptors, better luck next time */
504 		return -(1);
505 	}
506 
507 	/* Figure out how many descriptors will be cleaned */
508 	if (last_desc_cleaned > desc_to_clean_to)
509 		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
510 							desc_to_clean_to);
511 	else
512 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
513 						last_desc_cleaned);
514 
515 	PMD_TX_LOG(DEBUG,
516 		"Cleaning %4u Tx descriptors: %4u to %4u (port=%d queue=%d)",
517 		nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
518 		txq->port_id, txq->queue_id);
519 
520 	/*
521 	 * The last descriptor to clean is done, so that means all the
522 	 * descriptors from the last descriptor that was cleaned
523 	 * up to the last descriptor with the RS bit set
524 	 * are done. Only reset the threshold descriptor.
525 	 */
526 	txr[desc_to_clean_to].dw3 = 0;
527 
528 	/* Update the txq to reflect the last descriptor that was cleaned */
529 	txq->last_desc_cleaned = desc_to_clean_to;
530 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
531 
532 	/* No Error */
533 	return 0;
534 }
535 
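/*
 * Full-featured Tx burst handler: supports multi-segment packets and
 * hardware offloads (checksum, VLAN insertion, TSO) by setting up
 * context descriptors when required.
 */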
536 uint16_t
537 ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
538 		uint16_t nb_pkts)
539 {
540 	struct ngbe_tx_queue *txq;
541 	struct ngbe_tx_entry *sw_ring;
542 	struct ngbe_tx_entry *txe, *txn;
543 	volatile struct ngbe_tx_desc *txr;
544 	volatile struct ngbe_tx_desc *txd;
545 	struct rte_mbuf     *tx_pkt;
546 	struct rte_mbuf     *m_seg;
547 	uint64_t buf_dma_addr;
548 	uint32_t olinfo_status;
549 	uint32_t cmd_type_len;
550 	uint32_t pkt_len;
551 	uint16_t slen;
552 	uint64_t ol_flags;
553 	uint16_t tx_id;
554 	uint16_t tx_last;
555 	uint16_t nb_tx;
556 	uint16_t nb_used;
557 	uint64_t tx_ol_req;
558 	uint32_t ctx = 0;
559 	uint32_t new_ctx;
560 	union ngbe_tx_offload tx_offload;
561 
562 	tx_offload.data[0] = 0;
563 	tx_offload.data[1] = 0;
564 	txq = tx_queue;
565 	sw_ring = txq->sw_ring;
566 	txr     = txq->tx_ring;
567 	tx_id   = txq->tx_tail;
568 	txe = &sw_ring[tx_id];
569 
570 	/* Determine if the descriptor ring needs to be cleaned. */
571 	if (txq->nb_tx_free < txq->tx_free_thresh)
572 		ngbe_xmit_cleanup(txq);
573 
574 	rte_prefetch0(&txe->mbuf->pool);
575 
576 	/* Tx loop */
577 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
578 		new_ctx = 0;
579 		tx_pkt = *tx_pkts++;
580 		pkt_len = tx_pkt->pkt_len;
581 
582 		/*
583 		 * Determine how many (if any) context descriptors
584 		 * are needed for offload functionality.
585 		 */
586 		ol_flags = tx_pkt->ol_flags;
587 
588 		/* If hardware offload required */
589 		tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
590 		if (tx_ol_req) {
591 			tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req);
592 			tx_offload.l2_len = tx_pkt->l2_len;
593 			tx_offload.l3_len = tx_pkt->l3_len;
594 			tx_offload.l4_len = tx_pkt->l4_len;
595 			tx_offload.vlan_tci = tx_pkt->vlan_tci;
596 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
597 
598 			/* Check if a new context descriptor is needed or the existing one can be reused */
599 			ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
600 			/* Only allocate context descriptor if required */
601 			new_ctx = (ctx == NGBE_CTX_NUM);
602 			ctx = txq->ctx_curr;
603 		}
604 
605 		/*
606 		 * Keep track of how many descriptors are used this loop
607 		 * This will always be the number of segments + the number of
608 		 * Context descriptors required to transmit the packet
609 		 */
610 		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
611 
612 		/*
613 		 * The number of descriptors that must be allocated for a
614 		 * packet is the number of segments of that packet, plus 1
615 		 * Context Descriptor for the hardware offload, if any.
616 		 * Determine the last Tx descriptor to allocate in the Tx ring
617 		 * for the packet, starting from the current position (tx_id)
618 		 * in the ring.
619 		 */
620 		tx_last = (uint16_t)(tx_id + nb_used - 1);
621 
622 		/* Circular ring */
623 		if (tx_last >= txq->nb_tx_desc)
624 			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
625 
626 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
627 			   " tx_first=%u tx_last=%u",
628 			   (uint16_t)txq->port_id,
629 			   (uint16_t)txq->queue_id,
630 			   (uint32_t)pkt_len,
631 			   (uint16_t)tx_id,
632 			   (uint16_t)tx_last);
633 
634 		/*
635 		 * Make sure there are enough Tx descriptors available to
636 		 * transmit the entire packet.
637 		 * nb_used better be less than or equal to txq->tx_free_thresh
638 		 */
639 		if (nb_used > txq->nb_tx_free) {
640 			PMD_TX_LOG(DEBUG,
641 				"Not enough free Tx descriptors "
642 				"nb_used=%4u nb_free=%4u "
643 				"(port=%d queue=%d)",
644 				nb_used, txq->nb_tx_free,
645 				txq->port_id, txq->queue_id);
646 
647 			if (ngbe_xmit_cleanup(txq) != 0) {
648 				/* Could not clean any descriptors */
649 				if (nb_tx == 0)
650 					return 0;
651 				goto end_of_tx;
652 			}
653 
654 			/* nb_used better be <= txq->tx_free_thresh */
655 			if (unlikely(nb_used > txq->tx_free_thresh)) {
656 				PMD_TX_LOG(DEBUG,
657 					"The number of descriptors needed to "
658 					"transmit the packet exceeds the "
659 					"RS bit threshold. This will impact "
660 					"performance. "
661 					"nb_used=%4u nb_free=%4u "
662 					"tx_free_thresh=%4u. "
663 					"(port=%d queue=%d)",
664 					nb_used, txq->nb_tx_free,
665 					txq->tx_free_thresh,
666 					txq->port_id, txq->queue_id);
667 				/*
668 				 * Loop here until there are enough Tx
669 				 * descriptors or until the ring cannot be
670 				 * cleaned.
671 				 */
672 				while (nb_used > txq->nb_tx_free) {
673 					if (ngbe_xmit_cleanup(txq) != 0) {
674 						/*
675 						 * Could not clean any
676 						 * descriptors
677 						 */
678 						if (nb_tx == 0)
679 							return 0;
680 						goto end_of_tx;
681 					}
682 				}
683 			}
684 		}
685 
686 		/*
687 		 * By now there are enough free Tx descriptors to transmit
688 		 * the packet.
689 		 */
690 
691 		/*
692 		 * Set common flags of all Tx Data Descriptors.
693 		 *
694 		 * The following bits must be set in the first Data Descriptor
695 		 * and are ignored in the other ones:
696 		 *   - NGBE_TXD_FCS
697 		 *
698 		 * The following bits must only be set in the last Data
699 		 * Descriptor:
700 		 *   - NGBE_TXD_EOP
701 		 */
702 		cmd_type_len = NGBE_TXD_FCS;
703 
704 #ifdef RTE_LIBRTE_IEEE1588
705 		if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
706 			cmd_type_len |= NGBE_TXD_1588;
707 #endif
708 
709 		olinfo_status = 0;
710 		if (tx_ol_req) {
711 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
712 				/* when TSO is on, the paylen in the descriptor is
713 				 * not the packet length but the TCP payload length
714 				 */
715 				pkt_len -= (tx_offload.l2_len +
716 					tx_offload.l3_len + tx_offload.l4_len);
717 			}
718 
719 			/*
720 			 * Setup the Tx Context Descriptor if required
721 			 */
722 			if (new_ctx) {
723 				volatile struct ngbe_tx_ctx_desc *ctx_txd;
724 
725 				ctx_txd = (volatile struct ngbe_tx_ctx_desc *)
726 				    &txr[tx_id];
727 
728 				txn = &sw_ring[txe->next_id];
729 				rte_prefetch0(&txn->mbuf->pool);
730 
731 				if (txe->mbuf != NULL) {
732 					rte_pktmbuf_free_seg(txe->mbuf);
733 					txe->mbuf = NULL;
734 				}
735 
736 				ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
737 					tx_offload);
738 
739 				txe->last_id = tx_last;
740 				tx_id = txe->next_id;
741 				txe = txn;
742 			}
743 
744 			/*
745 			 * Set up the Tx Data Descriptor.
746 			 * This path is taken whether a new context descriptor
747 			 * was built or an existing one is reused.
748 			 */
749 			cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
750 			olinfo_status |=
751 				tx_desc_cksum_flags_to_olinfo(ol_flags);
752 			olinfo_status |= NGBE_TXD_IDX(ctx);
753 		}
754 
755 		olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
756 
757 		m_seg = tx_pkt;
758 		do {
759 			txd = &txr[tx_id];
760 			txn = &sw_ring[txe->next_id];
761 			rte_prefetch0(&txn->mbuf->pool);
762 
763 			if (txe->mbuf != NULL)
764 				rte_pktmbuf_free_seg(txe->mbuf);
765 			txe->mbuf = m_seg;
766 
767 			/*
768 			 * Set up Transmit Data Descriptor.
769 			 */
770 			slen = m_seg->data_len;
771 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
772 			txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
773 			txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
774 			txd->dw3 = rte_cpu_to_le_32(olinfo_status);
775 			txe->last_id = tx_last;
776 			tx_id = txe->next_id;
777 			txe = txn;
778 			m_seg = m_seg->next;
779 		} while (m_seg != NULL);
780 
781 		/*
782 		 * The last packet data descriptor needs End Of Packet (EOP)
783 		 */
784 		cmd_type_len |= NGBE_TXD_EOP;
785 		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
786 
787 		txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
788 	}
789 
790 end_of_tx:
791 
792 	rte_wmb();
793 
794 	/*
795 	 * Set the Transmit Descriptor Tail (TDT)
796 	 */
797 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
798 		   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
799 		   (uint16_t)tx_id, (uint16_t)nb_tx);
800 	ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
801 	txq->tx_tail = tx_id;
802 
803 	return nb_tx;
804 }
805 
806 /*********************************************************************
807  *
808  *  Tx prep functions
809  *
810  **********************************************************************/
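/*
 * Validate a burst of packets before transmission: check segment count
 * and offload flags, and prepare checksum fields as required.
 */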
811 uint16_t
812 ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
813 {
814 	int i, ret;
815 	uint64_t ol_flags;
816 	struct rte_mbuf *m;
817 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
818 
819 	for (i = 0; i < nb_pkts; i++) {
820 		m = tx_pkts[i];
821 		ol_flags = m->ol_flags;
822 
823 		/**
824 		 * Check if packet meets requirements for number of segments
825 		 *
826 		 * NOTE: for ngbe the limit is always (NGBE_TX_MAX_SEG - WTHRESH),
827 		 *       for both TSO and non-TSO packets
828 		 */
829 
830 		if (m->nb_segs > NGBE_TX_MAX_SEG - txq->wthresh) {
831 			rte_errno = EINVAL;	/* rte_errno takes positive errno values */
832 			return i;
833 		}
834 
835 		if (ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) {
836 			rte_errno = ENOTSUP;
837 			return i;
838 		}
839 
840 #ifdef RTE_ETHDEV_DEBUG_TX
841 		ret = rte_validate_tx_offload(m);
842 		if (ret != 0) {
843 			rte_errno = -ret;
844 			return i;
845 		}
846 #endif
847 		ret = rte_net_intel_cksum_prepare(m);
848 		if (ret != 0) {
849 			rte_errno = -ret;
850 			return i;
851 		}
852 	}
853 
854 	return i;
855 }
856 
857 /*********************************************************************
858  *
859  *  Rx functions
860  *
861  **********************************************************************/
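/* Decode the hardware packet type ID carried in the Rx descriptor's pkt_info field */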
862 static inline uint32_t
863 ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
864 {
865 	uint16_t ptid = NGBE_RXD_PTID(pkt_info);
866 
867 	ptid &= ptid_mask;
868 
869 	return ngbe_decode_ptype(ptid);
870 }
871 
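/*
 * Convert the RSS type in pkt_info into mbuf offload flags
 * (RSS hash, FDIR, and IEEE 1588 PTP when enabled).
 */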
872 static inline uint64_t
873 ngbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
874 {
875 	static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
876 		0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
877 		0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
878 		RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
879 		0, 0, 0,  RTE_MBUF_F_RX_FDIR,
880 	};
881 #ifdef RTE_LIBRTE_IEEE1588
882 	static uint64_t ip_pkt_etqf_map[8] = {
883 		0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
884 		0, 0, 0, 0,
885 	};
886 	int etfid = ngbe_etflt_id(NGBE_RXD_PTID(pkt_info));
887 	if (likely(-1 != etfid))
888 		return ip_pkt_etqf_map[etfid] |
889 		       ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
890 	else
891 		return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
892 #else
893 	return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
894 #endif
895 }
896 
897 static inline uint64_t
898 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
899 {
900 	uint64_t pkt_flags;
901 
902 	/*
903 	 * Check only whether a VLAN tag is present.
904 	 * Do not check here whether the L3/L4 Rx checksum was done by the NIC;
905 	 * that can be determined from the rte_eth_rxmode.offloads flags.
906 	 */
907 	pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
908 		     vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
909 		    ? vlan_flags : 0;
910 
911 #ifdef RTE_LIBRTE_IEEE1588
912 	if (rx_status & NGBE_RXD_STAT_1588)
913 		pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
914 #endif
915 	return pkt_flags;
916 }
917 
918 static inline uint64_t
919 rx_desc_error_to_pkt_flags(uint32_t rx_status)
920 {
921 	uint64_t pkt_flags = 0;
922 
923 	/* checksum offload can't be disabled */
924 	if (rx_status & NGBE_RXD_STAT_IPCS)
925 		pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
926 				? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
927 
928 	if (rx_status & NGBE_RXD_STAT_L4CS)
929 		pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
930 				? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
931 
932 	if (rx_status & NGBE_RXD_STAT_EIPCS &&
933 	    rx_status & NGBE_RXD_ERR_EIPCS)
934 		pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
935 
936 	return pkt_flags;
937 }
938 
939 /*
940  * LOOK_AHEAD defines how many desc statuses to check beyond the
941  * current descriptor.
942  * It must be a pound define for optimal performance.
943  * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
944  * function only works with LOOK_AHEAD=8.
945  */
946 #define LOOK_AHEAD 8
947 #if (LOOK_AHEAD != 8)
948 #error "PMD NGBE: LOOK_AHEAD must be 8\n"
949 #endif
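/*
 * Scan the Rx ring in groups of LOOK_AHEAD descriptors, translate the
 * completed ones into mbufs and stage them for later delivery by
 * ngbe_rx_fill_from_stage().
 */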
950 static inline int
951 ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
952 {
953 	volatile struct ngbe_rx_desc *rxdp;
954 	struct ngbe_rx_entry *rxep;
955 	struct rte_mbuf *mb;
956 	uint16_t pkt_len;
957 	uint64_t pkt_flags;
958 	int nb_dd;
959 	uint32_t s[LOOK_AHEAD];
960 	uint32_t pkt_info[LOOK_AHEAD];
961 	int i, j, nb_rx = 0;
962 	uint32_t status;
963 
964 	/* get references to current descriptor and S/W ring entry */
965 	rxdp = &rxq->rx_ring[rxq->rx_tail];
966 	rxep = &rxq->sw_ring[rxq->rx_tail];
967 
968 	status = rxdp->qw1.lo.status;
969 	/* check to make sure there is at least 1 packet to receive */
970 	if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
971 		return 0;
972 
973 	/*
974 	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
975 	 * reference packets that are ready to be received.
976 	 */
977 	for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
978 	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
979 		/* Read desc statuses backwards to avoid race condition */
980 		/* Read desc statuses; the acquire fence below orders them before other fields */
981 			s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
982 
983 		rte_atomic_thread_fence(rte_memory_order_acquire);
984 
985 		/* Compute how many status bits were set */
986 		for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
987 				(s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
988 			;
989 
990 		for (j = 0; j < nb_dd; j++)
991 			pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
992 
993 		nb_rx += nb_dd;
994 
995 		/* Translate descriptor info to mbuf format */
996 		for (j = 0; j < nb_dd; ++j) {
997 			mb = rxep[j].mbuf;
998 			pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
999 				  rxq->crc_len;
1000 			mb->data_len = pkt_len;
1001 			mb->pkt_len = pkt_len;
1002 			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
1003 
1004 			/* convert descriptor fields to rte mbuf flags */
1005 			pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1006 					rxq->vlan_flags);
1007 			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1008 			pkt_flags |=
1009 				ngbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1010 			mb->ol_flags = pkt_flags;
1011 			mb->packet_type =
1012 				ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
1013 				NGBE_PTID_MASK);
1014 
1015 			if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1016 				mb->hash.rss =
1017 					rte_le_to_cpu_32(rxdp[j].qw0.dw1);
1018 		}
1019 
1020 		/* Move mbuf pointers from the S/W ring to the stage */
1021 		for (j = 0; j < LOOK_AHEAD; ++j)
1022 			rxq->rx_stage[i + j] = rxep[j].mbuf;
1023 
1024 		/* stop if all requested packets could not be received */
1025 		if (nb_dd != LOOK_AHEAD)
1026 			break;
1027 	}
1028 
1029 	/* clear software ring entries so we can cleanup correctly */
1030 	for (i = 0; i < nb_rx; ++i)
1031 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1032 
1033 	return nb_rx;
1034 }
1035 
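/* Allocate rx_free_thresh mbufs in bulk and use them to refill the Rx descriptors */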
1036 static inline int
1037 ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
1038 {
1039 	volatile struct ngbe_rx_desc *rxdp;
1040 	struct ngbe_rx_entry *rxep;
1041 	struct rte_mbuf *mb;
1042 	uint16_t alloc_idx;
1043 	__le64 dma_addr;
1044 	int diag, i;
1045 
1046 	/* allocate buffers in bulk directly into the S/W ring */
1047 	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1048 	rxep = &rxq->sw_ring[alloc_idx];
1049 	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1050 				    rxq->rx_free_thresh);
1051 	if (unlikely(diag != 0))
1052 		return -ENOMEM;
1053 
1054 	rxdp = &rxq->rx_ring[alloc_idx];
1055 	for (i = 0; i < rxq->rx_free_thresh; ++i) {
1056 		/* populate the static rte mbuf fields */
1057 		mb = rxep[i].mbuf;
1058 		if (reset_mbuf)
1059 			mb->port = rxq->port_id;
1060 
1061 		rte_mbuf_refcnt_set(mb, 1);
1062 		mb->data_off = RTE_PKTMBUF_HEADROOM;
1063 
1064 		/* populate the descriptors */
1065 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1066 		NGBE_RXD_HDRADDR(&rxdp[i], 0);
1067 		NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1068 	}
1069 
1070 	/* update state of internal queue structure */
1071 	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1072 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1073 		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1074 
1075 	/* no errors */
1076 	return 0;
1077 }
1078 
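/* Hand out up to nb_pkts mbufs previously staged by ngbe_rx_scan_hw_ring() */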
1079 static inline uint16_t
1080 ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1081 			 uint16_t nb_pkts)
1082 {
1083 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1084 	int i;
1085 
1086 	/* how many packets are ready to return? */
1087 	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1088 
1089 	/* copy mbuf pointers to the application's packet list */
1090 	for (i = 0; i < nb_pkts; ++i)
1091 		rx_pkts[i] = stage[i];
1092 
1093 	/* update internal queue state */
1094 	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1095 	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1096 
1097 	return nb_pkts;
1098 }
1099 
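/*
 * Rx burst on the bulk-allocation path: drain previously staged packets
 * first, then scan the hardware ring and replenish descriptors as needed.
 */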
1100 static inline uint16_t
1101 ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1102 	     uint16_t nb_pkts)
1103 {
1104 	struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
1105 	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1106 	uint16_t nb_rx = 0;
1107 
1108 	/* Any previously recv'd pkts will be returned from the Rx stage */
1109 	if (rxq->rx_nb_avail)
1110 		return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1111 
1112 	/* Scan the H/W ring for packets to receive */
1113 	nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
1114 
1115 	/* update internal queue state */
1116 	rxq->rx_next_avail = 0;
1117 	rxq->rx_nb_avail = nb_rx;
1118 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1119 
1120 	/* if required, allocate new buffers to replenish descriptors */
1121 	if (rxq->rx_tail > rxq->rx_free_trigger) {
1122 		uint16_t cur_free_trigger = rxq->rx_free_trigger;
1123 
1124 		if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
1125 			int i, j;
1126 
1127 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1128 				   "queue_id=%u", (uint16_t)rxq->port_id,
1129 				   (uint16_t)rxq->queue_id);
1130 
1131 			dev->data->rx_mbuf_alloc_failed +=
1132 				rxq->rx_free_thresh;
1133 
1134 			/*
1135 			 * Need to rewind any previous receives if we cannot
1136 			 * allocate new buffers to replenish the old ones.
1137 			 */
1138 			rxq->rx_nb_avail = 0;
1139 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1140 			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1141 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1142 
1143 			return 0;
1144 		}
1145 
1146 		/* update tail pointer */
1147 		rte_wmb();
1148 		ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1149 	}
1150 
1151 	if (rxq->rx_tail >= rxq->nb_rx_desc)
1152 		rxq->rx_tail = 0;
1153 
1154 	/* received any packets this loop? */
1155 	if (rxq->rx_nb_avail)
1156 		return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1157 
1158 	return 0;
1159 }
1160 
1161 /* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
1162 uint16_t
1163 ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1164 			   uint16_t nb_pkts)
1165 {
1166 	uint16_t nb_rx;
1167 
1168 	if (unlikely(nb_pkts == 0))
1169 		return 0;
1170 
1171 	if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
1172 		return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1173 
1174 	/* request is relatively large, chunk it up */
1175 	nb_rx = 0;
1176 	while (nb_pkts) {
1177 		uint16_t ret, n;
1178 
1179 		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
1180 		ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1181 		nb_rx = (uint16_t)(nb_rx + ret);
1182 		nb_pkts = (uint16_t)(nb_pkts - ret);
1183 		if (ret < n)
1184 			break;
1185 	}
1186 
1187 	return nb_rx;
1188 }
1189 
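/*
 * Default (non-scattered) Rx burst handler: one descriptor per packet,
 * each received mbuf being replaced by a newly allocated one.
 */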
1190 uint16_t
1191 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1192 		uint16_t nb_pkts)
1193 {
1194 	struct ngbe_rx_queue *rxq;
1195 	volatile struct ngbe_rx_desc *rx_ring;
1196 	volatile struct ngbe_rx_desc *rxdp;
1197 	struct ngbe_rx_entry *sw_ring;
1198 	struct ngbe_rx_entry *rxe;
1199 	struct rte_mbuf *rxm;
1200 	struct rte_mbuf *nmb;
1201 	struct ngbe_rx_desc rxd;
1202 	uint64_t dma_addr;
1203 	uint32_t staterr;
1204 	uint32_t pkt_info;
1205 	uint16_t pkt_len;
1206 	uint16_t rx_id;
1207 	uint16_t nb_rx;
1208 	uint16_t nb_hold;
1209 	uint64_t pkt_flags;
1210 
1211 	nb_rx = 0;
1212 	nb_hold = 0;
1213 	rxq = rx_queue;
1214 	rx_id = rxq->rx_tail;
1215 	rx_ring = rxq->rx_ring;
1216 	sw_ring = rxq->sw_ring;
1217 	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1218 	while (nb_rx < nb_pkts) {
1219 		/*
1220 		 * The order of operations here is important as the DD status
1221 		 * bit must not be read after any other descriptor fields.
1222 		 * rx_ring and rxdp are pointing to volatile data so the order
1223 		 * of accesses cannot be reordered by the compiler. If they were
1224 		 * not volatile, they could be reordered which could lead to
1225 		 * using invalid descriptor fields when read from rxd.
1226 		 *
1227 		 * Meanwhile, to prevent the CPU from executing out of order, we
1228 		 * need to use a proper memory barrier to ensure the memory
1229 		 * ordering below.
1230 		 */
1231 		rxdp = &rx_ring[rx_id];
1232 		staterr = rxdp->qw1.lo.status;
1233 		if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
1234 			break;
1235 
1236 		/*
1237 		 * Use acquire fence to ensure that status_error which includes
1238 		 * DD bit is loaded before loading of other descriptor words.
1239 		 */
1240 		rte_atomic_thread_fence(rte_memory_order_acquire);
1241 
1242 		rxd = *rxdp;
1243 
1244 		/*
1245 		 * End of packet.
1246 		 *
1247 		 * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet
1248 		 * is likely to be invalid and to be dropped by the various
1249 		 * validation checks performed by the network stack.
1250 		 *
1251 		 * Allocate a new mbuf to replenish the RX ring descriptor.
1252 		 * If the allocation fails:
1253 		 *    - arrange for that Rx descriptor to be the first one
1254 		 *      being parsed the next time the receive function is
1255 		 *      invoked [on the same queue].
1256 		 *
1257 		 *    - Stop parsing the Rx ring and return immediately.
1258 		 *
1259 		 * This policy does not drop the packet received in the Rx
1260 		 * descriptor for which the allocation of a new mbuf failed.
1261 		 * Thus, it allows that packet to be retrieved later if
1262 		 * mbufs have been freed in the meantime.
1263 		 * As a side effect, holding Rx descriptors instead of
1264 		 * systematically giving them back to the NIC may lead to
1265 		 * Rx ring exhaustion situations.
1266 		 * However, the NIC can gracefully prevent such situations
1267 		 * from happening by sending specific "back-pressure" flow control
1268 		 * frames to its peer(s).
1269 		 */
1270 		PMD_RX_LOG(DEBUG,
1271 			   "port_id=%u queue_id=%u rx_id=%u ext_err_stat=0x%08x pkt_len=%u",
1272 			   (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1273 			   (uint16_t)rx_id, (uint32_t)staterr,
1274 			   (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1275 
1276 		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1277 		if (nmb == NULL) {
1278 			PMD_RX_LOG(DEBUG,
1279 				   "Rx mbuf alloc failed port_id=%u queue_id=%u",
1280 				   (uint16_t)rxq->port_id,
1281 				   (uint16_t)rxq->queue_id);
1282 			dev->data->rx_mbuf_alloc_failed++;
1283 			break;
1284 		}
1285 
1286 		nb_hold++;
1287 		rxe = &sw_ring[rx_id];
1288 		rx_id++;
1289 		if (rx_id == rxq->nb_rx_desc)
1290 			rx_id = 0;
1291 
1292 		/* Prefetch next mbuf while processing current one. */
1293 		rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
1294 
1295 		/*
1296 		 * When next Rx descriptor is on a cache-line boundary,
1297 		 * prefetch the next 4 Rx descriptors and the next 8 pointers
1298 		 * to mbufs.
1299 		 */
1300 		if ((rx_id & 0x3) == 0) {
1301 			rte_ngbe_prefetch(&rx_ring[rx_id]);
1302 			rte_ngbe_prefetch(&sw_ring[rx_id]);
1303 		}
1304 
1305 		rxm = rxe->mbuf;
1306 		rxe->mbuf = nmb;
1307 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1308 		NGBE_RXD_HDRADDR(rxdp, 0);
1309 		NGBE_RXD_PKTADDR(rxdp, dma_addr);
1310 
1311 		/*
1312 		 * Initialize the returned mbuf.
1313 		 * 1) setup generic mbuf fields:
1314 		 *    - number of segments,
1315 		 *    - next segment,
1316 		 *    - packet length,
1317 		 *    - Rx port identifier.
1318 		 * 2) integrate hardware offload data, if any:
1319 		 *    - RSS flag & hash,
1320 		 *    - IP checksum flag,
1321 		 *    - VLAN TCI, if any,
1322 		 *    - error flags.
1323 		 */
1324 		pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1325 				      rxq->crc_len);
1326 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1327 		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1328 		rxm->nb_segs = 1;
1329 		rxm->next = NULL;
1330 		rxm->pkt_len = pkt_len;
1331 		rxm->data_len = pkt_len;
1332 		rxm->port = rxq->port_id;
1333 
1334 		pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1335 		/* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
1336 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
1337 
1338 		pkt_flags = rx_desc_status_to_pkt_flags(staterr,
1339 					rxq->vlan_flags);
1340 		pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1341 		pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1342 		rxm->ol_flags = pkt_flags;
1343 		rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1344 						       NGBE_PTID_MASK);
1345 
1346 		if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1347 			rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
1348 
1349 		/*
1350 		 * Store the mbuf address into the next entry of the array
1351 		 * of returned packets.
1352 		 */
1353 		rx_pkts[nb_rx++] = rxm;
1354 	}
1355 	rxq->rx_tail = rx_id;
1356 
1357 	/*
1358 	 * If the number of free Rx descriptors is greater than the Rx free
1359 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1360 	 * register.
1361 	 * Update the RDT with the value of the last processed Rx descriptor
1362 	 * minus 1, to guarantee that the RDT register is never equal to the
1363 	 * RDH register, which creates a "full" ring situation from the
1364 	 * hardware point of view...
1365 	 */
1366 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1367 	if (nb_hold > rxq->rx_free_thresh) {
1368 		PMD_RX_LOG(DEBUG,
1369 			   "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
1370 			   (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1371 			   (uint16_t)rx_id, (uint16_t)nb_hold,
1372 			   (uint16_t)nb_rx);
1373 		rx_id = (uint16_t)((rx_id == 0) ?
1374 				(rxq->nb_rx_desc - 1) : (rx_id - 1));
1375 		ngbe_set32(rxq->rdt_reg_addr, rx_id);
1376 		nb_hold = 0;
1377 	}
1378 	rxq->nb_rx_hold = nb_hold;
1379 	return nb_rx;
1380 }
1381 
1382 /**
1383  * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1384  *
1385  * Fill the following info in the HEAD buffer of the Rx cluster:
1386  *    - RX port identifier
1387  *    - hardware offload data, if any:
1388  *      - RSS flag & hash
1389  *      - IP checksum flag
1390  *      - VLAN TCI, if any
1391  *      - error flags
1392  * @head HEAD of the packet cluster
1393  * @desc HW descriptor to get data from
1394  * @rxq Pointer to the Rx queue
1395  */
1396 static inline void
1397 ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
1398 		struct ngbe_rx_queue *rxq, uint32_t staterr)
1399 {
1400 	uint32_t pkt_info;
1401 	uint64_t pkt_flags;
1402 
1403 	head->port = rxq->port_id;
1404 
1405 	/* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
1406 	 * set in the pkt_flags field.
1407 	 */
1408 	head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
1409 	pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1410 	pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1411 	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1412 	pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1413 	head->ol_flags = pkt_flags;
1414 	head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
1415 						NGBE_PTID_MASK);
1416 
1417 	if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1418 		head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
1419 }
1420 
1421 /**
1422  * ngbe_recv_pkts_sc - receive handler for scatter case.
1423  *
1424  * @rx_queue Rx queue handle
1425  * @rx_pkts table of received packets
1426  * @nb_pkts size of rx_pkts table
1427  * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
1428  *
1429  * Returns the number of received packets/clusters (according to the "bulk
1430  * receive" interface).
1431  */
1432 static inline uint16_t
1433 ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1434 		    bool bulk_alloc)
1435 {
1436 	struct ngbe_rx_queue *rxq = rx_queue;
1437 	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1438 	volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
1439 	struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
1440 	struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1441 	uint16_t rx_id = rxq->rx_tail;
1442 	uint16_t nb_rx = 0;
1443 	uint16_t nb_hold = rxq->nb_rx_hold;
1444 	uint16_t prev_id = rxq->rx_tail;
1445 
1446 	while (nb_rx < nb_pkts) {
1447 		bool eop;
1448 		struct ngbe_rx_entry *rxe;
1449 		struct ngbe_scattered_rx_entry *sc_entry;
1450 		struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
1451 		struct ngbe_rx_entry *next_rxe = NULL;
1452 		struct rte_mbuf *first_seg;
1453 		struct rte_mbuf *rxm;
1454 		struct rte_mbuf *nmb = NULL;
1455 		struct ngbe_rx_desc rxd;
1456 		uint16_t data_len;
1457 		uint16_t next_id;
1458 		volatile struct ngbe_rx_desc *rxdp;
1459 		uint32_t staterr;
1460 
1461 next_desc:
1462 		rxdp = &rx_ring[rx_id];
1463 		staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1464 
1465 		if (!(staterr & NGBE_RXD_STAT_DD))
1466 			break;
1467 
1468 		/*
1469 		 * Use acquire fence to ensure that status_error which includes
1470 		 * DD bit is loaded before loading of other descriptor words.
1471 		 */
1472 		rte_atomic_thread_fence(rte_memory_order_acquire);
1473 
1474 		rxd = *rxdp;
1475 
1476 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1477 				  "staterr=0x%x data_len=%u",
1478 			   rxq->port_id, rxq->queue_id, rx_id, staterr,
1479 			   rte_le_to_cpu_16(rxd.qw1.hi.len));
1480 
1481 		if (!bulk_alloc) {
1482 			nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1483 			if (nmb == NULL) {
1484 				PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed "
1485 						  "port_id=%u queue_id=%u",
1486 					   rxq->port_id, rxq->queue_id);
1487 
1488 				dev->data->rx_mbuf_alloc_failed++;
1489 				break;
1490 			}
1491 		} else if (nb_hold > rxq->rx_free_thresh) {
1492 			uint16_t next_rdt = rxq->rx_free_trigger;
1493 
1494 			if (!ngbe_rx_alloc_bufs(rxq, false)) {
1495 				rte_wmb();
1496 				ngbe_set32_relaxed(rxq->rdt_reg_addr,
1497 							    next_rdt);
1498 				nb_hold -= rxq->rx_free_thresh;
1499 			} else {
1500 				PMD_RX_LOG(DEBUG, "Rx bulk alloc failed "
1501 						  "port_id=%u queue_id=%u",
1502 					   rxq->port_id, rxq->queue_id);
1503 
1504 				dev->data->rx_mbuf_alloc_failed++;
1505 				break;
1506 			}
1507 		}
1508 
1509 		nb_hold++;
1510 		rxe = &sw_ring[rx_id];
1511 		eop = staterr & NGBE_RXD_STAT_EOP;
1512 
1513 		next_id = rx_id + 1;
1514 		if (next_id == rxq->nb_rx_desc)
1515 			next_id = 0;
1516 
1517 		/* Prefetch next mbuf while processing current one. */
1518 		rte_ngbe_prefetch(sw_ring[next_id].mbuf);
1519 
1520 		/*
1521 		 * When next Rx descriptor is on a cache-line boundary,
1522 		 * prefetch the next 4 Rx descriptors and the next 8 pointers
1523 		 * to mbufs.
1524 		 */
1525 		if ((next_id & 0x3) == 0) {
1526 			rte_ngbe_prefetch(&rx_ring[next_id]);
1527 			rte_ngbe_prefetch(&sw_ring[next_id]);
1528 		}
1529 
1530 		rxm = rxe->mbuf;
1531 
1532 		if (!bulk_alloc) {
1533 			__le64 dma =
1534 			  rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1535 			/*
1536 			 * Update Rx descriptor with the physical address of the
1537 			 * new data buffer of the newly allocated mbuf.
1538 			 */
1539 			rxe->mbuf = nmb;
1540 
1541 			rxm->data_off = RTE_PKTMBUF_HEADROOM;
1542 			NGBE_RXD_HDRADDR(rxdp, 0);
1543 			NGBE_RXD_PKTADDR(rxdp, dma);
1544 		} else {
1545 			rxe->mbuf = NULL;
1546 		}
1547 
1548 		/*
1549 		 * Set data length & data buffer address of mbuf.
1550 		 */
1551 		data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1552 		rxm->data_len = data_len;
1553 
1554 		if (!eop) {
1555 			uint16_t nextp_id;
1556 
1557 			nextp_id = next_id;
1558 			next_sc_entry = &sw_sc_ring[nextp_id];
1559 			next_rxe = &sw_ring[nextp_id];
1560 			rte_ngbe_prefetch(next_rxe);
1561 		}
1562 
1563 		sc_entry = &sw_sc_ring[rx_id];
1564 		first_seg = sc_entry->fbuf;
1565 		sc_entry->fbuf = NULL;
1566 
1567 		/*
1568 		 * If this is the first buffer of the received packet,
1569 		 * set the pointer to the first mbuf of the packet and
1570 		 * initialize its context.
1571 		 * Otherwise, update the total length and the number of segments
1572 		 * of the current scattered packet, and update the pointer to
1573 		 * the last mbuf of the current packet.
1574 		 */
1575 		if (first_seg == NULL) {
1576 			first_seg = rxm;
1577 			first_seg->pkt_len = data_len;
1578 			first_seg->nb_segs = 1;
1579 		} else {
1580 			first_seg->pkt_len += data_len;
1581 			first_seg->nb_segs++;
1582 		}
1583 
1584 		prev_id = rx_id;
1585 		rx_id = next_id;
1586 
1587 		/*
1588 		 * If this is not the last buffer of the received packet, update
1589 		 * the pointer to the first mbuf at the NEXTP entry in the
1590 		 * sw_sc_ring and continue to parse the Rx ring.
1591 		 */
1592 		if (!eop && next_rxe) {
1593 			rxm->next = next_rxe->mbuf;
1594 			next_sc_entry->fbuf = first_seg;
1595 			goto next_desc;
1596 		}
1597 
1598 		/* Initialize the first mbuf of the returned packet */
1599 		ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1600 
1601 		/* Deal with the case when HW CRC stripping is disabled. */
1602 		first_seg->pkt_len -= rxq->crc_len;
1603 		if (unlikely(rxm->data_len <= rxq->crc_len)) {
1604 			struct rte_mbuf *lp;
1605 
1606 			for (lp = first_seg; lp->next != rxm; lp = lp->next)
1607 				;
1608 
1609 			first_seg->nb_segs--;
1610 			lp->data_len -= rxq->crc_len - rxm->data_len;
1611 			lp->next = NULL;
1612 			rte_pktmbuf_free_seg(rxm);
1613 		} else {
1614 			rxm->data_len -= rxq->crc_len;
1615 		}
1616 
1617 		/* Prefetch data of first segment, if configured to do so. */
1618 		rte_packet_prefetch((char *)first_seg->buf_addr +
1619 			first_seg->data_off);
1620 
1621 		/*
1622 		 * Store the mbuf address into the next entry of the array
1623 		 * of returned packets.
1624 		 */
1625 		rx_pkts[nb_rx++] = first_seg;
1626 	}
1627 
1628 	/*
1629 	 * Record index of the next Rx descriptor to probe.
1630 	 */
1631 	rxq->rx_tail = rx_id;
1632 
1633 	/*
1634 	 * If the number of free Rx descriptors is greater than the Rx free
1635 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1636 	 * register.
1637 	 * Update the RDT with the value of the last processed Rx descriptor
1638 	 * minus 1, to guarantee that the RDT register is never equal to the
1639 	 * RDH register, which creates a "full" ring situation from the
1640 	 * hardware point of view...
1641 	 */
1642 	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1643 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1644 			   "nb_hold=%u nb_rx=%u",
1645 			   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1646 
1647 		rte_wmb();
1648 		ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1649 		nb_hold = 0;
1650 	}
1651 
1652 	rxq->nb_rx_hold = nb_hold;
1653 	return nb_rx;
1654 }
1655 
1656 uint16_t
1657 ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1658 				 uint16_t nb_pkts)
1659 {
1660 	return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
1661 }
1662 
1663 uint16_t
1664 ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1665 			       uint16_t nb_pkts)
1666 {
1667 	return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
1668 }
1669 
1670 /*********************************************************************
1671  *
1672  *  Queue management functions
1673  *
1674  **********************************************************************/
1675 
1676 static void
1677 ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
1678 {
1679 	unsigned int i;
1680 
1681 	if (txq->sw_ring != NULL) {
1682 		for (i = 0; i < txq->nb_tx_desc; i++) {
1683 			if (txq->sw_ring[i].mbuf != NULL) {
1684 				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1685 				txq->sw_ring[i].mbuf = NULL;
1686 			}
1687 		}
1688 	}
1689 }
1690 
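/*
 * Free transmitted mbufs on the full-featured Tx path until free_cnt
 * packets have been released or no more descriptors can be cleaned.
 */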
1691 static int
1692 ngbe_tx_done_cleanup_full(struct ngbe_tx_queue *txq, uint32_t free_cnt)
1693 {
1694 	struct ngbe_tx_entry *swr_ring = txq->sw_ring;
1695 	uint16_t i, tx_last, tx_id;
1696 	uint16_t nb_tx_free_last;
1697 	uint16_t nb_tx_to_clean;
1698 	uint32_t pkt_cnt;
1699 
1700 	/* Start free mbuf from the next of tx_tail */
1701 	tx_last = txq->tx_tail;
1702 	tx_id  = swr_ring[tx_last].next_id;
1703 
1704 	if (txq->nb_tx_free == 0 && ngbe_xmit_cleanup(txq))
1705 		return 0;
1706 
1707 	nb_tx_to_clean = txq->nb_tx_free;
1708 	nb_tx_free_last = txq->nb_tx_free;
1709 	if (!free_cnt)
1710 		free_cnt = txq->nb_tx_desc;
1711 
1712 	/* Loop through swr_ring to count the number of
1713 	 * freeable mbufs and packets.
1714 	 */
1715 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
1716 		for (i = 0; i < nb_tx_to_clean &&
1717 			pkt_cnt < free_cnt &&
1718 			tx_id != tx_last; i++) {
1719 			if (swr_ring[tx_id].mbuf != NULL) {
1720 				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
1721 				swr_ring[tx_id].mbuf = NULL;
1722 
1723 				/*
1724 				 * last segment in the packet,
1725 				 * increment packet count
1726 				 */
1727 				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
1728 			}
1729 
1730 			tx_id = swr_ring[tx_id].next_id;
1731 		}
1732 
1733 		if (pkt_cnt < free_cnt) {
1734 			if (ngbe_xmit_cleanup(txq))
1735 				break;
1736 
1737 			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
1738 			nb_tx_free_last = txq->nb_tx_free;
1739 		}
1740 	}
1741 
1742 	return (int)pkt_cnt;
1743 }
1744 
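/* Free transmitted mbufs on the simple Tx path, in batches of tx_free_thresh descriptors */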
1745 static int
1746 ngbe_tx_done_cleanup_simple(struct ngbe_tx_queue *txq,
1747 			uint32_t free_cnt)
1748 {
1749 	int i, n, cnt;
1750 
1751 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
1752 		free_cnt = txq->nb_tx_desc;
1753 
1754 	cnt = free_cnt - free_cnt % txq->tx_free_thresh;
1755 
1756 	for (i = 0; i < cnt; i += n) {
1757 		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
1758 			break;
1759 
1760 		n = ngbe_tx_free_bufs(txq);
1761 
1762 		if (n == 0)
1763 			break;
1764 	}
1765 
1766 	return i;
1767 }
1768 
1769 int
1770 ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
1771 {
1772 	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
1773 	if (txq->offloads == 0 &&
1774 		txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST)
1775 		return ngbe_tx_done_cleanup_simple(txq, free_cnt);
1776 
1777 	return ngbe_tx_done_cleanup_full(txq, free_cnt);
1778 }
1779 
1780 static void
1781 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
1782 {
1783 	if (txq != NULL)
1784 		rte_free(txq->sw_ring);
1785 }
1786 
1787 static void
1788 ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
1789 {
1790 	if (txq != NULL) {
1791 		if (txq->ops != NULL) {
1792 			txq->ops->release_mbufs(txq);
1793 			txq->ops->free_swring(txq);
1794 		}
1795 		rte_free(txq);
1796 	}
1797 }
1798 
1799 void
1800 ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1801 {
1802 	ngbe_tx_queue_release(dev->data->tx_queues[qid]);
1803 }
1804 
1805 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
1806 static void
1807 ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
1808 {
1809 	static const struct ngbe_tx_desc zeroed_desc = {0};
1810 	struct ngbe_tx_entry *txe = txq->sw_ring;
1811 	uint16_t prev, i;
1812 
1813 	/* Zero out HW ring memory */
1814 	for (i = 0; i < txq->nb_tx_desc; i++)
1815 		txq->tx_ring[i] = zeroed_desc;
1816 
1817 	/* Initialize SW ring entries */
1818 	prev = (uint16_t)(txq->nb_tx_desc - 1);
1819 	for (i = 0; i < txq->nb_tx_desc; i++) {
1820 		/* the ring can also be modified by hardware */
1821 		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
1822 
1823 		txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
1824 		txe[i].mbuf = NULL;
1825 		txe[i].last_id = i;
1826 		txe[prev].next_id = i;
1827 		prev = i;
1828 	}
1829 
1830 	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
1831 	txq->tx_tail = 0;
1832 
1833 	/*
1834 	 * Always allow 1 descriptor to be un-allocated to avoid
1835 	 * a H/W race condition
1836 	 */
1837 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1838 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1839 	txq->ctx_curr = 0;
1840 	memset((void *)&txq->ctx_cache, 0,
1841 		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
1842 }
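
/*
 * Note on the S/W ring layout initialized above: next_id links the entries
 * into a circular list (the last entry points back to entry 0), while last_id
 * initially points at the entry itself. The full-featured transmit path later
 * updates last_id to reference the entry holding the final segment of each
 * packet, which is what the done-cleanup code uses to count whole packets
 * rather than individual segments.
 */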
1843 
1844 static const struct ngbe_txq_ops def_txq_ops = {
1845 	.release_mbufs = ngbe_tx_queue_release_mbufs,
1846 	.free_swring = ngbe_tx_free_swring,
1847 	.reset = ngbe_reset_tx_queue,
1848 };
1849 
1850 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1851  * the queue parameters. Used in tx_queue_setup by the primary process and
1852  * then in dev_init by a secondary process when attaching to an existing ethdev.
1853  */
1854 void
1855 ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
1856 {
1857 	/* Use a simple Tx queue (no offloads, no multi-segment Tx) if possible */
1858 	if (txq->offloads == 0 &&
1859 			txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
1860 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1861 		dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
1862 		dev->tx_pkt_prepare = NULL;
1863 	} else {
1864 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1865 		PMD_INIT_LOG(DEBUG,
1866 				" - offloads = 0x%" PRIx64,
1867 				txq->offloads);
1868 		PMD_INIT_LOG(DEBUG,
1869 				" - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]",
1870 				(unsigned long)txq->tx_free_thresh,
1871 				(unsigned long)RTE_PMD_NGBE_TX_MAX_BURST);
1872 		dev->tx_pkt_burst = ngbe_xmit_pkts;
1873 		dev->tx_pkt_prepare = ngbe_prep_pkts;
1874 	}
1875 }
1876 
1877 static const struct {
1878 	eth_tx_burst_t pkt_burst;
1879 	const char *info;
1880 } ngbe_tx_burst_infos[] = {
1881 	{ ngbe_xmit_pkts_simple,   "Scalar Simple"},
1882 	{ ngbe_xmit_pkts,          "Scalar"},
1883 };
1884 
1885 int
1886 ngbe_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
1887 		      struct rte_eth_burst_mode *mode)
1888 {
1889 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
1890 	int ret = -EINVAL;
1891 	unsigned int i;
1892 
1893 	for (i = 0; i < RTE_DIM(ngbe_tx_burst_infos); ++i) {
1894 		if (pkt_burst == ngbe_tx_burst_infos[i].pkt_burst) {
1895 			snprintf(mode->info, sizeof(mode->info), "%s",
1896 				 ngbe_tx_burst_infos[i].info);
1897 			ret = 0;
1898 			break;
1899 		}
1900 	}
1901 
1902 	return ret;
1903 }
1904 
1905 uint64_t
1906 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
1907 {
1908 	uint64_t tx_offload_capa;
1909 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1910 
1911 	tx_offload_capa =
1912 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1913 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
1914 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
1915 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
1916 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
1917 		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
1918 		RTE_ETH_TX_OFFLOAD_UDP_TSO     |
1919 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1920 
1921 	if (hw->is_pf)
1922 		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
1923 
1924 	return tx_offload_capa;
1925 }
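
/*
 * Illustrative sketch (application side, values are placeholders): the
 * capabilities reported above are requested through the normal ethdev flow,
 * e.g. enabling checksum offload at configure time:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 *			       RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * port_id, nb_rxq and nb_txq are supplied by the application.
 */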
1926 
1927 int
1928 ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1929 			 uint16_t queue_idx,
1930 			 uint16_t nb_desc,
1931 			 unsigned int socket_id,
1932 			 const struct rte_eth_txconf *tx_conf)
1933 {
1934 	const struct rte_memzone *tz;
1935 	struct ngbe_tx_queue *txq;
1936 	struct ngbe_hw     *hw;
1937 	uint16_t tx_free_thresh;
1938 	uint64_t offloads;
1939 
1940 	PMD_INIT_FUNC_TRACE();
1941 	hw = ngbe_dev_hw(dev);
1942 
1943 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1944 
1945 	/*
1946 	 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
1947 	 * descriptors are used or if the number of descriptors required
1948 	 * to transmit a packet is greater than the number of free Tx
1949 	 * descriptors.
1950 	 * One descriptor in the Tx ring is used as a sentinel to avoid a
1951 	 * H/W race condition, hence the maximum threshold constraints.
1952 	 * When set to zero use default values.
1953 	 */
1954 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1955 			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
1956 	if (tx_free_thresh >= (nb_desc - 3)) {
1957 		PMD_INIT_LOG(ERR,
1958 			     "tx_free_thresh must be less than the number of Tx descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
1959 			     (unsigned int)tx_free_thresh,
1960 			     (int)dev->data->port_id, (int)queue_idx);
1961 		return -EINVAL;
1962 	}
1963 
1964 	if (nb_desc % tx_free_thresh != 0) {
1965 		PMD_INIT_LOG(ERR,
1966 			     "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
1967 			     (unsigned int)tx_free_thresh,
1968 			     (int)dev->data->port_id, (int)queue_idx);
1969 		return -EINVAL;
1970 	}
1971 
1972 	/* Free memory prior to re-allocation if needed... */
1973 	if (dev->data->tx_queues[queue_idx] != NULL) {
1974 		ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
1975 		dev->data->tx_queues[queue_idx] = NULL;
1976 	}
1977 
1978 	/* First allocate the Tx queue data structure */
1979 	txq = rte_zmalloc_socket("ethdev Tx queue",
1980 				 sizeof(struct ngbe_tx_queue),
1981 				 RTE_CACHE_LINE_SIZE, socket_id);
1982 	if (txq == NULL)
1983 		return -ENOMEM;
1984 
1985 	/*
1986 	 * Allocate Tx ring hardware descriptors. A memzone large enough to
1987 	 * handle the maximum ring size is allocated in order to allow for
1988 	 * resizing in later calls to the queue setup function.
1989 	 */
1990 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1991 			sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
1992 			NGBE_ALIGN, socket_id);
1993 	if (tz == NULL) {
1994 		ngbe_tx_queue_release(txq);
1995 		return -ENOMEM;
1996 	}
1997 
1998 	txq->nb_tx_desc = nb_desc;
1999 	txq->tx_free_thresh = tx_free_thresh;
2000 	txq->pthresh = tx_conf->tx_thresh.pthresh;
2001 	txq->hthresh = tx_conf->tx_thresh.hthresh;
2002 	txq->wthresh = tx_conf->tx_thresh.wthresh;
2003 	txq->queue_id = queue_idx;
2004 	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2005 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2006 	txq->port_id = dev->data->port_id;
2007 	txq->offloads = offloads;
2008 	txq->ops = &def_txq_ops;
2009 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
2010 
2011 	txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
2012 	txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
2013 
2014 	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
2015 	txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
2016 
2017 	/* Allocate software ring */
2018 	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2019 				sizeof(struct ngbe_tx_entry) * nb_desc,
2020 				RTE_CACHE_LINE_SIZE, socket_id);
2021 	if (txq->sw_ring == NULL) {
2022 		ngbe_tx_queue_release(txq);
2023 		return -ENOMEM;
2024 	}
2025 	PMD_INIT_LOG(DEBUG,
2026 		     "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2027 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2028 
2029 	/* set up scalar Tx function as appropriate */
2030 	ngbe_set_tx_function(dev, txq);
2031 
2032 	txq->ops->reset(txq);
2033 
2034 	dev->data->tx_queues[queue_idx] = txq;
2035 
2036 	return 0;
2037 }
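
/*
 * Illustrative sketch (application side): the setup function above is invoked
 * via the generic API, typically starting from the driver's default config:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 *
 * port_id and the 512-descriptor ring size are placeholders; whatever size is
 * chosen must satisfy the tx_free_thresh constraints checked above.
 */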
2038 
2039 /**
2040  * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
2041  *
2042  * The "next" pointer of the last segment of a not-yet-completed scattered
2043  * cluster in the sw_sc_ring is not set to NULL but rather points to the next
2044  * mbuf of the aggregation (which has not been completed yet and still
2045  * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2046  * just free the first "nb_segs" segments of the cluster explicitly by
2047  * calling rte_pktmbuf_free_seg() on each of them.
2048  *
2049  * @m scattered cluster head
2050  */
2051 static void
2052 ngbe_free_sc_cluster(struct rte_mbuf *m)
2053 {
2054 	uint16_t i, nb_segs = m->nb_segs;
2055 	struct rte_mbuf *next_seg;
2056 
2057 	for (i = 0; i < nb_segs; i++) {
2058 		next_seg = m->next;
2059 		rte_pktmbuf_free_seg(m);
2060 		m = next_seg;
2061 	}
2062 }
2063 
2064 static void
2065 ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
2066 {
2067 	unsigned int i;
2068 
2069 	if (rxq->sw_ring != NULL) {
2070 		for (i = 0; i < rxq->nb_rx_desc; i++) {
2071 			if (rxq->sw_ring[i].mbuf != NULL) {
2072 				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2073 				rxq->sw_ring[i].mbuf = NULL;
2074 			}
2075 		}
2076 		for (i = 0; i < rxq->rx_nb_avail; ++i) {
2077 			struct rte_mbuf *mb;
2078 
2079 			mb = rxq->rx_stage[rxq->rx_next_avail + i];
2080 			rte_pktmbuf_free_seg(mb);
2081 		}
2082 		rxq->rx_nb_avail = 0;
2083 	}
2084 
2085 	if (rxq->sw_sc_ring != NULL)
2086 		for (i = 0; i < rxq->nb_rx_desc; i++)
2087 			if (rxq->sw_sc_ring[i].fbuf != NULL) {
2088 				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2089 				rxq->sw_sc_ring[i].fbuf = NULL;
2090 			}
2091 }
2092 
2093 static void
2094 ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
2095 {
2096 	if (rxq != NULL) {
2097 		ngbe_rx_queue_release_mbufs(rxq);
2098 		rte_free(rxq->sw_ring);
2099 		rte_free(rxq->sw_sc_ring);
2100 		rte_free(rxq);
2101 	}
2102 }
2103 
2104 void
2105 ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
2106 {
2107 	ngbe_rx_queue_release(dev->data->rx_queues[qid]);
2108 }
2109 
2110 /*
2111  * Check if Rx Burst Bulk Alloc function can be used.
2112  * Return
2113  *        0: the preconditions are satisfied and the bulk allocation function
2114  *           can be used.
2115  *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2116  *           function must be used.
2117  */
2118 static inline int
2119 check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
2120 {
2121 	int ret = 0;
2122 
2123 	/*
2124 	 * Make sure the following pre-conditions are satisfied:
2125 	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
2126 	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
2127 	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2128 	 * Scattered packets are not supported.  This should be checked
2129 	 * outside of this function.
2130 	 */
2131 	if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
2132 		PMD_INIT_LOG(DEBUG,
2133 			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
2134 			     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
2135 		ret = -EINVAL;
2136 	} else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
2137 		PMD_INIT_LOG(DEBUG,
2138 			     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
2139 			     rxq->rx_free_thresh, rxq->nb_rx_desc);
2140 		ret = -EINVAL;
2141 	} else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
2142 		PMD_INIT_LOG(DEBUG,
2143 			     "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
2144 			     rxq->nb_rx_desc, rxq->rx_free_thresh);
2145 		ret = -EINVAL;
2146 	}
2147 
2148 	return ret;
2149 }
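
/*
 * Worked example of the checks above (illustrative values, assuming
 * RTE_PMD_NGBE_RX_MAX_BURST does not exceed 64): nb_rx_desc = 512 with
 * rx_free_thresh = 64 passes all three conditions (64 >= max burst,
 * 64 < 512, 512 % 64 == 0), so the bulk allocation path may be used, while
 * rx_free_thresh = 48 would fail the divisibility check (512 % 48 != 0) and
 * force the default Rx burst function.
 */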
2150 
2151 /* Reset dynamic ngbe_rx_queue fields back to defaults */
2152 static void
2153 ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
2154 {
2155 	static const struct ngbe_rx_desc zeroed_desc = {
2156 						{{0}, {0} }, {{0}, {0} } };
2157 	unsigned int i;
2158 	uint16_t len = rxq->nb_rx_desc;
2159 
2160 	/*
2161 	 * By default, the Rx queue setup function allocates enough memory for
2162 	 * NGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
2163 	 * extra memory at the end of the descriptor ring to be zeroed out.
2164 	 */
2165 	if (adapter->rx_bulk_alloc_allowed)
2166 		/* zero out extra memory */
2167 		len += RTE_PMD_NGBE_RX_MAX_BURST;
2168 
2169 	/*
2170 	 * Zero out HW ring memory. Zero out extra memory at the end of
2171 	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2172 	 * reads extra memory as zeros.
2173 	 */
2174 	for (i = 0; i < len; i++)
2175 		rxq->rx_ring[i] = zeroed_desc;
2176 
2177 	/*
2178 	 * initialize extra software ring entries. Space for these extra
2179 	 * entries is always allocated
2180 	 */
2181 	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2182 	for (i = rxq->nb_rx_desc; i < len; ++i)
2183 		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2184 
2185 	rxq->rx_nb_avail = 0;
2186 	rxq->rx_next_avail = 0;
2187 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2188 	rxq->rx_tail = 0;
2189 	rxq->nb_rx_hold = 0;
2190 	rxq->pkt_first_seg = NULL;
2191 	rxq->pkt_last_seg = NULL;
2192 }
2193 
2194 uint64_t
2195 ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
2196 {
2197 	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2198 }
2199 
2200 uint64_t
2201 ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2202 {
2203 	uint64_t offloads;
2204 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2205 
2206 	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
2207 		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
2208 		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
2209 		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
2210 		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
2211 		   RTE_ETH_RX_OFFLOAD_RSS_HASH    |
2212 		   RTE_ETH_RX_OFFLOAD_SCATTER;
2213 
2214 	if (hw->is_pf)
2215 		offloads |= (RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
2216 			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
2217 
2218 	return offloads;
2219 }
2220 
2221 int
2222 ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2223 			 uint16_t queue_idx,
2224 			 uint16_t nb_desc,
2225 			 unsigned int socket_id,
2226 			 const struct rte_eth_rxconf *rx_conf,
2227 			 struct rte_mempool *mp)
2228 {
2229 	const struct rte_memzone *rz;
2230 	struct ngbe_rx_queue *rxq;
2231 	struct ngbe_hw     *hw;
2232 	uint16_t len;
2233 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2234 	uint64_t offloads;
2235 
2236 	PMD_INIT_FUNC_TRACE();
2237 	hw = ngbe_dev_hw(dev);
2238 
2239 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2240 
2241 	/* Free memory prior to re-allocation if needed... */
2242 	if (dev->data->rx_queues[queue_idx] != NULL) {
2243 		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2244 		dev->data->rx_queues[queue_idx] = NULL;
2245 	}
2246 
2247 	/* First allocate the Rx queue data structure */
2248 	rxq = rte_zmalloc_socket("ethdev Rx queue",
2249 				 sizeof(struct ngbe_rx_queue),
2250 				 RTE_CACHE_LINE_SIZE, socket_id);
2251 	if (rxq == NULL)
2252 		return -ENOMEM;
2253 	rxq->mb_pool = mp;
2254 	rxq->nb_rx_desc = nb_desc;
2255 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2256 	rxq->queue_id = queue_idx;
2257 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2258 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2259 	rxq->port_id = dev->data->port_id;
2260 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2261 		rxq->crc_len = RTE_ETHER_CRC_LEN;
2262 	else
2263 		rxq->crc_len = 0;
2264 	rxq->drop_en = rx_conf->rx_drop_en;
2265 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2266 	rxq->offloads = offloads;
2267 
2268 	/*
2269 	 * Allocate Rx ring hardware descriptors. A memzone large enough to
2270 	 * handle the maximum ring size is allocated in order to allow for
2271 	 * resizing in later calls to the queue setup function.
2272 	 */
2273 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2274 				      RX_RING_SZ, NGBE_ALIGN, socket_id);
2275 	if (rz == NULL) {
2276 		ngbe_rx_queue_release(rxq);
2277 		return -ENOMEM;
2278 	}
2279 
2280 	/*
2281 	 * Zero init all the descriptors in the ring.
2282 	 */
2283 	memset(rz->addr, 0, RX_RING_SZ);
2284 
2285 	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
2286 	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
2287 
2288 	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2289 	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
2290 
2291 	/*
2292 	 * Certain constraints must be met in order to use the bulk buffer
2293 	 * allocation Rx burst function. If any Rx queue does not meet them,
2294 	 * the feature is disabled for the whole port.
2295 	 */
2296 	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2297 		PMD_INIT_LOG(DEBUG,
2298 			     "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
2299 			     rxq->queue_id, rxq->port_id);
2300 		adapter->rx_bulk_alloc_allowed = false;
2301 	}
2302 
2303 	/*
2304 	 * Allocate software ring. Allow for space at the end of the
2305 	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2306 	 * function does not access an invalid memory region.
2307 	 */
2308 	len = nb_desc;
2309 	if (adapter->rx_bulk_alloc_allowed)
2310 		len += RTE_PMD_NGBE_RX_MAX_BURST;
2311 
2312 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2313 					  sizeof(struct ngbe_rx_entry) * len,
2314 					  RTE_CACHE_LINE_SIZE, socket_id);
2315 	if (rxq->sw_ring == NULL) {
2316 		ngbe_rx_queue_release(rxq);
2317 		return -ENOMEM;
2318 	}
2319 
2320 	/*
2321 	 * Always allocate even if it's not going to be needed in order to
2322 	 * simplify the code.
2323 	 *
2324 	 * This ring is used in Scattered Rx cases and Scattered Rx may
2325 	 * be requested in ngbe_dev_rx_init(), which is called later from
2326 	 * dev_start() flow.
2327 	 */
2328 	rxq->sw_sc_ring =
2329 		rte_zmalloc_socket("rxq->sw_sc_ring",
2330 				  sizeof(struct ngbe_scattered_rx_entry) * len,
2331 				  RTE_CACHE_LINE_SIZE, socket_id);
2332 	if (rxq->sw_sc_ring == NULL) {
2333 		ngbe_rx_queue_release(rxq);
2334 		return -ENOMEM;
2335 	}
2336 
2337 	PMD_INIT_LOG(DEBUG,
2338 		     "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2339 		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2340 		     rxq->rx_ring_phys_addr);
2341 
2342 	dev->data->rx_queues[queue_idx] = rxq;
2343 
2344 	ngbe_reset_rx_queue(adapter, rxq);
2345 
2346 	return 0;
2347 }
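
/*
 * Illustrative sketch (application side): the Rx setup above is reached via
 * the generic API together with an mbuf pool created by the application, e.g.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     NULL, mp);
 *
 * Pool parameters, ring size and port_id are placeholders; passing NULL for
 * rx_conf lets ethdev apply the driver's default Rx configuration.
 */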
2348 
2349 uint32_t
2350 ngbe_dev_rx_queue_count(void *rx_queue)
2351 {
2352 #define NGBE_RXQ_SCAN_INTERVAL 4
2353 	volatile struct ngbe_rx_desc *rxdp;
2354 	struct ngbe_rx_queue *rxq = rx_queue;
2355 	uint32_t desc = 0;
2356 
2357 	rxdp = &rxq->rx_ring[rxq->rx_tail];
2358 
2359 	while ((desc < rxq->nb_rx_desc) &&
2360 		(rxdp->qw1.lo.status &
2361 			rte_cpu_to_le_32(NGBE_RXD_STAT_DD))) {
2362 		desc += NGBE_RXQ_SCAN_INTERVAL;
2363 		rxdp += NGBE_RXQ_SCAN_INTERVAL;
2364 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2365 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
2366 				desc - rxq->nb_rx_desc]);
2367 	}
2368 
2369 	return desc;
2370 }
2371 
2372 int
2373 ngbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
2374 {
2375 	struct ngbe_rx_queue *rxq = rx_queue;
2376 	volatile uint32_t *status;
2377 	uint32_t nb_hold, desc;
2378 
2379 	if (unlikely(offset >= rxq->nb_rx_desc))
2380 		return -EINVAL;
2381 
2382 	nb_hold = rxq->nb_rx_hold;
2383 	if (offset >= rxq->nb_rx_desc - nb_hold)
2384 		return RTE_ETH_RX_DESC_UNAVAIL;
2385 
2386 	desc = rxq->rx_tail + offset;
2387 	if (desc >= rxq->nb_rx_desc)
2388 		desc -= rxq->nb_rx_desc;
2389 
2390 	status = &rxq->rx_ring[desc].qw1.lo.status;
2391 	if (*status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD))
2392 		return RTE_ETH_RX_DESC_DONE;
2393 
2394 	return RTE_ETH_RX_DESC_AVAIL;
2395 }
2396 
2397 int
2398 ngbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
2399 {
2400 	struct ngbe_tx_queue *txq = tx_queue;
2401 	volatile uint32_t *status;
2402 	uint32_t desc;
2403 
2404 	if (unlikely(offset >= txq->nb_tx_desc))
2405 		return -EINVAL;
2406 
2407 	desc = txq->tx_tail + offset;
2408 	if (desc >= txq->nb_tx_desc) {
2409 		desc -= txq->nb_tx_desc;
2410 		if (desc >= txq->nb_tx_desc)
2411 			desc -= txq->nb_tx_desc;
2412 	}
2413 
2414 	status = &txq->tx_ring[desc].dw3;
2415 	if (*status & rte_cpu_to_le_32(NGBE_TXD_DD))
2416 		return RTE_ETH_TX_DESC_DONE;
2417 
2418 	return RTE_ETH_TX_DESC_FULL;
2419 }
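
/*
 * Illustrative sketch: both descriptor status helpers above are exposed
 * through the generic ethdev wrappers, allowing an application to probe ring
 * progress without receiving or transmitting, e.g.
 *
 *	int rx_st = rte_eth_rx_descriptor_status(port_id, 0, 16);
 *	int tx_st = rte_eth_tx_descriptor_status(port_id, 0, 16);
 *
 * rx_st == RTE_ETH_RX_DESC_DONE means at least 17 packets are waiting on Rx
 * queue 0; tx_st == RTE_ETH_TX_DESC_DONE means the descriptor 16 entries past
 * tx_tail has completed. port_id and the offsets are examples only.
 */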
2420 
2421 void
2422 ngbe_dev_clear_queues(struct rte_eth_dev *dev)
2423 {
2424 	unsigned int i;
2425 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2426 
2427 	PMD_INIT_FUNC_TRACE();
2428 
2429 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2430 		struct ngbe_tx_queue *txq = dev->data->tx_queues[i];
2431 
2432 		if (txq != NULL) {
2433 			txq->ops->release_mbufs(txq);
2434 			txq->ops->reset(txq);
2435 			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2436 		}
2437 	}
2438 
2439 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2440 		struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
2441 
2442 		if (rxq != NULL) {
2443 			ngbe_rx_queue_release_mbufs(rxq);
2444 			ngbe_reset_rx_queue(adapter, rxq);
2445 			dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2446 		}
2447 	}
2448 }
2449 
2450 void
2451 ngbe_dev_free_queues(struct rte_eth_dev *dev)
2452 {
2453 	unsigned int i;
2454 
2455 	PMD_INIT_FUNC_TRACE();
2456 
2457 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2458 		ngbe_dev_rx_queue_release(dev, i);
2459 		dev->data->rx_queues[i] = NULL;
2460 	}
2461 	dev->data->nb_rx_queues = 0;
2462 
2463 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2464 		ngbe_dev_tx_queue_release(dev, i);
2465 		dev->data->tx_queues[i] = NULL;
2466 	}
2467 	dev->data->nb_tx_queues = 0;
2468 }
2469 
2470 /**
2471  * Receive Side Scaling (RSS)
2472  *
2473  * Principles:
2474  * The source and destination IP addresses of the IP header and the source
2475  * and destination ports of TCP/UDP headers, if any, of received packets are
2476  * hashed against a configurable random key to compute a 32-bit RSS hash result.
2477  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2478  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
2479  * RSS output index, which is used as the index of the Rx queue in which to
2480  * store the received packets.
2481  * The following output is supplied in the Rx write-back descriptor:
2482  *     - 32-bit result of the Microsoft RSS hash function,
2483  *     - 4-bit RSS type field.
2484  */
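
/*
 * Illustrative example of the lookup described above: for a packet whose
 * 32-bit RSS hash is 0x1234ABCD, the 7 LSBs (0x4D = 77) select RETA entry 77,
 * and the queue index stored there (0..7 on this device) becomes the
 * destination Rx queue. The hash value itself is typically returned to the
 * application in mbuf->hash.rss.
 */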
2485 
2486 /*
2487  * Used as the default key.
2488  */
2489 static uint8_t rss_intel_key[40] = {
2490 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2491 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2492 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2493 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2494 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2495 };
2496 
2497 static void
2498 ngbe_rss_disable(struct rte_eth_dev *dev)
2499 {
2500 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2501 
2502 	wr32m(hw, NGBE_RACTL, NGBE_RACTL_RSSENA, 0);
2503 }
2504 
2505 int
2506 ngbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2507 			  struct rte_eth_rss_conf *rss_conf)
2508 {
2509 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2510 	uint8_t  *hash_key;
2511 	uint32_t mrqc;
2512 	uint32_t rss_key;
2513 	uint64_t rss_hf;
2514 	uint16_t i;
2515 
2516 	if (!hw->is_pf) {
2517 		PMD_DRV_LOG(ERR,
2518 			"RSS hash update is not supported on this NIC.");
2519 		return -ENOTSUP;
2520 	}
2521 
2522 	hash_key = rss_conf->rss_key;
2523 	if (hash_key) {
2524 		/* Fill in RSS hash key */
2525 		for (i = 0; i < 10; i++) {
2526 			rss_key  = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
2527 			rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
2528 			rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
2529 			rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
2530 			wr32a(hw, NGBE_REG_RSSKEY, i, rss_key);
2531 		}
2532 	}
2533 
2534 	/* Set configured hashing protocols */
2535 	rss_hf = rss_conf->rss_hf & NGBE_RSS_OFFLOAD_ALL;
2536 
2537 	mrqc = rd32(hw, NGBE_RACTL);
2538 	mrqc &= ~NGBE_RACTL_RSSMASK;
2539 	if (rss_hf & RTE_ETH_RSS_IPV4)
2540 		mrqc |= NGBE_RACTL_RSSIPV4;
2541 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
2542 		mrqc |= NGBE_RACTL_RSSIPV4TCP;
2543 	if (rss_hf & RTE_ETH_RSS_IPV6 ||
2544 	    rss_hf & RTE_ETH_RSS_IPV6_EX)
2545 		mrqc |= NGBE_RACTL_RSSIPV6;
2546 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
2547 	    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
2548 		mrqc |= NGBE_RACTL_RSSIPV6TCP;
2549 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
2550 		mrqc |= NGBE_RACTL_RSSIPV4UDP;
2551 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
2552 	    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
2553 		mrqc |= NGBE_RACTL_RSSIPV6UDP;
2554 
2555 	if (rss_hf)
2556 		mrqc |= NGBE_RACTL_RSSENA;
2557 	else
2558 		mrqc &= ~NGBE_RACTL_RSSENA;
2559 
2560 	wr32(hw, NGBE_RACTL, mrqc);
2561 
2562 	return 0;
2563 }
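
/*
 * Illustrative sketch (application side, values are placeholders): runtime
 * RSS reconfiguration goes through the generic API, e.g.
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = key,
 *		.rss_key_len = 40,
 *		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *
 * where "key" is a 40-byte array owned by the application (or NULL to keep
 * the current key). Passing rss_hf = 0 clears NGBE_RACTL_RSSENA above and
 * effectively disables RSS hashing.
 */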
2564 
2565 int
2566 ngbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2567 			    struct rte_eth_rss_conf *rss_conf)
2568 {
2569 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2570 	uint8_t *hash_key;
2571 	uint32_t mrqc;
2572 	uint32_t rss_key;
2573 	uint64_t rss_hf;
2574 	uint16_t i;
2575 
2576 	hash_key = rss_conf->rss_key;
2577 	if (hash_key) {
2578 		/* Return RSS hash key */
2579 		for (i = 0; i < 10; i++) {
2580 			rss_key = rd32a(hw, NGBE_REG_RSSKEY, i);
2581 			hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
2582 			hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
2583 			hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
2584 			hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
2585 		}
2586 	}
2587 
2588 	rss_hf = 0;
2589 
2590 	mrqc = rd32(hw, NGBE_RACTL);
2591 	if (mrqc & NGBE_RACTL_RSSIPV4)
2592 		rss_hf |= RTE_ETH_RSS_IPV4;
2593 	if (mrqc & NGBE_RACTL_RSSIPV4TCP)
2594 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2595 	if (mrqc & NGBE_RACTL_RSSIPV6)
2596 		rss_hf |= RTE_ETH_RSS_IPV6 |
2597 			  RTE_ETH_RSS_IPV6_EX;
2598 	if (mrqc & NGBE_RACTL_RSSIPV6TCP)
2599 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
2600 			  RTE_ETH_RSS_IPV6_TCP_EX;
2601 	if (mrqc & NGBE_RACTL_RSSIPV4UDP)
2602 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2603 	if (mrqc & NGBE_RACTL_RSSIPV6UDP)
2604 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
2605 			  RTE_ETH_RSS_IPV6_UDP_EX;
2606 	if (!(mrqc & NGBE_RACTL_RSSENA))
2607 		rss_hf = 0;
2608 
2609 	rss_hf &= NGBE_RSS_OFFLOAD_ALL;
2610 
2611 	rss_conf->rss_hf = rss_hf;
2612 	return 0;
2613 }
2614 
2615 static void
2616 ngbe_rss_configure(struct rte_eth_dev *dev)
2617 {
2618 	struct rte_eth_rss_conf rss_conf;
2619 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2620 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2621 	uint32_t reta;
2622 	uint16_t i;
2623 	uint16_t j;
2624 
2625 	PMD_INIT_FUNC_TRACE();
2626 
2627 	/*
2628 	 * Fill in the redirection table: four 8-bit queue indices are packed
2629 	 * into each 32-bit RETA register, least-significant byte first, to
2630 	 * match the little-endian layout of the NIC registers.
2631 	 */
2632 	if (adapter->rss_reta_updated == 0) {
2633 		reta = 0;
2634 		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
2635 			if (j == dev->data->nb_rx_queues)
2636 				j = 0;
2637 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
2638 			if ((i & 3) == 3)
2639 				wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2640 		}
2641 	}
2642 	/*
2643 	 * Configure the RSS key and the RSS protocols used to compute
2644 	 * the RSS hash of input packets.
2645 	 */
2646 	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2647 	if (rss_conf.rss_key == NULL)
2648 		rss_conf.rss_key = rss_intel_key; /* Default hash key */
2649 	ngbe_dev_rss_hash_update(dev, &rss_conf);
2650 }
2651 
2652 void ngbe_configure_port(struct rte_eth_dev *dev)
2653 {
2654 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2655 	int i = 0;
2656 	uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
2657 				0x9100, 0x9200,
2658 				0x0000, 0x0000,
2659 				0x0000, 0x0000};
2660 
2661 	PMD_INIT_FUNC_TRACE();
2662 
2663 	/* default outer vlan tpid */
2664 	wr32(hw, NGBE_EXTAG,
2665 		NGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
2666 		NGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
2667 
2668 	/* default inner vlan tpid */
2669 	wr32m(hw, NGBE_VLANCTL,
2670 		NGBE_VLANCTL_TPID_MASK,
2671 		NGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
2672 	wr32m(hw, NGBE_DMATXCTRL,
2673 		NGBE_DMATXCTRL_TPID_MASK,
2674 		NGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
2675 
2676 	/* default vlan tpid filters */
2677 	for (i = 0; i < 8; i++) {
2678 		wr32m(hw, NGBE_TAGTPID(i / 2),
2679 			(i % 2 ? NGBE_TAGTPID_MSB_MASK
2680 			       : NGBE_TAGTPID_LSB_MASK),
2681 			(i % 2 ? NGBE_TAGTPID_MSB(tpids[i])
2682 			       : NGBE_TAGTPID_LSB(tpids[i])));
2683 	}
2684 }
2685 
2686 static int
2687 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
2688 {
2689 	struct ngbe_rx_entry *rxe = rxq->sw_ring;
2690 	uint64_t dma_addr;
2691 	unsigned int i;
2692 
2693 	/* Initialize software ring entries */
2694 	for (i = 0; i < rxq->nb_rx_desc; i++) {
2695 		/* the ring can also be modified by hardware */
2696 		volatile struct ngbe_rx_desc *rxd;
2697 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2698 
2699 		if (mbuf == NULL) {
2700 			PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
2701 				     (unsigned int)rxq->queue_id,
2702 				     (unsigned int)rxq->port_id);
2703 			return -ENOMEM;
2704 		}
2705 
2706 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2707 		mbuf->port = rxq->port_id;
2708 
2709 		dma_addr =
2710 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2711 		rxd = &rxq->rx_ring[i];
2712 		NGBE_RXD_HDRADDR(rxd, 0);
2713 		NGBE_RXD_PKTADDR(rxd, dma_addr);
2714 		rxe[i].mbuf = mbuf;
2715 	}
2716 
2717 	return 0;
2718 }
2719 
2720 static int
2721 ngbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
2722 {
2723 	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2724 		switch (dev->data->dev_conf.rxmode.mq_mode) {
2725 		case RTE_ETH_MQ_RX_RSS:
2726 			ngbe_rss_configure(dev);
2727 			break;
2728 
2729 		case RTE_ETH_MQ_RX_NONE:
2730 		default:
2731 			/* if mq_mode is none, disable RSS mode. */
2732 			ngbe_rss_disable(dev);
2733 			break;
2734 		}
2735 	}
2736 
2737 	return 0;
2738 }
2739 
2740 void
2741 ngbe_set_rx_function(struct rte_eth_dev *dev)
2742 {
2743 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2744 
2745 	if (dev->data->scattered_rx) {
2746 		/*
2747 		 * Set the scattered callback: there are bulk and
2748 		 * single allocation versions.
2749 		 */
2750 		if (adapter->rx_bulk_alloc_allowed) {
2751 			PMD_INIT_LOG(DEBUG, "Using Scattered Rx callback with bulk "
2752 					   "allocation (port=%d).",
2753 				     dev->data->port_id);
2754 			dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
2755 		} else {
2756 			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
2757 					    "single allocation) "
2758 					    "Scattered Rx callback "
2759 					    "(port=%d).",
2760 				     dev->data->port_id);
2761 
2762 			dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
2763 		}
2764 	/*
2765 	 * Below we set "simple" callbacks according to port/queues parameters.
2766 	 * If parameters allow we are going to choose between the following
2767 	 * callbacks:
2768 	 *    - Bulk Allocation
2769 	 *    - Single buffer allocation (the simplest one)
2770 	 */
2771 	} else if (adapter->rx_bulk_alloc_allowed) {
2772 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2773 				    "satisfied. Rx Burst Bulk Alloc function "
2774 				    "will be used on port=%d.",
2775 			     dev->data->port_id);
2776 
2777 		dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
2778 	} else {
2779 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
2780 				    "satisfied, or Scattered Rx is requested "
2781 				    "(port=%d).",
2782 			     dev->data->port_id);
2783 
2784 		dev->rx_pkt_burst = ngbe_recv_pkts;
2785 	}
2786 }
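
/*
 * Summary of the Rx callback selection above (descriptive only):
 *	scattered Rx + bulk alloc allowed -> ngbe_recv_pkts_sc_bulk_alloc
 *	scattered Rx only                 -> ngbe_recv_pkts_sc_single_alloc
 *	bulk alloc allowed                -> ngbe_recv_pkts_bulk_alloc
 *	otherwise                         -> ngbe_recv_pkts
 */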
2787 
2788 static const struct {
2789 	eth_rx_burst_t pkt_burst;
2790 	const char *info;
2791 } ngbe_rx_burst_infos[] = {
2792 	{ ngbe_recv_pkts_sc_single_alloc,    "Scalar Scattered"},
2793 	{ ngbe_recv_pkts_sc_bulk_alloc,      "Scalar Scattered Bulk Alloc"},
2794 	{ ngbe_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc"},
2795 	{ ngbe_recv_pkts,                    "Scalar"},
2796 };
2797 
2798 int
2799 ngbe_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2800 		      struct rte_eth_burst_mode *mode)
2801 {
2802 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2803 	int ret = -EINVAL;
2804 	unsigned int i;
2805 
2806 	for (i = 0; i < RTE_DIM(ngbe_rx_burst_infos); ++i) {
2807 		if (pkt_burst == ngbe_rx_burst_infos[i].pkt_burst) {
2808 			snprintf(mode->info, sizeof(mode->info), "%s",
2809 				 ngbe_rx_burst_infos[i].info);
2810 			ret = 0;
2811 			break;
2812 		}
2813 	}
2814 
2815 	return ret;
2816 }
2817 
2818 /*
2819  * Initializes Receive Unit.
2820  */
2821 int
2822 ngbe_dev_rx_init(struct rte_eth_dev *dev)
2823 {
2824 	struct ngbe_hw *hw;
2825 	struct ngbe_rx_queue *rxq;
2826 	uint64_t bus_addr;
2827 	uint32_t fctrl;
2828 	uint32_t hlreg0;
2829 	uint32_t srrctl;
2830 	uint32_t rdrxctl;
2831 	uint32_t rxcsum;
2832 	uint16_t buf_size;
2833 	uint16_t i;
2834 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
2835 
2836 	PMD_INIT_FUNC_TRACE();
2837 	hw = ngbe_dev_hw(dev);
2838 
2839 	/*
2840 	 * Make sure receives are disabled while setting
2841 	 * up the Rx context (registers, descriptor rings, etc.).
2842 	 */
2843 	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
2844 	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
2845 
2846 	/* Enable receipt of broadcast frames */
2847 	fctrl = rd32(hw, NGBE_PSRCTL);
2848 	fctrl |= NGBE_PSRCTL_BCA;
2849 	wr32(hw, NGBE_PSRCTL, fctrl);
2850 
2851 	/*
2852 	 * Configure CRC stripping, if requested.
2853 	 */
2854 	hlreg0 = rd32(hw, NGBE_SECRXCTL);
2855 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2856 		hlreg0 &= ~NGBE_SECRXCTL_CRCSTRIP;
2857 	else
2858 		hlreg0 |= NGBE_SECRXCTL_CRCSTRIP;
2859 	hlreg0 &= ~NGBE_SECRXCTL_XDSA;
2860 	wr32(hw, NGBE_SECRXCTL, hlreg0);
2861 
2862 	/*
2863 	 * Configure jumbo frame support, if requested.
2864 	 */
2865 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2866 		NGBE_FRMSZ_MAX(dev->data->mtu + NGBE_ETH_OVERHEAD));
2867 
2868 	/*
2869 	 * If loopback mode is configured, set LPBK bit.
2870 	 */
2871 	hlreg0 = rd32(hw, NGBE_PSRCTL);
2872 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
2873 		hlreg0 |= NGBE_PSRCTL_LBENA;
2874 	else
2875 		hlreg0 &= ~NGBE_PSRCTL_LBENA;
2876 
2877 	wr32(hw, NGBE_PSRCTL, hlreg0);
2878 
2879 	/*
2880 	 * Assume no header split and no VLAN strip support
2881 	 * on any Rx queue first.
2882 	 */
2883 	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2884 
2885 	/* Setup Rx queues */
2886 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2887 		rxq = dev->data->rx_queues[i];
2888 
2889 		/*
2890 		 * Reset crc_len in case it was changed after queue setup by a
2891 		 * call to configure.
2892 		 */
2893 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2894 			rxq->crc_len = RTE_ETHER_CRC_LEN;
2895 		else
2896 			rxq->crc_len = 0;
2897 
2898 		/* Setup the Base and Length of the Rx Descriptor Rings */
2899 		bus_addr = rxq->rx_ring_phys_addr;
2900 		wr32(hw, NGBE_RXBAL(rxq->reg_idx),
2901 				(uint32_t)(bus_addr & BIT_MASK32));
2902 		wr32(hw, NGBE_RXBAH(rxq->reg_idx),
2903 				(uint32_t)(bus_addr >> 32));
2904 		wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
2905 		wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);
2906 
2907 		srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
2908 
2909 		/* Drop packets when no Rx descriptors are available, if enabled */
2910 		if (rxq->drop_en)
2911 			srrctl |= NGBE_RXCFG_DROP;
2912 
2913 		/*
2914 		 * Configure the Rx buffer size in the PKTLEN field of
2915 		 * the RXCFG register of the queue.
2916 		 * The value is in 1 KB resolution. Valid values can be from
2917 		 * 1 KB to 16 KB.
2918 		 */
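		/*
		 * Worked example (illustrative, assuming the common default
		 * mbuf layout): with a data room of RTE_MBUF_DEFAULT_BUF_SIZE
		 * (2176 bytes) and RTE_PKTMBUF_HEADROOM of 128 bytes,
		 * buf_size comes out to 2048, which is already 1 KB aligned,
		 * so PKTLEN is programmed for 2 KB receive buffers.
		 */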
2919 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2920 			RTE_PKTMBUF_HEADROOM);
2921 		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
2922 		srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
2923 
2924 		wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
2925 
2926 		/* Account for two VLAN tags (QinQ) when checking the Rx buffer size */
2927 		if (dev->data->mtu + NGBE_ETH_OVERHEAD +
2928 				2 * RTE_VLAN_HLEN > buf_size)
2929 			dev->data->scattered_rx = 1;
2930 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2931 			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2932 	}
2933 
2934 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
2935 		dev->data->scattered_rx = 1;
2936 
2937 	/*
2938 	 * Configure multi-queue Rx operation (enable RSS if requested).
2939 	 */
2940 	ngbe_dev_mq_rx_configure(dev);
2941 
2942 	/*
2943 	 * Setup the Checksum Register.
2944 	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
2945 	 * Enable IP/L4 checksum computation by hardware if requested to do so.
2946 	 */
2947 	rxcsum = rd32(hw, NGBE_PSRCTL);
2948 	rxcsum |= NGBE_PSRCTL_PCSD;
2949 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
2950 		rxcsum |= NGBE_PSRCTL_L4CSUM;
2951 	else
2952 		rxcsum &= ~NGBE_PSRCTL_L4CSUM;
2953 
2954 	wr32(hw, NGBE_PSRCTL, rxcsum);
2955 
2956 	if (hw->is_pf) {
2957 		rdrxctl = rd32(hw, NGBE_SECRXCTL);
2958 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2959 			rdrxctl &= ~NGBE_SECRXCTL_CRCSTRIP;
2960 		else
2961 			rdrxctl |= NGBE_SECRXCTL_CRCSTRIP;
2962 		wr32(hw, NGBE_SECRXCTL, rdrxctl);
2963 	}
2964 
2965 	ngbe_set_rx_function(dev);
2966 
2967 	return 0;
2968 }
2969 
2970 /*
2971  * Initializes Transmit Unit.
2972  */
2973 void
2974 ngbe_dev_tx_init(struct rte_eth_dev *dev)
2975 {
2976 	struct ngbe_hw     *hw;
2977 	struct ngbe_tx_queue *txq;
2978 	uint64_t bus_addr;
2979 	uint16_t i;
2980 
2981 	PMD_INIT_FUNC_TRACE();
2982 	hw = ngbe_dev_hw(dev);
2983 
2984 	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
2985 	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
2986 
2987 	/* Setup the Base and Length of the Tx Descriptor Rings */
2988 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2989 		txq = dev->data->tx_queues[i];
2990 
2991 		bus_addr = txq->tx_ring_phys_addr;
2992 		wr32(hw, NGBE_TXBAL(txq->reg_idx),
2993 				(uint32_t)(bus_addr & BIT_MASK32));
2994 		wr32(hw, NGBE_TXBAH(txq->reg_idx),
2995 				(uint32_t)(bus_addr >> 32));
2996 		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
2997 			NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
2998 		/* Setup the HW Tx Head and Tail descriptor pointers */
2999 		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
3000 		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
3001 	}
3002 }
3003 
3004 /*
3005  * Set up link loopback mode Tx->Rx.
3006  */
3007 static inline void
3008 ngbe_setup_loopback_link(struct ngbe_hw *hw)
3009 {
3010 	PMD_INIT_FUNC_TRACE();
3011 
3012 	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_LB, NGBE_MACRXCFG_LB);
3013 
3014 	msec_delay(50);
3015 }
3016 
3017 /*
3018  * Start Transmit and Receive Units.
3019  */
3020 int
3021 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
3022 {
3023 	struct ngbe_hw     *hw;
3024 	struct ngbe_tx_queue *txq;
3025 	struct ngbe_rx_queue *rxq;
3026 	uint32_t dmatxctl;
3027 	uint32_t rxctrl;
3028 	uint16_t i;
3029 	int ret = 0;
3030 
3031 	PMD_INIT_FUNC_TRACE();
3032 	hw = ngbe_dev_hw(dev);
3033 
3034 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3035 		txq = dev->data->tx_queues[i];
3036 		/* Setup Transmit Threshold Registers */
3037 		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
3038 		      NGBE_TXCFG_HTHRESH_MASK |
3039 		      NGBE_TXCFG_WTHRESH_MASK,
3040 		      NGBE_TXCFG_HTHRESH(txq->hthresh) |
3041 		      NGBE_TXCFG_WTHRESH(txq->wthresh));
3042 	}
3043 
3044 	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
3045 	dmatxctl |= NGBE_DMATXCTRL_ENA;
3046 	wr32(hw, NGBE_DMATXCTRL, dmatxctl);
3047 
3048 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
3049 		txq = dev->data->tx_queues[i];
3050 		if (txq->tx_deferred_start == 0) {
3051 			ret = ngbe_dev_tx_queue_start(dev, i);
3052 			if (ret < 0)
3053 				return ret;
3054 		}
3055 	}
3056 
3057 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
3058 		rxq = dev->data->rx_queues[i];
3059 		if (rxq->rx_deferred_start == 0) {
3060 			ret = ngbe_dev_rx_queue_start(dev, i);
3061 			if (ret < 0)
3062 				return ret;
3063 		}
3064 	}
3065 
3066 	/* Enable Receive engine */
3067 	rxctrl = rd32(hw, NGBE_PBRXCTL);
3068 	rxctrl |= NGBE_PBRXCTL_ENA;
3069 	hw->mac.enable_rx_dma(hw, rxctrl);
3070 
3071 	/* If loopback mode is enabled, set up the link accordingly */
3072 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
3073 		ngbe_setup_loopback_link(hw);
3074 
3075 	return 0;
3076 }
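
/*
 * Illustrative note: queues created with rx/tx_deferred_start set are skipped
 * above and must be started explicitly by the application, e.g.
 *
 *	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
 *	ret = rte_eth_dev_tx_queue_start(port_id, queue_id);
 *
 * which end up in ngbe_dev_rx_queue_start()/ngbe_dev_tx_queue_start() below.
 */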
3077 
3078 void
3079 ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
3080 {
3081 	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
3082 	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
3083 	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
3084 	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
3085 }
3086 
3087 void
3088 ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
3089 {
3090 	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
3091 	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
3092 	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
3093 	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
3094 }
3095 
3096 void
3097 ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
3098 {
3099 	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
3100 	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
3101 	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
3102 	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
3103 }
3104 
3105 void
3106 ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
3107 {
3108 	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
3109 	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
3110 	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
3111 	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
3112 }
3113 
3114 /*
3115  * Start Receive Units for specified queue.
3116  */
3117 int
3118 ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3119 {
3120 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3121 	struct ngbe_rx_queue *rxq;
3122 	uint32_t rxdctl;
3123 	int poll_ms;
3124 
3125 	PMD_INIT_FUNC_TRACE();
3126 
3127 	rxq = dev->data->rx_queues[rx_queue_id];
3128 
3129 	/* Allocate buffers for descriptor rings */
3130 	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
3131 		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
3132 			     rx_queue_id);
3133 		return -1;
3134 	}
3135 	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
3136 	rxdctl |= NGBE_RXCFG_ENA;
3137 	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
3138 
3139 	/* Wait until Rx Enable ready */
3140 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3141 	do {
3142 		rte_delay_ms(1);
3143 		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
3144 	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
3145 	if (poll_ms == 0)
3146 		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
3147 	rte_wmb();
3148 	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
3149 	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
3150 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3151 
3152 	return 0;
3153 }
3154 
3155 /*
3156  * Stop Receive Units for specified queue.
3157  */
3158 int
3159 ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3160 {
3161 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3162 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3163 	struct ngbe_rx_queue *rxq;
3164 	uint32_t rxdctl;
3165 	int poll_ms;
3166 
3167 	PMD_INIT_FUNC_TRACE();
3168 
3169 	rxq = dev->data->rx_queues[rx_queue_id];
3170 
3171 	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
3172 	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
3173 
3174 	/* Wait until Rx Enable bit clear */
3175 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3176 	do {
3177 		rte_delay_ms(1);
3178 		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
3179 	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
3180 	if (poll_ms == 0)
3181 		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
3182 
3183 	rte_delay_us(RTE_NGBE_WAIT_100_US);
3184 	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
3185 
3186 	ngbe_rx_queue_release_mbufs(rxq);
3187 	ngbe_reset_rx_queue(adapter, rxq);
3188 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3189 
3190 	return 0;
3191 }
3192 
3193 /*
3194  * Start Transmit Units for specified queue.
3195  */
3196 int
3197 ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3198 {
3199 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3200 	struct ngbe_tx_queue *txq;
3201 	uint32_t txdctl;
3202 	int poll_ms;
3203 
3204 	PMD_INIT_FUNC_TRACE();
3205 
3206 	txq = dev->data->tx_queues[tx_queue_id];
3207 	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
3208 
3209 	/* Wait until Tx Enable ready */
3210 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3211 	do {
3212 		rte_delay_ms(1);
3213 		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
3214 	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
3215 	if (poll_ms == 0)
3216 		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
3217 			     tx_queue_id);
3218 
3219 	rte_wmb();
3220 	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
3221 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3222 
3223 	return 0;
3224 }
3225 
3226 /*
3227  * Stop Transmit Units for specified queue.
3228  */
3229 int
3230 ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3231 {
3232 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3233 	struct ngbe_tx_queue *txq;
3234 	uint32_t txdctl;
3235 	uint32_t txtdh, txtdt;
3236 	int poll_ms;
3237 
3238 	PMD_INIT_FUNC_TRACE();
3239 
3240 	txq = dev->data->tx_queues[tx_queue_id];
3241 
3242 	/* Wait until Tx queue is empty */
3243 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3244 	do {
3245 		rte_delay_us(RTE_NGBE_WAIT_100_US);
3246 		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
3247 		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
3248 	} while (--poll_ms && (txtdh != txtdt));
3249 	if (poll_ms == 0)
3250 		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
3251 			     tx_queue_id);
3252 
3253 	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
3254 	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
3255 
3256 	/* Wait until Tx Enable bit clear */
3257 	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
3258 	do {
3259 		rte_delay_ms(1);
3260 		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
3261 	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
3262 	if (poll_ms == 0)
3263 		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
3264 			     tx_queue_id);
3265 
3266 	rte_delay_us(RTE_NGBE_WAIT_100_US);
3267 	ngbe_dev_store_tx_queue(hw, txq->reg_idx);
3268 
3269 	if (txq->ops != NULL) {
3270 		txq->ops->release_mbufs(txq);
3271 		txq->ops->reset(txq);
3272 	}
3273 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3274 
3275 	return 0;
3276 }
3277 
3278 void
3279 ngbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3280 	struct rte_eth_rxq_info *qinfo)
3281 {
3282 	struct ngbe_rx_queue *rxq;
3283 
3284 	rxq = dev->data->rx_queues[queue_id];
3285 
3286 	qinfo->mp = rxq->mb_pool;
3287 	qinfo->scattered_rx = dev->data->scattered_rx;
3288 	qinfo->nb_desc = rxq->nb_rx_desc;
3289 
3290 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3291 	qinfo->conf.rx_drop_en = rxq->drop_en;
3292 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3293 	qinfo->conf.offloads = rxq->offloads;
3294 }
3295 
3296 void
3297 ngbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3298 	struct rte_eth_txq_info *qinfo)
3299 {
3300 	struct ngbe_tx_queue *txq;
3301 
3302 	txq = dev->data->tx_queues[queue_id];
3303 
3304 	qinfo->nb_desc = txq->nb_tx_desc;
3305 
3306 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
3307 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
3308 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
3309 
3310 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
3311 	qinfo->conf.offloads = txq->offloads;
3312 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3313 }
3314