xref: /dpdk/drivers/net/qede/qede_rxtx.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #include <rte_net.h>
8 #include "qede_rxtx.h"
9 
10 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
11 {
12 	struct rte_mbuf *new_mb = NULL;
13 	struct eth_rx_bd *rx_bd;
14 	dma_addr_t mapping;
15 	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
16 
17 	new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
18 	if (unlikely(!new_mb)) {
19 		PMD_RX_LOG(ERR, rxq,
20 			   "Failed to allocate rx buffer "
21 			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
22 			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
23 			   rte_mempool_avail_count(rxq->mb_pool),
24 			   rte_mempool_in_use_count(rxq->mb_pool));
25 		return -ENOMEM;
26 	}
27 	rxq->sw_rx_ring[idx].mbuf = new_mb;
28 	rxq->sw_rx_ring[idx].page_offset = 0;
29 	mapping = rte_mbuf_data_iova_default(new_mb);
30 	/* Advance PROD and get BD pointer */
31 	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
32 	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
33 	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
34 	rxq->sw_rx_prod++;
35 	return 0;
36 }
37 
38 #define QEDE_MAX_BULK_ALLOC_COUNT 512
39 
40 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
41 {
42 	void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
43 	struct rte_mbuf *mbuf = NULL;
44 	struct eth_rx_bd *rx_bd;
45 	dma_addr_t mapping;
46 	int i, ret = 0;
47 	uint16_t idx;
48 
49 	idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
50 
51 	if (count > QEDE_MAX_BULK_ALLOC_COUNT)
52 		count = QEDE_MAX_BULK_ALLOC_COUNT;
53 
54 	ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
55 	if (unlikely(ret)) {
56 		PMD_RX_LOG(ERR, rxq,
57 			   "Failed to allocate %d rx buffers "
58 			    "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
59 			    count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
60 			    rte_mempool_avail_count(rxq->mb_pool),
61 			    rte_mempool_in_use_count(rxq->mb_pool));
62 		return -ENOMEM;
63 	}
64 
65 	for (i = 0; i < count; i++) {
66 		mbuf = obj_p[i];
67 		if (likely(i < count - 1))
68 			rte_prefetch0(obj_p[i + 1]);
69 
70 		idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
71 		rxq->sw_rx_ring[idx].mbuf = mbuf;
72 		rxq->sw_rx_ring[idx].page_offset = 0;
73 		mapping = rte_mbuf_data_iova_default(mbuf);
74 		rx_bd = (struct eth_rx_bd *)
75 			ecore_chain_produce(&rxq->rx_bd_ring);
76 		rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
77 		rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
78 		rxq->sw_rx_prod++;
79 	}
80 
81 	return 0;
82 }
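
/* The receive path defers ring refill: qede_recv_pkts() counts the BDs it
 * consumes in rxq->rx_alloc_count and calls qede_alloc_rx_bulk_mbufs() at the
 * start of the next poll, so each refill is capped at
 * QEDE_MAX_BULK_ALLOC_COUNT mbufs per call.
 */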
83 
84 /* Criteria for calculating Rx buffer size -
85  * 1) rx_buf_size should not exceed the size of the mbuf
86  * 2) In scattered_rx mode - minimum rx_buf_size should be
87  *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
88  * 3) In regular mode - minimum rx_buf_size should be
89  *    (MTU + Maximum L2 Header Size + 2)
90  *    In the above cases, +2 corresponds to 2 bytes of padding in front
91  *    of the L2 header.
92  * 4) rx_buf_size should be cache-line-size aligned. Considering
93  *    criterion 1, the size is rounded down (floor) rather than up
94  *    (ceil), so that the result never exceeds the mbuf size.
95  */
96 int
97 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
98 		      uint16_t max_frame_size)
99 {
100 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
101 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
102 	int rx_buf_size;
103 
104 	if (dev->data->scattered_rx) {
105 		/* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers
106 		 * can be used for a single packet, so make sure the mbuf
107 		 * size is sufficient for this.
108 		 */
109 		if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
110 		     (max_frame_size + QEDE_ETH_OVERHEAD)) {
111 			DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
112 			       mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
113 			return -EINVAL;
114 		}
115 
116 		rx_buf_size = RTE_MAX(mbufsz,
117 				      (max_frame_size + QEDE_ETH_OVERHEAD) /
118 				       ETH_RX_MAX_BUFF_PER_PKT);
119 	} else {
120 		rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
121 	}
122 
123 	/* Align to cache-line size if needed */
124 	return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
125 }
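
/* Illustrative example of the calculation above (all numbers hypothetical,
 * not taken from the driver headers): with mbufsz = 2048, max_frame_size =
 * 9018, QEDE_ETH_OVERHEAD = 24, ETH_RX_MAX_BUFF_PER_PKT = 5 and a 64-byte
 * cache line:
 *   - sanity check: 2048 * 5 = 10240 >= 9018 + 24 = 9042, so a scattered
 *     frame always fits within the per-packet BD budget;
 *   - rx_buf_size = RTE_MAX(2048, 9042 / 5) = RTE_MAX(2048, 1808) = 2048;
 *   - flooring 2048 to the cache-line size leaves it at 2048, which never
 *     exceeds the mbuf data room (criterion 1 above).
 */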
126 
127 static struct qede_rx_queue *
128 qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
129 			uint16_t queue_idx,
130 			uint16_t nb_desc,
131 			unsigned int socket_id,
132 			struct rte_mempool *mp,
133 			uint16_t bufsz)
134 {
135 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
136 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
137 	struct qede_rx_queue *rxq;
138 	size_t size;
139 	int rc;
140 
141 	/* First allocate the rx queue data structure */
142 	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
143 				 RTE_CACHE_LINE_SIZE, socket_id);
144 
145 	if (!rxq) {
146 		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
147 			  socket_id);
148 		return NULL;
149 	}
150 
151 	rxq->qdev = qdev;
152 	rxq->mb_pool = mp;
153 	rxq->nb_rx_desc = nb_desc;
154 	rxq->queue_id = queue_idx;
155 	rxq->port_id = dev->data->port_id;
156 
157 
158 	rxq->rx_buf_size = bufsz;
159 
160 	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
161 		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
162 
163 	/* Allocate the parallel driver ring for Rx buffers */
164 	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
165 	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
166 					     RTE_CACHE_LINE_SIZE, socket_id);
167 	if (!rxq->sw_rx_ring) {
168 		DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
169 		       " socket %u\n", socket_id);
170 		rte_free(rxq);
171 		return NULL;
172 	}
173 
174 	/* Allocate FW Rx ring  */
175 	rc = qdev->ops->common->chain_alloc(edev,
176 					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
177 					    ECORE_CHAIN_MODE_NEXT_PTR,
178 					    ECORE_CHAIN_CNT_TYPE_U16,
179 					    rxq->nb_rx_desc,
180 					    sizeof(struct eth_rx_bd),
181 					    &rxq->rx_bd_ring,
182 					    NULL);
183 
184 	if (rc != ECORE_SUCCESS) {
185 		DP_ERR(edev, "Memory allocation fails for RX BD ring"
186 		       " on socket %u\n", socket_id);
187 		rte_free(rxq->sw_rx_ring);
188 		rte_free(rxq);
189 		return NULL;
190 	}
191 
192 	/* Allocate FW completion ring */
193 	rc = qdev->ops->common->chain_alloc(edev,
194 					    ECORE_CHAIN_USE_TO_CONSUME,
195 					    ECORE_CHAIN_MODE_PBL,
196 					    ECORE_CHAIN_CNT_TYPE_U16,
197 					    rxq->nb_rx_desc,
198 					    sizeof(union eth_rx_cqe),
199 					    &rxq->rx_comp_ring,
200 					    NULL);
201 
202 	if (rc != ECORE_SUCCESS) {
203 		DP_ERR(edev, "Memory allocation fails for RX CQE ring"
204 		       " on socket %u\n", socket_id);
205 		qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
206 		rte_free(rxq->sw_rx_ring);
207 		rte_free(rxq);
208 		return NULL;
209 	}
210 
211 	return rxq;
212 }
213 
214 int
215 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
216 		    uint16_t nb_desc, unsigned int socket_id,
217 		    __rte_unused const struct rte_eth_rxconf *rx_conf,
218 		    struct rte_mempool *mp)
219 {
220 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
221 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
222 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
223 	struct qede_rx_queue *rxq;
224 	uint16_t max_rx_pkt_len;
225 	uint16_t bufsz;
226 	int rc;
227 
228 	PMD_INIT_FUNC_TRACE(edev);
229 
230 	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
231 	if (!rte_is_power_of_2(nb_desc)) {
232 		DP_ERR(edev, "Ring size %u is not power of 2\n",
233 			  nb_desc);
234 		return -EINVAL;
235 	}
236 
237 	/* Free memory prior to re-allocation if needed... */
238 	if (dev->data->rx_queues[qid] != NULL) {
239 		qede_rx_queue_release(dev->data->rx_queues[qid]);
240 		dev->data->rx_queues[qid] = NULL;
241 	}
242 
243 	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
244 
245 	/* Fix up RX buffer size */
246 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
247 	/* Cache-align the mbuf size to simplify the rx_buf_size calculation */
248 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
249 	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
250 	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
251 		if (!dev->data->scattered_rx) {
252 			DP_INFO(edev, "Forcing scatter-gather mode\n");
253 			dev->data->scattered_rx = 1;
254 		}
255 	}
256 
257 	rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
258 	if (rc < 0)
259 		return rc;
260 
261 	bufsz = rc;
262 
263 	if (ECORE_IS_CMT(edev)) {
264 		rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
265 					      socket_id, mp, bufsz);
266 		if (!rxq)
267 			return -ENOMEM;
268 
269 		qdev->fp_array[qid * 2].rxq = rxq;
270 		rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
271 					      socket_id, mp, bufsz);
272 		if (!rxq)
273 			return -ENOMEM;
274 
275 		qdev->fp_array[qid * 2 + 1].rxq = rxq;
276 		/* provide per engine fp struct as rx queue */
277 		dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
278 	} else {
279 		rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
280 					      socket_id, mp, bufsz);
281 		if (!rxq)
282 			return -ENOMEM;
283 
284 		dev->data->rx_queues[qid] = rxq;
285 		qdev->fp_array[qid].rxq = rxq;
286 	}
287 
288 	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
289 		  qid, nb_desc, rxq->rx_buf_size, socket_id);
290 
291 	return 0;
292 }
293 
294 static void
295 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
296 		    struct qede_rx_queue *rxq)
297 {
298 	DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
299 	ecore_chain_reset(&rxq->rx_bd_ring);
300 	ecore_chain_reset(&rxq->rx_comp_ring);
301 	rxq->sw_rx_prod = 0;
302 	rxq->sw_rx_cons = 0;
303 	*rxq->hw_cons_ptr = 0;
304 }
305 
306 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
307 {
308 	uint16_t i;
309 
310 	if (rxq->sw_rx_ring) {
311 		for (i = 0; i < rxq->nb_rx_desc; i++) {
312 			if (rxq->sw_rx_ring[i].mbuf) {
313 				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
314 				rxq->sw_rx_ring[i].mbuf = NULL;
315 			}
316 		}
317 	}
318 }
319 
320 static void _qede_rx_queue_release(struct qede_dev *qdev,
321 				   struct ecore_dev *edev,
322 				   struct qede_rx_queue *rxq)
323 {
324 	qede_rx_queue_release_mbufs(rxq);
325 	qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
326 	qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
327 	rte_free(rxq->sw_rx_ring);
328 	rte_free(rxq);
329 }
330 
331 void qede_rx_queue_release(void *rx_queue)
332 {
333 	struct qede_rx_queue *rxq = rx_queue;
334 	struct qede_fastpath_cmt *fp_cmt;
335 	struct qede_dev *qdev;
336 	struct ecore_dev *edev;
337 
338 	if (rxq) {
339 		qdev = rxq->qdev;
340 		edev = QEDE_INIT_EDEV(qdev);
341 		PMD_INIT_FUNC_TRACE(edev);
342 		if (ECORE_IS_CMT(edev)) {
343 			fp_cmt = rx_queue;
344 			_qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
345 			_qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
346 		} else {
347 			_qede_rx_queue_release(qdev, edev, rxq);
348 		}
349 	}
350 }
351 
352 /* Stops a given RX queue in the HW */
353 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
354 {
355 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
356 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
357 	struct ecore_hwfn *p_hwfn;
358 	struct qede_rx_queue *rxq;
359 	int hwfn_index;
360 	int rc;
361 
362 	if (rx_queue_id < qdev->num_rx_queues) {
363 		rxq = qdev->fp_array[rx_queue_id].rxq;
364 		hwfn_index = rx_queue_id % edev->num_hwfns;
365 		p_hwfn = &edev->hwfns[hwfn_index];
366 		rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
367 				true, false);
368 		if (rc != ECORE_SUCCESS) {
369 			DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
370 			return -1;
371 		}
372 		qede_rx_queue_release_mbufs(rxq);
373 		qede_rx_queue_reset(qdev, rxq);
374 		eth_dev->data->rx_queue_state[rx_queue_id] =
375 			RTE_ETH_QUEUE_STATE_STOPPED;
376 		DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
377 	} else {
378 		DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
379 		rc = -EINVAL;
380 	}
381 
382 	return rc;
383 }
384 
385 static struct qede_tx_queue *
386 qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
387 			uint16_t queue_idx,
388 			uint16_t nb_desc,
389 			unsigned int socket_id,
390 			const struct rte_eth_txconf *tx_conf)
391 {
392 	struct qede_dev *qdev = dev->data->dev_private;
393 	struct ecore_dev *edev = &qdev->edev;
394 	struct qede_tx_queue *txq;
395 	int rc;
396 
397 	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
398 				 RTE_CACHE_LINE_SIZE, socket_id);
399 
400 	if (txq == NULL) {
401 		DP_ERR(edev,
402 		       "Unable to allocate memory for txq on socket %u",
403 		       socket_id);
404 		return NULL;
405 	}
406 
407 	txq->nb_tx_desc = nb_desc;
408 	txq->qdev = qdev;
409 	txq->port_id = dev->data->port_id;
410 
411 	rc = qdev->ops->common->chain_alloc(edev,
412 					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
413 					    ECORE_CHAIN_MODE_PBL,
414 					    ECORE_CHAIN_CNT_TYPE_U16,
415 					    txq->nb_tx_desc,
416 					    sizeof(union eth_tx_bd_types),
417 					    &txq->tx_pbl,
418 					    NULL);
419 	if (rc != ECORE_SUCCESS) {
420 		DP_ERR(edev,
421 		       "Unable to allocate memory for txbd ring on socket %u",
422 		       socket_id);
423 		qede_tx_queue_release(txq);
424 		return NULL;
425 	}
426 
427 	/* Allocate software ring */
428 	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
429 					     (sizeof(struct qede_tx_entry) *
430 					      txq->nb_tx_desc),
431 					     RTE_CACHE_LINE_SIZE, socket_id);
432 
433 	if (!txq->sw_tx_ring) {
434 		DP_ERR(edev,
435 		       "Unable to allocate memory for txbd ring on socket %u",
436 		       socket_id);
437 		qdev->ops->common->chain_free(edev, &txq->tx_pbl);
438 		qede_tx_queue_release(txq);
439 		return NULL;
440 	}
441 
442 	txq->queue_id = queue_idx;
443 
444 	txq->nb_tx_avail = txq->nb_tx_desc;
445 
446 	txq->tx_free_thresh =
447 	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
448 	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
449 
450 	DP_INFO(edev,
451 		  "txq %u num_desc %u tx_free_thresh %u socket %u\n",
452 		  queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
453 	return txq;
454 }
455 
456 int
457 qede_tx_queue_setup(struct rte_eth_dev *dev,
458 		    uint16_t queue_idx,
459 		    uint16_t nb_desc,
460 		    unsigned int socket_id,
461 		    const struct rte_eth_txconf *tx_conf)
462 {
463 	struct qede_dev *qdev = dev->data->dev_private;
464 	struct ecore_dev *edev = &qdev->edev;
465 	struct qede_tx_queue *txq;
466 
467 	PMD_INIT_FUNC_TRACE(edev);
468 
469 	if (!rte_is_power_of_2(nb_desc)) {
470 		DP_ERR(edev, "Ring size %u is not power of 2\n",
471 		       nb_desc);
472 		return -EINVAL;
473 	}
474 
475 	/* Free memory prior to re-allocation if needed... */
476 	if (dev->data->tx_queues[queue_idx] != NULL) {
477 		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
478 		dev->data->tx_queues[queue_idx] = NULL;
479 	}
480 
481 	if (ECORE_IS_CMT(edev)) {
482 		txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
483 					      socket_id, tx_conf);
484 		if (!txq)
485 			return -ENOMEM;
486 
487 		qdev->fp_array[queue_idx * 2].txq = txq;
488 		txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
489 					      socket_id, tx_conf);
490 		if (!txq)
491 			return -ENOMEM;
492 
493 		qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
494 		dev->data->tx_queues[queue_idx] =
495 					&qdev->fp_array_cmt[queue_idx];
496 	} else {
497 		txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
498 					      socket_id, tx_conf);
499 		if (!txq)
500 			return -ENOMEM;
501 
502 		dev->data->tx_queues[queue_idx] = txq;
503 		qdev->fp_array[queue_idx].txq = txq;
504 	}
505 
506 	return 0;
507 }
508 
509 static void
510 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
511 		    struct qede_tx_queue *txq)
512 {
513 	DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
514 	ecore_chain_reset(&txq->tx_pbl);
515 	txq->sw_tx_cons = 0;
516 	txq->sw_tx_prod = 0;
517 	*txq->hw_cons_ptr = 0;
518 }
519 
520 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
521 {
522 	uint16_t i;
523 
524 	if (txq->sw_tx_ring) {
525 		for (i = 0; i < txq->nb_tx_desc; i++) {
526 			if (txq->sw_tx_ring[i].mbuf) {
527 				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
528 				txq->sw_tx_ring[i].mbuf = NULL;
529 			}
530 		}
531 	}
532 }
533 
534 static void _qede_tx_queue_release(struct qede_dev *qdev,
535 				   struct ecore_dev *edev,
536 				   struct qede_tx_queue *txq)
537 {
538 	qede_tx_queue_release_mbufs(txq);
539 	qdev->ops->common->chain_free(edev, &txq->tx_pbl);
540 	rte_free(txq->sw_tx_ring);
541 	rte_free(txq);
542 }
543 
544 void qede_tx_queue_release(void *tx_queue)
545 {
546 	struct qede_tx_queue *txq = tx_queue;
547 	struct qede_fastpath_cmt *fp_cmt;
548 	struct qede_dev *qdev;
549 	struct ecore_dev *edev;
550 
551 	if (txq) {
552 		qdev = txq->qdev;
553 		edev = QEDE_INIT_EDEV(qdev);
554 		PMD_INIT_FUNC_TRACE(edev);
555 
556 		if (ECORE_IS_CMT(edev)) {
557 			fp_cmt = tx_queue;
558 			_qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
559 			_qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
560 		} else {
561 			_qede_tx_queue_release(qdev, edev, txq);
562 		}
563 	}
564 }
565 
566 /* This function allocates fast-path status block memory */
567 static int
568 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
569 		  uint16_t sb_id)
570 {
571 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
572 	struct status_block_e4 *sb_virt;
573 	dma_addr_t sb_phys;
574 	int rc;
575 
576 	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
577 					  sizeof(struct status_block_e4));
578 	if (!sb_virt) {
579 		DP_ERR(edev, "Status block allocation failed\n");
580 		return -ENOMEM;
581 	}
582 	rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
583 					sb_phys, sb_id);
584 	if (rc) {
585 		DP_ERR(edev, "Status block initialization failed\n");
586 		OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
587 				       sizeof(struct status_block_e4));
588 		return rc;
589 	}
590 
591 	return 0;
592 }
593 
594 int qede_alloc_fp_resc(struct qede_dev *qdev)
595 {
596 	struct ecore_dev *edev = &qdev->edev;
597 	struct qede_fastpath *fp;
598 	uint32_t num_sbs;
599 	uint16_t sb_idx;
600 	int i;
601 
602 	if (IS_VF(edev))
603 		ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
604 	else
605 		num_sbs = ecore_cxt_get_proto_cid_count
606 			  (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
607 
608 	if (num_sbs == 0) {
609 		DP_ERR(edev, "No status blocks available\n");
610 		return -EINVAL;
611 	}
612 
613 	qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
614 				sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
615 
616 	if (!qdev->fp_array) {
617 		DP_ERR(edev, "fp array allocation failed\n");
618 		return -ENOMEM;
619 	}
620 
621 	memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
622 			sizeof(*qdev->fp_array));
623 
624 	if (ECORE_IS_CMT(edev)) {
625 		qdev->fp_array_cmt = rte_calloc("fp_cmt",
626 						QEDE_RXTX_MAX(qdev) / 2,
627 						sizeof(*qdev->fp_array_cmt),
628 						RTE_CACHE_LINE_SIZE);
629 
630 		if (!qdev->fp_array_cmt) {
631 			DP_ERR(edev, "fp array for CMT allocation failed\n");
632 			return -ENOMEM;
633 		}
634 
635 		memset((void *)qdev->fp_array_cmt, 0,
636 		       (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
637 
638 		/* Establish the mapping between fp_array and fp_array_cmt */
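		/* Illustrative mapping (queue counts are hypothetical): with
		 * QEDE_RXTX_MAX(qdev) == 8, fp_array_cmt[0] pairs fp_array[0]
		 * (served by engine 0) with fp_array[1] (engine 1),
		 * fp_array_cmt[1] pairs fp_array[2] with fp_array[3], and so
		 * on, so every queue exposed to the application is backed by
		 * one fastpath per HW engine.
		 */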
639 		for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
640 			qdev->fp_array_cmt[i].qdev = qdev;
641 			qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
642 			qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
643 		}
644 	}
645 
646 	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
647 		fp = &qdev->fp_array[sb_idx];
648 		if (!fp)
649 			continue;
650 		fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
651 				RTE_CACHE_LINE_SIZE);
652 		if (!fp->sb_info) {
653 			DP_ERR(edev, "FP sb_info allocation fails\n");
654 			return -1;
655 		}
656 		if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
657 			DP_ERR(edev, "FP status block allocation fails\n");
658 			return -1;
659 		}
660 		DP_INFO(edev, "sb_info idx 0x%x initialized\n",
661 				fp->sb_info->igu_sb_id);
662 	}
663 
664 	return 0;
665 }
666 
667 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
668 {
669 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
670 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
671 	struct qede_fastpath *fp;
672 	uint16_t sb_idx;
673 	uint8_t i;
674 
675 	PMD_INIT_FUNC_TRACE(edev);
676 
677 	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
678 		fp = &qdev->fp_array[sb_idx];
679 		if (!fp)
680 			continue;
681 		DP_INFO(edev, "Free sb_info index 0x%x\n",
682 				fp->sb_info->igu_sb_id);
683 		if (fp->sb_info) {
684 			OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
685 				fp->sb_info->sb_phys,
686 				sizeof(struct status_block_e4));
687 			rte_free(fp->sb_info);
688 			fp->sb_info = NULL;
689 		}
690 	}
691 
692 	/* Free packet buffers and ring memories */
693 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
694 		if (eth_dev->data->rx_queues[i]) {
695 			qede_rx_queue_release(eth_dev->data->rx_queues[i]);
696 			eth_dev->data->rx_queues[i] = NULL;
697 		}
698 	}
699 
700 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
701 		if (eth_dev->data->tx_queues[i]) {
702 			qede_tx_queue_release(eth_dev->data->tx_queues[i]);
703 			eth_dev->data->tx_queues[i] = NULL;
704 		}
705 	}
706 
707 	if (qdev->fp_array)
708 		rte_free(qdev->fp_array);
709 	qdev->fp_array = NULL;
710 
711 	if (qdev->fp_array_cmt)
712 		rte_free(qdev->fp_array_cmt);
713 	qdev->fp_array_cmt = NULL;
714 }
715 
716 static inline void
717 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
718 		    struct qede_rx_queue *rxq)
719 {
720 	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
721 	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
722 	struct eth_rx_prod_data rx_prods = { 0 };
723 
724 	/* Update producers */
725 	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
726 	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
727 
728 	/* Make sure that the BD and SGE data is updated before updating the
729 	 * producers since FW might read the BD/SGE right after the producer
730 	 * is updated.
731 	 */
732 	rte_wmb();
733 
734 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
735 			(uint32_t *)&rx_prods);
736 
737 	/* mmiowb is needed to synchronize doorbell writes from more than one
738 	 * processor. It guarantees that the write arrives to the device before
739 	 * the napi lock is released and another qede_poll is called (possibly
740 	 * on another CPU). Without this barrier, the next doorbell can bypass
741 	 * this doorbell. This is applicable to IA64/Altix systems.
742 	 */
743 	rte_wmb();
744 
745 	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u", bd_prod, cqe_prod);
746 }
747 
748 /* Starts a given RX queue in HW */
749 static int
750 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
751 {
752 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
753 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
754 	struct ecore_queue_start_common_params params;
755 	struct ecore_rxq_start_ret_params ret_params;
756 	struct qede_rx_queue *rxq;
757 	struct qede_fastpath *fp;
758 	struct ecore_hwfn *p_hwfn;
759 	dma_addr_t p_phys_table;
760 	uint16_t page_cnt;
761 	uint16_t j;
762 	int hwfn_index;
763 	int rc;
764 
765 	if (rx_queue_id < qdev->num_rx_queues) {
766 		fp = &qdev->fp_array[rx_queue_id];
767 		rxq = fp->rxq;
768 		/* Allocate buffers for the Rx ring */
769 		for (j = 0; j < rxq->nb_rx_desc; j++) {
770 			rc = qede_alloc_rx_buffer(rxq);
771 			if (rc) {
772 				DP_ERR(edev, "RX buffer allocation failed"
773 						" for rxq = %u\n", rx_queue_id);
774 				return -ENOMEM;
775 			}
776 		}
777 		/* disable interrupts */
778 		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
779 		/* Prepare ramrod */
780 		memset(&params, 0, sizeof(params));
781 		params.queue_id = rx_queue_id / edev->num_hwfns;
782 		params.vport_id = 0;
783 		params.stats_id = params.vport_id;
784 		params.p_sb = fp->sb_info;
785 		DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
786 				fp->rxq->queue_id, fp->sb_info->igu_sb_id);
787 		params.sb_idx = RX_PI;
788 		hwfn_index = rx_queue_id % edev->num_hwfns;
789 		p_hwfn = &edev->hwfns[hwfn_index];
790 		p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
791 		page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
792 		memset(&ret_params, 0, sizeof(ret_params));
793 		rc = ecore_eth_rx_queue_start(p_hwfn,
794 				p_hwfn->hw_info.opaque_fid,
795 				&params, fp->rxq->rx_buf_size,
796 				fp->rxq->rx_bd_ring.p_phys_addr,
797 				p_phys_table, page_cnt,
798 				&ret_params);
799 		if (rc) {
800 			DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
801 					rx_queue_id, rc);
802 			return -1;
803 		}
804 		/* Update with the returned parameters */
805 		fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
806 		fp->rxq->handle = ret_params.p_handle;
807 
808 		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
809 		qede_update_rx_prod(qdev, fp->rxq);
810 		eth_dev->data->rx_queue_state[rx_queue_id] =
811 			RTE_ETH_QUEUE_STATE_STARTED;
812 		DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
813 	} else {
814 		DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
815 		rc = -EINVAL;
816 	}
817 
818 	return rc;
819 }
820 
821 static int
822 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
823 {
824 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
825 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
826 	struct ecore_queue_start_common_params params;
827 	struct ecore_txq_start_ret_params ret_params;
828 	struct ecore_hwfn *p_hwfn;
829 	dma_addr_t p_phys_table;
830 	struct qede_tx_queue *txq;
831 	struct qede_fastpath *fp;
832 	uint16_t page_cnt;
833 	int hwfn_index;
834 	int rc;
835 
836 	if (tx_queue_id < qdev->num_tx_queues) {
837 		fp = &qdev->fp_array[tx_queue_id];
838 		txq = fp->txq;
839 		memset(&params, 0, sizeof(params));
840 		params.queue_id = tx_queue_id / edev->num_hwfns;
841 		params.vport_id = 0;
842 		params.stats_id = params.vport_id;
843 		params.p_sb = fp->sb_info;
844 		DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
845 				fp->txq->queue_id, fp->sb_info->igu_sb_id);
846 		params.sb_idx = TX_PI(0); /* tc = 0 */
847 		p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
848 		page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
849 		hwfn_index = tx_queue_id % edev->num_hwfns;
850 		p_hwfn = &edev->hwfns[hwfn_index];
851 		if (qdev->dev_info.is_legacy)
852 			fp->txq->is_legacy = true;
853 		rc = ecore_eth_tx_queue_start(p_hwfn,
854 				p_hwfn->hw_info.opaque_fid,
855 				&params, 0 /* tc */,
856 				p_phys_table, page_cnt,
857 				&ret_params);
858 		if (rc != ECORE_SUCCESS) {
859 			DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
860 					tx_queue_id, rc);
861 			return -1;
862 		}
863 		txq->doorbell_addr = ret_params.p_doorbell;
864 		txq->handle = ret_params.p_handle;
865 
866 		txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
867 		SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
868 				DB_DEST_XCM);
869 		SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
870 				DB_AGG_CMD_SET);
871 		SET_FIELD(txq->tx_db.data.params,
872 				ETH_DB_DATA_AGG_VAL_SEL,
873 				DQ_XCM_ETH_TX_BD_PROD_CMD);
874 		txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
875 		eth_dev->data->tx_queue_state[tx_queue_id] =
876 			RTE_ETH_QUEUE_STATE_STARTED;
877 		DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
878 	} else {
879 		DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
880 		rc = -EINVAL;
881 	}
882 
883 	return rc;
884 }
885 
886 static inline void
887 qede_free_tx_pkt(struct qede_tx_queue *txq)
888 {
889 	struct rte_mbuf *mbuf;
890 	uint16_t nb_segs;
891 	uint16_t idx;
892 
893 	idx = TX_CONS(txq);
894 	mbuf = txq->sw_tx_ring[idx].mbuf;
895 	if (mbuf) {
896 		nb_segs = mbuf->nb_segs;
897 		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
898 		while (nb_segs) {
899 			/* It's like consuming rxbuf in recv() */
900 			ecore_chain_consume(&txq->tx_pbl);
901 			txq->nb_tx_avail++;
902 			nb_segs--;
903 		}
904 		rte_pktmbuf_free(mbuf);
905 		txq->sw_tx_ring[idx].mbuf = NULL;
906 		txq->sw_tx_cons++;
907 		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
908 	} else {
909 		ecore_chain_consume(&txq->tx_pbl);
910 		txq->nb_tx_avail++;
911 	}
912 }
913 
914 static inline void
915 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
916 		      struct qede_tx_queue *txq)
917 {
918 	uint16_t hw_bd_cons;
919 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
920 	uint16_t sw_tx_cons;
921 #endif
922 
923 	rte_compiler_barrier();
924 	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
925 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
926 	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
927 	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
928 		   abs(hw_bd_cons - sw_tx_cons));
929 #endif
930 	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
931 		qede_free_tx_pkt(txq);
932 }
933 
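/* Drain a Tx queue before stopping it: keep processing completions until the
 * SW producer and consumer indices meet. The wait is bounded by 1000
 * iterations of DELAY(1000), roughly one second assuming a
 * microsecond-granularity DELAY(); if the queue is still stuck and
 * allow_drain is set, the MCP is asked to drain the queue and the wait is
 * retried once before giving up.
 */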
934 static int qede_drain_txq(struct qede_dev *qdev,
935 			  struct qede_tx_queue *txq, bool allow_drain)
936 {
937 	struct ecore_dev *edev = &qdev->edev;
938 	int rc, cnt = 1000;
939 
940 	while (txq->sw_tx_cons != txq->sw_tx_prod) {
941 		qede_process_tx_compl(edev, txq);
942 		if (!cnt) {
943 			if (allow_drain) {
944 				DP_ERR(edev, "Tx queue[%u] is stuck, "
945 					  "requesting MCP to drain\n",
946 					  txq->queue_id);
947 				rc = qdev->ops->common->drain(edev);
948 				if (rc)
949 					return rc;
950 				return qede_drain_txq(qdev, txq, false);
951 			}
952 			DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
953 				  "PROD=%d, CONS=%d\n",
954 				  txq->queue_id, txq->sw_tx_prod,
955 				  txq->sw_tx_cons);
956 			return -1;
957 		}
958 		cnt--;
959 		DELAY(1000);
960 		rte_compiler_barrier();
961 	}
962 
963 	/* FW finished processing, wait for HW to transmit all tx packets */
964 	DELAY(2000);
965 
966 	return 0;
967 }
968 
969 /* Stops a given TX queue in the HW */
970 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
971 {
972 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
973 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
974 	struct ecore_hwfn *p_hwfn;
975 	struct qede_tx_queue *txq;
976 	int hwfn_index;
977 	int rc;
978 
979 	if (tx_queue_id < qdev->num_tx_queues) {
980 		txq = qdev->fp_array[tx_queue_id].txq;
981 		/* Drain txq */
982 		if (qede_drain_txq(qdev, txq, true))
983 			return -1; /* For the lack of retcodes */
984 		/* Stop txq */
985 		hwfn_index = tx_queue_id % edev->num_hwfns;
986 		p_hwfn = &edev->hwfns[hwfn_index];
987 		rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
988 		if (rc != ECORE_SUCCESS) {
989 			DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
990 			return -1;
991 		}
992 		qede_tx_queue_release_mbufs(txq);
993 		qede_tx_queue_reset(qdev, txq);
994 		eth_dev->data->tx_queue_state[tx_queue_id] =
995 			RTE_ETH_QUEUE_STATE_STOPPED;
996 		DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
997 	} else {
998 		DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
999 		rc = -EINVAL;
1000 	}
1001 
1002 	return rc;
1003 }
1004 
1005 int qede_start_queues(struct rte_eth_dev *eth_dev)
1006 {
1007 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1008 	uint8_t id;
1009 	int rc = -1;
1010 
1011 	for (id = 0; id < qdev->num_rx_queues; id++) {
1012 		rc = qede_rx_queue_start(eth_dev, id);
1013 		if (rc != ECORE_SUCCESS)
1014 			return -1;
1015 	}
1016 
1017 	for (id = 0; id < qdev->num_tx_queues; id++) {
1018 		rc = qede_tx_queue_start(eth_dev, id);
1019 		if (rc != ECORE_SUCCESS)
1020 			return -1;
1021 	}
1022 
1023 	return rc;
1024 }
1025 
1026 void qede_stop_queues(struct rte_eth_dev *eth_dev)
1027 {
1028 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1029 	uint8_t id;
1030 
1031 	/* Stopping RX/TX queues */
1032 	for (id = 0; id < qdev->num_tx_queues; id++)
1033 		qede_tx_queue_stop(eth_dev, id);
1034 
1035 	for (id = 0; id < qdev->num_rx_queues; id++)
1036 		qede_rx_queue_stop(eth_dev, id);
1037 }
1038 
1039 static inline bool qede_tunn_exist(uint16_t flag)
1040 {
1041 	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
1042 		    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
1043 }
1044 
1045 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
1046 {
1047 	return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
1048 		PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
1049 }
1050 
1051 /*
1052  * qede_check_tunn_csum_l4:
1053  * Returns:
1054  * 1 : If L4 csum is enabled AND if the validation has failed.
1055  * 0 : Otherwise
1056  */
1057 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
1058 {
1059 	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
1060 	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
1061 		return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
1062 			PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
1063 
1064 	return 0;
1065 }
1066 
1067 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
1068 {
1069 	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1070 	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
1071 		return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1072 			   PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
1073 
1074 	return 0;
1075 }
1076 
1077 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
1078 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
1079 {
1080 	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1081 	struct rte_ether_hdr *eth_hdr;
1082 	struct rte_ipv4_hdr *ipv4_hdr;
1083 	struct rte_ipv6_hdr *ipv6_hdr;
1084 	struct rte_vlan_hdr *vlan_hdr;
1085 	uint16_t ethertype;
1086 	bool vlan_tagged = 0;
1087 	uint16_t len;
1088 
1089 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1090 	len = sizeof(struct rte_ether_hdr);
1091 	ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
1092 
1093 	 /* Note: Valid only if VLAN stripping is disabled */
1094 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
1095 		vlan_tagged = 1;
1096 		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
1097 		len += sizeof(struct rte_vlan_hdr);
1098 		ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
1099 	}
1100 
1101 	if (ethertype == RTE_ETHER_TYPE_IPV4) {
1102 		packet_type |= RTE_PTYPE_L3_IPV4;
1103 		ipv4_hdr = rte_pktmbuf_mtod_offset(m,
1104 					struct rte_ipv4_hdr *, len);
1105 		if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
1106 			packet_type |= RTE_PTYPE_L4_TCP;
1107 		else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
1108 			packet_type |= RTE_PTYPE_L4_UDP;
1109 	} else if (ethertype == RTE_ETHER_TYPE_IPV6) {
1110 		packet_type |= RTE_PTYPE_L3_IPV6;
1111 		ipv6_hdr = rte_pktmbuf_mtod_offset(m,
1112 						struct rte_ipv6_hdr *, len);
1113 		if (ipv6_hdr->proto == IPPROTO_TCP)
1114 			packet_type |= RTE_PTYPE_L4_TCP;
1115 		else if (ipv6_hdr->proto == IPPROTO_UDP)
1116 			packet_type |= RTE_PTYPE_L4_UDP;
1117 	}
1118 
1119 	if (vlan_tagged)
1120 		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1121 	else
1122 		packet_type |= RTE_PTYPE_L2_ETHER;
1123 
1124 	return packet_type;
1125 }
1126 
1127 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
1128 {
1129 	uint16_t val;
1130 
1131 	/* Lookup table */
1132 	static const uint32_t
1133 	ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1134 		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4		|
1135 				       RTE_PTYPE_INNER_L2_ETHER,
1136 		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6		|
1137 				       RTE_PTYPE_INNER_L2_ETHER,
1138 		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4	|
1139 					   RTE_PTYPE_INNER_L4_TCP	|
1140 					   RTE_PTYPE_INNER_L2_ETHER,
1141 		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6	|
1142 					   RTE_PTYPE_INNER_L4_TCP	|
1143 					   RTE_PTYPE_INNER_L2_ETHER,
1144 		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4	|
1145 					   RTE_PTYPE_INNER_L4_UDP	|
1146 					   RTE_PTYPE_INNER_L2_ETHER,
1147 		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6	|
1148 					   RTE_PTYPE_INNER_L4_UDP	|
1149 					   RTE_PTYPE_INNER_L2_ETHER,
1150 		/* Frags with no VLAN */
1151 		[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4	|
1152 					    RTE_PTYPE_INNER_L4_FRAG	|
1153 					    RTE_PTYPE_INNER_L2_ETHER,
1154 		[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6	|
1155 					    RTE_PTYPE_INNER_L4_FRAG	|
1156 					    RTE_PTYPE_INNER_L2_ETHER,
1157 		/* VLANs */
1158 		[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4	|
1159 					    RTE_PTYPE_INNER_L2_ETHER_VLAN,
1160 		[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6	|
1161 					    RTE_PTYPE_INNER_L2_ETHER_VLAN,
1162 		[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4	|
1163 						RTE_PTYPE_INNER_L4_TCP	|
1164 						RTE_PTYPE_INNER_L2_ETHER_VLAN,
1165 		[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6	|
1166 						RTE_PTYPE_INNER_L4_TCP	|
1167 						RTE_PTYPE_INNER_L2_ETHER_VLAN,
1168 		[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4	|
1169 						RTE_PTYPE_INNER_L4_UDP	|
1170 						RTE_PTYPE_INNER_L2_ETHER_VLAN,
1171 		[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6	|
1172 						RTE_PTYPE_INNER_L4_UDP	|
1173 						RTE_PTYPE_INNER_L2_ETHER_VLAN,
1174 		/* Frags with VLAN */
1175 		[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1176 						 RTE_PTYPE_INNER_L4_FRAG |
1177 						 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1178 		[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1179 						 RTE_PTYPE_INNER_L4_FRAG |
1180 						 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1181 	};
1182 
1183 	/* Bits (0..3) provide the L3/L4 protocol type */
1184 	/* Bits (4, 5) provide the frag and VLAN info */
1185 	val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1186 	       PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1187 	       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1188 		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1189 	       (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1190 		PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1191 		(PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1192 		 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1193 
1194 	if (val < QEDE_PKT_TYPE_MAX)
1195 		return ptype_lkup_tbl[val];
1196 
1197 	return RTE_PTYPE_UNKNOWN;
1198 }
1199 
1200 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1201 {
1202 	uint16_t val;
1203 
1204 	/* Lookup table */
1205 	static const uint32_t
1206 	ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1207 		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1208 		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1209 		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4	|
1210 					   RTE_PTYPE_L4_TCP	|
1211 					   RTE_PTYPE_L2_ETHER,
1212 		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6	|
1213 					   RTE_PTYPE_L4_TCP	|
1214 					   RTE_PTYPE_L2_ETHER,
1215 		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4	|
1216 					   RTE_PTYPE_L4_UDP	|
1217 					   RTE_PTYPE_L2_ETHER,
1218 		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6	|
1219 					   RTE_PTYPE_L4_UDP	|
1220 					   RTE_PTYPE_L2_ETHER,
1221 		/* Frags with no VLAN */
1222 		[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4	|
1223 					    RTE_PTYPE_L4_FRAG	|
1224 					    RTE_PTYPE_L2_ETHER,
1225 		[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6	|
1226 					    RTE_PTYPE_L4_FRAG	|
1227 					    RTE_PTYPE_L2_ETHER,
1228 		/* VLANs */
1229 		[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4		|
1230 					    RTE_PTYPE_L2_ETHER_VLAN,
1231 		[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6		|
1232 					    RTE_PTYPE_L2_ETHER_VLAN,
1233 		[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4	|
1234 						RTE_PTYPE_L4_TCP	|
1235 						RTE_PTYPE_L2_ETHER_VLAN,
1236 		[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6	|
1237 						RTE_PTYPE_L4_TCP	|
1238 						RTE_PTYPE_L2_ETHER_VLAN,
1239 		[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4	|
1240 						RTE_PTYPE_L4_UDP	|
1241 						RTE_PTYPE_L2_ETHER_VLAN,
1242 		[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6	|
1243 						RTE_PTYPE_L4_UDP	|
1244 						RTE_PTYPE_L2_ETHER_VLAN,
1245 		/* Frags with VLAN */
1246 		[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4	|
1247 						 RTE_PTYPE_L4_FRAG	|
1248 						 RTE_PTYPE_L2_ETHER_VLAN,
1249 		[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6	|
1250 						 RTE_PTYPE_L4_FRAG	|
1251 						 RTE_PTYPE_L2_ETHER_VLAN,
1252 	};
1253 
1254 	/* Bits (0..3) provide the L3/L4 protocol type */
1255 	/* Bits (4, 5) provide the frag and VLAN info */
1256 	val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1257 	       PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1258 	       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1259 		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1260 	       (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1261 		PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1262 		(PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1263 		 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1264 
1265 	if (val < QEDE_PKT_TYPE_MAX)
1266 		return ptype_lkup_tbl[val];
1267 
1268 	return RTE_PTYPE_UNKNOWN;
1269 }
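
/* Both qede_rx_cqe_to_pkt_type_inner() and qede_rx_cqe_to_pkt_type() index
 * their lookup tables with the same value: the CQE parse flags masked down to
 * the L3-type, L4-protocol, IPv4-fragment and 802.1Q-present fields. For
 * example, an untagged, non-fragmented IPv4/TCP frame produces the
 * QEDE_PKT_TYPE_IPV4_TCP index, which maps to RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP (or the RTE_PTYPE_INNER_* equivalents
 * in the inner-header helper).
 */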
1270 
1271 static inline uint8_t
1272 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1273 {
1274 	struct rte_ipv4_hdr *ip;
1275 	uint16_t pkt_csum;
1276 	uint16_t calc_csum;
1277 	uint16_t val;
1278 
1279 	val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1280 		PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1281 
1282 	if (unlikely(val)) {
1283 		m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1284 		if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1285 			ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
1286 					   sizeof(struct rte_ether_hdr));
1287 			pkt_csum = ip->hdr_checksum;
1288 			ip->hdr_checksum = 0;
1289 			calc_csum = rte_ipv4_cksum(ip);
1290 			ip->hdr_checksum = pkt_csum;
1291 			return (calc_csum != pkt_csum);
1292 		} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1293 			return 1;
1294 		}
1295 	}
1296 	return 0;
1297 }
1298 
1299 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1300 {
1301 	ecore_chain_consume(&rxq->rx_bd_ring);
1302 	rxq->sw_rx_cons++;
1303 }
1304 
1305 static inline void
1306 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1307 		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
1308 {
1309 	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1310 	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
1311 	struct qede_rx_entry *curr_prod;
1312 	dma_addr_t new_mapping;
1313 
1314 	curr_prod = &rxq->sw_rx_ring[idx];
1315 	*curr_prod = *curr_cons;
1316 
1317 	new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
1318 		      curr_prod->page_offset;
1319 
1320 	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1321 	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1322 
1323 	rxq->sw_rx_prod++;
1324 }
1325 
1326 static inline void
1327 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1328 			struct qede_dev *qdev, uint8_t count)
1329 {
1330 	struct qede_rx_entry *curr_cons;
1331 
1332 	for (; count > 0; count--) {
1333 		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1334 		qede_reuse_page(qdev, rxq, curr_cons);
1335 		qede_rx_bd_ring_consume(rxq);
1336 	}
1337 }
1338 
1339 static inline void
1340 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1341 				     struct qede_rx_queue *rxq,
1342 				     uint8_t agg_index, uint16_t len)
1343 {
1344 	struct qede_agg_info *tpa_info;
1345 	struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1346 	uint16_t cons_idx;
1347 
1348 	/* Under certain conditions it is possible that the FW may not consume
1349 	 * an additional or new BD, so the decision to consume the BD must be
1350 	 * based on len_list[0].
1351 	 */
1352 	if (rte_le_to_cpu_16(len)) {
1353 		tpa_info = &rxq->tpa_info[agg_index];
1354 		cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1355 		curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
1356 		assert(curr_frag);
1357 		curr_frag->nb_segs = 1;
1358 		curr_frag->pkt_len = rte_le_to_cpu_16(len);
1359 		curr_frag->data_len = curr_frag->pkt_len;
1360 		tpa_info->tpa_tail->next = curr_frag;
1361 		tpa_info->tpa_tail = curr_frag;
1362 		qede_rx_bd_ring_consume(rxq);
1363 		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1364 			PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
1365 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1366 			rxq->rx_alloc_errors++;
1367 		}
1368 	}
1369 }
1370 
1371 static inline void
1372 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1373 			     struct qede_rx_queue *rxq,
1374 			     struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1375 {
1376 	PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1377 		   cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1378 	/* only len_list[0] will have value */
1379 	qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1380 					     cqe->len_list[0]);
1381 }
1382 
1383 static inline void
1384 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1385 			    struct qede_rx_queue *rxq,
1386 			    struct eth_fast_path_rx_tpa_end_cqe *cqe)
1387 {
1388 	struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1389 
1390 	qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1391 					     cqe->len_list[0]);
1392 	/* Update total length and frags based on end TPA */
1393 	rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1394 	/* TODO:  Add Sanity Checks */
1395 	rx_mb->nb_segs = cqe->num_of_bds;
1396 	rx_mb->pkt_len = cqe->total_packet_len;
1397 
1398 	PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1399 		   " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1400 		   rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1401 		   rx_mb->pkt_len);
1402 }
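
/* TPA (LRO) flow, as handled above and in qede_recv_pkts(): a TPA_START CQE
 * opens an aggregation and its first mbuf becomes both tpa_head and tpa_tail;
 * each TPA_CONT CQE chains one more buffer through tpa_tail->next, consuming
 * a BD only when len_list[0] is non-zero; the TPA_END CQE chains the final
 * buffer and then nb_segs and pkt_len are fixed up on the head mbuf before it
 * is handed to the application.
 */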
1403 
1404 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1405 {
1406 	uint32_t val;
1407 
1408 	/* Lookup table */
1409 	static const uint32_t
1410 	ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1411 		[QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1412 		[QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1413 		[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1414 		[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1415 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1416 				RTE_PTYPE_TUNNEL_GENEVE,
1417 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1418 				RTE_PTYPE_TUNNEL_GRE,
1419 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1420 				RTE_PTYPE_TUNNEL_VXLAN,
1421 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1422 				RTE_PTYPE_TUNNEL_GENEVE,
1423 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1424 				RTE_PTYPE_TUNNEL_GRE,
1425 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1426 				RTE_PTYPE_TUNNEL_VXLAN,
1427 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1428 				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1429 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1430 				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1431 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1432 				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1433 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1434 				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1435 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1436 				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1437 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1438 				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1439 		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1440 				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1441 		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1442 				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1443 		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1444 				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1445 		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1446 				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1447 		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1448 				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1449 		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1450 				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1451 	};
1452 
1453 	/* Cover bits[4-0] to include tunn_type and next protocol */
1454 	val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1455 		ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1456 		(ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1457 		ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1458 
1459 	if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1460 		return ptype_tunn_lkup_tbl[val];
1461 	else
1462 		return RTE_PTYPE_UNKNOWN;
1463 }
1464 
1465 static inline int
1466 qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
1467 		     uint8_t num_segs, uint16_t pkt_len)
1468 {
1469 	struct qede_rx_queue *rxq = p_rxq;
1470 	struct qede_dev *qdev = rxq->qdev;
1471 	register struct rte_mbuf *seg1 = NULL;
1472 	register struct rte_mbuf *seg2 = NULL;
1473 	uint16_t sw_rx_index;
1474 	uint16_t cur_size;
1475 
1476 	seg1 = rx_mb;
1477 	while (num_segs) {
1478 		cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1479 							pkt_len;
1480 		if (unlikely(!cur_size)) {
1481 			PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1482 				   " left for mapping jumbo\n", num_segs);
1483 			qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1484 			return -EINVAL;
1485 		}
1486 		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1487 		seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1488 		qede_rx_bd_ring_consume(rxq);
1489 		pkt_len -= cur_size;
1490 		seg2->data_len = cur_size;
1491 		seg1->next = seg2;
1492 		seg1 = seg1->next;
1493 		num_segs--;
1494 		rxq->rx_segs++;
1495 	}
1496 
1497 	return 0;
1498 }
1499 
1500 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1501 static inline void
1502 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1503 		 uint8_t bitfield)
1504 {
1505 	PMD_RX_LOG(INFO, rxq,
1506 		"len 0x%04x bf 0x%04x hash_val 0x%x"
1507 		" ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1508 		" inner_l2=%s inner_l3=%s inner_l4=%s\n",
1509 		m->data_len, bitfield, m->hash.rss,
1510 		(unsigned long)m->ol_flags,
1511 		rte_get_ptype_l2_name(m->packet_type),
1512 		rte_get_ptype_l3_name(m->packet_type),
1513 		rte_get_ptype_l4_name(m->packet_type),
1514 		rte_get_ptype_tunnel_name(m->packet_type),
1515 		rte_get_ptype_inner_l2_name(m->packet_type),
1516 		rte_get_ptype_inner_l3_name(m->packet_type),
1517 		rte_get_ptype_inner_l4_name(m->packet_type));
1518 }
1519 #endif
1520 
1521 uint16_t
1522 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1523 {
1524 	struct qede_rx_queue *rxq = p_rxq;
1525 	struct qede_dev *qdev = rxq->qdev;
1526 	struct ecore_dev *edev = &qdev->edev;
1527 	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1528 	uint16_t rx_pkt = 0;
1529 	union eth_rx_cqe *cqe;
1530 	struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1531 	register struct rte_mbuf *rx_mb = NULL;
1532 	register struct rte_mbuf *seg1 = NULL;
1533 	enum eth_rx_cqe_type cqe_type;
1534 	uint16_t pkt_len = 0; /* Sum of all BD segments */
1535 	uint16_t len; /* Length of first BD */
1536 	uint8_t num_segs = 1;
1537 	uint16_t preload_idx;
1538 	uint16_t parse_flag;
1539 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1540 	uint8_t bitfield_val;
1541 #endif
1542 	uint8_t tunn_parse_flag;
1543 	struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1544 	uint64_t ol_flags;
1545 	uint32_t packet_type;
1546 	uint16_t vlan_tci;
1547 	bool tpa_start_flg;
1548 	uint8_t offset, tpa_agg_idx, flags;
1549 	struct qede_agg_info *tpa_info = NULL;
1550 	uint32_t rss_hash;
1551 	int rx_alloc_count = 0;
1552 
1553 
1554 	/* Allocate buffers that we used in previous loop */
1555 	if (rxq->rx_alloc_count) {
1556 		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1557 			     rxq->rx_alloc_count))) {
1558 			struct rte_eth_dev *dev;
1559 
1560 			PMD_RX_LOG(ERR, rxq,
1561 				   "New buffer allocation failed, "
1562 				   "dropping incoming packet\n");
1563 			dev = &rte_eth_devices[rxq->port_id];
1564 			dev->data->rx_mbuf_alloc_failed +=
1565 							rxq->rx_alloc_count;
1566 			rxq->rx_alloc_errors += rxq->rx_alloc_count;
1567 			return 0;
1568 		}
1569 		qede_update_rx_prod(qdev, rxq);
1570 		rxq->rx_alloc_count = 0;
1571 	}
1572 
1573 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1574 	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1575 
1576 	rte_rmb();
1577 
1578 	if (hw_comp_cons == sw_comp_cons)
1579 		return 0;
1580 
1581 	while (sw_comp_cons != hw_comp_cons) {
1582 		ol_flags = 0;
1583 		packet_type = RTE_PTYPE_UNKNOWN;
1584 		vlan_tci = 0;
1585 		tpa_start_flg = false;
1586 		rss_hash = 0;
1587 
1588 		/* Get the CQE from the completion ring */
1589 		cqe =
1590 		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1591 		cqe_type = cqe->fast_path_regular.type;
1592 		PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1593 
1594 		switch (cqe_type) {
1595 		case ETH_RX_CQE_TYPE_REGULAR:
1596 			fp_cqe = &cqe->fast_path_regular;
1597 		break;
1598 		case ETH_RX_CQE_TYPE_TPA_START:
1599 			cqe_start_tpa = &cqe->fast_path_tpa_start;
1600 			tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1601 			tpa_start_flg = true;
1602 			/* Mark it as LRO packet */
1603 			ol_flags |= PKT_RX_LRO;
1604 			/* In split mode, seg_len is the same as len_on_first_bd
1605 			 * and ext_bd_len_list will be empty since there are
1606 			 * no additional buffers
1607 			 */
1608 			PMD_RX_LOG(INFO, rxq,
1609 			    "TPA start[%d] - len_on_first_bd %d header %d"
1610 			    " [bd_list[0] %d], [seg_len %d]\n",
1611 			    cqe_start_tpa->tpa_agg_index,
1612 			    rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1613 			    cqe_start_tpa->header_len,
1614 			    rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1615 			    rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1616 
1617 		break;
1618 		case ETH_RX_CQE_TYPE_TPA_CONT:
1619 			qede_rx_process_tpa_cont_cqe(qdev, rxq,
1620 						     &cqe->fast_path_tpa_cont);
1621 			goto next_cqe;
1622 		case ETH_RX_CQE_TYPE_TPA_END:
1623 			qede_rx_process_tpa_end_cqe(qdev, rxq,
1624 						    &cqe->fast_path_tpa_end);
1625 			tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1626 			tpa_info = &rxq->tpa_info[tpa_agg_idx];
1627 			rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1628 			goto tpa_end;
1629 		case ETH_RX_CQE_TYPE_SLOW_PATH:
1630 			PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1631 			ecore_eth_cqe_completion(
1632 				&edev->hwfns[rxq->queue_id % edev->num_hwfns],
1633 				(struct eth_slow_path_rx_cqe *)cqe);
1634 			/* fall-thru */
1635 		default:
1636 			goto next_cqe;
1637 		}
1638 
1639 		/* Get the data from the SW ring */
1640 		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1641 		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1642 		assert(rx_mb != NULL);
1643 
1644 		/* Handle regular CQE or TPA start CQE */
1645 		if (!tpa_start_flg) {
1646 			parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1647 			offset = fp_cqe->placement_offset;
1648 			len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1649 			pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1650 			vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1651 			rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1652 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1653 			bitfield_val = fp_cqe->bitfields;
1654 #endif
1655 		} else {
1656 			parse_flag =
1657 			    rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1658 			offset = cqe_start_tpa->placement_offset;
1659 			/* seg_len = len_on_first_bd */
1660 			len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1661 			vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1662 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1663 			bitfield_val = cqe_start_tpa->bitfields;
1664 #endif
1665 			rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1666 		}
1667 		if (qede_tunn_exist(parse_flag)) {
1668 			PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1669 			if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1670 				PMD_RX_LOG(ERR, rxq,
1671 					    "L4 csum failed, flags = 0x%x\n",
1672 					    parse_flag);
1673 				rxq->rx_hw_errors++;
1674 				ol_flags |= PKT_RX_L4_CKSUM_BAD;
1675 			} else {
1676 				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1677 			}
1678 
1679 			if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1680 				PMD_RX_LOG(ERR, rxq,
1681 					"Outer L3 csum failed, flags = 0x%x\n",
1682 					parse_flag);
1683 				  rxq->rx_hw_errors++;
1684 				  ol_flags |= PKT_RX_EIP_CKSUM_BAD;
1685 			} else {
1686 				  ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1687 			}
1688 
1689 			if (tpa_start_flg)
1690 				flags = cqe_start_tpa->tunnel_pars_flags.flags;
1691 			else
1692 				flags = fp_cqe->tunnel_pars_flags.flags;
1693 			tunn_parse_flag = flags;
1694 
1695 			/* Tunnel_type */
1696 			packet_type =
1697 				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1698 
1699 			/* Inner header */
1700 			packet_type |=
1701 			      qede_rx_cqe_to_pkt_type_inner(parse_flag);
1702 
1706 			/* Outer L3/L4 types are not available in the CQE.
1707 			 * Need to add the placement offset to parse correctly.
1708 			 */
1709 			rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1710 			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1711 		} else {
1712 			packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1713 		}
1714 
1715 		/* Common handling for non-tunnel packets and for inner
1716 		 * headers in the case of tunnel.
1717 		 */
1718 		if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1719 			PMD_RX_LOG(ERR, rxq,
1720 				    "L4 csum failed, flags = 0x%x\n",
1721 				    parse_flag);
1722 			rxq->rx_hw_errors++;
1723 			ol_flags |= PKT_RX_L4_CKSUM_BAD;
1724 		} else {
1725 			ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1726 		}
1727 		if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1728 			PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1729 				   parse_flag);
1730 			rxq->rx_hw_errors++;
1731 			ol_flags |= PKT_RX_IP_CKSUM_BAD;
1732 		} else {
1733 			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1734 		}
1735 
1736 		if (CQE_HAS_VLAN(parse_flag) ||
1737 		    CQE_HAS_OUTER_VLAN(parse_flag)) {
1738 			/* Note: FW doesn't indicate Q-in-Q packet */
1739 			ol_flags |= PKT_RX_VLAN;
1740 			if (qdev->vlan_strip_flg) {
1741 				ol_flags |= PKT_RX_VLAN_STRIPPED;
1742 				rx_mb->vlan_tci = vlan_tci;
1743 			}
1744 		}
1745 
1746 		/* RSS Hash */
1747 		if (qdev->rss_enable) {
1748 			ol_flags |= PKT_RX_RSS_HASH;
1749 			rx_mb->hash.rss = rss_hash;
1750 		}
1751 
1752 		rx_alloc_count++;
1753 		qede_rx_bd_ring_consume(rxq);
1754 
1755 		if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1756 			PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1757 				   " len on first: %04x Total Len: %04x",
1758 				   fp_cqe->bd_num, len, pkt_len);
1759 			num_segs = fp_cqe->bd_num - 1;
1760 			seg1 = rx_mb;
1761 			if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1762 						 pkt_len - len))
1763 				goto next_cqe;
1764 
1765 			rx_alloc_count += num_segs;
1766 			rxq->rx_segs += num_segs;
1767 		}
1768 		rxq->rx_segs++; /* for the first segment */
1769 
1770 		/* Prefetch next mbuf while processing current one. */
1771 		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1772 		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1773 
1774 		/* Update rest of the MBUF fields */
1775 		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1776 		rx_mb->port = rxq->port_id;
1777 		rx_mb->ol_flags = ol_flags;
1778 		rx_mb->data_len = len;
1779 		rx_mb->packet_type = packet_type;
1780 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1781 		print_rx_bd_info(rx_mb, rxq, bitfield_val);
1782 #endif
1783 		if (!tpa_start_flg) {
1784 			rx_mb->nb_segs = fp_cqe->bd_num;
1785 			rx_mb->pkt_len = pkt_len;
1786 		} else {
1787 			/* store ref to the updated mbuf */
1788 			tpa_info->tpa_head = rx_mb;
1789 			tpa_info->tpa_tail = tpa_info->tpa_head;
1790 		}
1791 		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1792 tpa_end:
1793 		if (!tpa_start_flg) {
1794 			rx_pkts[rx_pkt] = rx_mb;
1795 			rx_pkt++;
1796 		}
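		/* TPA-start frames are not delivered here; the head mbuf is
		 * parked in tpa_info and handed to the application only once
		 * the corresponding TPA-end completion arrives.
		 */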
1797 next_cqe:
1798 		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1799 		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1800 		if (rx_pkt == nb_pkts) {
1801 			PMD_RX_LOG(DEBUG, rxq,
1802 				   "Budget reached nb_pkts=%u received=%u",
1803 				   rx_pkt, nb_pkts);
1804 			break;
1805 		}
1806 	}
1807 
1808 	/* Request number of buffers to be allocated in next loop */
1809 	rxq->rx_alloc_count = rx_alloc_count;
1810 
1811 	rxq->rcv_pkts += rx_pkt;
1812 
1813 	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1814 
1815 	return rx_pkt;
1816 }
1817 
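/* Receive burst for the dual-engine (CMT) configuration, where one port is
 * served by two hardware functions (fp0 and fp1). Engine 0 is asked for
 * nb_pkts / 2 packets, and engine 1 for whatever remains of the budget after
 * engine 0 has returned its actual count.
 */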
1818 uint16_t
1819 qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1820 {
1821 	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
1822 	uint16_t eng0_pkts, eng1_pkts;
1823 
1824 	eng0_pkts = nb_pkts / 2;
1825 
1826 	eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
1827 
1828 	eng1_pkts = nb_pkts - eng0_pkts;
1829 
1830 	eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
1831 				   eng1_pkts);
1832 
1833 	return eng0_pkts + eng1_pkts;
1834 }
1835 
1836 /* Populate scatter gather buffer descriptor fields */
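/* The first segment is always placed in BD1 by the caller; this helper maps
 * the remaining segments, using BD2 and BD3 first (allocating them if the
 * caller has not already produced them) and plain Tx BDs after that. It
 * returns the number of BDs produced here, which the caller adds to
 * bd1->data.nbds.
 */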
1837 static inline uint16_t
1838 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1839 		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
1840 		  uint16_t start_seg)
1841 {
1842 	struct qede_tx_queue *txq = p_txq;
1843 	struct eth_tx_bd *tx_bd = NULL;
1844 	dma_addr_t mapping;
1845 	uint16_t nb_segs = 0;
1846 
1847 	/* Check for scattered buffers */
1848 	while (m_seg) {
1849 		if (start_seg == 0) {
1850 			if (!*bd2) {
1851 				*bd2 = (struct eth_tx_2nd_bd *)
1852 					ecore_chain_produce(&txq->tx_pbl);
1853 				memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1854 				nb_segs++;
1855 			}
1856 			mapping = rte_mbuf_data_iova(m_seg);
1857 			QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1858 			PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1859 		} else if (start_seg == 1) {
1860 			if (!*bd3) {
1861 				*bd3 = (struct eth_tx_3rd_bd *)
1862 					ecore_chain_produce(&txq->tx_pbl);
1863 				memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1864 				nb_segs++;
1865 			}
1866 			mapping = rte_mbuf_data_iova(m_seg);
1867 			QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1868 			PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1869 		} else {
1870 			tx_bd = (struct eth_tx_bd *)
1871 				ecore_chain_produce(&txq->tx_pbl);
1872 			memset(tx_bd, 0, sizeof(*tx_bd));
1873 			nb_segs++;
1874 			mapping = rte_mbuf_data_iova(m_seg);
1875 			QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1876 			PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1877 		}
1878 		start_seg++;
1879 		m_seg = m_seg->next;
1880 	}
1881 
1882 	/* Return the number of BDs filled for the scattered segments */
1883 	return nb_segs;
1884 }
1885 
1886 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1887 static inline void
1888 print_tx_bd_info(struct qede_tx_queue *txq,
1889 		 struct eth_tx_1st_bd *bd1,
1890 		 struct eth_tx_2nd_bd *bd2,
1891 		 struct eth_tx_3rd_bd *bd3,
1892 		 uint64_t tx_ol_flags)
1893 {
1894 	char ol_buf[256] = { 0 }; /* for verbose prints */
1895 
1896 	if (bd1)
1897 		PMD_TX_LOG(INFO, txq,
1898 		   "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
1899 		   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1900 		   bd1->data.bd_flags.bitfields,
1901 		   rte_cpu_to_le_16(bd1->data.bitfields));
1902 	if (bd2)
1903 		PMD_TX_LOG(INFO, txq,
1904 		   "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
1905 		   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
1906 		   bd2->data.bitfields2, bd2->data.tunn_ip_size);
1907 	if (bd3)
1908 		PMD_TX_LOG(INFO, txq,
1909 		   "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
1910 		   "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
1911 		   rte_cpu_to_le_16(bd3->nbytes),
1912 		   rte_cpu_to_le_16(bd3->data.bitfields),
1913 		   rte_cpu_to_le_16(bd3->data.lso_mss),
1914 		   bd3->data.tunn_l4_hdr_start_offset_w,
1915 		   bd3->data.tunn_hdr_size_w);
1916 
1917 	rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1918 	PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1919 }
1920 #endif
1921 
1922 /* TX prepare to check that packets meet TX conditions */
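/* Used as the PMD's Tx prepare handler: it validates segment counts for LSO
 * and non-LSO packets and rejects unsupported offload/tunnel combinations,
 * returning the number of packets that may be passed to the transmit burst
 * and setting rte_errno (e.g. EINVAL or ENOTSUP) for the first packet that
 * fails.
 *
 * A minimal usage sketch from an application's point of view (not part of
 * this driver; port_id, queue_id, pkts and nb_pkts are placeholders):
 *
 *	nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
 */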
1923 uint16_t
1924 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1925 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1926 		    uint16_t nb_pkts)
1927 {
1928 	struct qede_tx_queue *txq = p_txq;
1929 #else
1930 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1931 		    uint16_t nb_pkts)
1932 {
1933 #endif
1934 	uint64_t ol_flags;
1935 	struct rte_mbuf *m;
1936 	uint16_t i;
1937 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1938 	int ret;
1939 #endif
1940 
1941 	for (i = 0; i < nb_pkts; i++) {
1942 		m = tx_pkts[i];
1943 		ol_flags = m->ol_flags;
1944 		if (ol_flags & PKT_TX_TCP_SEG) {
1945 			if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1946 				rte_errno = EINVAL;
1947 				break;
1948 			}
1949 			/* TBD: confirm it's ~9700B for both? */
1950 			if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1951 				rte_errno = EINVAL;
1952 				break;
1953 			}
1954 		} else {
1955 			if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1956 				rte_errno = EINVAL;
1957 				break;
1958 			}
1959 		}
1960 		if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1961 			/* We support only a limited set of tunnel protocols */
1962 			if (ol_flags & PKT_TX_TUNNEL_MASK) {
1963 				uint64_t temp;
1964 
1965 				temp = ol_flags & PKT_TX_TUNNEL_MASK;
1966 				if (temp == PKT_TX_TUNNEL_VXLAN ||
1967 				    temp == PKT_TX_TUNNEL_GENEVE ||
1968 				    temp == PKT_TX_TUNNEL_MPLSINUDP ||
1969 				    temp == PKT_TX_TUNNEL_GRE)
1970 					continue;
1971 			}
1972 
1973 			rte_errno = ENOTSUP;
1974 			break;
1975 		}
1976 
1977 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1978 		ret = rte_validate_tx_offload(m);
1979 		if (ret != 0) {
1980 			rte_errno = -ret;
1981 			break;
1982 		}
1983 #endif
1984 	}
1985 
1986 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1987 	if (unlikely(i != nb_pkts))
1988 		PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
1989 			   nb_pkts - i);
1990 #endif
1991 	return i;
1992 }
1993 
1994 #define MPLSINUDP_HDR_SIZE			(12)
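/* The 12 bytes correspond to the UDP header (8 bytes) plus one 4-byte MPLS
 * label, matching the single-label MPLS-over-UDP encapsulation supported
 * below.
 */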
1995 
1996 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1997 static inline void
1998 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
1999 			       struct qede_tx_queue *txq)
2000 {
2001 	if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
2002 		PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
2003 	if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
2004 		MPLSINUDP_HDR_SIZE) / 2) > 0xff)
2005 		PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
2006 	if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
2007 		ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
2008 		PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
2009 	if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
2010 		ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
2011 		PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
2012 }
2013 #endif
2014 
2015 uint16_t
2016 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2017 {
2018 	struct qede_tx_queue *txq = p_txq;
2019 	struct qede_dev *qdev = txq->qdev;
2020 	struct ecore_dev *edev = &qdev->edev;
2021 	struct rte_mbuf *mbuf;
2022 	struct rte_mbuf *m_seg = NULL;
2023 	uint16_t nb_tx_pkts;
2024 	uint16_t bd_prod;
2025 	uint16_t idx;
2026 	uint16_t nb_frags;
2027 	uint16_t nb_pkt_sent = 0;
2028 	uint8_t nbds;
2029 	bool lso_flg;
2030 	bool mplsoudp_flg;
2031 	__rte_unused bool tunn_flg;
2032 	bool tunn_ipv6_ext_flg;
2033 	struct eth_tx_1st_bd *bd1;
2034 	struct eth_tx_2nd_bd *bd2;
2035 	struct eth_tx_3rd_bd *bd3;
2036 	uint64_t tx_ol_flags;
2037 	uint16_t hdr_size;
2038 	/* BD1 */
2039 	uint16_t bd1_bf;
2040 	uint8_t bd1_bd_flags_bf;
2041 	uint16_t vlan;
2042 	/* BD2 */
2043 	uint16_t bd2_bf1;
2044 	uint16_t bd2_bf2;
2045 	/* BD3 */
2046 	uint16_t mss;
2047 	uint16_t bd3_bf;
2048 
2049 	uint8_t tunn_l4_hdr_start_offset;
2050 	uint8_t tunn_hdr_size;
2051 	uint8_t inner_l2_hdr_size;
2052 	uint16_t inner_l4_hdr_offset;
2053 
2054 	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2055 		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2056 			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2057 		qede_process_tx_compl(edev, txq);
2058 	}
2059 
2060 	nb_tx_pkts  = nb_pkts;
2061 	bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2062 	while (nb_tx_pkts--) {
2063 		/* Init flags/values */
2064 		tunn_flg = false;
2065 		lso_flg = false;
2066 		nbds = 0;
2067 		vlan = 0;
2068 		bd1 = NULL;
2069 		bd2 = NULL;
2070 		bd3 = NULL;
2071 		hdr_size = 0;
2072 		bd1_bf = 0;
2073 		bd1_bd_flags_bf = 0;
2074 		bd2_bf1 = 0;
2075 		bd2_bf2 = 0;
2076 		mss = 0;
2077 		bd3_bf = 0;
2078 		mplsoudp_flg = false;
2079 		tunn_ipv6_ext_flg = false;
2080 		tunn_hdr_size = 0;
2081 		tunn_l4_hdr_start_offset = 0;
2082 
2083 		mbuf = *tx_pkts++;
2084 		assert(mbuf);
2085 
2086 		/* Make sure enough free TX BDs are available for all segments */
2087 		if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2088 			break;
2089 
2090 		tx_ol_flags = mbuf->ol_flags;
2091 		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2092 
2093 		/* TX prepare would have already checked supported tunnel Tx
2094 		 * offloads. Don't rely on pkt_type marked by Rx, instead use
2095 		 * tx_ol_flags to decide.
2096 		 */
2097 		tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
2098 
2099 		if (tunn_flg) {
2100 			/* Check against max which is Tunnel IPv6 + ext */
2101 			if (unlikely(txq->nb_tx_avail <
2102 				ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
2103 					break;
2104 
2105 			/* First indicate it's a tunnel pkt */
2106 			bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
2107 				  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2108 			/* Legacy FW had flipped behavior in regard to this bit,
2109 			 * i.e. it needed to be set to prevent FW from touching
2110 			 * encapsulated packets when it didn't need to.
2111 			 */
2112 			if (unlikely(txq->is_legacy)) {
2113 				bd1_bf ^= 1 <<
2114 					ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2115 			}
2116 
2117 			/* Outer IP checksum offload */
2118 			if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
2119 					   PKT_TX_OUTER_IPV4)) {
2120 				bd1_bd_flags_bf |=
2121 					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
2122 					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
2123 			}
2124 
2125 			/**
2126 			 * Currently, only inner checksum offload in MPLS-in-UDP
2127 			 * tunnel with one MPLS label is supported. Both outer
2128 			 * and inner layer lengths need to be provided in
2129 			 * the mbuf.
2130 			 */
2131 			if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
2132 						PKT_TX_TUNNEL_MPLSINUDP) {
2133 				mplsoudp_flg = true;
2134 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2135 				qede_mpls_tunn_tx_sanity_check(mbuf, txq);
2136 #endif
2137 				/* Outer L4 offset in two byte words */
2138 				tunn_l4_hdr_start_offset =
2139 				  (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
2140 				/* Tunnel header size in two byte words */
2141 				tunn_hdr_size = (mbuf->outer_l2_len +
2142 						mbuf->outer_l3_len +
2143 						MPLSINUDP_HDR_SIZE) / 2;
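				/* For example, with a 14-byte outer Ethernet
				 * header and a 20-byte outer IPv4 header this
				 * gives 34 / 2 = 17 words for the outer L4
				 * offset and (34 + 12) / 2 = 23 words for the
				 * tunnel header size.
				 */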
2144 				/* Inner L2 header size in two byte words */
2145 				inner_l2_hdr_size = (mbuf->l2_len -
2146 						MPLSINUDP_HDR_SIZE) / 2;
2147 				/* Inner L4 header offset from the beginning
2148 				 * of the inner packet in two byte words
2149 				 */
2150 				inner_l4_hdr_offset = (mbuf->l2_len -
2151 					MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
2152 
2153 				/* Inner L2 size and address type */
2154 				bd2_bf1 |= (inner_l2_hdr_size &
2155 					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
2156 					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
2157 				bd2_bf1 |= (UNICAST_ADDRESS &
2158 					ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
2159 					ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
2160 				/* Treated as IPv6+Ext */
2161 				bd2_bf1 |=
2162 				    1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
2163 
2164 				/* Mark inner IPv6 if present */
2165 				if (tx_ol_flags & PKT_TX_IPV6)
2166 					bd2_bf1 |=
2167 						1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
2168 
2169 				/* Inner L4 offsets */
2170 				if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2171 				     (tx_ol_flags & (PKT_TX_UDP_CKSUM |
2172 							PKT_TX_TCP_CKSUM))) {
2173 					/* Determines if BD3 is needed */
2174 					tunn_ipv6_ext_flg = true;
2175 					if ((tx_ol_flags & PKT_TX_L4_MASK) ==
2176 							PKT_TX_UDP_CKSUM) {
2177 						bd2_bf1 |=
2178 							1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
2179 					}
2180 
2181 					/* TODO other pseudo checksum modes are
2182 					 * not supported
2183 					 */
2184 					bd2_bf1 |=
2185 					ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
2186 					ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
2187 					bd2_bf2 |= (inner_l4_hdr_offset &
2188 						ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
2189 						ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
2190 				}
2191 			} /* End MPLSoUDP */
2192 		} /* End Tunnel handling */
2193 
2194 		if (tx_ol_flags & PKT_TX_TCP_SEG) {
2195 			lso_flg = true;
2196 			if (unlikely(txq->nb_tx_avail <
2197 						ETH_TX_MIN_BDS_PER_LSO_PKT))
2198 				break;
2199 			/* For LSO, packet header and payload must reside in
2200 			 * buffers pointed to by different BDs. Using BD1 for
2201 			 * the header and BD2 onwards for data.
2202 			 */
2203 			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2204 			if (tunn_flg)
2205 				hdr_size += mbuf->outer_l2_len +
2206 					    mbuf->outer_l3_len;
2207 
2208 			bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2209 			bd1_bd_flags_bf |=
2210 					1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2211 			/* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
2212 			bd1_bd_flags_bf |=
2213 					1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2214 			mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2215 			/* Using one header BD */
2216 			bd3_bf |= rte_cpu_to_le_16(1 <<
2217 					ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2218 		} else {
2219 			if (unlikely(txq->nb_tx_avail <
2220 					ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2221 				break;
2222 			bd1_bf |=
2223 			       (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2224 				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2225 		}
2226 
2227 		/* Descriptor based VLAN insertion */
2228 		if (tx_ol_flags & PKT_TX_VLAN_PKT) {
2229 			vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2230 			bd1_bd_flags_bf |=
2231 			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2232 		}
2233 
2234 		/* Offload the IP checksum in the hardware */
2235 		if (tx_ol_flags & PKT_TX_IP_CKSUM) {
2236 			bd1_bd_flags_bf |=
2237 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2238 			/* There's no DPDK flag to request outer-L4 csum
2239 			 * offload. But in the case of a tunnel, if inner L3 or
2240 			 * L4 csum offload is requested, we also need to force
2241 			 * recalculation of the tunnel header L4 csum.
2242 			 */
2243 			if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
2244 							PKT_TX_TUNNEL_GRE)) {
2245 				bd1_bd_flags_bf |=
2246 					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2247 					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2248 			}
2249 		}
2250 
2251 		/* L4 checksum offload (tcp or udp) */
2252 		if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2253 		    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
2254 			bd1_bd_flags_bf |=
2255 				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2256 			/* There's no DPDK flag to request outer-L4 csum
2257 			 * offload. But in the case of a tunnel, if inner L3 or
2258 			 * L4 csum offload is requested, we also need to force
2259 			 * recalculation of the tunnel header L4 csum.
2260 			 */
2261 			if (tunn_flg) {
2262 				bd1_bd_flags_bf |=
2263 					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2264 					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2265 			}
2266 		}
2267 
2268 		/* Fill the entry in the SW ring and the BDs in the FW ring */
2269 		idx = TX_PROD(txq);
2270 		txq->sw_tx_ring[idx].mbuf = mbuf;
2271 
2272 		/* BD1 */
2273 		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2274 		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2275 		nbds++;
2276 
2277 		/* Map MBUF linear data for DMA and set in the BD1 */
2278 		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2279 				     mbuf->data_len);
2280 		bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2281 		bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2282 		bd1->data.vlan = vlan;
2283 
2284 		if (lso_flg || mplsoudp_flg) {
2285 			bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2286 							(&txq->tx_pbl);
2287 			memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2288 			nbds++;
2289 
2290 			/* BD1 */
2291 			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2292 					     hdr_size);
2293 			/* BD2 */
2294 			QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2295 					     rte_mbuf_data_iova(mbuf)),
2296 					     mbuf->data_len - hdr_size);
2297 			bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2298 			if (mplsoudp_flg) {
2299 				bd2->data.bitfields2 =
2300 					rte_cpu_to_le_16(bd2_bf2);
2301 				/* Outer L3 size */
2302 				bd2->data.tunn_ip_size =
2303 					rte_cpu_to_le_16(mbuf->outer_l3_len);
2304 			}
2305 			/* BD3 */
2306 			if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2307 				bd3 = (struct eth_tx_3rd_bd *)
2308 					ecore_chain_produce(&txq->tx_pbl);
2309 				memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2310 				nbds++;
2311 				bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2312 				if (lso_flg)
2313 					bd3->data.lso_mss = mss;
2314 				if (mplsoudp_flg) {
2315 					bd3->data.tunn_l4_hdr_start_offset_w =
2316 						tunn_l4_hdr_start_offset;
2317 					bd3->data.tunn_hdr_size_w =
2318 						tunn_hdr_size;
2319 				}
2320 			}
2321 		}
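		/* When BD2/BD3 are used (LSO or MPLSoUDP), BD1 is trimmed to
		 * hdr_size bytes, BD2 maps the remainder of the first segment,
		 * and BD3 (when produced) carries the LSO MSS or the tunnel
		 * header offsets.
		 */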
2322 
2323 		/* Handle fragmented MBUF */
2324 		m_seg = mbuf->next;
2325 
2326 		/* Encode scatter gather buffer descriptors if required */
2327 		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2328 		bd1->data.nbds = nbds + nb_frags;
2329 
2330 		txq->nb_tx_avail -= bd1->data.nbds;
2331 		txq->sw_tx_prod++;
2332 		bd_prod =
2333 		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2334 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2335 		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2336 #endif
2337 		nb_pkt_sent++;
2338 		txq->xmit_pkts++;
2339 	}
2340 
2341 	/* Publish the new producer index to the Tx doorbell */
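	/* The barriers around the doorbell write ensure the BD updates above
	 * are visible before the new producer index is posted to the device.
	 */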
2342 	txq->tx_db.data.bd_prod = bd_prod;
2343 	rte_wmb();
2344 	rte_compiler_barrier();
2345 	DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2346 	rte_wmb();
2347 
2348 	/* Check again for Tx completions */
2349 	qede_process_tx_compl(edev, txq);
2350 
2351 	PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2352 		   nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2353 
2354 	return nb_pkt_sent;
2355 }
2356 
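/* Transmit counterpart of qede_recv_pkts_cmt(): in the dual-engine (CMT)
 * configuration the burst is split between the Tx queues of the two engines.
 */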
2357 uint16_t
2358 qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2359 {
2360 	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2361 	uint16_t eng0_pkts, eng1_pkts;
2362 
2363 	eng0_pkts = nb_pkts / 2;
2364 
2365 	eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
2366 
2367 	eng1_pkts = nb_pkts - eng0_pkts;
2368 
2369 	eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
2370 				   eng1_pkts);
2371 
2372 	return eng0_pkts + eng1_pkts;
2373 }
2374 
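/* No-op burst handler, presumably installed on the Rx/Tx burst function
 * pointers while the queues are not ready, so that callers simply receive or
 * send zero packets.
 */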
2375 uint16_t
2376 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
2377 		     __rte_unused struct rte_mbuf **pkts,
2378 		     __rte_unused uint16_t nb_pkts)
2379 {
2380 	return 0;
2381 }
2382 
2383 
2384 /* This function does a dry walk through the completion queue
2385  * to calculate the number of BDs used by the HW.
2386  * At the end, it restores the state of the completion queue.
2387  */
2388 static uint16_t
2389 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2390 {
2391 	uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2392 	union eth_rx_cqe *cqe, *orig_cqe = NULL;
2393 
2394 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2395 	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2396 
2397 	if (hw_comp_cons == sw_comp_cons)
2398 		return 0;
2399 
2400 	/* Get the CQE from the completion ring */
2401 	cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2402 	orig_cqe = cqe;
2403 
2404 	while (sw_comp_cons != hw_comp_cons) {
2405 		switch (cqe->fast_path_regular.type) {
2406 		case ETH_RX_CQE_TYPE_REGULAR:
2407 			bd_count += cqe->fast_path_regular.bd_num;
2408 			break;
2409 		case ETH_RX_CQE_TYPE_TPA_END:
2410 			bd_count += cqe->fast_path_tpa_end.num_of_bds;
2411 			break;
2412 		default:
2413 			break;
2414 		}
2415 
2416 		cqe =
2417 		(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2418 		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2419 	}
2420 
2421 	/* revert comp_ring to original state */
2422 	ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
2423 
2424 	return bd_count;
2425 }
2426 
2427 int
2428 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2429 {
2430 	uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2431 	uint16_t produced, consumed;
2432 	struct qede_rx_queue *rxq = p_rxq;
2433 
2434 	if (offset > rxq->nb_rx_desc)
2435 		return -EINVAL;
2436 
2437 	sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2438 	sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2439 
2440 	/* find BDs used by HW from completion queue elements */
2441 	hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
2442 
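	/* The BD indices are 16-bit and wrap around; when hw_bd_cons has
	 * wrapped past 0xffff it compares lower than sw_bd_cons, so the
	 * distance is computed by unfolding the wrap (and likewise for the
	 * producer index further below).
	 */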
2443 	if (hw_bd_cons < sw_bd_cons)
2444 		/* wraparound case */
2445 		consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2446 	else
2447 		consumed = hw_bd_cons - sw_bd_cons;
2448 
2449 	if (offset <= consumed)
2450 		return RTE_ETH_RX_DESC_DONE;
2451 
2452 	if (sw_bd_prod < sw_bd_cons)
2453 		/* wraparound case */
2454 		produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2455 	else
2456 		produced = sw_bd_prod - sw_bd_cons;
2457 
2458 	if (offset <= produced)
2459 		return RTE_ETH_RX_DESC_AVAIL;
2460 
2461 	return RTE_ETH_RX_DESC_UNAVAIL;
2462 }
2463