xref: /dpdk/drivers/net/mlx5/mlx5_rxtx.c (revision 1d88ba1719429c1e41bd61ab3add65ad9cc43551)
12e22920bSAdrien Mazarguil /*-
22e22920bSAdrien Mazarguil  *   BSD LICENSE
32e22920bSAdrien Mazarguil  *
42e22920bSAdrien Mazarguil  *   Copyright 2015 6WIND S.A.
52e22920bSAdrien Mazarguil  *   Copyright 2015 Mellanox.
62e22920bSAdrien Mazarguil  *
72e22920bSAdrien Mazarguil  *   Redistribution and use in source and binary forms, with or without
82e22920bSAdrien Mazarguil  *   modification, are permitted provided that the following conditions
92e22920bSAdrien Mazarguil  *   are met:
102e22920bSAdrien Mazarguil  *
112e22920bSAdrien Mazarguil  *     * Redistributions of source code must retain the above copyright
122e22920bSAdrien Mazarguil  *       notice, this list of conditions and the following disclaimer.
132e22920bSAdrien Mazarguil  *     * Redistributions in binary form must reproduce the above copyright
142e22920bSAdrien Mazarguil  *       notice, this list of conditions and the following disclaimer in
152e22920bSAdrien Mazarguil  *       the documentation and/or other materials provided with the
162e22920bSAdrien Mazarguil  *       distribution.
172e22920bSAdrien Mazarguil  *     * Neither the name of 6WIND S.A. nor the names of its
182e22920bSAdrien Mazarguil  *       contributors may be used to endorse or promote products derived
192e22920bSAdrien Mazarguil  *       from this software without specific prior written permission.
202e22920bSAdrien Mazarguil  *
212e22920bSAdrien Mazarguil  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
222e22920bSAdrien Mazarguil  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
232e22920bSAdrien Mazarguil  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
242e22920bSAdrien Mazarguil  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
252e22920bSAdrien Mazarguil  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
262e22920bSAdrien Mazarguil  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
272e22920bSAdrien Mazarguil  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
282e22920bSAdrien Mazarguil  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
292e22920bSAdrien Mazarguil  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
302e22920bSAdrien Mazarguil  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
312e22920bSAdrien Mazarguil  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
322e22920bSAdrien Mazarguil  */
332e22920bSAdrien Mazarguil 
342e22920bSAdrien Mazarguil #include <assert.h>
352e22920bSAdrien Mazarguil #include <stdint.h>
362e22920bSAdrien Mazarguil #include <string.h>
372e22920bSAdrien Mazarguil #include <stdlib.h>
382e22920bSAdrien Mazarguil 
392e22920bSAdrien Mazarguil /* Verbs header. */
402e22920bSAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
412e22920bSAdrien Mazarguil #ifdef PEDANTIC
422e22920bSAdrien Mazarguil #pragma GCC diagnostic ignored "-Wpedantic"
432e22920bSAdrien Mazarguil #endif
442e22920bSAdrien Mazarguil #include <infiniband/verbs.h>
456218063bSNélio Laranjeiro #include <infiniband/mlx5_hw.h>
466218063bSNélio Laranjeiro #include <infiniband/arch.h>
472e22920bSAdrien Mazarguil #ifdef PEDANTIC
482e22920bSAdrien Mazarguil #pragma GCC diagnostic error "-Wpedantic"
492e22920bSAdrien Mazarguil #endif
502e22920bSAdrien Mazarguil 
512e22920bSAdrien Mazarguil /* DPDK headers don't like -pedantic. */
522e22920bSAdrien Mazarguil #ifdef PEDANTIC
532e22920bSAdrien Mazarguil #pragma GCC diagnostic ignored "-Wpedantic"
542e22920bSAdrien Mazarguil #endif
552e22920bSAdrien Mazarguil #include <rte_mbuf.h>
562e22920bSAdrien Mazarguil #include <rte_mempool.h>
572e22920bSAdrien Mazarguil #include <rte_prefetch.h>
582e22920bSAdrien Mazarguil #include <rte_common.h>
592e22920bSAdrien Mazarguil #include <rte_branch_prediction.h>
606218063bSNélio Laranjeiro #include <rte_ether.h>
612e22920bSAdrien Mazarguil #ifdef PEDANTIC
622e22920bSAdrien Mazarguil #pragma GCC diagnostic error "-Wpedantic"
632e22920bSAdrien Mazarguil #endif
642e22920bSAdrien Mazarguil 
652e22920bSAdrien Mazarguil #include "mlx5.h"
662e22920bSAdrien Mazarguil #include "mlx5_utils.h"
672e22920bSAdrien Mazarguil #include "mlx5_rxtx.h"
68f3db9489SYaacov Hazan #include "mlx5_autoconf.h"
692e22920bSAdrien Mazarguil #include "mlx5_defs.h"
706218063bSNélio Laranjeiro #include "mlx5_prm.h"
716218063bSNélio Laranjeiro 
726218063bSNélio Laranjeiro static inline volatile struct mlx5_cqe64 *
736218063bSNélio Laranjeiro get_cqe64(volatile struct mlx5_cqe cqes[],
746218063bSNélio Laranjeiro 	  unsigned int cqes_n, uint16_t *ci)
756218063bSNélio Laranjeiro 	  __attribute__((always_inline));
766218063bSNélio Laranjeiro 
776218063bSNélio Laranjeiro static inline int
786218063bSNélio Laranjeiro rx_poll_len(struct rxq *rxq) __attribute__((always_inline));
796218063bSNélio Laranjeiro 
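/**
 * Fetch the next CQE from a completion queue, if one is ready.
 *
 * @param cqes
 *   Completion queue array.
 * @param cqes_n
 *   Number of entries in the completion queue (power of two).
 * @param[in, out] ci
 *   Consumer index, advanced when a CQE is consumed.
 *
 * @return
 *   Pointer to the CQE, NULL when no CQE is owned by software or on
 *   responder error.
 */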
806218063bSNélio Laranjeiro static volatile struct mlx5_cqe64 *
816218063bSNélio Laranjeiro get_cqe64(volatile struct mlx5_cqe cqes[],
826218063bSNélio Laranjeiro 	  unsigned int cqes_n, uint16_t *ci)
836218063bSNélio Laranjeiro {
846218063bSNélio Laranjeiro 	volatile struct mlx5_cqe64 *cqe;
856218063bSNélio Laranjeiro 	uint16_t idx = *ci;
866218063bSNélio Laranjeiro 	uint8_t op_own;
876218063bSNélio Laranjeiro 
886218063bSNélio Laranjeiro 	cqe = &cqes[idx & (cqes_n - 1)].cqe64;
896218063bSNélio Laranjeiro 	op_own = cqe->op_own;
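	/*
	 * The owner bit toggles on each pass over the CQ; if it does not
	 * match the parity of the consumer index, hardware has not written
	 * this CQE yet.
	 */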
906218063bSNélio Laranjeiro 	if (unlikely((op_own & MLX5_CQE_OWNER_MASK) == !(idx & cqes_n))) {
916218063bSNélio Laranjeiro 		return NULL;
926218063bSNélio Laranjeiro 	} else if (unlikely(op_own & 0x80)) {
936218063bSNélio Laranjeiro 		switch (op_own >> 4) {
946218063bSNélio Laranjeiro 		case MLX5_CQE_INVALID:
956218063bSNélio Laranjeiro 			return NULL; /* No CQE */
966218063bSNélio Laranjeiro 		case MLX5_CQE_REQ_ERR:
976218063bSNélio Laranjeiro 			return cqe;
986218063bSNélio Laranjeiro 		case MLX5_CQE_RESP_ERR:
996218063bSNélio Laranjeiro 			++(*ci);
1006218063bSNélio Laranjeiro 			return NULL;
1016218063bSNélio Laranjeiro 		default:
1026218063bSNélio Laranjeiro 			return NULL;
1036218063bSNélio Laranjeiro 		}
1046218063bSNélio Laranjeiro 	}
1056218063bSNélio Laranjeiro 	if (cqe) {
1066218063bSNélio Laranjeiro 		*ci = idx + 1;
1076218063bSNélio Laranjeiro 		return cqe;
1086218063bSNélio Laranjeiro 	}
1096218063bSNélio Laranjeiro 	return NULL;
1106218063bSNélio Laranjeiro }
1112e22920bSAdrien Mazarguil 
1122e22920bSAdrien Mazarguil /**
1132e22920bSAdrien Mazarguil  * Manage TX completions.
1142e22920bSAdrien Mazarguil  *
1152e22920bSAdrien Mazarguil  * When sending a burst, mlx5_tx_burst() posts several WRs.
1162e22920bSAdrien Mazarguil  * To improve performance, a completion event is only required once every
1172e22920bSAdrien Mazarguil  * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
1182e22920bSAdrien Mazarguil  * for other WRs, but this information would not be used anyway.
1192e22920bSAdrien Mazarguil  *
1202e22920bSAdrien Mazarguil  * @param txq
1212e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
1222e22920bSAdrien Mazarguil  */
123*1d88ba17SNélio Laranjeiro static void
1242e22920bSAdrien Mazarguil txq_complete(struct txq *txq)
1252e22920bSAdrien Mazarguil {
1262e22920bSAdrien Mazarguil 	const unsigned int elts_n = txq->elts_n;
127*1d88ba17SNélio Laranjeiro 	const unsigned int cqe_n = txq->cqe_n;
128*1d88ba17SNélio Laranjeiro 	uint16_t elts_free = txq->elts_tail;
129*1d88ba17SNélio Laranjeiro 	uint16_t elts_tail;
130*1d88ba17SNélio Laranjeiro 	uint16_t cq_ci = txq->cq_ci;
131*1d88ba17SNélio Laranjeiro 	unsigned int wqe_ci = (unsigned int)-1;
132*1d88ba17SNélio Laranjeiro 	int ret = 0;
1332e22920bSAdrien Mazarguil 
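	/*
	 * Completions are only requested every few packets, so drain all
	 * available CQEs; the WQE counter of the last one covers every WQE
	 * posted before it.
	 */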
134*1d88ba17SNélio Laranjeiro 	while (ret == 0) {
135*1d88ba17SNélio Laranjeiro 		volatile struct mlx5_cqe64 *cqe;
136*1d88ba17SNélio Laranjeiro 
137*1d88ba17SNélio Laranjeiro 		cqe = get_cqe64(*txq->cqes, cqe_n, &cq_ci);
138*1d88ba17SNélio Laranjeiro 		if (cqe == NULL)
139*1d88ba17SNélio Laranjeiro 			break;
140*1d88ba17SNélio Laranjeiro 		wqe_ci = ntohs(cqe->wqe_counter);
1412e22920bSAdrien Mazarguil 	}
142*1d88ba17SNélio Laranjeiro 	if (unlikely(wqe_ci == (unsigned int)-1))
143*1d88ba17SNélio Laranjeiro 		return;
144*1d88ba17SNélio Laranjeiro 	/* Free buffers. */
145*1d88ba17SNélio Laranjeiro 	elts_tail = (wqe_ci + 1) & (elts_n - 1);
146*1d88ba17SNélio Laranjeiro 	do {
147*1d88ba17SNélio Laranjeiro 		struct rte_mbuf *elt = (*txq->elts)[elts_free];
148a859e8a9SNelio Laranjeiro 		unsigned int elts_free_next =
149*1d88ba17SNélio Laranjeiro 			(elts_free + 1) & (elts_n - 1);
150*1d88ba17SNélio Laranjeiro 		struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];
151a859e8a9SNelio Laranjeiro 
152b185e63fSAdrien Mazarguil #ifndef NDEBUG
153b185e63fSAdrien Mazarguil 		/* Poisoning. */
154*1d88ba17SNélio Laranjeiro 		memset(&(*txq->elts)[elts_free],
155*1d88ba17SNélio Laranjeiro 		       0x66,
156*1d88ba17SNélio Laranjeiro 		       sizeof((*txq->elts)[elts_free]));
157b185e63fSAdrien Mazarguil #endif
158*1d88ba17SNélio Laranjeiro 		RTE_MBUF_PREFETCH_TO_FREE(elt_next);
159*1d88ba17SNélio Laranjeiro 		/* Only one segment needs to be freed. */
160*1d88ba17SNélio Laranjeiro 		rte_pktmbuf_free_seg(elt);
161a859e8a9SNelio Laranjeiro 		elts_free = elts_free_next;
162*1d88ba17SNélio Laranjeiro 	} while (elts_free != elts_tail);
163*1d88ba17SNélio Laranjeiro 	txq->cq_ci = cq_ci;
1642e22920bSAdrien Mazarguil 	txq->elts_tail = elts_tail;
165*1d88ba17SNélio Laranjeiro 	/* Update the consumer index. */
166*1d88ba17SNélio Laranjeiro 	rte_wmb();
167*1d88ba17SNélio Laranjeiro 	*txq->cq_db = htonl(cq_ci);
1682e22920bSAdrien Mazarguil }
1692e22920bSAdrien Mazarguil 
1702e22920bSAdrien Mazarguil /**
1718340392eSAdrien Mazarguil  * Get Memory Pool (MP) from mbuf. If the mbuf is indirect, the memory pool
1728340392eSAdrien Mazarguil  * of the direct mbuf it is attached to is returned instead.
1738340392eSAdrien Mazarguil  *
1748340392eSAdrien Mazarguil  * @param buf
1758340392eSAdrien Mazarguil  *   Pointer to mbuf.
1768340392eSAdrien Mazarguil  *
1778340392eSAdrien Mazarguil  * @return
1788340392eSAdrien Mazarguil  *   Memory pool where data is located for given mbuf.
1798340392eSAdrien Mazarguil  */
1808340392eSAdrien Mazarguil static struct rte_mempool *
1818340392eSAdrien Mazarguil txq_mb2mp(struct rte_mbuf *buf)
1828340392eSAdrien Mazarguil {
1838340392eSAdrien Mazarguil 	if (unlikely(RTE_MBUF_INDIRECT(buf)))
1848340392eSAdrien Mazarguil 		return rte_mbuf_from_indirect(buf)->pool;
1858340392eSAdrien Mazarguil 	return buf->pool;
1868340392eSAdrien Mazarguil }
1878340392eSAdrien Mazarguil 
188491770faSNélio Laranjeiro static inline uint32_t
189491770faSNélio Laranjeiro txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
190491770faSNélio Laranjeiro 	__attribute__((always_inline));
191491770faSNélio Laranjeiro 
1928340392eSAdrien Mazarguil /**
1932e22920bSAdrien Mazarguil  * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
1942e22920bSAdrien Mazarguil  * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
1952e22920bSAdrien Mazarguil  * remove an entry first.
1962e22920bSAdrien Mazarguil  *
1972e22920bSAdrien Mazarguil  * @param txq
1982e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
1992e22920bSAdrien Mazarguil  * @param[in] mp
2002e22920bSAdrien Mazarguil  *   Memory Pool for which a Memory Region lkey must be returned.
2012e22920bSAdrien Mazarguil  *
2022e22920bSAdrien Mazarguil  * @return
2032e22920bSAdrien Mazarguil  *   mr->lkey on success, (uint32_t)-1 on failure.
2042e22920bSAdrien Mazarguil  */
205491770faSNélio Laranjeiro static inline uint32_t
206d1d914ebSOlivier Matz txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
2072e22920bSAdrien Mazarguil {
2082e22920bSAdrien Mazarguil 	unsigned int i;
209491770faSNélio Laranjeiro 	uint32_t lkey = (uint32_t)-1;
2102e22920bSAdrien Mazarguil 
2112e22920bSAdrien Mazarguil 	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
2122e22920bSAdrien Mazarguil 		if (unlikely(txq->mp2mr[i].mp == NULL)) {
2132e22920bSAdrien Mazarguil 			/* Unknown MP, add a new MR for it. */
2142e22920bSAdrien Mazarguil 			break;
2152e22920bSAdrien Mazarguil 		}
2162e22920bSAdrien Mazarguil 		if (txq->mp2mr[i].mp == mp) {
2172e22920bSAdrien Mazarguil 			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
218*1d88ba17SNélio Laranjeiro 			assert(htonl(txq->mp2mr[i].mr->lkey) ==
219*1d88ba17SNélio Laranjeiro 			       txq->mp2mr[i].lkey);
220491770faSNélio Laranjeiro 			lkey = txq->mp2mr[i].lkey;
221491770faSNélio Laranjeiro 			break;
2222e22920bSAdrien Mazarguil 		}
2232e22920bSAdrien Mazarguil 	}
224491770faSNélio Laranjeiro 	if (unlikely(lkey == (uint32_t)-1))
225491770faSNélio Laranjeiro 		lkey = txq_mp2mr_reg(txq, mp, i);
226491770faSNélio Laranjeiro 	return lkey;
2270a3b350dSOlga Shern }
2280a3b350dSOlga Shern 
229e192ef80SYaacov Hazan /**
230*1d88ba17SNélio Laranjeiro  * Write a regular WQE.
231e192ef80SYaacov Hazan  *
232*1d88ba17SNélio Laranjeiro  * @param txq
233*1d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
234*1d88ba17SNélio Laranjeiro  * @param wqe
235*1d88ba17SNélio Laranjeiro  *   Pointer to the WQE to fill.
236*1d88ba17SNélio Laranjeiro  * @param addr
237*1d88ba17SNélio Laranjeiro  *   Buffer data address.
238*1d88ba17SNélio Laranjeiro  * @param length
239*1d88ba17SNélio Laranjeiro  *   Packet length.
240*1d88ba17SNélio Laranjeiro  * @param lkey
241*1d88ba17SNélio Laranjeiro  *   Memory region lkey.
242e192ef80SYaacov Hazan  */
243*1d88ba17SNélio Laranjeiro static inline void
244*1d88ba17SNélio Laranjeiro mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe,
245*1d88ba17SNélio Laranjeiro 	       uintptr_t addr, uint32_t length, uint32_t lkey)
246e192ef80SYaacov Hazan {
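	/*
	 * Control segment: data[0] holds the WQE index and the SEND opcode,
	 * data[1] the QP number and the WQE size in 16-byte units (4).
	 * data[2] (completion request flags) is filled in by the caller.
	 */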
247*1d88ba17SNélio Laranjeiro 	wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
248*1d88ba17SNélio Laranjeiro 	wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4);
249*1d88ba17SNélio Laranjeiro 	wqe->wqe.ctrl.data[3] = 0;
250*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.rsvd0 = 0;
251*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.rsvd1 = 0;
252*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.mss = 0;
253*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.rsvd2 = 0;
254*1d88ba17SNélio Laranjeiro 	wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE);
255*1d88ba17SNélio Laranjeiro 	/* Copy the first 16 bytes into inline header. */
256*1d88ba17SNélio Laranjeiro 	rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start,
257*1d88ba17SNélio Laranjeiro 		   (uint8_t *)(uintptr_t)addr,
258*1d88ba17SNélio Laranjeiro 		   MLX5_ETH_INLINE_HEADER_SIZE);
259*1d88ba17SNélio Laranjeiro 	addr += MLX5_ETH_INLINE_HEADER_SIZE;
260*1d88ba17SNélio Laranjeiro 	length -= MLX5_ETH_INLINE_HEADER_SIZE;
261*1d88ba17SNélio Laranjeiro 	/* Store remaining data in data segment. */
262*1d88ba17SNélio Laranjeiro 	wqe->wqe.dseg.byte_count = htonl(length);
263*1d88ba17SNélio Laranjeiro 	wqe->wqe.dseg.lkey = lkey;
264*1d88ba17SNélio Laranjeiro 	wqe->wqe.dseg.addr = htonll(addr);
265*1d88ba17SNélio Laranjeiro 	/* Increment consumer index. */
266*1d88ba17SNélio Laranjeiro 	++txq->wqe_ci;
267*1d88ba17SNélio Laranjeiro }
268e192ef80SYaacov Hazan 
269*1d88ba17SNélio Laranjeiro /**
270*1d88ba17SNélio Laranjeiro  * Write a regular WQE with VLAN.
271*1d88ba17SNélio Laranjeiro  *
272*1d88ba17SNélio Laranjeiro  * @param txq
273*1d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
274*1d88ba17SNélio Laranjeiro  * @param wqe
275*1d88ba17SNélio Laranjeiro  *   Pointer to the WQE to fill.
276*1d88ba17SNélio Laranjeiro  * @param addr
277*1d88ba17SNélio Laranjeiro  *   Buffer data address.
278*1d88ba17SNélio Laranjeiro  * @param length
279*1d88ba17SNélio Laranjeiro  *   Packet length.
280*1d88ba17SNélio Laranjeiro  * @param lkey
281*1d88ba17SNélio Laranjeiro  *   Memory region lkey.
282*1d88ba17SNélio Laranjeiro  * @param vlan_tci
283*1d88ba17SNélio Laranjeiro  *   VLAN field to insert in packet.
284*1d88ba17SNélio Laranjeiro  */
285*1d88ba17SNélio Laranjeiro static inline void
286*1d88ba17SNélio Laranjeiro mlx5_wqe_write_vlan(struct txq *txq, volatile union mlx5_wqe *wqe,
287*1d88ba17SNélio Laranjeiro 		    uintptr_t addr, uint32_t length, uint32_t lkey,
288*1d88ba17SNélio Laranjeiro 		    uint16_t vlan_tci)
289*1d88ba17SNélio Laranjeiro {
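	/* 802.1Q TPID (0x8100) in the upper 16 bits, TCI below, in network order. */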
290*1d88ba17SNélio Laranjeiro 	uint32_t vlan = htonl(0x81000000 | vlan_tci);
291e192ef80SYaacov Hazan 
292*1d88ba17SNélio Laranjeiro 	wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
293*1d88ba17SNélio Laranjeiro 	wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4);
294*1d88ba17SNélio Laranjeiro 	wqe->wqe.ctrl.data[3] = 0;
295*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.rsvd0 = 0;
296*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.rsvd1 = 0;
297*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.mss = 0;
298*1d88ba17SNélio Laranjeiro 	wqe->inl.eseg.rsvd2 = 0;
299*1d88ba17SNélio Laranjeiro 	wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE);
300*1d88ba17SNélio Laranjeiro 	/*
301*1d88ba17SNélio Laranjeiro 	 * Copy 12 bytes of source & destination MAC address.
302*1d88ba17SNélio Laranjeiro 	 * Copy 4 bytes of VLAN.
303*1d88ba17SNélio Laranjeiro 	 * Copy 2 bytes of Ether type.
304*1d88ba17SNélio Laranjeiro 	 */
305*1d88ba17SNélio Laranjeiro 	rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start,
306*1d88ba17SNélio Laranjeiro 		   (uint8_t *)(uintptr_t)addr, 12);
307*1d88ba17SNélio Laranjeiro 	rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 12),
308*1d88ba17SNélio Laranjeiro 		   &vlan, sizeof(vlan));
309*1d88ba17SNélio Laranjeiro 	rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 16),
310*1d88ba17SNélio Laranjeiro 		   (uint8_t *)((uintptr_t)addr + 12), 2);
311*1d88ba17SNélio Laranjeiro 	addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
312*1d88ba17SNélio Laranjeiro 	length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
313*1d88ba17SNélio Laranjeiro 	/* Store remaining data in data segment. */
314*1d88ba17SNélio Laranjeiro 	wqe->wqe.dseg.byte_count = htonl(length);
315*1d88ba17SNélio Laranjeiro 	wqe->wqe.dseg.lkey = lkey;
316*1d88ba17SNélio Laranjeiro 	wqe->wqe.dseg.addr = htonll(addr);
317*1d88ba17SNélio Laranjeiro 	/* Increment consumer index. */
318*1d88ba17SNélio Laranjeiro 	++txq->wqe_ci;
319*1d88ba17SNélio Laranjeiro }
320e192ef80SYaacov Hazan 
321*1d88ba17SNélio Laranjeiro /**
322*1d88ba17SNélio Laranjeiro  * Ring TX queue doorbell.
323*1d88ba17SNélio Laranjeiro  *
324*1d88ba17SNélio Laranjeiro  * @param txq
325*1d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
326*1d88ba17SNélio Laranjeiro  */
327*1d88ba17SNélio Laranjeiro static inline void
328*1d88ba17SNélio Laranjeiro mlx5_tx_dbrec(struct txq *txq)
329*1d88ba17SNélio Laranjeiro {
330*1d88ba17SNélio Laranjeiro 	uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
331*1d88ba17SNélio Laranjeiro 	uint32_t data[4] = {
332*1d88ba17SNélio Laranjeiro 		htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
333*1d88ba17SNélio Laranjeiro 		htonl(txq->qp_num_8s),
334*1d88ba17SNélio Laranjeiro 		0,
335*1d88ba17SNélio Laranjeiro 		0,
336*1d88ba17SNélio Laranjeiro 	};
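	/* Make the WQEs visible to the device before updating the doorbell record. */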
337*1d88ba17SNélio Laranjeiro 	rte_wmb();
338*1d88ba17SNélio Laranjeiro 	*txq->qp_db = htonl(txq->wqe_ci);
339*1d88ba17SNélio Laranjeiro 	/* Ensure ordering between DB record and BF copy. */
340*1d88ba17SNélio Laranjeiro 	rte_wmb();
341*1d88ba17SNélio Laranjeiro 	rte_mov16(dst, (uint8_t *)data);
342*1d88ba17SNélio Laranjeiro 	txq->bf_offset ^= txq->bf_buf_size;
343*1d88ba17SNélio Laranjeiro }
344e192ef80SYaacov Hazan 
345*1d88ba17SNélio Laranjeiro /**
346*1d88ba17SNélio Laranjeiro  * Prefetch a CQE.
347*1d88ba17SNélio Laranjeiro  *
348*1d88ba17SNélio Laranjeiro  * @param txq
349*1d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
350*1d88ba17SNélio Laranjeiro  * @param ci
351*1d88ba17SNélio Laranjeiro  *   CQE consumer index.
352*1d88ba17SNélio Laranjeiro  */
353*1d88ba17SNélio Laranjeiro static inline void
354*1d88ba17SNélio Laranjeiro tx_prefetch_cqe(struct txq *txq, uint16_t ci)
355*1d88ba17SNélio Laranjeiro {
356*1d88ba17SNélio Laranjeiro 	volatile struct mlx5_cqe64 *cqe;
357*1d88ba17SNélio Laranjeiro 
358*1d88ba17SNélio Laranjeiro 	cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
359*1d88ba17SNélio Laranjeiro 	rte_prefetch0(cqe);
360e192ef80SYaacov Hazan }
361e192ef80SYaacov Hazan 
3622e22920bSAdrien Mazarguil /**
3632e22920bSAdrien Mazarguil  * DPDK callback for TX.
3642e22920bSAdrien Mazarguil  *
3652e22920bSAdrien Mazarguil  * @param dpdk_txq
3662e22920bSAdrien Mazarguil  *   Generic pointer to TX queue structure.
3672e22920bSAdrien Mazarguil  * @param[in] pkts
3682e22920bSAdrien Mazarguil  *   Packets to transmit.
3692e22920bSAdrien Mazarguil  * @param pkts_n
3702e22920bSAdrien Mazarguil  *   Number of packets in array.
3712e22920bSAdrien Mazarguil  *
3722e22920bSAdrien Mazarguil  * @return
3732e22920bSAdrien Mazarguil  *   Number of packets successfully transmitted (<= pkts_n).
3742e22920bSAdrien Mazarguil  */
3752e22920bSAdrien Mazarguil uint16_t
3762e22920bSAdrien Mazarguil mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
3772e22920bSAdrien Mazarguil {
3782e22920bSAdrien Mazarguil 	struct txq *txq = (struct txq *)dpdk_txq;
379*1d88ba17SNélio Laranjeiro 	uint16_t elts_head = txq->elts_head;
3802e22920bSAdrien Mazarguil 	const unsigned int elts_n = txq->elts_n;
3812e22920bSAdrien Mazarguil 	unsigned int i;
3822e22920bSAdrien Mazarguil 	unsigned int max;
383*1d88ba17SNélio Laranjeiro 	volatile union mlx5_wqe *wqe;
384*1d88ba17SNélio Laranjeiro 	struct rte_mbuf *buf;
3852e22920bSAdrien Mazarguil 
386*1d88ba17SNélio Laranjeiro 	if (unlikely(!pkts_n))
387*1d88ba17SNélio Laranjeiro 		return 0;
388*1d88ba17SNélio Laranjeiro 	buf = pkts[0];
3895e1d11ecSNelio Laranjeiro 	/* Prefetch first CQEs and first packet cacheline. */
390*1d88ba17SNélio Laranjeiro 	tx_prefetch_cqe(txq, txq->cq_ci);
391*1d88ba17SNélio Laranjeiro 	tx_prefetch_cqe(txq, txq->cq_ci + 1);
3925e1d11ecSNelio Laranjeiro 	rte_prefetch0(buf);
393*1d88ba17SNélio Laranjeiro 	/* Start processing. */
3942e22920bSAdrien Mazarguil 	txq_complete(txq);
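	/* Free entries in the ring; indices may have wrapped, hence the correction below. */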
3954f52bbfbSNelio Laranjeiro 	max = (elts_n - (elts_head - txq->elts_tail));
3962e22920bSAdrien Mazarguil 	if (max > elts_n)
3972e22920bSAdrien Mazarguil 		max -= elts_n;
3982e22920bSAdrien Mazarguil 	assert(max >= 1);
3992e22920bSAdrien Mazarguil 	assert(max <= elts_n);
4002e22920bSAdrien Mazarguil 	/* Always leave one free entry in the ring. */
4012e22920bSAdrien Mazarguil 	--max;
4022e22920bSAdrien Mazarguil 	if (max == 0)
4032e22920bSAdrien Mazarguil 		return 0;
4042e22920bSAdrien Mazarguil 	if (max > pkts_n)
4052e22920bSAdrien Mazarguil 		max = pkts_n;
4062e22920bSAdrien Mazarguil 	for (i = 0; (i != max); ++i) {
407*1d88ba17SNélio Laranjeiro 		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
408573f54afSNélio Laranjeiro 		uintptr_t addr;
409573f54afSNélio Laranjeiro 		uint32_t length;
410573f54afSNélio Laranjeiro 		uint32_t lkey;
4112e22920bSAdrien Mazarguil 
412*1d88ba17SNélio Laranjeiro 		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
413*1d88ba17SNélio Laranjeiro 		rte_prefetch0(wqe);
4145e1d11ecSNelio Laranjeiro 		if (i + 1 < max)
415*1d88ba17SNélio Laranjeiro 			rte_prefetch0(pkts[i + 1]);
4162e22920bSAdrien Mazarguil 		/* Retrieve buffer information. */
4172e22920bSAdrien Mazarguil 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
4182e22920bSAdrien Mazarguil 		length = DATA_LEN(buf);
4192e22920bSAdrien Mazarguil 		/* Update element. */
420*1d88ba17SNélio Laranjeiro 		(*txq->elts)[elts_head] = buf;
4215e1d11ecSNelio Laranjeiro 		/* Prefetch next buffer data. */
422*1d88ba17SNélio Laranjeiro 		if (i + 1 < max)
423*1d88ba17SNélio Laranjeiro 			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
424*1d88ba17SNélio Laranjeiro 						       volatile void *));
4250431c40fSNélio Laranjeiro 		/* Retrieve Memory Region key for this memory pool. */
426d970e992SNelio Laranjeiro 		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
427*1d88ba17SNélio Laranjeiro 		if (buf->ol_flags & PKT_TX_VLAN_PKT)
428*1d88ba17SNélio Laranjeiro 			mlx5_wqe_write_vlan(txq, wqe, addr, length, lkey,
429*1d88ba17SNélio Laranjeiro 					    buf->vlan_tci);
430e192ef80SYaacov Hazan 		else
431*1d88ba17SNélio Laranjeiro 			mlx5_wqe_write(txq, wqe, addr, length, lkey);
432*1d88ba17SNélio Laranjeiro 		/* Request completion if needed. */
433*1d88ba17SNélio Laranjeiro 		if (unlikely(--txq->elts_comp == 0)) {
434*1d88ba17SNélio Laranjeiro 			wqe->wqe.ctrl.data[2] = htonl(8);
435*1d88ba17SNélio Laranjeiro 			txq->elts_comp = txq->elts_comp_cd_init;
436*1d88ba17SNélio Laranjeiro 		} else {
437*1d88ba17SNélio Laranjeiro 			wqe->wqe.ctrl.data[2] = 0;
438*1d88ba17SNélio Laranjeiro 		}
439*1d88ba17SNélio Laranjeiro 		/* Enable HW checksum offload if requested for this packet. */
440*1d88ba17SNélio Laranjeiro 		if (buf->ol_flags &
441*1d88ba17SNélio Laranjeiro 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
442*1d88ba17SNélio Laranjeiro 			wqe->wqe.eseg.cs_flags =
443*1d88ba17SNélio Laranjeiro 				MLX5_ETH_WQE_L3_CSUM |
444*1d88ba17SNélio Laranjeiro 				MLX5_ETH_WQE_L4_CSUM;
445*1d88ba17SNélio Laranjeiro 		} else {
446*1d88ba17SNélio Laranjeiro 			wqe->wqe.eseg.cs_flags = 0;
447*1d88ba17SNélio Laranjeiro 		}
44887011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
449573f54afSNélio Laranjeiro 		/* Increment sent bytes counter. */
450573f54afSNélio Laranjeiro 		txq->stats.obytes += length;
45187011737SAdrien Mazarguil #endif
4522e22920bSAdrien Mazarguil 		elts_head = elts_head_next;
453*1d88ba17SNélio Laranjeiro 		buf = pkts[i + 1];
4542e22920bSAdrien Mazarguil 	}
4552e22920bSAdrien Mazarguil 	/* Take a shortcut if nothing must be sent. */
4562e22920bSAdrien Mazarguil 	if (unlikely(i == 0))
4572e22920bSAdrien Mazarguil 		return 0;
45887011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
45987011737SAdrien Mazarguil 	/* Increment sent packets counter. */
46087011737SAdrien Mazarguil 	txq->stats.opackets += i;
46187011737SAdrien Mazarguil #endif
4622e22920bSAdrien Mazarguil 	/* Ring QP doorbell. */
463*1d88ba17SNélio Laranjeiro 	mlx5_tx_dbrec(txq);
4642e22920bSAdrien Mazarguil 	txq->elts_head = elts_head;
4652e22920bSAdrien Mazarguil 	return i;
4662e22920bSAdrien Mazarguil }
4672e22920bSAdrien Mazarguil 
4682e22920bSAdrien Mazarguil /**
46967fa62bcSAdrien Mazarguil  * Translate RX completion flags to packet type.
47067fa62bcSAdrien Mazarguil  *
4716218063bSNélio Laranjeiro  * @param[in] cqe
4726218063bSNélio Laranjeiro  *   Pointer to CQE.
47367fa62bcSAdrien Mazarguil  *
47478a38edfSJianfeng Tan  * @note: mlx5_dev_supported_ptypes_get() must be updated if anything changes here.
47578a38edfSJianfeng Tan  *
47667fa62bcSAdrien Mazarguil  * @return
47767fa62bcSAdrien Mazarguil  *   Packet type for struct rte_mbuf.
47867fa62bcSAdrien Mazarguil  */
47967fa62bcSAdrien Mazarguil static inline uint32_t
4806218063bSNélio Laranjeiro rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
48167fa62bcSAdrien Mazarguil {
48267fa62bcSAdrien Mazarguil 	uint32_t pkt_type;
4836218063bSNélio Laranjeiro 	uint8_t flags = cqe->l4_hdr_type_etc;
4846218063bSNélio Laranjeiro 	uint8_t info = cqe->rsvd0[0];
48567fa62bcSAdrien Mazarguil 
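	/* TRANSPOSE() moves a completion flag bit to the position of its mbuf counterpart. */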
4866218063bSNélio Laranjeiro 	if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
48767fa62bcSAdrien Mazarguil 		pkt_type =
48867fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
48967fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
49067fa62bcSAdrien Mazarguil 				  RTE_PTYPE_L3_IPV4) |
49167fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
49267fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
49367fa62bcSAdrien Mazarguil 				  RTE_PTYPE_L3_IPV6) |
49467fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
49567fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_IPV4_PACKET,
49667fa62bcSAdrien Mazarguil 				  RTE_PTYPE_INNER_L3_IPV4) |
49767fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
49867fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_IPV6_PACKET,
49967fa62bcSAdrien Mazarguil 				  RTE_PTYPE_INNER_L3_IPV6);
50067fa62bcSAdrien Mazarguil 	else
50167fa62bcSAdrien Mazarguil 		pkt_type =
50267fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
5036218063bSNélio Laranjeiro 				  MLX5_CQE_L3_HDR_TYPE_IPV6,
5046218063bSNélio Laranjeiro 				  RTE_PTYPE_L3_IPV6) |
50567fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
5066218063bSNélio Laranjeiro 				  MLX5_CQE_L3_HDR_TYPE_IPV4,
5076218063bSNélio Laranjeiro 				  RTE_PTYPE_L3_IPV4);
50867fa62bcSAdrien Mazarguil 	return pkt_type;
50967fa62bcSAdrien Mazarguil }
51067fa62bcSAdrien Mazarguil 
51167fa62bcSAdrien Mazarguil /**
51267fa62bcSAdrien Mazarguil  * Translate RX completion flags to offload flags.
51367fa62bcSAdrien Mazarguil  *
51467fa62bcSAdrien Mazarguil  * @param[in] rxq
51567fa62bcSAdrien Mazarguil  *   Pointer to RX queue structure.
5166218063bSNélio Laranjeiro  * @param[in] cqe
5176218063bSNélio Laranjeiro  *   Pointer to CQE.
51867fa62bcSAdrien Mazarguil  *
51967fa62bcSAdrien Mazarguil  * @return
52067fa62bcSAdrien Mazarguil  *   Offload flags (ol_flags) for struct rte_mbuf.
52167fa62bcSAdrien Mazarguil  */
52267fa62bcSAdrien Mazarguil static inline uint32_t
5236218063bSNélio Laranjeiro rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
52467fa62bcSAdrien Mazarguil {
52567fa62bcSAdrien Mazarguil 	uint32_t ol_flags = 0;
5266218063bSNélio Laranjeiro 	uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
5276218063bSNélio Laranjeiro 	uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
5286218063bSNélio Laranjeiro 	uint8_t info = cqe->rsvd0[0];
52967fa62bcSAdrien Mazarguil 
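	/* Branchless flags: the (checksum not OK) boolean is multiplied by the error flag. */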
5306218063bSNélio Laranjeiro 	if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
5316218063bSNélio Laranjeiro 	    (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
53267fa62bcSAdrien Mazarguil 		ol_flags |=
5336218063bSNélio Laranjeiro 			(!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
534d0087d76SYaacov Hazan 			 PKT_RX_IP_CKSUM_BAD);
5356218063bSNélio Laranjeiro 	if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
5366218063bSNélio Laranjeiro 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
5376218063bSNélio Laranjeiro 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
5386218063bSNélio Laranjeiro 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
539d0087d76SYaacov Hazan 		ol_flags |=
5406218063bSNélio Laranjeiro 			(!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
54167fa62bcSAdrien Mazarguil 			 PKT_RX_L4_CKSUM_BAD);
54267fa62bcSAdrien Mazarguil 	/*
54367fa62bcSAdrien Mazarguil 	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
54467fa62bcSAdrien Mazarguil 	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
54567fa62bcSAdrien Mazarguil 	 * (its value is 0).
54667fa62bcSAdrien Mazarguil 	 */
5476218063bSNélio Laranjeiro 	if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
54867fa62bcSAdrien Mazarguil 		ol_flags |=
5496218063bSNélio Laranjeiro 			TRANSPOSE(~cqe->l4_hdr_type_etc,
55067fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
55167fa62bcSAdrien Mazarguil 				  PKT_RX_IP_CKSUM_BAD) |
5526218063bSNélio Laranjeiro 			TRANSPOSE(~cqe->l4_hdr_type_etc,
55367fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
55467fa62bcSAdrien Mazarguil 				  PKT_RX_L4_CKSUM_BAD);
55567fa62bcSAdrien Mazarguil 	return ol_flags;
55667fa62bcSAdrien Mazarguil }
55767fa62bcSAdrien Mazarguil 
55867fa62bcSAdrien Mazarguil /**
5596218063bSNélio Laranjeiro  * Get size of the next packet.
5606218063bSNélio Laranjeiro  *
5616218063bSNélio Laranjeiro  * @param rxq
5626218063bSNélio Laranjeiro  *   RX queue to fetch packet from.
5636218063bSNélio Laranjeiro  *
5646218063bSNélio Laranjeiro  * @return
5656218063bSNélio Laranjeiro  *   Packet size in bytes.
5666218063bSNélio Laranjeiro  */
5676218063bSNélio Laranjeiro static inline int __attribute__((always_inline))
5686218063bSNélio Laranjeiro rx_poll_len(struct rxq *rxq)
5696218063bSNélio Laranjeiro {
5706218063bSNélio Laranjeiro 	volatile struct mlx5_cqe64 *cqe;
5716218063bSNélio Laranjeiro 
5726218063bSNélio Laranjeiro 	cqe = get_cqe64(*rxq->cqes, rxq->elts_n, &rxq->cq_ci);
5736218063bSNélio Laranjeiro 	if (cqe)
5746218063bSNélio Laranjeiro 		return ntohl(cqe->byte_cnt);
5756218063bSNélio Laranjeiro 	return 0;
5766218063bSNélio Laranjeiro }
5776218063bSNélio Laranjeiro 
5786218063bSNélio Laranjeiro /**
5792e22920bSAdrien Mazarguil  * DPDK callback for RX.
5802e22920bSAdrien Mazarguil  *
5812e22920bSAdrien Mazarguil  * @param dpdk_rxq
5822e22920bSAdrien Mazarguil  *   Generic pointer to RX queue structure.
5832e22920bSAdrien Mazarguil  * @param[out] pkts
5842e22920bSAdrien Mazarguil  *   Array to store received packets.
5852e22920bSAdrien Mazarguil  * @param pkts_n
5862e22920bSAdrien Mazarguil  *   Maximum number of packets in array.
5872e22920bSAdrien Mazarguil  *
5882e22920bSAdrien Mazarguil  * @return
5892e22920bSAdrien Mazarguil  *   Number of packets successfully received (<= pkts_n).
5902e22920bSAdrien Mazarguil  */
5912e22920bSAdrien Mazarguil uint16_t
5922e22920bSAdrien Mazarguil mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
5932e22920bSAdrien Mazarguil {
5946218063bSNélio Laranjeiro 	struct rxq *rxq = dpdk_rxq;
5952e22920bSAdrien Mazarguil 	unsigned int pkts_ret = 0;
5966218063bSNélio Laranjeiro 	unsigned int i;
5976218063bSNélio Laranjeiro 	unsigned int rq_ci = rxq->rq_ci;
5986218063bSNélio Laranjeiro 	const unsigned int elts_n = rxq->elts_n;
5996218063bSNélio Laranjeiro 	const unsigned int wqe_cnt = elts_n - 1;
6002e22920bSAdrien Mazarguil 
6012e22920bSAdrien Mazarguil 	for (i = 0; (i != pkts_n); ++i) {
6026218063bSNélio Laranjeiro 		unsigned int idx = rq_ci & wqe_cnt;
6032e22920bSAdrien Mazarguil 		struct rte_mbuf *rep;
6046218063bSNélio Laranjeiro 		struct rte_mbuf *pkt;
6056218063bSNélio Laranjeiro 		unsigned int len;
6066218063bSNélio Laranjeiro 		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
6076218063bSNélio Laranjeiro 		volatile struct mlx5_cqe64 *cqe =
6086218063bSNélio Laranjeiro 			&(*rxq->cqes)[rxq->cq_ci & wqe_cnt].cqe64;
6092e22920bSAdrien Mazarguil 
6106218063bSNélio Laranjeiro 		pkt = (*rxq->elts)[idx];
6116218063bSNélio Laranjeiro 		rte_prefetch0(cqe);
612fbfd9955SOlivier Matz 		rep = rte_mbuf_raw_alloc(rxq->mp);
6132e22920bSAdrien Mazarguil 		if (unlikely(rep == NULL)) {
61487011737SAdrien Mazarguil 			++rxq->stats.rx_nombuf;
6156218063bSNélio Laranjeiro 			break;
6162e22920bSAdrien Mazarguil 		}
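		/*
		 * 'pkt' is the mbuf handed back to the application; 'rep' is
		 * its freshly allocated replacement in the ring.
		 */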
6176218063bSNélio Laranjeiro 		SET_DATA_OFF(rep, RTE_PKTMBUF_HEADROOM);
6186218063bSNélio Laranjeiro 		NB_SEGS(rep) = 1;
6196218063bSNélio Laranjeiro 		PORT(rep) = rxq->port_id;
6206218063bSNélio Laranjeiro 		NEXT(rep) = NULL;
6216218063bSNélio Laranjeiro 		len = rx_poll_len(rxq);
6226218063bSNélio Laranjeiro 		if (unlikely(len == 0)) {
6236218063bSNélio Laranjeiro 			rte_mbuf_refcnt_set(rep, 0);
6246218063bSNélio Laranjeiro 			__rte_mbuf_raw_free(rep);
6256218063bSNélio Laranjeiro 			break;
6266218063bSNélio Laranjeiro 		}
6276218063bSNélio Laranjeiro 		/*
6286218063bSNélio Laranjeiro 		 * Fill NIC descriptor with the new buffer.  The lkey and size
6296218063bSNélio Laranjeiro 		 * of the buffers are already known, only the buffer address
6306218063bSNélio Laranjeiro 		 * changes.
6316218063bSNélio Laranjeiro 		 */
6326218063bSNélio Laranjeiro 		wqe->addr = htonll((uintptr_t)rep->buf_addr +
6336218063bSNélio Laranjeiro 				   RTE_PKTMBUF_HEADROOM);
6346218063bSNélio Laranjeiro 		(*rxq->elts)[idx] = rep;
6356218063bSNélio Laranjeiro 		/* Update pkt information. */
6366218063bSNélio Laranjeiro 		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
6376218063bSNélio Laranjeiro 		    rxq->crc_present) {
6386218063bSNélio Laranjeiro 			if (rxq->csum) {
6396218063bSNélio Laranjeiro 				pkt->packet_type = rxq_cq_to_pkt_type(cqe);
6406218063bSNélio Laranjeiro 				pkt->ol_flags = rxq_cq_to_ol_flags(rxq, cqe);
6416218063bSNélio Laranjeiro 			}
6426218063bSNélio Laranjeiro 			if (cqe->l4_hdr_type_etc & MLX5_CQE_VLAN_STRIPPED) {
6436218063bSNélio Laranjeiro 				pkt->ol_flags |= PKT_RX_VLAN_PKT |
644b37b528dSOlivier Matz 					PKT_RX_VLAN_STRIPPED;
6456218063bSNélio Laranjeiro 				pkt->vlan_tci = ntohs(cqe->vlan_info);
646f3db9489SYaacov Hazan 			}
6476218063bSNélio Laranjeiro 			if (rxq->crc_present)
6486218063bSNélio Laranjeiro 				len -= ETHER_CRC_LEN;
649081f7eaeSNelio Laranjeiro 		}
6506218063bSNélio Laranjeiro 		PKT_LEN(pkt) = len;
6516218063bSNélio Laranjeiro 		DATA_LEN(pkt) = len;
65287011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
65387011737SAdrien Mazarguil 		/* Increment bytes counter. */
65487011737SAdrien Mazarguil 		rxq->stats.ibytes += len;
65587011737SAdrien Mazarguil #endif
6566218063bSNélio Laranjeiro 		/* Return packet. */
6576218063bSNélio Laranjeiro 		*(pkts++) = pkt;
6586218063bSNélio Laranjeiro 		++pkts_ret;
6596218063bSNélio Laranjeiro 		++rq_ci;
6602e22920bSAdrien Mazarguil 	}
6616218063bSNélio Laranjeiro 	if (unlikely((i == 0) && (rq_ci == rxq->rq_ci)))
6622e22920bSAdrien Mazarguil 		return 0;
6632e22920bSAdrien Mazarguil 	/* Repost WRs. */
6642e22920bSAdrien Mazarguil #ifdef DEBUG_RECV
6652e22920bSAdrien Mazarguil 	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
6662e22920bSAdrien Mazarguil #endif
6676218063bSNélio Laranjeiro 	/* Update the consumer index. */
6686218063bSNélio Laranjeiro 	rxq->rq_ci = rq_ci;
6696218063bSNélio Laranjeiro 	rte_wmb();
6706218063bSNélio Laranjeiro 	*rxq->cq_db = htonl(rxq->cq_ci);
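	/* Order the CQ doorbell update before advertising new buffers via the RQ doorbell. */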
6716218063bSNélio Laranjeiro 	rte_wmb();
6726218063bSNélio Laranjeiro 	*rxq->rq_db = htonl(rxq->rq_ci);
67387011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
67487011737SAdrien Mazarguil 	/* Increment packets counter. */
67587011737SAdrien Mazarguil 	rxq->stats.ipackets += pkts_ret;
67687011737SAdrien Mazarguil #endif
6772e22920bSAdrien Mazarguil 	return pkts_ret;
6782e22920bSAdrien Mazarguil }
6792e22920bSAdrien Mazarguil 
6802e22920bSAdrien Mazarguil /**
6812e22920bSAdrien Mazarguil  * Dummy DPDK callback for TX.
6822e22920bSAdrien Mazarguil  *
6832e22920bSAdrien Mazarguil  * This function is used to temporarily replace the real callback during
6842e22920bSAdrien Mazarguil  * unsafe control operations on the queue, or in case of error.
6852e22920bSAdrien Mazarguil  *
6862e22920bSAdrien Mazarguil  * @param dpdk_txq
6872e22920bSAdrien Mazarguil  *   Generic pointer to TX queue structure.
6882e22920bSAdrien Mazarguil  * @param[in] pkts
6892e22920bSAdrien Mazarguil  *   Packets to transmit.
6902e22920bSAdrien Mazarguil  * @param pkts_n
6912e22920bSAdrien Mazarguil  *   Number of packets in array.
6922e22920bSAdrien Mazarguil  *
6932e22920bSAdrien Mazarguil  * @return
6942e22920bSAdrien Mazarguil  *   Number of packets successfully transmitted (<= pkts_n).
6952e22920bSAdrien Mazarguil  */
6962e22920bSAdrien Mazarguil uint16_t
6972e22920bSAdrien Mazarguil removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
6982e22920bSAdrien Mazarguil {
6992e22920bSAdrien Mazarguil 	(void)dpdk_txq;
7002e22920bSAdrien Mazarguil 	(void)pkts;
7012e22920bSAdrien Mazarguil 	(void)pkts_n;
7022e22920bSAdrien Mazarguil 	return 0;
7032e22920bSAdrien Mazarguil }
7042e22920bSAdrien Mazarguil 
7052e22920bSAdrien Mazarguil /**
7062e22920bSAdrien Mazarguil  * Dummy DPDK callback for RX.
7072e22920bSAdrien Mazarguil  *
7082e22920bSAdrien Mazarguil  * This function is used to temporarily replace the real callback during
7092e22920bSAdrien Mazarguil  * unsafe control operations on the queue, or in case of error.
7102e22920bSAdrien Mazarguil  *
7112e22920bSAdrien Mazarguil  * @param dpdk_rxq
7122e22920bSAdrien Mazarguil  *   Generic pointer to RX queue structure.
7132e22920bSAdrien Mazarguil  * @param[out] pkts
7142e22920bSAdrien Mazarguil  *   Array to store received packets.
7152e22920bSAdrien Mazarguil  * @param pkts_n
7162e22920bSAdrien Mazarguil  *   Maximum number of packets in array.
7172e22920bSAdrien Mazarguil  *
7182e22920bSAdrien Mazarguil  * @return
7192e22920bSAdrien Mazarguil  *   Number of packets successfully received (<= pkts_n).
7202e22920bSAdrien Mazarguil  */
7212e22920bSAdrien Mazarguil uint16_t
7222e22920bSAdrien Mazarguil removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
7232e22920bSAdrien Mazarguil {
7242e22920bSAdrien Mazarguil 	(void)dpdk_rxq;
7252e22920bSAdrien Mazarguil 	(void)pkts;
7262e22920bSAdrien Mazarguil 	(void)pkts_n;
7272e22920bSAdrien Mazarguil 	return 0;
7282e22920bSAdrien Mazarguil }
729