xref: /dpdk/drivers/net/mlx5/mlx5_rxtx.c (revision 0e8679fcddc45902cd8aa1d0fbfa542fee11b074)
12e22920bSAdrien Mazarguil /*-
22e22920bSAdrien Mazarguil  *   BSD LICENSE
32e22920bSAdrien Mazarguil  *
42e22920bSAdrien Mazarguil  *   Copyright 2015 6WIND S.A.
52e22920bSAdrien Mazarguil  *   Copyright 2015 Mellanox.
62e22920bSAdrien Mazarguil  *
72e22920bSAdrien Mazarguil  *   Redistribution and use in source and binary forms, with or without
82e22920bSAdrien Mazarguil  *   modification, are permitted provided that the following conditions
92e22920bSAdrien Mazarguil  *   are met:
102e22920bSAdrien Mazarguil  *
112e22920bSAdrien Mazarguil  *     * Redistributions of source code must retain the above copyright
122e22920bSAdrien Mazarguil  *       notice, this list of conditions and the following disclaimer.
132e22920bSAdrien Mazarguil  *     * Redistributions in binary form must reproduce the above copyright
142e22920bSAdrien Mazarguil  *       notice, this list of conditions and the following disclaimer in
152e22920bSAdrien Mazarguil  *       the documentation and/or other materials provided with the
162e22920bSAdrien Mazarguil  *       distribution.
172e22920bSAdrien Mazarguil  *     * Neither the name of 6WIND S.A. nor the names of its
182e22920bSAdrien Mazarguil  *       contributors may be used to endorse or promote products derived
192e22920bSAdrien Mazarguil  *       from this software without specific prior written permission.
202e22920bSAdrien Mazarguil  *
212e22920bSAdrien Mazarguil  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
222e22920bSAdrien Mazarguil  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
232e22920bSAdrien Mazarguil  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
242e22920bSAdrien Mazarguil  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
252e22920bSAdrien Mazarguil  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
262e22920bSAdrien Mazarguil  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
272e22920bSAdrien Mazarguil  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
282e22920bSAdrien Mazarguil  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
292e22920bSAdrien Mazarguil  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
302e22920bSAdrien Mazarguil  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
312e22920bSAdrien Mazarguil  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
322e22920bSAdrien Mazarguil  */
332e22920bSAdrien Mazarguil 
342e22920bSAdrien Mazarguil #include <assert.h>
352e22920bSAdrien Mazarguil #include <stdint.h>
362e22920bSAdrien Mazarguil #include <string.h>
372e22920bSAdrien Mazarguil #include <stdlib.h>
382e22920bSAdrien Mazarguil 
392e22920bSAdrien Mazarguil /* Verbs header. */
402e22920bSAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
412e22920bSAdrien Mazarguil #ifdef PEDANTIC
422e22920bSAdrien Mazarguil #pragma GCC diagnostic ignored "-pedantic"
432e22920bSAdrien Mazarguil #endif
442e22920bSAdrien Mazarguil #include <infiniband/verbs.h>
456218063bSNélio Laranjeiro #include <infiniband/mlx5_hw.h>
466218063bSNélio Laranjeiro #include <infiniband/arch.h>
472e22920bSAdrien Mazarguil #ifdef PEDANTIC
482e22920bSAdrien Mazarguil #pragma GCC diagnostic error "-pedantic"
492e22920bSAdrien Mazarguil #endif
502e22920bSAdrien Mazarguil 
512e22920bSAdrien Mazarguil /* DPDK headers don't like -pedantic. */
522e22920bSAdrien Mazarguil #ifdef PEDANTIC
532e22920bSAdrien Mazarguil #pragma GCC diagnostic ignored "-pedantic"
542e22920bSAdrien Mazarguil #endif
552e22920bSAdrien Mazarguil #include <rte_mbuf.h>
562e22920bSAdrien Mazarguil #include <rte_mempool.h>
572e22920bSAdrien Mazarguil #include <rte_prefetch.h>
582e22920bSAdrien Mazarguil #include <rte_common.h>
592e22920bSAdrien Mazarguil #include <rte_branch_prediction.h>
606218063bSNélio Laranjeiro #include <rte_ether.h>
612e22920bSAdrien Mazarguil #ifdef PEDANTIC
622e22920bSAdrien Mazarguil #pragma GCC diagnostic error "-pedantic"
632e22920bSAdrien Mazarguil #endif
642e22920bSAdrien Mazarguil 
652e22920bSAdrien Mazarguil #include "mlx5.h"
662e22920bSAdrien Mazarguil #include "mlx5_utils.h"
672e22920bSAdrien Mazarguil #include "mlx5_rxtx.h"
68f3db9489SYaacov Hazan #include "mlx5_autoconf.h"
692e22920bSAdrien Mazarguil #include "mlx5_defs.h"
706218063bSNélio Laranjeiro #include "mlx5_prm.h"
716218063bSNélio Laranjeiro 
7299c12dccSNélio Laranjeiro #ifndef NDEBUG
7399c12dccSNélio Laranjeiro 
7499c12dccSNélio Laranjeiro /**
7599c12dccSNélio Laranjeiro  * Verify or set magic value in CQE.
7699c12dccSNélio Laranjeiro  *
7799c12dccSNélio Laranjeiro  * @param cqe
7899c12dccSNélio Laranjeiro  *   Pointer to CQE.
7999c12dccSNélio Laranjeiro  *
8099c12dccSNélio Laranjeiro  * @return
8199c12dccSNélio Laranjeiro  *   0 the first time (the magic value is then stored), 1 on subsequent calls.
8299c12dccSNélio Laranjeiro  */
8399c12dccSNélio Laranjeiro static inline int
8499c12dccSNélio Laranjeiro check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
8599c12dccSNélio Laranjeiro {
8699c12dccSNélio Laranjeiro 	static const uint8_t magic[] = "seen";
8799c12dccSNélio Laranjeiro 	volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40;
8899c12dccSNélio Laranjeiro 	int ret = 1;
8999c12dccSNélio Laranjeiro 	unsigned int i;
9099c12dccSNélio Laranjeiro 
9199c12dccSNélio Laranjeiro 	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
9299c12dccSNélio Laranjeiro 		if (!ret || (*buf)[i] != magic[i]) {
9399c12dccSNélio Laranjeiro 			ret = 0;
9499c12dccSNélio Laranjeiro 			(*buf)[i] = magic[i];
9599c12dccSNélio Laranjeiro 		}
9699c12dccSNélio Laranjeiro 	return ret;
9799c12dccSNélio Laranjeiro }
9899c12dccSNélio Laranjeiro 
9999c12dccSNélio Laranjeiro #endif /* NDEBUG */
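
/*
 * Illustrative sketch, not part of the driver: how a debug-only error path
 * typically pairs with check_cqe64_seen() to report a bad CQE exactly once.
 * The function name below is an assumption made for the example.
 */
#ifndef NDEBUG
static inline void
example_report_cqe_once(volatile struct mlx5_cqe64 *cqe)
{
	/* Returns 0 on the first call only; the magic value is then stored. */
	if (!check_cqe64_seen(cqe))
		ERROR("example: reporting this CQE only once");
}
#endif /* NDEBUG */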
1006218063bSNélio Laranjeiro 
1016218063bSNélio Laranjeiro static inline int
10299c12dccSNélio Laranjeiro check_cqe64(volatile struct mlx5_cqe64 *cqe,
10399c12dccSNélio Laranjeiro 	    unsigned int cqes_n, const uint16_t ci)
10499c12dccSNélio Laranjeiro 	    __attribute__((always_inline));
1056218063bSNélio Laranjeiro 
10699c12dccSNélio Laranjeiro /**
10799c12dccSNélio Laranjeiro  * Check whether CQE is valid.
10899c12dccSNélio Laranjeiro  *
10999c12dccSNélio Laranjeiro  * @param cqe
11099c12dccSNélio Laranjeiro  *   Pointer to CQE.
11199c12dccSNélio Laranjeiro  * @param cqes_n
11299c12dccSNélio Laranjeiro  *   Size of completion queue.
11399c12dccSNélio Laranjeiro  * @param ci
11499c12dccSNélio Laranjeiro  *   Consumer index.
11599c12dccSNélio Laranjeiro  *
11699c12dccSNélio Laranjeiro  * @return
11799c12dccSNélio Laranjeiro  *   0 on success, 1 on failure.
11899c12dccSNélio Laranjeiro  */
11999c12dccSNélio Laranjeiro static inline int
12099c12dccSNélio Laranjeiro check_cqe64(volatile struct mlx5_cqe64 *cqe,
12199c12dccSNélio Laranjeiro 		unsigned int cqes_n, const uint16_t ci)
1226218063bSNélio Laranjeiro {
12399c12dccSNélio Laranjeiro 	uint16_t idx = ci & cqes_n;
12499c12dccSNélio Laranjeiro 	uint8_t op_own = cqe->op_own;
12599c12dccSNélio Laranjeiro 	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
12699c12dccSNélio Laranjeiro 	uint8_t op_code = MLX5_CQE_OPCODE(op_own);
1276218063bSNélio Laranjeiro 
12899c12dccSNélio Laranjeiro 	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
12999c12dccSNélio Laranjeiro 		return 1; /* No CQE. */
13099c12dccSNélio Laranjeiro #ifndef NDEBUG
13199c12dccSNélio Laranjeiro 	if ((op_code == MLX5_CQE_RESP_ERR) ||
13299c12dccSNélio Laranjeiro 	    (op_code == MLX5_CQE_REQ_ERR)) {
13399c12dccSNélio Laranjeiro 		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
13499c12dccSNélio Laranjeiro 		uint8_t syndrome = err_cqe->syndrome;
13599c12dccSNélio Laranjeiro 
13699c12dccSNélio Laranjeiro 		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
13799c12dccSNélio Laranjeiro 		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
13899c12dccSNélio Laranjeiro 			return 0;
13999c12dccSNélio Laranjeiro 		if (!check_cqe64_seen(cqe))
14099c12dccSNélio Laranjeiro 			ERROR("unexpected CQE error %u (0x%02x)"
14199c12dccSNélio Laranjeiro 			      " syndrome 0x%02x",
14299c12dccSNélio Laranjeiro 			      op_code, op_code, syndrome);
14399c12dccSNélio Laranjeiro 		return 1;
14499c12dccSNélio Laranjeiro 	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
14599c12dccSNélio Laranjeiro 		   (op_code != MLX5_CQE_REQ)) {
14699c12dccSNélio Laranjeiro 		if (!check_cqe64_seen(cqe))
14799c12dccSNélio Laranjeiro 			ERROR("unexpected CQE opcode %u (0x%02x)",
14899c12dccSNélio Laranjeiro 			      op_code, op_code);
14999c12dccSNélio Laranjeiro 		return 1;
1506218063bSNélio Laranjeiro 	}
15199c12dccSNélio Laranjeiro #endif /* NDEBUG */
15299c12dccSNélio Laranjeiro 	return 0;
1536218063bSNélio Laranjeiro }
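
/*
 * Illustrative sketch (an assumption, not part of the driver): the owner
 * test above. With cqes_n a power of two, the expected owner bit flips each
 * time the consumer index completes a pass over the ring, which is exactly
 * what "ci & cqes_n" extracts.
 */
static inline int
example_expected_cqe_owner(unsigned int cqes_n, uint16_t ci)
{
	/* 0 on even passes over the CQ ring, 1 on odd passes. */
	return !!(ci & cqes_n);
}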
1542e22920bSAdrien Mazarguil 
155a6ca35aaSNélio Laranjeiro static inline void
156a6ca35aaSNélio Laranjeiro txq_complete(struct txq *txq) __attribute__((always_inline));
157a6ca35aaSNélio Laranjeiro 
1582e22920bSAdrien Mazarguil /**
1592e22920bSAdrien Mazarguil  * Manage TX completions.
1602e22920bSAdrien Mazarguil  *
1612e22920bSAdrien Mazarguil  * When sending a burst, mlx5_tx_burst() posts several WRs; completed ones are reclaimed here and their mbufs freed.
1622e22920bSAdrien Mazarguil  *
1632e22920bSAdrien Mazarguil  * @param txq
1642e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
1652e22920bSAdrien Mazarguil  */
166a6ca35aaSNélio Laranjeiro static inline void
1672e22920bSAdrien Mazarguil txq_complete(struct txq *txq)
1682e22920bSAdrien Mazarguil {
1692e22920bSAdrien Mazarguil 	const unsigned int elts_n = txq->elts_n;
1701d88ba17SNélio Laranjeiro 	const unsigned int cqe_n = txq->cqe_n;
17199c12dccSNélio Laranjeiro 	const unsigned int cqe_cnt = cqe_n - 1;
1721d88ba17SNélio Laranjeiro 	uint16_t elts_free = txq->elts_tail;
1731d88ba17SNélio Laranjeiro 	uint16_t elts_tail;
1741d88ba17SNélio Laranjeiro 	uint16_t cq_ci = txq->cq_ci;
175c305090bSAdrien Mazarguil 	volatile struct mlx5_cqe64 *cqe = NULL;
176c305090bSAdrien Mazarguil 	volatile union mlx5_wqe *wqe;
1772e22920bSAdrien Mazarguil 
17899c12dccSNélio Laranjeiro 	do {
179c305090bSAdrien Mazarguil 		volatile struct mlx5_cqe64 *tmp;
1801d88ba17SNélio Laranjeiro 
181c305090bSAdrien Mazarguil 		tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64;
182c305090bSAdrien Mazarguil 		if (check_cqe64(tmp, cqe_n, cq_ci))
1831d88ba17SNélio Laranjeiro 			break;
184c305090bSAdrien Mazarguil 		cqe = tmp;
18599c12dccSNélio Laranjeiro #ifndef NDEBUG
18699c12dccSNélio Laranjeiro 		if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
18799c12dccSNélio Laranjeiro 			if (!check_cqe64_seen(cqe))
18899c12dccSNélio Laranjeiro 				ERROR("unexpected compressed CQE, TX stopped");
18999c12dccSNélio Laranjeiro 			return;
1902e22920bSAdrien Mazarguil 		}
19199c12dccSNélio Laranjeiro 		if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
19299c12dccSNélio Laranjeiro 		    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
19399c12dccSNélio Laranjeiro 			if (!check_cqe64_seen(cqe))
19499c12dccSNélio Laranjeiro 				ERROR("unexpected error CQE, TX stopped");
19599c12dccSNélio Laranjeiro 			return;
19699c12dccSNélio Laranjeiro 		}
19799c12dccSNélio Laranjeiro #endif /* NDEBUG */
19899c12dccSNélio Laranjeiro 		++cq_ci;
19999c12dccSNélio Laranjeiro 	} while (1);
200c305090bSAdrien Mazarguil 	if (unlikely(cqe == NULL))
2011d88ba17SNélio Laranjeiro 		return;
202c305090bSAdrien Mazarguil 	wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)];
203c305090bSAdrien Mazarguil 	elts_tail = wqe->wqe.ctrl.data[3];
204c305090bSAdrien Mazarguil 	assert(elts_tail < txq->wqe_n);
2051d88ba17SNélio Laranjeiro 	/* Free buffers. */
206c305090bSAdrien Mazarguil 	while (elts_free != elts_tail) {
2071d88ba17SNélio Laranjeiro 		struct rte_mbuf *elt = (*txq->elts)[elts_free];
208a859e8a9SNelio Laranjeiro 		unsigned int elts_free_next =
2091d88ba17SNélio Laranjeiro 			(elts_free + 1) & (elts_n - 1);
2101d88ba17SNélio Laranjeiro 		struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];
211a859e8a9SNelio Laranjeiro 
212b185e63fSAdrien Mazarguil #ifndef NDEBUG
213b185e63fSAdrien Mazarguil 		/* Poisoning. */
2141d88ba17SNélio Laranjeiro 		memset(&(*txq->elts)[elts_free],
2151d88ba17SNélio Laranjeiro 		       0x66,
2161d88ba17SNélio Laranjeiro 		       sizeof((*txq->elts)[elts_free]));
217b185e63fSAdrien Mazarguil #endif
2181d88ba17SNélio Laranjeiro 		RTE_MBUF_PREFETCH_TO_FREE(elt_next);
2191d88ba17SNélio Laranjeiro 		/* Only one segment needs to be freed. */
2201d88ba17SNélio Laranjeiro 		rte_pktmbuf_free_seg(elt);
221a859e8a9SNelio Laranjeiro 		elts_free = elts_free_next;
222c305090bSAdrien Mazarguil 	}
2231d88ba17SNélio Laranjeiro 	txq->cq_ci = cq_ci;
2242e22920bSAdrien Mazarguil 	txq->elts_tail = elts_tail;
2251d88ba17SNélio Laranjeiro 	/* Update the consumer index. */
2261d88ba17SNélio Laranjeiro 	rte_wmb();
2271d88ba17SNélio Laranjeiro 	*txq->cq_db = htonl(cq_ci);
2282e22920bSAdrien Mazarguil }
2292e22920bSAdrien Mazarguil 
2302e22920bSAdrien Mazarguil /**
2318340392eSAdrien Mazarguil  * Get Memory Pool (MP) from mbuf. If the mbuf is indirect, the pool of the
2328340392eSAdrien Mazarguil  * underlying direct mbuf (where the data actually resides) is returned instead.
2338340392eSAdrien Mazarguil  *
2348340392eSAdrien Mazarguil  * @param buf
2358340392eSAdrien Mazarguil  *   Pointer to mbuf.
2368340392eSAdrien Mazarguil  *
2378340392eSAdrien Mazarguil  * @return
2388340392eSAdrien Mazarguil  *   Memory pool where data is located for given mbuf.
2398340392eSAdrien Mazarguil  */
2408340392eSAdrien Mazarguil static struct rte_mempool *
2418340392eSAdrien Mazarguil txq_mb2mp(struct rte_mbuf *buf)
2428340392eSAdrien Mazarguil {
2438340392eSAdrien Mazarguil 	if (unlikely(RTE_MBUF_INDIRECT(buf)))
2448340392eSAdrien Mazarguil 		return rte_mbuf_from_indirect(buf)->pool;
2458340392eSAdrien Mazarguil 	return buf->pool;
2468340392eSAdrien Mazarguil }
2478340392eSAdrien Mazarguil 
248491770faSNélio Laranjeiro static inline uint32_t
249491770faSNélio Laranjeiro txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
250491770faSNélio Laranjeiro 	__attribute__((always_inline));
251491770faSNélio Laranjeiro 
2528340392eSAdrien Mazarguil /**
2532e22920bSAdrien Mazarguil  * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
2542e22920bSAdrien Mazarguil  * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
2552e22920bSAdrien Mazarguil  * remove an entry first.
2562e22920bSAdrien Mazarguil  *
2572e22920bSAdrien Mazarguil  * @param txq
2582e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
2592e22920bSAdrien Mazarguil  * @param[in] mp
2602e22920bSAdrien Mazarguil  *   Memory Pool for which a Memory Region lkey must be returned.
2612e22920bSAdrien Mazarguil  *
2622e22920bSAdrien Mazarguil  * @return
2632e22920bSAdrien Mazarguil  *   mr->lkey on success, (uint32_t)-1 on failure.
2642e22920bSAdrien Mazarguil  */
265491770faSNélio Laranjeiro static inline uint32_t
266d1d914ebSOlivier Matz txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
2672e22920bSAdrien Mazarguil {
2682e22920bSAdrien Mazarguil 	unsigned int i;
269491770faSNélio Laranjeiro 	uint32_t lkey = (uint32_t)-1;
2702e22920bSAdrien Mazarguil 
2712e22920bSAdrien Mazarguil 	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
2722e22920bSAdrien Mazarguil 		if (unlikely(txq->mp2mr[i].mp == NULL)) {
2732e22920bSAdrien Mazarguil 			/* Unknown MP, add a new MR for it. */
2742e22920bSAdrien Mazarguil 			break;
2752e22920bSAdrien Mazarguil 		}
2762e22920bSAdrien Mazarguil 		if (txq->mp2mr[i].mp == mp) {
2772e22920bSAdrien Mazarguil 			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
2781d88ba17SNélio Laranjeiro 			assert(htonl(txq->mp2mr[i].mr->lkey) ==
2791d88ba17SNélio Laranjeiro 			       txq->mp2mr[i].lkey);
280491770faSNélio Laranjeiro 			lkey = txq->mp2mr[i].lkey;
281491770faSNélio Laranjeiro 			break;
2822e22920bSAdrien Mazarguil 		}
2832e22920bSAdrien Mazarguil 	}
284491770faSNélio Laranjeiro 	if (unlikely(lkey == (uint32_t)-1))
285491770faSNélio Laranjeiro 		lkey = txq_mp2mr_reg(txq, mp, i);
286491770faSNélio Laranjeiro 	return lkey;
2870a3b350dSOlga Shern }
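
/*
 * Illustrative usage sketch (an assumption, not part of the driver): the
 * lkey of a data segment always comes from the mempool owning the mbuf
 * data, which is why txq_mb2mp() and txq_mp2mr() are combined in the burst
 * functions below.
 */
static inline uint32_t
example_buf_lkey(struct txq *txq, struct rte_mbuf *buf)
{
	return txq_mp2mr(txq, txq_mb2mp(buf));
}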
2880a3b350dSOlga Shern 
289e192ef80SYaacov Hazan /**
2901d88ba17SNélio Laranjeiro  * Write a regular WQE.
291e192ef80SYaacov Hazan  *
2921d88ba17SNélio Laranjeiro  * @param txq
2931d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
2941d88ba17SNélio Laranjeiro  * @param wqe
2951d88ba17SNélio Laranjeiro  *   Pointer to the WQE to fill.
296d772d440SNélio Laranjeiro  * @param buf
297d772d440SNélio Laranjeiro  *   Buffer.
2981d88ba17SNélio Laranjeiro  * @param length
2991d88ba17SNélio Laranjeiro  *   Packet length.
300*0e8679fcSNélio Laranjeiro  *
301*0e8679fcSNélio Laranjeiro  * @return
302*0e8679fcSNélio Laranjeiro  *   Number of DS (16-byte) elements consumed by the WQE.
303e192ef80SYaacov Hazan  */
304*0e8679fcSNélio Laranjeiro static inline unsigned int
3051d88ba17SNélio Laranjeiro mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe,
306*0e8679fcSNélio Laranjeiro 	       struct rte_mbuf *buf, uint32_t length)
307e192ef80SYaacov Hazan {
308*0e8679fcSNélio Laranjeiro 	uintptr_t raw = (uintptr_t)&wqe->wqe.eseg.inline_hdr_start;
309*0e8679fcSNélio Laranjeiro 	uint16_t ds;
310*0e8679fcSNélio Laranjeiro 	uint16_t pkt_inline_sz = 16;
311d772d440SNélio Laranjeiro 	uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);
312*0e8679fcSNélio Laranjeiro 	struct mlx5_wqe_data_seg *dseg = NULL;
313e192ef80SYaacov Hazan 
314*0e8679fcSNélio Laranjeiro 	assert(length >= 16);
315*0e8679fcSNélio Laranjeiro 	/* Start with the known and common part of the WQE structure. */
316*0e8679fcSNélio Laranjeiro 	wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
317*0e8679fcSNélio Laranjeiro 	wqe->wqe.ctrl.data[2] = 0;
318*0e8679fcSNélio Laranjeiro 	wqe->wqe.ctrl.data[3] = 0;
319*0e8679fcSNélio Laranjeiro 	wqe->wqe.eseg.rsvd0 = 0;
320*0e8679fcSNélio Laranjeiro 	wqe->wqe.eseg.rsvd1 = 0;
321*0e8679fcSNélio Laranjeiro 	wqe->wqe.eseg.mss = 0;
322*0e8679fcSNélio Laranjeiro 	wqe->wqe.eseg.rsvd2 = 0;
323*0e8679fcSNélio Laranjeiro 	/* Start by copying the Ethernet Header. */
324*0e8679fcSNélio Laranjeiro 	rte_mov16((uint8_t *)raw, (uint8_t *)addr);
325d772d440SNélio Laranjeiro 	length -= 16;
326*0e8679fcSNélio Laranjeiro 	addr += 16;
327*0e8679fcSNélio Laranjeiro 	/* Replace the Ethernet type by the VLAN if necessary. */
328d772d440SNélio Laranjeiro 	if (buf->ol_flags & PKT_TX_VLAN_PKT) {
329d772d440SNélio Laranjeiro 		uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
330e192ef80SYaacov Hazan 
331*0e8679fcSNélio Laranjeiro 		memcpy((uint8_t *)(raw + 16 - sizeof(vlan)),
3321d88ba17SNélio Laranjeiro 		       &vlan, sizeof(vlan));
333d772d440SNélio Laranjeiro 		addr -= sizeof(vlan);
334d772d440SNélio Laranjeiro 		length += sizeof(vlan);
335d772d440SNélio Laranjeiro 	}
336*0e8679fcSNélio Laranjeiro 	/* Inline if enough room. */
337*0e8679fcSNélio Laranjeiro 	if (txq->max_inline != 0) {
338*0e8679fcSNélio Laranjeiro 		uintptr_t end = (uintptr_t)&(*txq->wqes)[txq->wqe_n];
339*0e8679fcSNélio Laranjeiro 		uint16_t max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
340*0e8679fcSNélio Laranjeiro 		uint16_t room;
341*0e8679fcSNélio Laranjeiro 
342*0e8679fcSNélio Laranjeiro 		raw += 16;
343*0e8679fcSNélio Laranjeiro 		room = end - (uintptr_t)raw;
344*0e8679fcSNélio Laranjeiro 		if (room > max_inline) {
345*0e8679fcSNélio Laranjeiro 			uintptr_t addr_end = (addr + max_inline) &
346*0e8679fcSNélio Laranjeiro 				~(RTE_CACHE_LINE_SIZE - 1);
347*0e8679fcSNélio Laranjeiro 			uint16_t copy_b = ((addr_end - addr) > length) ?
348*0e8679fcSNélio Laranjeiro 					  length :
349*0e8679fcSNélio Laranjeiro 					  (addr_end - addr);
350*0e8679fcSNélio Laranjeiro 
351*0e8679fcSNélio Laranjeiro 			rte_memcpy((void *)raw, (void *)addr, copy_b);
352*0e8679fcSNélio Laranjeiro 			addr += copy_b;
353*0e8679fcSNélio Laranjeiro 			length -= copy_b;
354*0e8679fcSNélio Laranjeiro 			pkt_inline_sz += copy_b;
355*0e8679fcSNélio Laranjeiro 			/* Sanity check. */
356*0e8679fcSNélio Laranjeiro 			assert(addr <= addr_end);
3571d88ba17SNélio Laranjeiro 		}
358*0e8679fcSNélio Laranjeiro 		/* Store the inlined packet size in the WQE. */
359*0e8679fcSNélio Laranjeiro 		wqe->wqe.eseg.inline_hdr_sz = htons(pkt_inline_sz);
3602a66cf37SYaacov Hazan 		/*
361*0e8679fcSNélio Laranjeiro 		 * Size in DS units: 2 for the control and Ethernet segments plus
362*0e8679fcSNélio Laranjeiro 		 * the inlined data, whose first 2 bytes already fit in the Ethernet segment.
3632a66cf37SYaacov Hazan 		 */
364*0e8679fcSNélio Laranjeiro 		ds = 2 + ((pkt_inline_sz - 2 + 15) / 16);
365*0e8679fcSNélio Laranjeiro 		if (length > 0) {
366*0e8679fcSNélio Laranjeiro 			dseg = (struct mlx5_wqe_data_seg *)
367*0e8679fcSNélio Laranjeiro 				((uintptr_t)wqe + (ds * 16));
368*0e8679fcSNélio Laranjeiro 			if ((uintptr_t)dseg >= end)
369*0e8679fcSNélio Laranjeiro 				dseg = (struct mlx5_wqe_data_seg *)
370*0e8679fcSNélio Laranjeiro 					((uintptr_t)&(*txq->wqes)[0]);
371*0e8679fcSNélio Laranjeiro 			goto use_dseg;
3722a66cf37SYaacov Hazan 		}
373*0e8679fcSNélio Laranjeiro 	} else {
374*0e8679fcSNélio Laranjeiro 		/* Add the remaining packet as a simple ds. */
375*0e8679fcSNélio Laranjeiro 		ds = 3;
376*0e8679fcSNélio Laranjeiro 		/*
377*0e8679fcSNélio Laranjeiro 		 * No inlining has been done in the packet, only the Ethernet
378*0e8679fcSNélio Laranjeiro 		 * header has been stored.
379*0e8679fcSNélio Laranjeiro 		 */
380*0e8679fcSNélio Laranjeiro 		wqe->wqe.eseg.inline_hdr_sz = htons(16);
381*0e8679fcSNélio Laranjeiro 		dseg = (struct mlx5_wqe_data_seg *)
382*0e8679fcSNélio Laranjeiro 			((uintptr_t)wqe + (ds * 16));
383*0e8679fcSNélio Laranjeiro use_dseg:
384*0e8679fcSNélio Laranjeiro 		*dseg = (struct mlx5_wqe_data_seg) {
385*0e8679fcSNélio Laranjeiro 			.addr = htonll(addr),
386*0e8679fcSNélio Laranjeiro 			.byte_count = htonl(length),
387*0e8679fcSNélio Laranjeiro 			.lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
388*0e8679fcSNélio Laranjeiro 		};
389*0e8679fcSNélio Laranjeiro 		++ds;
390*0e8679fcSNélio Laranjeiro 	}
391*0e8679fcSNélio Laranjeiro 	wqe->wqe.ctrl.data[1] = htonl(txq->qp_num_8s | ds);
392*0e8679fcSNélio Laranjeiro 	return ds;
3932a66cf37SYaacov Hazan }
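
/*
 * Illustrative sketch (an assumption, not part of the driver): DS
 * bookkeeping used by mlx5_wqe_write() and its caller. A DS is a 16-byte
 * unit; the control and Ethernet segments account for 2 of them, each data
 * segment for 1, and wqe_ci advances by whole 64-byte WQEs.
 */
static inline unsigned int
example_ds_to_wqe_count(unsigned int ds)
{
	/* One WQE holds 4 DS (64 / 16); round up as mlx5_tx_burst() does. */
	return (ds + 3) / 4;
}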
3942a66cf37SYaacov Hazan 
3952a66cf37SYaacov Hazan /**
3961d88ba17SNélio Laranjeiro  * Ring TX queue doorbell.
3971d88ba17SNélio Laranjeiro  *
3981d88ba17SNélio Laranjeiro  * @param txq
3991d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
4001d88ba17SNélio Laranjeiro  */
4011d88ba17SNélio Laranjeiro static inline void
4021d88ba17SNélio Laranjeiro mlx5_tx_dbrec(struct txq *txq)
4031d88ba17SNélio Laranjeiro {
4041d88ba17SNélio Laranjeiro 	uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
4051d88ba17SNélio Laranjeiro 	uint32_t data[4] = {
4061d88ba17SNélio Laranjeiro 		htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
4071d88ba17SNélio Laranjeiro 		htonl(txq->qp_num_8s),
4081d88ba17SNélio Laranjeiro 		0,
4091d88ba17SNélio Laranjeiro 		0,
4101d88ba17SNélio Laranjeiro 	};
4111d88ba17SNélio Laranjeiro 	rte_wmb();
4121d88ba17SNélio Laranjeiro 	*txq->qp_db = htonl(txq->wqe_ci);
4131d88ba17SNélio Laranjeiro 	/* Ensure ordering between DB record and BF copy. */
4141d88ba17SNélio Laranjeiro 	rte_wmb();
4151d88ba17SNélio Laranjeiro 	rte_mov16(dst, (uint8_t *)data);
4161d88ba17SNélio Laranjeiro 	txq->bf_offset ^= txq->bf_buf_size;
4171d88ba17SNélio Laranjeiro }
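
/*
 * Illustrative sketch (an assumption, not part of the driver): the XOR at
 * the end of mlx5_tx_dbrec() alternates between the two halves of the
 * BlueFlame buffer so that consecutive doorbells do not reuse the same
 * register copy before the device has consumed it.
 */
static inline unsigned int
example_bf_next_offset(unsigned int bf_offset, unsigned int bf_buf_size)
{
	/* Toggles between 0 and bf_buf_size on every doorbell. */
	return bf_offset ^ bf_buf_size;
}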
418e192ef80SYaacov Hazan 
4191d88ba17SNélio Laranjeiro /**
4201d88ba17SNélio Laranjeiro  * Prefetch a CQE.
4211d88ba17SNélio Laranjeiro  *
4221d88ba17SNélio Laranjeiro  * @param txq
4231d88ba17SNélio Laranjeiro  *   Pointer to TX queue structure.
4241d88ba17SNélio Laranjeiro  * @param ci
4251d88ba17SNélio Laranjeiro  *   CQE consumer index.
4261d88ba17SNélio Laranjeiro  */
4271d88ba17SNélio Laranjeiro static inline void
4281d88ba17SNélio Laranjeiro tx_prefetch_cqe(struct txq *txq, uint16_t ci)
4291d88ba17SNélio Laranjeiro {
4301d88ba17SNélio Laranjeiro 	volatile struct mlx5_cqe64 *cqe;
4311d88ba17SNélio Laranjeiro 
4321d88ba17SNélio Laranjeiro 	cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
4331d88ba17SNélio Laranjeiro 	rte_prefetch0(cqe);
434e192ef80SYaacov Hazan }
435e192ef80SYaacov Hazan 
4362e22920bSAdrien Mazarguil /**
4372a66cf37SYaacov Hazan  * Prefetch a WQE.
4382a66cf37SYaacov Hazan  *
4392a66cf37SYaacov Hazan  * @param txq
4402a66cf37SYaacov Hazan  *   Pointer to TX queue structure.
4412a66cf37SYaacov Hazan  * @param ci
4422a66cf37SYaacov Hazan  *   WQE consumer index.
4432a66cf37SYaacov Hazan  */
4442a66cf37SYaacov Hazan static inline void
4452a66cf37SYaacov Hazan tx_prefetch_wqe(struct txq *txq, uint16_t ci)
4462a66cf37SYaacov Hazan {
4472a66cf37SYaacov Hazan 	volatile union mlx5_wqe *wqe;
4482a66cf37SYaacov Hazan 
4492a66cf37SYaacov Hazan 	wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)];
4502a66cf37SYaacov Hazan 	rte_prefetch0(wqe);
4512a66cf37SYaacov Hazan }
4522a66cf37SYaacov Hazan 
4532a66cf37SYaacov Hazan /**
4542e22920bSAdrien Mazarguil  * DPDK callback for TX.
4552e22920bSAdrien Mazarguil  *
4562e22920bSAdrien Mazarguil  * @param dpdk_txq
4572e22920bSAdrien Mazarguil  *   Generic pointer to TX queue structure.
4582e22920bSAdrien Mazarguil  * @param[in] pkts
4592e22920bSAdrien Mazarguil  *   Packets to transmit.
4602e22920bSAdrien Mazarguil  * @param pkts_n
4612e22920bSAdrien Mazarguil  *   Number of packets in array.
4622e22920bSAdrien Mazarguil  *
4632e22920bSAdrien Mazarguil  * @return
4642e22920bSAdrien Mazarguil  *   Number of packets successfully transmitted (<= pkts_n).
4652e22920bSAdrien Mazarguil  */
4662e22920bSAdrien Mazarguil uint16_t
4672e22920bSAdrien Mazarguil mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
4682e22920bSAdrien Mazarguil {
4692e22920bSAdrien Mazarguil 	struct txq *txq = (struct txq *)dpdk_txq;
4701d88ba17SNélio Laranjeiro 	uint16_t elts_head = txq->elts_head;
4712e22920bSAdrien Mazarguil 	const unsigned int elts_n = txq->elts_n;
472c3d62cc9SAdrien Mazarguil 	unsigned int i = 0;
473a5bf6af9SAdrien Mazarguil 	unsigned int j = 0;
4742e22920bSAdrien Mazarguil 	unsigned int max;
475c305090bSAdrien Mazarguil 	unsigned int comp;
476a5bf6af9SAdrien Mazarguil 	volatile union mlx5_wqe *wqe = NULL;
4772e22920bSAdrien Mazarguil 
4781d88ba17SNélio Laranjeiro 	if (unlikely(!pkts_n))
4791d88ba17SNélio Laranjeiro 		return 0;
4805e1d11ecSNelio Laranjeiro 	/* Prefetch first packet cacheline. */
4811d88ba17SNélio Laranjeiro 	tx_prefetch_cqe(txq, txq->cq_ci);
4821d88ba17SNélio Laranjeiro 	tx_prefetch_cqe(txq, txq->cq_ci + 1);
483c3d62cc9SAdrien Mazarguil 	rte_prefetch0(*pkts);
4841d88ba17SNélio Laranjeiro 	/* Start processing. */
4852e22920bSAdrien Mazarguil 	txq_complete(txq);
4864f52bbfbSNelio Laranjeiro 	max = (elts_n - (elts_head - txq->elts_tail));
4872e22920bSAdrien Mazarguil 	if (max > elts_n)
4882e22920bSAdrien Mazarguil 		max -= elts_n;
489c3d62cc9SAdrien Mazarguil 	do {
490a5bf6af9SAdrien Mazarguil 		struct rte_mbuf *buf = *(pkts++);
491c3d62cc9SAdrien Mazarguil 		unsigned int elts_head_next;
492573f54afSNélio Laranjeiro 		uint32_t length;
493a5bf6af9SAdrien Mazarguil 		unsigned int segs_n = buf->nb_segs;
494a5bf6af9SAdrien Mazarguil 		volatile struct mlx5_wqe_data_seg *dseg;
495a5bf6af9SAdrien Mazarguil 		unsigned int ds = sizeof(*wqe) / 16;
4962e22920bSAdrien Mazarguil 
497c3d62cc9SAdrien Mazarguil 		/*
498c3d62cc9SAdrien Mazarguil 		 * Make sure there is enough room to store this packet and
499c3d62cc9SAdrien Mazarguil 		 * that one ring entry remains unused.
500c3d62cc9SAdrien Mazarguil 		 */
501a5bf6af9SAdrien Mazarguil 		assert(segs_n);
502a5bf6af9SAdrien Mazarguil 		if (max < segs_n + 1)
503c3d62cc9SAdrien Mazarguil 			break;
504a5bf6af9SAdrien Mazarguil 		max -= segs_n;
505c3d62cc9SAdrien Mazarguil 		--pkts_n;
506c3d62cc9SAdrien Mazarguil 		elts_head_next = (elts_head + 1) & (elts_n - 1);
5071d88ba17SNélio Laranjeiro 		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
508*0e8679fcSNélio Laranjeiro 		tx_prefetch_wqe(txq, txq->wqe_ci);
509*0e8679fcSNélio Laranjeiro 		tx_prefetch_wqe(txq, txq->wqe_ci + 1);
510c3d62cc9SAdrien Mazarguil 		if (pkts_n)
511c3d62cc9SAdrien Mazarguil 			rte_prefetch0(*pkts);
5122e22920bSAdrien Mazarguil 		length = DATA_LEN(buf);
5132e22920bSAdrien Mazarguil 		/* Update element. */
5141d88ba17SNélio Laranjeiro 		(*txq->elts)[elts_head] = buf;
5155e1d11ecSNelio Laranjeiro 		/* Prefetch next buffer data. */
516c3d62cc9SAdrien Mazarguil 		if (pkts_n)
517c3d62cc9SAdrien Mazarguil 			rte_prefetch0(rte_pktmbuf_mtod(*pkts,
5181d88ba17SNélio Laranjeiro 						       volatile void *));
5191d88ba17SNélio Laranjeiro 		/* Enable HW checksum offload if requested by the mbuf flags. */
5201d88ba17SNélio Laranjeiro 		if (buf->ol_flags &
5211d88ba17SNélio Laranjeiro 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
5221d88ba17SNélio Laranjeiro 			wqe->wqe.eseg.cs_flags =
5231d88ba17SNélio Laranjeiro 				MLX5_ETH_WQE_L3_CSUM |
5241d88ba17SNélio Laranjeiro 				MLX5_ETH_WQE_L4_CSUM;
5251d88ba17SNélio Laranjeiro 		} else {
5261d88ba17SNélio Laranjeiro 			wqe->wqe.eseg.cs_flags = 0;
5271d88ba17SNélio Laranjeiro 		}
528*0e8679fcSNélio Laranjeiro 		ds = mlx5_wqe_write(txq, wqe, buf, length);
529*0e8679fcSNélio Laranjeiro 		if (segs_n == 1)
530*0e8679fcSNélio Laranjeiro 			goto skip_segs;
531*0e8679fcSNélio Laranjeiro 		dseg = (volatile struct mlx5_wqe_data_seg *)
532*0e8679fcSNélio Laranjeiro 			(((uintptr_t)wqe) + ds * 16);
533a5bf6af9SAdrien Mazarguil 		while (--segs_n) {
534a5bf6af9SAdrien Mazarguil 			/*
535a5bf6af9SAdrien Mazarguil 			 * Spill over to the next WQE when the current one does not have
536a5bf6af9SAdrien Mazarguil 			 * enough room left. The size of a WQE must be a multiple
537a5bf6af9SAdrien Mazarguil 			 * of data segment size.
538a5bf6af9SAdrien Mazarguil 			 */
539a5bf6af9SAdrien Mazarguil 			assert(!(sizeof(*wqe) % sizeof(*dseg)));
540a5bf6af9SAdrien Mazarguil 			if (!(ds % (sizeof(*wqe) / 16)))
541a5bf6af9SAdrien Mazarguil 				dseg = (volatile void *)
542a5bf6af9SAdrien Mazarguil 					&(*txq->wqes)[txq->wqe_ci++ &
543a5bf6af9SAdrien Mazarguil 						      (txq->wqe_n - 1)];
544a5bf6af9SAdrien Mazarguil 			else
545a5bf6af9SAdrien Mazarguil 				++dseg;
546a5bf6af9SAdrien Mazarguil 			++ds;
547a5bf6af9SAdrien Mazarguil 			buf = buf->next;
548a5bf6af9SAdrien Mazarguil 			assert(buf);
549a5bf6af9SAdrien Mazarguil 			/* Store segment information. */
550a5bf6af9SAdrien Mazarguil 			dseg->byte_count = htonl(DATA_LEN(buf));
551a5bf6af9SAdrien Mazarguil 			dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
552a5bf6af9SAdrien Mazarguil 			dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
553a5bf6af9SAdrien Mazarguil 			(*txq->elts)[elts_head_next] = buf;
554a5bf6af9SAdrien Mazarguil 			elts_head_next = (elts_head_next + 1) & (elts_n - 1);
555a5bf6af9SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
556a5bf6af9SAdrien Mazarguil 			length += DATA_LEN(buf);
557a5bf6af9SAdrien Mazarguil #endif
558a5bf6af9SAdrien Mazarguil 			++j;
559a5bf6af9SAdrien Mazarguil 		}
560a5bf6af9SAdrien Mazarguil 		/* Update DS field in WQE. */
561a5bf6af9SAdrien Mazarguil 		wqe->wqe.ctrl.data[1] &= htonl(0xffffffc0);
562a5bf6af9SAdrien Mazarguil 		wqe->wqe.ctrl.data[1] |= htonl(ds & 0x3f);
563*0e8679fcSNélio Laranjeiro skip_segs:
56487011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
565573f54afSNélio Laranjeiro 		/* Increment sent bytes counter. */
566573f54afSNélio Laranjeiro 		txq->stats.obytes += length;
56787011737SAdrien Mazarguil #endif
568*0e8679fcSNélio Laranjeiro 		/* Increment consumer index. */
569*0e8679fcSNélio Laranjeiro 		txq->wqe_ci += (ds + 3) / 4;
5702e22920bSAdrien Mazarguil 		elts_head = elts_head_next;
571c3d62cc9SAdrien Mazarguil 		++i;
572c3d62cc9SAdrien Mazarguil 	} while (pkts_n);
5732e22920bSAdrien Mazarguil 	/* Take a shortcut if nothing must be sent. */
5742e22920bSAdrien Mazarguil 	if (unlikely(i == 0))
5752e22920bSAdrien Mazarguil 		return 0;
576c305090bSAdrien Mazarguil 	/* Check whether completion threshold has been reached. */
577a5bf6af9SAdrien Mazarguil 	comp = txq->elts_comp + i + j;
578c305090bSAdrien Mazarguil 	if (comp >= MLX5_TX_COMP_THRESH) {
579c305090bSAdrien Mazarguil 		/* Request completion on last WQE. */
580c305090bSAdrien Mazarguil 		wqe->wqe.ctrl.data[2] = htonl(8);
581c305090bSAdrien Mazarguil 		/* Save elts_head in unused "immediate" field of WQE. */
582c305090bSAdrien Mazarguil 		wqe->wqe.ctrl.data[3] = elts_head;
583c305090bSAdrien Mazarguil 		txq->elts_comp = 0;
584c305090bSAdrien Mazarguil 	} else {
585c305090bSAdrien Mazarguil 		txq->elts_comp = comp;
586c305090bSAdrien Mazarguil 	}
58787011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
58887011737SAdrien Mazarguil 	/* Increment sent packets counter. */
58987011737SAdrien Mazarguil 	txq->stats.opackets += i;
59087011737SAdrien Mazarguil #endif
5912e22920bSAdrien Mazarguil 	/* Ring QP doorbell. */
5921d88ba17SNélio Laranjeiro 	mlx5_tx_dbrec(txq);
5932e22920bSAdrien Mazarguil 	txq->elts_head = elts_head;
5942e22920bSAdrien Mazarguil 	return i;
5952e22920bSAdrien Mazarguil }
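
/*
 * Illustrative usage sketch from the application side, not part of the PMD:
 * mlx5_tx_burst() is normally reached through rte_eth_tx_burst(), and the
 * caller must retry or free whatever the burst did not accept. Port and
 * queue identifiers here are assumptions made for the example.
 */
#include <rte_ethdev.h>

static void
example_send_burst(uint8_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);

	/* Free the packets the driver could not queue in this burst. */
	while (sent < n)
		rte_pktmbuf_free(pkts[sent++]);
}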
5962e22920bSAdrien Mazarguil 
5972e22920bSAdrien Mazarguil /**
598230189d9SNélio Laranjeiro  * Open an MPW session.
599230189d9SNélio Laranjeiro  *
600230189d9SNélio Laranjeiro  * @param txq
601230189d9SNélio Laranjeiro  *   Pointer to TX queue structure.
602230189d9SNélio Laranjeiro  * @param mpw
603230189d9SNélio Laranjeiro  *   Pointer to MPW session structure.
604230189d9SNélio Laranjeiro  * @param length
605230189d9SNélio Laranjeiro  *   Packet length.
606230189d9SNélio Laranjeiro  */
607230189d9SNélio Laranjeiro static inline void
608230189d9SNélio Laranjeiro mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
609230189d9SNélio Laranjeiro {
610230189d9SNélio Laranjeiro 	uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
611230189d9SNélio Laranjeiro 	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
612230189d9SNélio Laranjeiro 		(volatile struct mlx5_wqe_data_seg (*)[])
613230189d9SNélio Laranjeiro 		(uintptr_t)&(*txq->wqes)[(idx + 1) & (txq->wqe_n - 1)];
614230189d9SNélio Laranjeiro 
615230189d9SNélio Laranjeiro 	mpw->state = MLX5_MPW_STATE_OPENED;
616230189d9SNélio Laranjeiro 	mpw->pkts_n = 0;
617230189d9SNélio Laranjeiro 	mpw->len = length;
618230189d9SNélio Laranjeiro 	mpw->total_len = 0;
619230189d9SNélio Laranjeiro 	mpw->wqe = &(*txq->wqes)[idx];
620230189d9SNélio Laranjeiro 	mpw->wqe->mpw.eseg.mss = htons(length);
621230189d9SNélio Laranjeiro 	mpw->wqe->mpw.eseg.inline_hdr_sz = 0;
622230189d9SNélio Laranjeiro 	mpw->wqe->mpw.eseg.rsvd0 = 0;
623230189d9SNélio Laranjeiro 	mpw->wqe->mpw.eseg.rsvd1 = 0;
624230189d9SNélio Laranjeiro 	mpw->wqe->mpw.eseg.rsvd2 = 0;
625230189d9SNélio Laranjeiro 	mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
626230189d9SNélio Laranjeiro 					   (txq->wqe_ci << 8) |
627230189d9SNélio Laranjeiro 					   MLX5_OPCODE_LSO_MPW);
628230189d9SNélio Laranjeiro 	mpw->wqe->mpw.ctrl.data[2] = 0;
629230189d9SNélio Laranjeiro 	mpw->wqe->mpw.ctrl.data[3] = 0;
630230189d9SNélio Laranjeiro 	mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0];
631230189d9SNélio Laranjeiro 	mpw->data.dseg[1] = &mpw->wqe->mpw.dseg[1];
632230189d9SNélio Laranjeiro 	mpw->data.dseg[2] = &(*dseg)[0];
633230189d9SNélio Laranjeiro 	mpw->data.dseg[3] = &(*dseg)[1];
634230189d9SNélio Laranjeiro 	mpw->data.dseg[4] = &(*dseg)[2];
635230189d9SNélio Laranjeiro }
636230189d9SNélio Laranjeiro 
637230189d9SNélio Laranjeiro /**
638230189d9SNélio Laranjeiro  * Close an MPW session.
639230189d9SNélio Laranjeiro  *
640230189d9SNélio Laranjeiro  * @param txq
641230189d9SNélio Laranjeiro  *   Pointer to TX queue structure.
642230189d9SNélio Laranjeiro  * @param mpw
643230189d9SNélio Laranjeiro  *   Pointer to MPW session structure.
644230189d9SNélio Laranjeiro  */
645230189d9SNélio Laranjeiro static inline void
646230189d9SNélio Laranjeiro mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
647230189d9SNélio Laranjeiro {
648230189d9SNélio Laranjeiro 	unsigned int num = mpw->pkts_n;
649230189d9SNélio Laranjeiro 
650230189d9SNélio Laranjeiro 	/*
651230189d9SNélio Laranjeiro 	 * Store size in multiples of 16 bytes. Control and Ethernet segments
652230189d9SNélio Laranjeiro 	 * count as 2.
653230189d9SNélio Laranjeiro 	 */
654230189d9SNélio Laranjeiro 	mpw->wqe->mpw.ctrl.data[1] = htonl(txq->qp_num_8s | (2 + num));
655230189d9SNélio Laranjeiro 	mpw->state = MLX5_MPW_STATE_CLOSED;
656230189d9SNélio Laranjeiro 	if (num < 3)
657230189d9SNélio Laranjeiro 		++txq->wqe_ci;
658230189d9SNélio Laranjeiro 	else
659230189d9SNélio Laranjeiro 		txq->wqe_ci += 2;
660230189d9SNélio Laranjeiro 	tx_prefetch_wqe(txq, txq->wqe_ci);
661230189d9SNélio Laranjeiro 	tx_prefetch_wqe(txq, txq->wqe_ci + 1);
662230189d9SNélio Laranjeiro }
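
/*
 * Illustrative sketch (an assumption, not part of the driver): an MPW
 * session spans at most two 64-byte WQEs. The first holds the control and
 * Ethernet segments plus two data segments, the second up to three more,
 * hence the one-or-two increment of wqe_ci in mlx5_mpw_close().
 */
static inline unsigned int
example_mpw_wqe_count(unsigned int pkts_n)
{
	return (pkts_n < 3) ? 1 : 2;
}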
663230189d9SNélio Laranjeiro 
664230189d9SNélio Laranjeiro /**
665230189d9SNélio Laranjeiro  * DPDK callback for TX with MPW support.
666230189d9SNélio Laranjeiro  *
667230189d9SNélio Laranjeiro  * @param dpdk_txq
668230189d9SNélio Laranjeiro  *   Generic pointer to TX queue structure.
669230189d9SNélio Laranjeiro  * @param[in] pkts
670230189d9SNélio Laranjeiro  *   Packets to transmit.
671230189d9SNélio Laranjeiro  * @param pkts_n
672230189d9SNélio Laranjeiro  *   Number of packets in array.
673230189d9SNélio Laranjeiro  *
674230189d9SNélio Laranjeiro  * @return
675230189d9SNélio Laranjeiro  *   Number of packets successfully transmitted (<= pkts_n).
676230189d9SNélio Laranjeiro  */
677230189d9SNélio Laranjeiro uint16_t
678230189d9SNélio Laranjeiro mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
679230189d9SNélio Laranjeiro {
680230189d9SNélio Laranjeiro 	struct txq *txq = (struct txq *)dpdk_txq;
681230189d9SNélio Laranjeiro 	uint16_t elts_head = txq->elts_head;
682230189d9SNélio Laranjeiro 	const unsigned int elts_n = txq->elts_n;
683c3d62cc9SAdrien Mazarguil 	unsigned int i = 0;
684a5bf6af9SAdrien Mazarguil 	unsigned int j = 0;
685230189d9SNélio Laranjeiro 	unsigned int max;
686230189d9SNélio Laranjeiro 	unsigned int comp;
687230189d9SNélio Laranjeiro 	struct mlx5_mpw mpw = {
688230189d9SNélio Laranjeiro 		.state = MLX5_MPW_STATE_CLOSED,
689230189d9SNélio Laranjeiro 	};
690230189d9SNélio Laranjeiro 
691c3d62cc9SAdrien Mazarguil 	if (unlikely(!pkts_n))
692c3d62cc9SAdrien Mazarguil 		return 0;
693230189d9SNélio Laranjeiro 	/* Prefetch first packet cacheline. */
694230189d9SNélio Laranjeiro 	tx_prefetch_cqe(txq, txq->cq_ci);
695230189d9SNélio Laranjeiro 	tx_prefetch_wqe(txq, txq->wqe_ci);
696230189d9SNélio Laranjeiro 	tx_prefetch_wqe(txq, txq->wqe_ci + 1);
697230189d9SNélio Laranjeiro 	/* Start processing. */
698230189d9SNélio Laranjeiro 	txq_complete(txq);
699230189d9SNélio Laranjeiro 	max = (elts_n - (elts_head - txq->elts_tail));
700230189d9SNélio Laranjeiro 	if (max > elts_n)
701230189d9SNélio Laranjeiro 		max -= elts_n;
702c3d62cc9SAdrien Mazarguil 	do {
703a5bf6af9SAdrien Mazarguil 		struct rte_mbuf *buf = *(pkts++);
704c3d62cc9SAdrien Mazarguil 		unsigned int elts_head_next;
705230189d9SNélio Laranjeiro 		uint32_t length;
706a5bf6af9SAdrien Mazarguil 		unsigned int segs_n = buf->nb_segs;
707230189d9SNélio Laranjeiro 		uint32_t cs_flags = 0;
708230189d9SNélio Laranjeiro 
709c3d62cc9SAdrien Mazarguil 		/*
710c3d62cc9SAdrien Mazarguil 		 * Make sure there is enough room to store this packet and
711c3d62cc9SAdrien Mazarguil 		 * that one ring entry remains unused.
712c3d62cc9SAdrien Mazarguil 		 */
713a5bf6af9SAdrien Mazarguil 		assert(segs_n);
714a5bf6af9SAdrien Mazarguil 		if (max < segs_n + 1)
715c3d62cc9SAdrien Mazarguil 			break;
716a5bf6af9SAdrien Mazarguil 		/* Do not bother with large packets that MPW cannot handle. */
717a5bf6af9SAdrien Mazarguil 		if (segs_n > MLX5_MPW_DSEG_MAX)
718a5bf6af9SAdrien Mazarguil 			break;
719a5bf6af9SAdrien Mazarguil 		max -= segs_n;
720c3d62cc9SAdrien Mazarguil 		--pkts_n;
721230189d9SNélio Laranjeiro 		/* Enable HW checksum offload if requested by the mbuf flags. */
722230189d9SNélio Laranjeiro 		if (buf->ol_flags &
723230189d9SNélio Laranjeiro 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
724230189d9SNélio Laranjeiro 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
725a5bf6af9SAdrien Mazarguil 		/* Retrieve packet information. */
726a5bf6af9SAdrien Mazarguil 		length = PKT_LEN(buf);
727a5bf6af9SAdrien Mazarguil 		assert(length);
728230189d9SNélio Laranjeiro 		/* Start new session if packet differs. */
729230189d9SNélio Laranjeiro 		if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
730230189d9SNélio Laranjeiro 		    ((mpw.len != length) ||
731a5bf6af9SAdrien Mazarguil 		     (segs_n != 1) ||
732230189d9SNélio Laranjeiro 		     (mpw.wqe->mpw.eseg.cs_flags != cs_flags)))
733230189d9SNélio Laranjeiro 			mlx5_mpw_close(txq, &mpw);
734230189d9SNélio Laranjeiro 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
735230189d9SNélio Laranjeiro 			mlx5_mpw_new(txq, &mpw, length);
736230189d9SNélio Laranjeiro 			mpw.wqe->mpw.eseg.cs_flags = cs_flags;
737230189d9SNélio Laranjeiro 		}
738a5bf6af9SAdrien Mazarguil 		/* Multi-segment packets must be alone in their MPW. */
739a5bf6af9SAdrien Mazarguil 		assert((segs_n == 1) || (mpw.pkts_n == 0));
740a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
741a5bf6af9SAdrien Mazarguil 		length = 0;
742a5bf6af9SAdrien Mazarguil #endif
743a5bf6af9SAdrien Mazarguil 		do {
744a5bf6af9SAdrien Mazarguil 			volatile struct mlx5_wqe_data_seg *dseg;
745a5bf6af9SAdrien Mazarguil 			uintptr_t addr;
746a5bf6af9SAdrien Mazarguil 
747a5bf6af9SAdrien Mazarguil 			elts_head_next = (elts_head + 1) & (elts_n - 1);
748a5bf6af9SAdrien Mazarguil 			assert(buf);
749a5bf6af9SAdrien Mazarguil 			(*txq->elts)[elts_head] = buf;
750230189d9SNélio Laranjeiro 			dseg = mpw.data.dseg[mpw.pkts_n];
751a5bf6af9SAdrien Mazarguil 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
752230189d9SNélio Laranjeiro 			*dseg = (struct mlx5_wqe_data_seg){
753a5bf6af9SAdrien Mazarguil 				.byte_count = htonl(DATA_LEN(buf)),
754230189d9SNélio Laranjeiro 				.lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
755230189d9SNélio Laranjeiro 				.addr = htonll(addr),
756230189d9SNélio Laranjeiro 			};
757a5bf6af9SAdrien Mazarguil 			elts_head = elts_head_next;
758a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
759a5bf6af9SAdrien Mazarguil 			length += DATA_LEN(buf);
760a5bf6af9SAdrien Mazarguil #endif
761a5bf6af9SAdrien Mazarguil 			buf = buf->next;
762230189d9SNélio Laranjeiro 			++mpw.pkts_n;
763a5bf6af9SAdrien Mazarguil 			++j;
764a5bf6af9SAdrien Mazarguil 		} while (--segs_n);
765a5bf6af9SAdrien Mazarguil 		assert(length == mpw.len);
766230189d9SNélio Laranjeiro 		if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
767230189d9SNélio Laranjeiro 			mlx5_mpw_close(txq, &mpw);
768230189d9SNélio Laranjeiro 		elts_head = elts_head_next;
769230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS
770230189d9SNélio Laranjeiro 		/* Increment sent bytes counter. */
771230189d9SNélio Laranjeiro 		txq->stats.obytes += length;
772230189d9SNélio Laranjeiro #endif
773c3d62cc9SAdrien Mazarguil 		++i;
774c3d62cc9SAdrien Mazarguil 	} while (pkts_n);
775230189d9SNélio Laranjeiro 	/* Take a shortcut if nothing must be sent. */
776230189d9SNélio Laranjeiro 	if (unlikely(i == 0))
777230189d9SNélio Laranjeiro 		return 0;
778230189d9SNélio Laranjeiro 	/* Check whether completion threshold has been reached. */
779a5bf6af9SAdrien Mazarguil 	/* "j" includes both packets and segments. */
780a5bf6af9SAdrien Mazarguil 	comp = txq->elts_comp + j;
781230189d9SNélio Laranjeiro 	if (comp >= MLX5_TX_COMP_THRESH) {
782230189d9SNélio Laranjeiro 		volatile union mlx5_wqe *wqe = mpw.wqe;
783230189d9SNélio Laranjeiro 
784230189d9SNélio Laranjeiro 		/* Request completion on last WQE. */
785230189d9SNélio Laranjeiro 		wqe->mpw.ctrl.data[2] = htonl(8);
786230189d9SNélio Laranjeiro 		/* Save elts_head in unused "immediate" field of WQE. */
787230189d9SNélio Laranjeiro 		wqe->mpw.ctrl.data[3] = elts_head;
788230189d9SNélio Laranjeiro 		txq->elts_comp = 0;
789230189d9SNélio Laranjeiro 	} else {
790230189d9SNélio Laranjeiro 		txq->elts_comp = comp;
791230189d9SNélio Laranjeiro 	}
792230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS
793230189d9SNélio Laranjeiro 	/* Increment sent packets counter. */
794230189d9SNélio Laranjeiro 	txq->stats.opackets += i;
795230189d9SNélio Laranjeiro #endif
796230189d9SNélio Laranjeiro 	/* Ring QP doorbell. */
797230189d9SNélio Laranjeiro 	if (mpw.state == MLX5_MPW_STATE_OPENED)
798230189d9SNélio Laranjeiro 		mlx5_mpw_close(txq, &mpw);
799230189d9SNélio Laranjeiro 	mlx5_tx_dbrec(txq);
800230189d9SNélio Laranjeiro 	txq->elts_head = elts_head;
801230189d9SNélio Laranjeiro 	return i;
802230189d9SNélio Laranjeiro }
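
/*
 * Illustrative sketch (an assumption, not part of the driver): the
 * conditions under which a packet may join the currently open MPW session
 * in mlx5_tx_burst_mpw() instead of forcing it to close.
 */
static inline int
example_mpw_can_join(const struct mlx5_mpw *mpw, uint32_t length,
		     unsigned int segs_n, uint8_t cs_flags)
{
	/* Mirrors the "start new session if packet differs" test above. */
	return (mpw->state == MLX5_MPW_STATE_OPENED) &&
	       (mpw->len == length) &&
	       (segs_n == 1) &&
	       (mpw->wqe->mpw.eseg.cs_flags == cs_flags);
}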
803230189d9SNélio Laranjeiro 
804230189d9SNélio Laranjeiro /**
805230189d9SNélio Laranjeiro  * Open an MPW inline session.
806230189d9SNélio Laranjeiro  *
807230189d9SNélio Laranjeiro  * @param txq
808230189d9SNélio Laranjeiro  *   Pointer to TX queue structure.
809230189d9SNélio Laranjeiro  * @param mpw
810230189d9SNélio Laranjeiro  *   Pointer to MPW session structure.
811230189d9SNélio Laranjeiro  * @param length
812230189d9SNélio Laranjeiro  *   Packet length.
813230189d9SNélio Laranjeiro  */
814230189d9SNélio Laranjeiro static inline void
815230189d9SNélio Laranjeiro mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
816230189d9SNélio Laranjeiro {
817230189d9SNélio Laranjeiro 	uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
818230189d9SNélio Laranjeiro 
819230189d9SNélio Laranjeiro 	mpw->state = MLX5_MPW_INL_STATE_OPENED;
820230189d9SNélio Laranjeiro 	mpw->pkts_n = 0;
821230189d9SNélio Laranjeiro 	mpw->len = length;
822230189d9SNélio Laranjeiro 	mpw->total_len = 0;
823230189d9SNélio Laranjeiro 	mpw->wqe = &(*txq->wqes)[idx];
824230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
825230189d9SNélio Laranjeiro 					       (txq->wqe_ci << 8) |
826230189d9SNélio Laranjeiro 					       MLX5_OPCODE_LSO_MPW);
827230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.ctrl.data[2] = 0;
828230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.ctrl.data[3] = 0;
829230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.eseg.mss = htons(length);
830230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.eseg.inline_hdr_sz = 0;
831230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.eseg.cs_flags = 0;
832230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.eseg.rsvd0 = 0;
833230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.eseg.rsvd1 = 0;
834230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.eseg.rsvd2 = 0;
835230189d9SNélio Laranjeiro 	mpw->data.raw = &mpw->wqe->mpw_inl.data[0];
836230189d9SNélio Laranjeiro }
837230189d9SNélio Laranjeiro 
838230189d9SNélio Laranjeiro /**
839230189d9SNélio Laranjeiro  * Close an MPW inline session.
840230189d9SNélio Laranjeiro  *
841230189d9SNélio Laranjeiro  * @param txq
842230189d9SNélio Laranjeiro  *   Pointer to TX queue structure.
843230189d9SNélio Laranjeiro  * @param mpw
844230189d9SNélio Laranjeiro  *   Pointer to MPW session structure.
845230189d9SNélio Laranjeiro  */
846230189d9SNélio Laranjeiro static inline void
847230189d9SNélio Laranjeiro mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
848230189d9SNélio Laranjeiro {
849230189d9SNélio Laranjeiro 	unsigned int size;
850230189d9SNélio Laranjeiro 
851230189d9SNélio Laranjeiro 	size = sizeof(*mpw->wqe) - MLX5_MWQE64_INL_DATA + mpw->total_len;
852230189d9SNélio Laranjeiro 	/*
853230189d9SNélio Laranjeiro 	 * Store size in multiples of 16 bytes. Control and Ethernet segments
854230189d9SNélio Laranjeiro 	 * count as 2.
855230189d9SNélio Laranjeiro 	 */
856230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.ctrl.data[1] =
857230189d9SNélio Laranjeiro 		htonl(txq->qp_num_8s | ((size + 15) / 16));
858230189d9SNélio Laranjeiro 	mpw->state = MLX5_MPW_STATE_CLOSED;
859230189d9SNélio Laranjeiro 	mpw->wqe->mpw_inl.byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
860230189d9SNélio Laranjeiro 	txq->wqe_ci += (size + (sizeof(*mpw->wqe) - 1)) / sizeof(*mpw->wqe);
861230189d9SNélio Laranjeiro }
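
/*
 * Illustrative sketch (an assumption, not part of the driver): the wqe_ci
 * advance in mlx5_mpw_inline_close(), i.e. the number of whole WQEs
 * consumed by an inline session whose control and Ethernet segments plus
 * inlined bytes add up to "size".
 */
static inline unsigned int
example_mpw_inline_wqe_count(unsigned int size, unsigned int wqe_size)
{
	return (size + wqe_size - 1) / wqe_size; /* Round up. */
}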
862230189d9SNélio Laranjeiro 
863230189d9SNélio Laranjeiro /**
864230189d9SNélio Laranjeiro  * DPDK callback for TX with MPW inline support.
865230189d9SNélio Laranjeiro  *
866230189d9SNélio Laranjeiro  * @param dpdk_txq
867230189d9SNélio Laranjeiro  *   Generic pointer to TX queue structure.
868230189d9SNélio Laranjeiro  * @param[in] pkts
869230189d9SNélio Laranjeiro  *   Packets to transmit.
870230189d9SNélio Laranjeiro  * @param pkts_n
871230189d9SNélio Laranjeiro  *   Number of packets in array.
872230189d9SNélio Laranjeiro  *
873230189d9SNélio Laranjeiro  * @return
874230189d9SNélio Laranjeiro  *   Number of packets successfully transmitted (<= pkts_n).
875230189d9SNélio Laranjeiro  */
876230189d9SNélio Laranjeiro uint16_t
877230189d9SNélio Laranjeiro mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
878230189d9SNélio Laranjeiro 			 uint16_t pkts_n)
879230189d9SNélio Laranjeiro {
880230189d9SNélio Laranjeiro 	struct txq *txq = (struct txq *)dpdk_txq;
881230189d9SNélio Laranjeiro 	uint16_t elts_head = txq->elts_head;
882230189d9SNélio Laranjeiro 	const unsigned int elts_n = txq->elts_n;
883c3d62cc9SAdrien Mazarguil 	unsigned int i = 0;
884a5bf6af9SAdrien Mazarguil 	unsigned int j = 0;
885230189d9SNélio Laranjeiro 	unsigned int max;
886230189d9SNélio Laranjeiro 	unsigned int comp;
887*0e8679fcSNélio Laranjeiro 	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
888230189d9SNélio Laranjeiro 	struct mlx5_mpw mpw = {
889230189d9SNélio Laranjeiro 		.state = MLX5_MPW_STATE_CLOSED,
890230189d9SNélio Laranjeiro 	};
891230189d9SNélio Laranjeiro 
892c3d62cc9SAdrien Mazarguil 	if (unlikely(!pkts_n))
893c3d62cc9SAdrien Mazarguil 		return 0;
894230189d9SNélio Laranjeiro 	/* Prefetch first packet cacheline. */
895230189d9SNélio Laranjeiro 	tx_prefetch_cqe(txq, txq->cq_ci);
896230189d9SNélio Laranjeiro 	tx_prefetch_wqe(txq, txq->wqe_ci);
897230189d9SNélio Laranjeiro 	tx_prefetch_wqe(txq, txq->wqe_ci + 1);
898230189d9SNélio Laranjeiro 	/* Start processing. */
899230189d9SNélio Laranjeiro 	txq_complete(txq);
900230189d9SNélio Laranjeiro 	max = (elts_n - (elts_head - txq->elts_tail));
901230189d9SNélio Laranjeiro 	if (max > elts_n)
902230189d9SNélio Laranjeiro 		max -= elts_n;
903c3d62cc9SAdrien Mazarguil 	do {
904a5bf6af9SAdrien Mazarguil 		struct rte_mbuf *buf = *(pkts++);
905c3d62cc9SAdrien Mazarguil 		unsigned int elts_head_next;
906230189d9SNélio Laranjeiro 		uintptr_t addr;
907230189d9SNélio Laranjeiro 		uint32_t length;
908a5bf6af9SAdrien Mazarguil 		unsigned int segs_n = buf->nb_segs;
909230189d9SNélio Laranjeiro 		uint32_t cs_flags = 0;
910230189d9SNélio Laranjeiro 
911c3d62cc9SAdrien Mazarguil 		/*
912c3d62cc9SAdrien Mazarguil 		 * Make sure there is enough room to store this packet and
913c3d62cc9SAdrien Mazarguil 		 * that one ring entry remains unused.
914c3d62cc9SAdrien Mazarguil 		 */
915a5bf6af9SAdrien Mazarguil 		assert(segs_n);
916a5bf6af9SAdrien Mazarguil 		if (max < segs_n + 1)
917c3d62cc9SAdrien Mazarguil 			break;
918a5bf6af9SAdrien Mazarguil 		/* Do not bother with large packets that MPW cannot handle. */
919a5bf6af9SAdrien Mazarguil 		if (segs_n > MLX5_MPW_DSEG_MAX)
920a5bf6af9SAdrien Mazarguil 			break;
921a5bf6af9SAdrien Mazarguil 		max -= segs_n;
922c3d62cc9SAdrien Mazarguil 		--pkts_n;
923230189d9SNélio Laranjeiro 		/* Enable HW checksum offload if requested by the mbuf flags. */
924230189d9SNélio Laranjeiro 		if (buf->ol_flags &
925230189d9SNélio Laranjeiro 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
926230189d9SNélio Laranjeiro 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
927a5bf6af9SAdrien Mazarguil 		/* Retrieve packet information. */
928a5bf6af9SAdrien Mazarguil 		length = PKT_LEN(buf);
929230189d9SNélio Laranjeiro 		/* Start new session if packet differs. */
930230189d9SNélio Laranjeiro 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
931230189d9SNélio Laranjeiro 			if ((mpw.len != length) ||
932a5bf6af9SAdrien Mazarguil 			    (segs_n != 1) ||
933230189d9SNélio Laranjeiro 			    (mpw.wqe->mpw.eseg.cs_flags != cs_flags))
934230189d9SNélio Laranjeiro 				mlx5_mpw_close(txq, &mpw);
935230189d9SNélio Laranjeiro 		} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
936230189d9SNélio Laranjeiro 			if ((mpw.len != length) ||
937a5bf6af9SAdrien Mazarguil 			    (segs_n != 1) ||
938230189d9SNélio Laranjeiro 			    (length > inline_room) ||
939230189d9SNélio Laranjeiro 			    (mpw.wqe->mpw_inl.eseg.cs_flags != cs_flags)) {
940230189d9SNélio Laranjeiro 				mlx5_mpw_inline_close(txq, &mpw);
941*0e8679fcSNélio Laranjeiro 				inline_room =
942*0e8679fcSNélio Laranjeiro 					txq->max_inline * RTE_CACHE_LINE_SIZE;
943230189d9SNélio Laranjeiro 			}
944230189d9SNélio Laranjeiro 		}
945230189d9SNélio Laranjeiro 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
946a5bf6af9SAdrien Mazarguil 			if ((segs_n != 1) ||
947a5bf6af9SAdrien Mazarguil 			    (length > inline_room)) {
948230189d9SNélio Laranjeiro 				mlx5_mpw_new(txq, &mpw, length);
949230189d9SNélio Laranjeiro 				mpw.wqe->mpw.eseg.cs_flags = cs_flags;
950230189d9SNélio Laranjeiro 			} else {
951230189d9SNélio Laranjeiro 				mlx5_mpw_inline_new(txq, &mpw, length);
952230189d9SNélio Laranjeiro 				mpw.wqe->mpw_inl.eseg.cs_flags = cs_flags;
953230189d9SNélio Laranjeiro 			}
954230189d9SNélio Laranjeiro 		}
955a5bf6af9SAdrien Mazarguil 		/* Multi-segment packets must be alone in their MPW. */
956a5bf6af9SAdrien Mazarguil 		assert((segs_n == 1) || (mpw.pkts_n == 0));
957230189d9SNélio Laranjeiro 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
958*0e8679fcSNélio Laranjeiro 			assert(inline_room ==
959*0e8679fcSNélio Laranjeiro 			       txq->max_inline * RTE_CACHE_LINE_SIZE);
960a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
961a5bf6af9SAdrien Mazarguil 			length = 0;
962a5bf6af9SAdrien Mazarguil #endif
963a5bf6af9SAdrien Mazarguil 			do {
964230189d9SNélio Laranjeiro 				volatile struct mlx5_wqe_data_seg *dseg;
965230189d9SNélio Laranjeiro 
966a5bf6af9SAdrien Mazarguil 				elts_head_next =
967a5bf6af9SAdrien Mazarguil 					(elts_head + 1) & (elts_n - 1);
968a5bf6af9SAdrien Mazarguil 				assert(buf);
969a5bf6af9SAdrien Mazarguil 				(*txq->elts)[elts_head] = buf;
970230189d9SNélio Laranjeiro 				dseg = mpw.data.dseg[mpw.pkts_n];
971a5bf6af9SAdrien Mazarguil 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
972230189d9SNélio Laranjeiro 				*dseg = (struct mlx5_wqe_data_seg){
973a5bf6af9SAdrien Mazarguil 					.byte_count = htonl(DATA_LEN(buf)),
974230189d9SNélio Laranjeiro 					.lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
975230189d9SNélio Laranjeiro 					.addr = htonll(addr),
976230189d9SNélio Laranjeiro 				};
977a5bf6af9SAdrien Mazarguil 				elts_head = elts_head_next;
978a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
979a5bf6af9SAdrien Mazarguil 				length += DATA_LEN(buf);
980a5bf6af9SAdrien Mazarguil #endif
981a5bf6af9SAdrien Mazarguil 				buf = buf->next;
982230189d9SNélio Laranjeiro 				++mpw.pkts_n;
983a5bf6af9SAdrien Mazarguil 				++j;
984a5bf6af9SAdrien Mazarguil 			} while (--segs_n);
985a5bf6af9SAdrien Mazarguil 			assert(length == mpw.len);
986230189d9SNélio Laranjeiro 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
987230189d9SNélio Laranjeiro 				mlx5_mpw_close(txq, &mpw);
988230189d9SNélio Laranjeiro 		} else {
989230189d9SNélio Laranjeiro 			unsigned int max;
990230189d9SNélio Laranjeiro 
991230189d9SNélio Laranjeiro 			assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
992230189d9SNélio Laranjeiro 			assert(length <= inline_room);
993a5bf6af9SAdrien Mazarguil 			assert(length == DATA_LEN(buf));
994a5bf6af9SAdrien Mazarguil 			elts_head_next = (elts_head + 1) & (elts_n - 1);
995a5bf6af9SAdrien Mazarguil 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
996a5bf6af9SAdrien Mazarguil 			(*txq->elts)[elts_head] = buf;
997230189d9SNélio Laranjeiro 			/* Maximum number of bytes before wrapping. */
998230189d9SNélio Laranjeiro 			max = ((uintptr_t)&(*txq->wqes)[txq->wqe_n] -
999230189d9SNélio Laranjeiro 			       (uintptr_t)mpw.data.raw);
1000230189d9SNélio Laranjeiro 			if (length > max) {
1001230189d9SNélio Laranjeiro 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1002230189d9SNélio Laranjeiro 					   (void *)addr,
1003230189d9SNélio Laranjeiro 					   max);
1004230189d9SNélio Laranjeiro 				mpw.data.raw =
1005230189d9SNélio Laranjeiro 					(volatile void *)&(*txq->wqes)[0];
1006230189d9SNélio Laranjeiro 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1007230189d9SNélio Laranjeiro 					   (void *)(addr + max),
1008230189d9SNélio Laranjeiro 					   length - max);
1009230189d9SNélio Laranjeiro 				mpw.data.raw += length - max;
1010230189d9SNélio Laranjeiro 			} else {
1011230189d9SNélio Laranjeiro 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1012230189d9SNélio Laranjeiro 					   (void *)addr,
1013230189d9SNélio Laranjeiro 					   length);
1014230189d9SNélio Laranjeiro 				mpw.data.raw += length;
1015230189d9SNélio Laranjeiro 			}
1016230189d9SNélio Laranjeiro 			if ((uintptr_t)mpw.data.raw ==
1017230189d9SNélio Laranjeiro 			    (uintptr_t)&(*txq->wqes)[txq->wqe_n])
1018230189d9SNélio Laranjeiro 				mpw.data.raw =
1019230189d9SNélio Laranjeiro 					(volatile void *)&(*txq->wqes)[0];
1020230189d9SNélio Laranjeiro 			++mpw.pkts_n;
1021a5bf6af9SAdrien Mazarguil 			++j;
1022230189d9SNélio Laranjeiro 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
1023230189d9SNélio Laranjeiro 				mlx5_mpw_inline_close(txq, &mpw);
1024*0e8679fcSNélio Laranjeiro 				inline_room =
1025*0e8679fcSNélio Laranjeiro 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1026230189d9SNélio Laranjeiro 			} else {
1027230189d9SNélio Laranjeiro 				inline_room -= length;
1028230189d9SNélio Laranjeiro 			}
1029230189d9SNélio Laranjeiro 		}
1030230189d9SNélio Laranjeiro 		mpw.total_len += length;
1031230189d9SNélio Laranjeiro 		elts_head = elts_head_next;
1032230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS
1033230189d9SNélio Laranjeiro 		/* Increment sent bytes counter. */
1034230189d9SNélio Laranjeiro 		txq->stats.obytes += length;
1035230189d9SNélio Laranjeiro #endif
1036c3d62cc9SAdrien Mazarguil 		++i;
1037c3d62cc9SAdrien Mazarguil 	} while (pkts_n);
1038230189d9SNélio Laranjeiro 	/* Take a shortcut if nothing must be sent. */
1039230189d9SNélio Laranjeiro 	if (unlikely(i == 0))
1040230189d9SNélio Laranjeiro 		return 0;
1041230189d9SNélio Laranjeiro 	/* Check whether completion threshold has been reached. */
1042a5bf6af9SAdrien Mazarguil 	/* "j" includes both packets and segments. */
1043a5bf6af9SAdrien Mazarguil 	comp = txq->elts_comp + j;
1044230189d9SNélio Laranjeiro 	if (comp >= MLX5_TX_COMP_THRESH) {
1045230189d9SNélio Laranjeiro 		volatile union mlx5_wqe *wqe = mpw.wqe;
1046230189d9SNélio Laranjeiro 
1047230189d9SNélio Laranjeiro 		/* Request completion on last WQE. */
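		/* The literal 8 is assumed to be the CQE-request flag. */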
1048230189d9SNélio Laranjeiro 		wqe->mpw_inl.ctrl.data[2] = htonl(8);
1049230189d9SNélio Laranjeiro 		/* Save elts_head in unused "immediate" field of WQE. */
1050230189d9SNélio Laranjeiro 		wqe->mpw_inl.ctrl.data[3] = elts_head;
1051230189d9SNélio Laranjeiro 		txq->elts_comp = 0;
1052230189d9SNélio Laranjeiro 	} else {
1053230189d9SNélio Laranjeiro 		txq->elts_comp = comp;
1054230189d9SNélio Laranjeiro 	}
1055230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS
1056230189d9SNélio Laranjeiro 	/* Increment sent packets counter. */
1057230189d9SNélio Laranjeiro 	txq->stats.opackets += i;
1058230189d9SNélio Laranjeiro #endif
1059230189d9SNélio Laranjeiro 	/* Close any open MPW session and ring QP doorbell. */
1060230189d9SNélio Laranjeiro 	if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
1061230189d9SNélio Laranjeiro 		mlx5_mpw_inline_close(txq, &mpw);
1062230189d9SNélio Laranjeiro 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1063230189d9SNélio Laranjeiro 		mlx5_mpw_close(txq, &mpw);
1064230189d9SNélio Laranjeiro 	mlx5_tx_dbrec(txq);
1065230189d9SNélio Laranjeiro 	txq->elts_head = elts_head;
1066230189d9SNélio Laranjeiro 	return i;
1067230189d9SNélio Laranjeiro }
1068230189d9SNélio Laranjeiro 
1069230189d9SNélio Laranjeiro /**
107067fa62bcSAdrien Mazarguil  * Translate RX completion flags to packet type.
107167fa62bcSAdrien Mazarguil  *
10726218063bSNélio Laranjeiro  * @param[in] cqe
10736218063bSNélio Laranjeiro  *   Pointer to CQE.
107467fa62bcSAdrien Mazarguil  *
107678a38edfSJianfeng Tan  * @note: fix mlx5_dev_supported_ptypes_get() if any change is made here.
107678a38edfSJianfeng Tan  *
107767fa62bcSAdrien Mazarguil  * @return
107867fa62bcSAdrien Mazarguil  *   Packet type for struct rte_mbuf.
107967fa62bcSAdrien Mazarguil  */
108067fa62bcSAdrien Mazarguil static inline uint32_t
10816218063bSNélio Laranjeiro rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
108267fa62bcSAdrien Mazarguil {
108367fa62bcSAdrien Mazarguil 	uint32_t pkt_type;
10846218063bSNélio Laranjeiro 	uint8_t flags = cqe->l4_hdr_type_etc;
10856218063bSNélio Laranjeiro 	uint8_t info = cqe->rsvd0[0];
108667fa62bcSAdrien Mazarguil 
10876218063bSNélio Laranjeiro 	if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
108867fa62bcSAdrien Mazarguil 		pkt_type =
108967fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
109067fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
109167fa62bcSAdrien Mazarguil 				  RTE_PTYPE_L3_IPV4) |
109267fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
109367fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
109467fa62bcSAdrien Mazarguil 				  RTE_PTYPE_L3_IPV6) |
109567fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
109667fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_IPV4_PACKET,
109767fa62bcSAdrien Mazarguil 				  RTE_PTYPE_INNER_L3_IPV4) |
109867fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
109967fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_IPV6_PACKET,
110067fa62bcSAdrien Mazarguil 				  RTE_PTYPE_INNER_L3_IPV6);
110167fa62bcSAdrien Mazarguil 	else
110267fa62bcSAdrien Mazarguil 		pkt_type =
110367fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
11046218063bSNélio Laranjeiro 				  MLX5_CQE_L3_HDR_TYPE_IPV6,
11056218063bSNélio Laranjeiro 				  RTE_PTYPE_L3_IPV6) |
110667fa62bcSAdrien Mazarguil 			TRANSPOSE(flags,
11076218063bSNélio Laranjeiro 				  MLX5_CQE_L3_HDR_TYPE_IPV4,
11086218063bSNélio Laranjeiro 				  RTE_PTYPE_L3_IPV4);
110967fa62bcSAdrien Mazarguil 	return pkt_type;
111067fa62bcSAdrien Mazarguil }
111167fa62bcSAdrien Mazarguil 
111267fa62bcSAdrien Mazarguil /**
111399c12dccSNélio Laranjeiro  * Get size of the next packet for a given CQE. For compressed CQEs, the
111499c12dccSNélio Laranjeiro  * consumer index is updated only once all the packets of the current
111599c12dccSNélio Laranjeiro  * compressed CQE have been processed.
111699c12dccSNélio Laranjeiro  *
111799c12dccSNélio Laranjeiro  * @param rxq
111899c12dccSNélio Laranjeiro  *   Pointer to RX queue.
111999c12dccSNélio Laranjeiro  * @param cqe
112099c12dccSNélio Laranjeiro  *   CQE to process.
112199c12dccSNélio Laranjeiro  *
112299c12dccSNélio Laranjeiro  * @return
112399c12dccSNélio Laranjeiro  *   Packet size in bytes (0 if there is none), -1 in case of completion
112499c12dccSNélio Laranjeiro  *   with error.
112599c12dccSNélio Laranjeiro  */
112699c12dccSNélio Laranjeiro static inline int
112799c12dccSNélio Laranjeiro mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
112899c12dccSNélio Laranjeiro 		 uint16_t cqe_cnt)
112999c12dccSNélio Laranjeiro {
113099c12dccSNélio Laranjeiro 	struct rxq_zip *zip = &rxq->zip;
113199c12dccSNélio Laranjeiro 	uint16_t cqe_n = cqe_cnt + 1;
113299c12dccSNélio Laranjeiro 	int len = 0;
113399c12dccSNélio Laranjeiro 
113499c12dccSNélio Laranjeiro 	/* Process compressed data in the CQE and mini arrays. */
113599c12dccSNélio Laranjeiro 	if (zip->ai) {
113699c12dccSNélio Laranjeiro 		volatile struct mlx5_mini_cqe8 (*mc)[8] =
113799c12dccSNélio Laranjeiro 			(volatile struct mlx5_mini_cqe8 (*)[8])
113899c12dccSNélio Laranjeiro 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64);
113999c12dccSNélio Laranjeiro 
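		/* Each mini CQE entry carries the byte count of one packet. */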
114099c12dccSNélio Laranjeiro 		len = ntohl((*mc)[zip->ai & 7].byte_cnt);
114199c12dccSNélio Laranjeiro 		if ((++zip->ai & 7) == 0) {
114299c12dccSNélio Laranjeiro 			/*
114399c12dccSNélio Laranjeiro 			 * Increment consumer index to skip the number of
114499c12dccSNélio Laranjeiro 			 * CQEs consumed. Hardware leaves holes in the CQ
114599c12dccSNélio Laranjeiro 			 * ring for software use.
114699c12dccSNélio Laranjeiro 			 */
114799c12dccSNélio Laranjeiro 			zip->ca = zip->na;
114899c12dccSNélio Laranjeiro 			zip->na += 8;
114999c12dccSNélio Laranjeiro 		}
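		/*
		 * End of the compression session: invalidate the CQE slots it
		 * consumed and move the consumer index past them.
		 */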
115099c12dccSNélio Laranjeiro 		if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
115199c12dccSNélio Laranjeiro 			uint16_t idx = rxq->cq_ci;
115299c12dccSNélio Laranjeiro 			uint16_t end = zip->cq_ci;
115399c12dccSNélio Laranjeiro 
115499c12dccSNélio Laranjeiro 			while (idx != end) {
115599c12dccSNélio Laranjeiro 				(*rxq->cqes)[idx & cqe_cnt].cqe64.op_own =
115699c12dccSNélio Laranjeiro 					MLX5_CQE_INVALIDATE;
115799c12dccSNélio Laranjeiro 				++idx;
115899c12dccSNélio Laranjeiro 			}
115999c12dccSNélio Laranjeiro 			rxq->cq_ci = zip->cq_ci;
116099c12dccSNélio Laranjeiro 			zip->ai = 0;
116199c12dccSNélio Laranjeiro 		}
116299c12dccSNélio Laranjeiro 	/* No compressed data, get next CQE and verify if it is compressed. */
116399c12dccSNélio Laranjeiro 	} else {
116499c12dccSNélio Laranjeiro 		int ret;
116599c12dccSNélio Laranjeiro 		int8_t op_own;
116699c12dccSNélio Laranjeiro 
116799c12dccSNélio Laranjeiro 		ret = check_cqe64(cqe, cqe_n, rxq->cq_ci);
116899c12dccSNélio Laranjeiro 		if (unlikely(ret == 1))
116999c12dccSNélio Laranjeiro 			return 0;
117099c12dccSNélio Laranjeiro 		++rxq->cq_ci;
117199c12dccSNélio Laranjeiro 		op_own = cqe->op_own;
117299c12dccSNélio Laranjeiro 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
117399c12dccSNélio Laranjeiro 			volatile struct mlx5_mini_cqe8 (*mc)[8] =
117499c12dccSNélio Laranjeiro 				(volatile struct mlx5_mini_cqe8 (*)[8])
117599c12dccSNélio Laranjeiro 				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
117699c12dccSNélio Laranjeiro 							  cqe_cnt].cqe64);
117799c12dccSNélio Laranjeiro 
117899c12dccSNélio Laranjeiro 			/* Fix endianness. */
117999c12dccSNélio Laranjeiro 			zip->cqe_cnt = ntohl(cqe->byte_cnt);
118099c12dccSNélio Laranjeiro 			/*
118199c12dccSNélio Laranjeiro 			 * Current mini array position is the one returned by
118299c12dccSNélio Laranjeiro 			 * check_cqe64().
118399c12dccSNélio Laranjeiro 			 *
118499c12dccSNélio Laranjeiro 			 * If completion comprises several mini arrays, as a
118599c12dccSNélio Laranjeiro 			 * special case the second one is located 7 CQEs after
118699c12dccSNélio Laranjeiro 			 * the initial CQE instead of 8 for subsequent ones.
118799c12dccSNélio Laranjeiro 			 */
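			/*
			 * Illustration: with the first mini array at CQE
			 * index A, the next ones are expected at A + 7,
			 * A + 15, A + 23 and so on.
			 */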
118899c12dccSNélio Laranjeiro 			zip->ca = rxq->cq_ci & cqe_cnt;
118999c12dccSNélio Laranjeiro 			zip->na = zip->ca + 7;
119199c12dccSNélio Laranjeiro 			/* Compute the next non-compressed CQE. */
119199c12dccSNélio Laranjeiro 			--rxq->cq_ci;
119299c12dccSNélio Laranjeiro 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
119399c12dccSNélio Laranjeiro 			/* Get packet size to return. */
119499c12dccSNélio Laranjeiro 			len = ntohl((*mc)[0].byte_cnt);
119599c12dccSNélio Laranjeiro 			zip->ai = 1;
119699c12dccSNélio Laranjeiro 		} else {
119799c12dccSNélio Laranjeiro 			len = ntohl(cqe->byte_cnt);
119899c12dccSNélio Laranjeiro 		}
119999c12dccSNélio Laranjeiro 		/* Error while receiving packet. */
120099c12dccSNélio Laranjeiro 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
120199c12dccSNélio Laranjeiro 			return -1;
120299c12dccSNélio Laranjeiro 	}
120399c12dccSNélio Laranjeiro 	return len;
120499c12dccSNélio Laranjeiro }
120599c12dccSNélio Laranjeiro 
120699c12dccSNélio Laranjeiro /**
120767fa62bcSAdrien Mazarguil  * Translate RX completion flags to offload flags.
120867fa62bcSAdrien Mazarguil  *
120967fa62bcSAdrien Mazarguil  * @param[in] rxq
121067fa62bcSAdrien Mazarguil  *   Pointer to RX queue structure.
12116218063bSNélio Laranjeiro  * @param[in] cqe
12126218063bSNélio Laranjeiro  *   Pointer to CQE.
121367fa62bcSAdrien Mazarguil  *
121467fa62bcSAdrien Mazarguil  * @return
121567fa62bcSAdrien Mazarguil  *   Offload flags (ol_flags) for struct rte_mbuf.
121667fa62bcSAdrien Mazarguil  */
121767fa62bcSAdrien Mazarguil static inline uint32_t
12186218063bSNélio Laranjeiro rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
121967fa62bcSAdrien Mazarguil {
122067fa62bcSAdrien Mazarguil 	uint32_t ol_flags = 0;
12216218063bSNélio Laranjeiro 	uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
12226218063bSNélio Laranjeiro 	uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
12236218063bSNélio Laranjeiro 	uint8_t info = cqe->rsvd0[0];
122467fa62bcSAdrien Mazarguil 
12256218063bSNélio Laranjeiro 	if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
12266218063bSNélio Laranjeiro 	    (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
122767fa62bcSAdrien Mazarguil 		ol_flags |=
12286218063bSNélio Laranjeiro 			(!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
1229d0087d76SYaacov Hazan 			 PKT_RX_IP_CKSUM_BAD);
12306218063bSNélio Laranjeiro 	if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
12316218063bSNélio Laranjeiro 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
12326218063bSNélio Laranjeiro 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
12336218063bSNélio Laranjeiro 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
1234d0087d76SYaacov Hazan 		ol_flags |=
12356218063bSNélio Laranjeiro 			(!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
123667fa62bcSAdrien Mazarguil 			 PKT_RX_L4_CKSUM_BAD);
123767fa62bcSAdrien Mazarguil 	/*
123867fa62bcSAdrien Mazarguil 	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
123967fa62bcSAdrien Mazarguil 	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
124067fa62bcSAdrien Mazarguil 	 * (its value is 0).
124167fa62bcSAdrien Mazarguil 	 */
12426218063bSNélio Laranjeiro 	if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
124367fa62bcSAdrien Mazarguil 		ol_flags |=
12446218063bSNélio Laranjeiro 			TRANSPOSE(~cqe->l4_hdr_type_etc,
124567fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
124667fa62bcSAdrien Mazarguil 				  PKT_RX_IP_CKSUM_BAD) |
12476218063bSNélio Laranjeiro 			TRANSPOSE(~cqe->l4_hdr_type_etc,
124867fa62bcSAdrien Mazarguil 				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
124967fa62bcSAdrien Mazarguil 				  PKT_RX_L4_CKSUM_BAD);
125067fa62bcSAdrien Mazarguil 	return ol_flags;
125167fa62bcSAdrien Mazarguil }
125267fa62bcSAdrien Mazarguil 
125367fa62bcSAdrien Mazarguil /**
12542e22920bSAdrien Mazarguil  * DPDK callback for RX.
12552e22920bSAdrien Mazarguil  *
12562e22920bSAdrien Mazarguil  * @param dpdk_rxq
12572e22920bSAdrien Mazarguil  *   Generic pointer to RX queue structure.
12582e22920bSAdrien Mazarguil  * @param[out] pkts
12592e22920bSAdrien Mazarguil  *   Array to store received packets.
12602e22920bSAdrien Mazarguil  * @param pkts_n
12612e22920bSAdrien Mazarguil  *   Maximum number of packets in array.
12622e22920bSAdrien Mazarguil  *
12632e22920bSAdrien Mazarguil  * @return
12642e22920bSAdrien Mazarguil  *   Number of packets successfully received (<= pkts_n).
12652e22920bSAdrien Mazarguil  */
12662e22920bSAdrien Mazarguil uint16_t
12672e22920bSAdrien Mazarguil mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
12682e22920bSAdrien Mazarguil {
12696218063bSNélio Laranjeiro 	struct rxq *rxq = dpdk_rxq;
12709964b965SNélio Laranjeiro 	const unsigned int wqe_cnt = rxq->elts_n - 1;
127199c12dccSNélio Laranjeiro 	const unsigned int cqe_cnt = rxq->cqe_n - 1;
12729964b965SNélio Laranjeiro 	const unsigned int sges_n = rxq->sges_n;
12739964b965SNélio Laranjeiro 	struct rte_mbuf *pkt = NULL;
12749964b965SNélio Laranjeiro 	struct rte_mbuf *seg = NULL;
12756218063bSNélio Laranjeiro 	volatile struct mlx5_cqe64 *cqe =
127699c12dccSNélio Laranjeiro 		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
12779964b965SNélio Laranjeiro 	unsigned int i = 0;
12789964b965SNélio Laranjeiro 	unsigned int rq_ci = rxq->rq_ci << sges_n;
12799964b965SNélio Laranjeiro 	int len;
12802e22920bSAdrien Mazarguil 
12819964b965SNélio Laranjeiro 	while (pkts_n) {
12829964b965SNélio Laranjeiro 		unsigned int idx = rq_ci & wqe_cnt;
12839964b965SNélio Laranjeiro 		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
12849964b965SNélio Laranjeiro 		struct rte_mbuf *rep = (*rxq->elts)[idx];
12859964b965SNélio Laranjeiro 
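		/*
		 * If a packet is already being assembled, link this ring
		 * buffer to it as the next segment.
		 */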
12869964b965SNélio Laranjeiro 		if (pkt)
12879964b965SNélio Laranjeiro 			NEXT(seg) = rep;
12889964b965SNélio Laranjeiro 		seg = rep;
12899964b965SNélio Laranjeiro 		rte_prefetch0(seg);
12906218063bSNélio Laranjeiro 		rte_prefetch0(cqe);
12919964b965SNélio Laranjeiro 		rte_prefetch0(wqe);
1292fbfd9955SOlivier Matz 		rep = rte_mbuf_raw_alloc(rxq->mp);
12932e22920bSAdrien Mazarguil 		if (unlikely(rep == NULL)) {
129415a756b6SSagi Grimberg 			++rxq->stats.rx_nombuf;
129515a756b6SSagi Grimberg 			if (!pkt) {
129615a756b6SSagi Grimberg 				/*
129715a756b6SSagi Grimberg 				 * No buffer was available before the first
129815a756b6SSagi Grimberg 				 * packet was even started, bail out silently.
129915a756b6SSagi Grimberg 				 */
130015a756b6SSagi Grimberg 				break;
130115a756b6SSagi Grimberg 			}
1302a1bdb71aSNélio Laranjeiro 			while (pkt != seg) {
1303a1bdb71aSNélio Laranjeiro 				assert(pkt != (*rxq->elts)[idx]);
13049964b965SNélio Laranjeiro 				seg = NEXT(pkt);
13059964b965SNélio Laranjeiro 				rte_mbuf_refcnt_set(pkt, 0);
13069964b965SNélio Laranjeiro 				__rte_mbuf_raw_free(pkt);
13079964b965SNélio Laranjeiro 				pkt = seg;
13089964b965SNélio Laranjeiro 			}
13096218063bSNélio Laranjeiro 			break;
13102e22920bSAdrien Mazarguil 		}
13119964b965SNélio Laranjeiro 		if (!pkt) {
13129964b965SNélio Laranjeiro 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
131399c12dccSNélio Laranjeiro 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt);
13149964b965SNélio Laranjeiro 			if (len == 0) {
13156218063bSNélio Laranjeiro 				rte_mbuf_refcnt_set(rep, 0);
13166218063bSNélio Laranjeiro 				__rte_mbuf_raw_free(rep);
13176218063bSNélio Laranjeiro 				break;
13186218063bSNélio Laranjeiro 			}
131999c12dccSNélio Laranjeiro 			if (unlikely(len == -1)) {
132099c12dccSNélio Laranjeiro 				/* RX error, packet is likely too large. */
132199c12dccSNélio Laranjeiro 				rte_mbuf_refcnt_set(rep, 0);
132299c12dccSNélio Laranjeiro 				__rte_mbuf_raw_free(rep);
132399c12dccSNélio Laranjeiro 				++rxq->stats.idropped;
132499c12dccSNélio Laranjeiro 				goto skip;
132599c12dccSNélio Laranjeiro 			}
13269964b965SNélio Laranjeiro 			pkt = seg;
13279964b965SNélio Laranjeiro 			assert(len >= (rxq->crc_present << 2));
13289964b965SNélio Laranjeiro 			/* Update packet information. */
13290ac64846SMaxime Leroy 			pkt->packet_type = 0;
13300ac64846SMaxime Leroy 			pkt->ol_flags = 0;
13316218063bSNélio Laranjeiro 			if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
13326218063bSNélio Laranjeiro 			    rxq->crc_present) {
13336218063bSNélio Laranjeiro 				if (rxq->csum) {
13349964b965SNélio Laranjeiro 					pkt->packet_type =
13359964b965SNélio Laranjeiro 						rxq_cq_to_pkt_type(cqe);
13369964b965SNélio Laranjeiro 					pkt->ol_flags =
13379964b965SNélio Laranjeiro 						rxq_cq_to_ol_flags(rxq, cqe);
13386218063bSNélio Laranjeiro 				}
13399964b965SNélio Laranjeiro 				if (cqe->l4_hdr_type_etc &
13409964b965SNélio Laranjeiro 				    MLX5_CQE_VLAN_STRIPPED) {
13416218063bSNélio Laranjeiro 					pkt->ol_flags |= PKT_RX_VLAN_PKT |
1342b37b528dSOlivier Matz 						PKT_RX_VLAN_STRIPPED;
13436218063bSNélio Laranjeiro 					pkt->vlan_tci = ntohs(cqe->vlan_info);
1344f3db9489SYaacov Hazan 				}
13456218063bSNélio Laranjeiro 				if (rxq->crc_present)
13466218063bSNélio Laranjeiro 					len -= ETHER_CRC_LEN;
1347081f7eaeSNelio Laranjeiro 			}
13486218063bSNélio Laranjeiro 			PKT_LEN(pkt) = len;
13499964b965SNélio Laranjeiro 		}
13509964b965SNélio Laranjeiro 		DATA_LEN(rep) = DATA_LEN(seg);
13519964b965SNélio Laranjeiro 		PKT_LEN(rep) = PKT_LEN(seg);
13529964b965SNélio Laranjeiro 		SET_DATA_OFF(rep, DATA_OFF(seg));
13539964b965SNélio Laranjeiro 		NB_SEGS(rep) = NB_SEGS(seg);
13549964b965SNélio Laranjeiro 		PORT(rep) = PORT(seg);
13559964b965SNélio Laranjeiro 		NEXT(rep) = NULL;
13569964b965SNélio Laranjeiro 		(*rxq->elts)[idx] = rep;
13579964b965SNélio Laranjeiro 		/*
13589964b965SNélio Laranjeiro 		 * Fill NIC descriptor with the new buffer.  The lkey and size
13599964b965SNélio Laranjeiro 		 * of the buffers are already known, only the buffer address
13609964b965SNélio Laranjeiro 		 * changes.
13619964b965SNélio Laranjeiro 		 */
13629964b965SNélio Laranjeiro 		wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
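		/*
		 * More data remains for this packet: subtract this segment's
		 * share and continue with the next entry of the stride.
		 */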
13639964b965SNélio Laranjeiro 		if (len > DATA_LEN(seg)) {
13649964b965SNélio Laranjeiro 			len -= DATA_LEN(seg);
13659964b965SNélio Laranjeiro 			++NB_SEGS(pkt);
13669964b965SNélio Laranjeiro 			++rq_ci;
13679964b965SNélio Laranjeiro 			continue;
13689964b965SNélio Laranjeiro 		}
13699964b965SNélio Laranjeiro 		DATA_LEN(seg) = len;
137087011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
137187011737SAdrien Mazarguil 		/* Increment bytes counter. */
13729964b965SNélio Laranjeiro 		rxq->stats.ibytes += PKT_LEN(pkt);
137387011737SAdrien Mazarguil #endif
13746218063bSNélio Laranjeiro 		/* Return packet. */
13756218063bSNélio Laranjeiro 		*(pkts++) = pkt;
13769964b965SNélio Laranjeiro 		pkt = NULL;
13779964b965SNélio Laranjeiro 		--pkts_n;
13789964b965SNélio Laranjeiro 		++i;
137999c12dccSNélio Laranjeiro skip:
13809964b965SNélio Laranjeiro 		/* Align consumer index to the next stride. */
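		/*
		 * E.g. with sges_n = 2 (four entries per stride), an index
		 * of 5 is rounded up to 8, the start of the next stride.
		 */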
13819964b965SNélio Laranjeiro 		rq_ci >>= sges_n;
13826218063bSNélio Laranjeiro 		++rq_ci;
13839964b965SNélio Laranjeiro 		rq_ci <<= sges_n;
13842e22920bSAdrien Mazarguil 	}
13859964b965SNélio Laranjeiro 	if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
13862e22920bSAdrien Mazarguil 		return 0;
13876218063bSNélio Laranjeiro 	/* Update the consumer index. */
13889964b965SNélio Laranjeiro 	rxq->rq_ci = rq_ci >> sges_n;
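	/* Commit WQE updates before writing the doorbell records. */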
13896218063bSNélio Laranjeiro 	rte_wmb();
13906218063bSNélio Laranjeiro 	*rxq->cq_db = htonl(rxq->cq_ci);
13916218063bSNélio Laranjeiro 	rte_wmb();
13926218063bSNélio Laranjeiro 	*rxq->rq_db = htonl(rxq->rq_ci);
139387011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS
139487011737SAdrien Mazarguil 	/* Increment packets counter. */
13959964b965SNélio Laranjeiro 	rxq->stats.ipackets += i;
139687011737SAdrien Mazarguil #endif
13979964b965SNélio Laranjeiro 	return i;
13982e22920bSAdrien Mazarguil }
13992e22920bSAdrien Mazarguil 
14002e22920bSAdrien Mazarguil /**
14012e22920bSAdrien Mazarguil  * Dummy DPDK callback for TX.
14022e22920bSAdrien Mazarguil  *
14032e22920bSAdrien Mazarguil  * This function is used to temporarily replace the real callback during
14042e22920bSAdrien Mazarguil  * unsafe control operations on the queue, or in case of error.
14052e22920bSAdrien Mazarguil  *
14062e22920bSAdrien Mazarguil  * @param dpdk_txq
14072e22920bSAdrien Mazarguil  *   Generic pointer to TX queue structure.
14082e22920bSAdrien Mazarguil  * @param[in] pkts
14092e22920bSAdrien Mazarguil  *   Packets to transmit.
14102e22920bSAdrien Mazarguil  * @param pkts_n
14112e22920bSAdrien Mazarguil  *   Number of packets in array.
14122e22920bSAdrien Mazarguil  *
14132e22920bSAdrien Mazarguil  * @return
14142e22920bSAdrien Mazarguil  *   Number of packets successfully transmitted (<= pkts_n).
14152e22920bSAdrien Mazarguil  */
14162e22920bSAdrien Mazarguil uint16_t
14172e22920bSAdrien Mazarguil removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
14182e22920bSAdrien Mazarguil {
14192e22920bSAdrien Mazarguil 	(void)dpdk_txq;
14202e22920bSAdrien Mazarguil 	(void)pkts;
14212e22920bSAdrien Mazarguil 	(void)pkts_n;
14222e22920bSAdrien Mazarguil 	return 0;
14232e22920bSAdrien Mazarguil }
14242e22920bSAdrien Mazarguil 
14252e22920bSAdrien Mazarguil /**
14262e22920bSAdrien Mazarguil  * Dummy DPDK callback for RX.
14272e22920bSAdrien Mazarguil  *
14282e22920bSAdrien Mazarguil  * This function is used to temporarily replace the real callback during
14292e22920bSAdrien Mazarguil  * unsafe control operations on the queue, or in case of error.
14302e22920bSAdrien Mazarguil  *
14312e22920bSAdrien Mazarguil  * @param dpdk_rxq
14322e22920bSAdrien Mazarguil  *   Generic pointer to RX queue structure.
14332e22920bSAdrien Mazarguil  * @param[out] pkts
14342e22920bSAdrien Mazarguil  *   Array to store received packets.
14352e22920bSAdrien Mazarguil  * @param pkts_n
14362e22920bSAdrien Mazarguil  *   Maximum number of packets in array.
14372e22920bSAdrien Mazarguil  *
14382e22920bSAdrien Mazarguil  * @return
14392e22920bSAdrien Mazarguil  *   Number of packets successfully received (<= pkts_n).
14402e22920bSAdrien Mazarguil  */
14412e22920bSAdrien Mazarguil uint16_t
14422e22920bSAdrien Mazarguil removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
14432e22920bSAdrien Mazarguil {
14442e22920bSAdrien Mazarguil 	(void)dpdk_rxq;
14452e22920bSAdrien Mazarguil 	(void)pkts;
14462e22920bSAdrien Mazarguil 	(void)pkts_n;
14472e22920bSAdrien Mazarguil 	return 0;
14482e22920bSAdrien Mazarguil }
1449