12e22920bSAdrien Mazarguil /*- 22e22920bSAdrien Mazarguil * BSD LICENSE 32e22920bSAdrien Mazarguil * 42e22920bSAdrien Mazarguil * Copyright 2015 6WIND S.A. 52e22920bSAdrien Mazarguil * Copyright 2015 Mellanox. 62e22920bSAdrien Mazarguil * 72e22920bSAdrien Mazarguil * Redistribution and use in source and binary forms, with or without 82e22920bSAdrien Mazarguil * modification, are permitted provided that the following conditions 92e22920bSAdrien Mazarguil * are met: 102e22920bSAdrien Mazarguil * 112e22920bSAdrien Mazarguil * * Redistributions of source code must retain the above copyright 122e22920bSAdrien Mazarguil * notice, this list of conditions and the following disclaimer. 132e22920bSAdrien Mazarguil * * Redistributions in binary form must reproduce the above copyright 142e22920bSAdrien Mazarguil * notice, this list of conditions and the following disclaimer in 152e22920bSAdrien Mazarguil * the documentation and/or other materials provided with the 162e22920bSAdrien Mazarguil * distribution. 172e22920bSAdrien Mazarguil * * Neither the name of 6WIND S.A. nor the names of its 182e22920bSAdrien Mazarguil * contributors may be used to endorse or promote products derived 192e22920bSAdrien Mazarguil * from this software without specific prior written permission. 202e22920bSAdrien Mazarguil * 212e22920bSAdrien Mazarguil * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 222e22920bSAdrien Mazarguil * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 232e22920bSAdrien Mazarguil * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 242e22920bSAdrien Mazarguil * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 252e22920bSAdrien Mazarguil * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 262e22920bSAdrien Mazarguil * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 272e22920bSAdrien Mazarguil * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 282e22920bSAdrien Mazarguil * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 292e22920bSAdrien Mazarguil * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 302e22920bSAdrien Mazarguil * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 312e22920bSAdrien Mazarguil * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 322e22920bSAdrien Mazarguil */ 332e22920bSAdrien Mazarguil 342e22920bSAdrien Mazarguil #include <assert.h> 352e22920bSAdrien Mazarguil #include <stdint.h> 362e22920bSAdrien Mazarguil #include <string.h> 372e22920bSAdrien Mazarguil #include <stdlib.h> 382e22920bSAdrien Mazarguil 392e22920bSAdrien Mazarguil /* Verbs header. */ 402e22920bSAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 412e22920bSAdrien Mazarguil #ifdef PEDANTIC 422e22920bSAdrien Mazarguil #pragma GCC diagnostic ignored "-pedantic" 432e22920bSAdrien Mazarguil #endif 442e22920bSAdrien Mazarguil #include <infiniband/verbs.h> 456218063bSNélio Laranjeiro #include <infiniband/mlx5_hw.h> 466218063bSNélio Laranjeiro #include <infiniband/arch.h> 472e22920bSAdrien Mazarguil #ifdef PEDANTIC 482e22920bSAdrien Mazarguil #pragma GCC diagnostic error "-pedantic" 492e22920bSAdrien Mazarguil #endif 502e22920bSAdrien Mazarguil 512e22920bSAdrien Mazarguil /* DPDK headers don't like -pedantic. */ 522e22920bSAdrien Mazarguil #ifdef PEDANTIC 532e22920bSAdrien Mazarguil #pragma GCC diagnostic ignored "-pedantic" 542e22920bSAdrien Mazarguil #endif 552e22920bSAdrien Mazarguil #include <rte_mbuf.h> 562e22920bSAdrien Mazarguil #include <rte_mempool.h> 572e22920bSAdrien Mazarguil #include <rte_prefetch.h> 582e22920bSAdrien Mazarguil #include <rte_common.h> 592e22920bSAdrien Mazarguil #include <rte_branch_prediction.h> 606218063bSNélio Laranjeiro #include <rte_ether.h> 612e22920bSAdrien Mazarguil #ifdef PEDANTIC 622e22920bSAdrien Mazarguil #pragma GCC diagnostic error "-pedantic" 632e22920bSAdrien Mazarguil #endif 642e22920bSAdrien Mazarguil 652e22920bSAdrien Mazarguil #include "mlx5.h" 662e22920bSAdrien Mazarguil #include "mlx5_utils.h" 672e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 68f3db9489SYaacov Hazan #include "mlx5_autoconf.h" 692e22920bSAdrien Mazarguil #include "mlx5_defs.h" 706218063bSNélio Laranjeiro #include "mlx5_prm.h" 716218063bSNélio Laranjeiro 7299c12dccSNélio Laranjeiro #ifndef NDEBUG 7399c12dccSNélio Laranjeiro 7499c12dccSNélio Laranjeiro /** 7599c12dccSNélio Laranjeiro * Verify or set magic value in CQE. 7699c12dccSNélio Laranjeiro * 7799c12dccSNélio Laranjeiro * @param cqe 7899c12dccSNélio Laranjeiro * Pointer to CQE. 7999c12dccSNélio Laranjeiro * 8099c12dccSNélio Laranjeiro * @return 8199c12dccSNélio Laranjeiro * 0 the first time. 8299c12dccSNélio Laranjeiro */ 8399c12dccSNélio Laranjeiro static inline int 8499c12dccSNélio Laranjeiro check_cqe64_seen(volatile struct mlx5_cqe64 *cqe) 8599c12dccSNélio Laranjeiro { 8699c12dccSNélio Laranjeiro static const uint8_t magic[] = "seen"; 8799c12dccSNélio Laranjeiro volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40; 8899c12dccSNélio Laranjeiro int ret = 1; 8999c12dccSNélio Laranjeiro unsigned int i; 9099c12dccSNélio Laranjeiro 9199c12dccSNélio Laranjeiro for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i) 9299c12dccSNélio Laranjeiro if (!ret || (*buf)[i] != magic[i]) { 9399c12dccSNélio Laranjeiro ret = 0; 9499c12dccSNélio Laranjeiro (*buf)[i] = magic[i]; 9599c12dccSNélio Laranjeiro } 9699c12dccSNélio Laranjeiro return ret; 9799c12dccSNélio Laranjeiro } 9899c12dccSNélio Laranjeiro 9999c12dccSNélio Laranjeiro #endif /* NDEBUG */ 1006218063bSNélio Laranjeiro 1016218063bSNélio Laranjeiro static inline int 10299c12dccSNélio Laranjeiro check_cqe64(volatile struct mlx5_cqe64 *cqe, 10399c12dccSNélio Laranjeiro unsigned int cqes_n, const uint16_t ci) 10499c12dccSNélio Laranjeiro __attribute__((always_inline)); 1056218063bSNélio Laranjeiro 10699c12dccSNélio Laranjeiro /** 10799c12dccSNélio Laranjeiro * Check whether CQE is valid. 10899c12dccSNélio Laranjeiro * 10999c12dccSNélio Laranjeiro * @param cqe 11099c12dccSNélio Laranjeiro * Pointer to CQE. 11199c12dccSNélio Laranjeiro * @param cqes_n 11299c12dccSNélio Laranjeiro * Size of completion queue. 11399c12dccSNélio Laranjeiro * @param ci 11499c12dccSNélio Laranjeiro * Consumer index. 11599c12dccSNélio Laranjeiro * 11699c12dccSNélio Laranjeiro * @return 11799c12dccSNélio Laranjeiro * 0 on success, 1 on failure. 11899c12dccSNélio Laranjeiro */ 11999c12dccSNélio Laranjeiro static inline int 12099c12dccSNélio Laranjeiro check_cqe64(volatile struct mlx5_cqe64 *cqe, 12199c12dccSNélio Laranjeiro unsigned int cqes_n, const uint16_t ci) 1226218063bSNélio Laranjeiro { 12399c12dccSNélio Laranjeiro uint16_t idx = ci & cqes_n; 12499c12dccSNélio Laranjeiro uint8_t op_own = cqe->op_own; 12599c12dccSNélio Laranjeiro uint8_t op_owner = MLX5_CQE_OWNER(op_own); 12699c12dccSNélio Laranjeiro uint8_t op_code = MLX5_CQE_OPCODE(op_own); 1276218063bSNélio Laranjeiro 12899c12dccSNélio Laranjeiro if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID))) 12999c12dccSNélio Laranjeiro return 1; /* No CQE. */ 13099c12dccSNélio Laranjeiro #ifndef NDEBUG 13199c12dccSNélio Laranjeiro if ((op_code == MLX5_CQE_RESP_ERR) || 13299c12dccSNélio Laranjeiro (op_code == MLX5_CQE_REQ_ERR)) { 13399c12dccSNélio Laranjeiro volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe; 13499c12dccSNélio Laranjeiro uint8_t syndrome = err_cqe->syndrome; 13599c12dccSNélio Laranjeiro 13699c12dccSNélio Laranjeiro if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) || 13799c12dccSNélio Laranjeiro (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR)) 13899c12dccSNélio Laranjeiro return 0; 13999c12dccSNélio Laranjeiro if (!check_cqe64_seen(cqe)) 14099c12dccSNélio Laranjeiro ERROR("unexpected CQE error %u (0x%02x)" 14199c12dccSNélio Laranjeiro " syndrome 0x%02x", 14299c12dccSNélio Laranjeiro op_code, op_code, syndrome); 14399c12dccSNélio Laranjeiro return 1; 14499c12dccSNélio Laranjeiro } else if ((op_code != MLX5_CQE_RESP_SEND) && 14599c12dccSNélio Laranjeiro (op_code != MLX5_CQE_REQ)) { 14699c12dccSNélio Laranjeiro if (!check_cqe64_seen(cqe)) 14799c12dccSNélio Laranjeiro ERROR("unexpected CQE opcode %u (0x%02x)", 14899c12dccSNélio Laranjeiro op_code, op_code); 14999c12dccSNélio Laranjeiro return 1; 1506218063bSNélio Laranjeiro } 15199c12dccSNélio Laranjeiro #endif /* NDEBUG */ 15299c12dccSNélio Laranjeiro return 0; 1536218063bSNélio Laranjeiro } 1542e22920bSAdrien Mazarguil 1552e22920bSAdrien Mazarguil /** 1562e22920bSAdrien Mazarguil * Manage TX completions. 1572e22920bSAdrien Mazarguil * 1582e22920bSAdrien Mazarguil * When sending a burst, mlx5_tx_burst() posts several WRs. 1592e22920bSAdrien Mazarguil * 1602e22920bSAdrien Mazarguil * @param txq 1612e22920bSAdrien Mazarguil * Pointer to TX queue structure. 1622e22920bSAdrien Mazarguil */ 1631d88ba17SNélio Laranjeiro static void 1642e22920bSAdrien Mazarguil txq_complete(struct txq *txq) 1652e22920bSAdrien Mazarguil { 1662e22920bSAdrien Mazarguil const unsigned int elts_n = txq->elts_n; 1671d88ba17SNélio Laranjeiro const unsigned int cqe_n = txq->cqe_n; 16899c12dccSNélio Laranjeiro const unsigned int cqe_cnt = cqe_n - 1; 1691d88ba17SNélio Laranjeiro uint16_t elts_free = txq->elts_tail; 1701d88ba17SNélio Laranjeiro uint16_t elts_tail; 1711d88ba17SNélio Laranjeiro uint16_t cq_ci = txq->cq_ci; 172c305090bSAdrien Mazarguil volatile struct mlx5_cqe64 *cqe = NULL; 173c305090bSAdrien Mazarguil volatile union mlx5_wqe *wqe; 1742e22920bSAdrien Mazarguil 17599c12dccSNélio Laranjeiro do { 176c305090bSAdrien Mazarguil volatile struct mlx5_cqe64 *tmp; 1771d88ba17SNélio Laranjeiro 178c305090bSAdrien Mazarguil tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64; 179c305090bSAdrien Mazarguil if (check_cqe64(tmp, cqe_n, cq_ci)) 1801d88ba17SNélio Laranjeiro break; 181c305090bSAdrien Mazarguil cqe = tmp; 18299c12dccSNélio Laranjeiro #ifndef NDEBUG 18399c12dccSNélio Laranjeiro if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) { 18499c12dccSNélio Laranjeiro if (!check_cqe64_seen(cqe)) 18599c12dccSNélio Laranjeiro ERROR("unexpected compressed CQE, TX stopped"); 18699c12dccSNélio Laranjeiro return; 1872e22920bSAdrien Mazarguil } 18899c12dccSNélio Laranjeiro if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) || 18999c12dccSNélio Laranjeiro (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) { 19099c12dccSNélio Laranjeiro if (!check_cqe64_seen(cqe)) 19199c12dccSNélio Laranjeiro ERROR("unexpected error CQE, TX stopped"); 19299c12dccSNélio Laranjeiro return; 19399c12dccSNélio Laranjeiro } 19499c12dccSNélio Laranjeiro #endif /* NDEBUG */ 19599c12dccSNélio Laranjeiro ++cq_ci; 19699c12dccSNélio Laranjeiro } while (1); 197c305090bSAdrien Mazarguil if (unlikely(cqe == NULL)) 1981d88ba17SNélio Laranjeiro return; 199c305090bSAdrien Mazarguil wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)]; 200c305090bSAdrien Mazarguil elts_tail = wqe->wqe.ctrl.data[3]; 201c305090bSAdrien Mazarguil assert(elts_tail < txq->wqe_n); 2021d88ba17SNélio Laranjeiro /* Free buffers. */ 203c305090bSAdrien Mazarguil while (elts_free != elts_tail) { 2041d88ba17SNélio Laranjeiro struct rte_mbuf *elt = (*txq->elts)[elts_free]; 205a859e8a9SNelio Laranjeiro unsigned int elts_free_next = 2061d88ba17SNélio Laranjeiro (elts_free + 1) & (elts_n - 1); 2071d88ba17SNélio Laranjeiro struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next]; 208a859e8a9SNelio Laranjeiro 209b185e63fSAdrien Mazarguil #ifndef NDEBUG 210b185e63fSAdrien Mazarguil /* Poisoning. */ 2111d88ba17SNélio Laranjeiro memset(&(*txq->elts)[elts_free], 2121d88ba17SNélio Laranjeiro 0x66, 2131d88ba17SNélio Laranjeiro sizeof((*txq->elts)[elts_free])); 214b185e63fSAdrien Mazarguil #endif 2151d88ba17SNélio Laranjeiro RTE_MBUF_PREFETCH_TO_FREE(elt_next); 2161d88ba17SNélio Laranjeiro /* Only one segment needs to be freed. */ 2171d88ba17SNélio Laranjeiro rte_pktmbuf_free_seg(elt); 218a859e8a9SNelio Laranjeiro elts_free = elts_free_next; 219c305090bSAdrien Mazarguil } 2201d88ba17SNélio Laranjeiro txq->cq_ci = cq_ci; 2212e22920bSAdrien Mazarguil txq->elts_tail = elts_tail; 2221d88ba17SNélio Laranjeiro /* Update the consumer index. */ 2231d88ba17SNélio Laranjeiro rte_wmb(); 2241d88ba17SNélio Laranjeiro *txq->cq_db = htonl(cq_ci); 2252e22920bSAdrien Mazarguil } 2262e22920bSAdrien Mazarguil 2272e22920bSAdrien Mazarguil /** 2288340392eSAdrien Mazarguil * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which 2298340392eSAdrien Mazarguil * the cloned mbuf is allocated is returned instead. 2308340392eSAdrien Mazarguil * 2318340392eSAdrien Mazarguil * @param buf 2328340392eSAdrien Mazarguil * Pointer to mbuf. 2338340392eSAdrien Mazarguil * 2348340392eSAdrien Mazarguil * @return 2358340392eSAdrien Mazarguil * Memory pool where data is located for given mbuf. 2368340392eSAdrien Mazarguil */ 2378340392eSAdrien Mazarguil static struct rte_mempool * 2388340392eSAdrien Mazarguil txq_mb2mp(struct rte_mbuf *buf) 2398340392eSAdrien Mazarguil { 2408340392eSAdrien Mazarguil if (unlikely(RTE_MBUF_INDIRECT(buf))) 2418340392eSAdrien Mazarguil return rte_mbuf_from_indirect(buf)->pool; 2428340392eSAdrien Mazarguil return buf->pool; 2438340392eSAdrien Mazarguil } 2448340392eSAdrien Mazarguil 245491770faSNélio Laranjeiro static inline uint32_t 246491770faSNélio Laranjeiro txq_mp2mr(struct txq *txq, struct rte_mempool *mp) 247491770faSNélio Laranjeiro __attribute__((always_inline)); 248491770faSNélio Laranjeiro 2498340392eSAdrien Mazarguil /** 2502e22920bSAdrien Mazarguil * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. 2512e22920bSAdrien Mazarguil * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, 2522e22920bSAdrien Mazarguil * remove an entry first. 2532e22920bSAdrien Mazarguil * 2542e22920bSAdrien Mazarguil * @param txq 2552e22920bSAdrien Mazarguil * Pointer to TX queue structure. 2562e22920bSAdrien Mazarguil * @param[in] mp 2572e22920bSAdrien Mazarguil * Memory Pool for which a Memory Region lkey must be returned. 2582e22920bSAdrien Mazarguil * 2592e22920bSAdrien Mazarguil * @return 2602e22920bSAdrien Mazarguil * mr->lkey on success, (uint32_t)-1 on failure. 2612e22920bSAdrien Mazarguil */ 262491770faSNélio Laranjeiro static inline uint32_t 263d1d914ebSOlivier Matz txq_mp2mr(struct txq *txq, struct rte_mempool *mp) 2642e22920bSAdrien Mazarguil { 2652e22920bSAdrien Mazarguil unsigned int i; 266491770faSNélio Laranjeiro uint32_t lkey = (uint32_t)-1; 2672e22920bSAdrien Mazarguil 2682e22920bSAdrien Mazarguil for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { 2692e22920bSAdrien Mazarguil if (unlikely(txq->mp2mr[i].mp == NULL)) { 2702e22920bSAdrien Mazarguil /* Unknown MP, add a new MR for it. */ 2712e22920bSAdrien Mazarguil break; 2722e22920bSAdrien Mazarguil } 2732e22920bSAdrien Mazarguil if (txq->mp2mr[i].mp == mp) { 2742e22920bSAdrien Mazarguil assert(txq->mp2mr[i].lkey != (uint32_t)-1); 2751d88ba17SNélio Laranjeiro assert(htonl(txq->mp2mr[i].mr->lkey) == 2761d88ba17SNélio Laranjeiro txq->mp2mr[i].lkey); 277491770faSNélio Laranjeiro lkey = txq->mp2mr[i].lkey; 278491770faSNélio Laranjeiro break; 2792e22920bSAdrien Mazarguil } 2802e22920bSAdrien Mazarguil } 281491770faSNélio Laranjeiro if (unlikely(lkey == (uint32_t)-1)) 282491770faSNélio Laranjeiro lkey = txq_mp2mr_reg(txq, mp, i); 283491770faSNélio Laranjeiro return lkey; 2840a3b350dSOlga Shern } 2850a3b350dSOlga Shern 286e192ef80SYaacov Hazan /** 2871d88ba17SNélio Laranjeiro * Write a regular WQE. 288e192ef80SYaacov Hazan * 2891d88ba17SNélio Laranjeiro * @param txq 2901d88ba17SNélio Laranjeiro * Pointer to TX queue structure. 2911d88ba17SNélio Laranjeiro * @param wqe 2921d88ba17SNélio Laranjeiro * Pointer to the WQE to fill. 2931d88ba17SNélio Laranjeiro * @param addr 2941d88ba17SNélio Laranjeiro * Buffer data address. 2951d88ba17SNélio Laranjeiro * @param length 2961d88ba17SNélio Laranjeiro * Packet length. 2971d88ba17SNélio Laranjeiro * @param lkey 2981d88ba17SNélio Laranjeiro * Memory region lkey. 299e192ef80SYaacov Hazan */ 3001d88ba17SNélio Laranjeiro static inline void 3011d88ba17SNélio Laranjeiro mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe, 3021d88ba17SNélio Laranjeiro uintptr_t addr, uint32_t length, uint32_t lkey) 303e192ef80SYaacov Hazan { 3041d88ba17SNélio Laranjeiro wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); 3051d88ba17SNélio Laranjeiro wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4); 306*a5bf6af9SAdrien Mazarguil wqe->wqe.ctrl.data[2] = 0; 3071d88ba17SNélio Laranjeiro wqe->wqe.ctrl.data[3] = 0; 3081d88ba17SNélio Laranjeiro wqe->inl.eseg.rsvd0 = 0; 3091d88ba17SNélio Laranjeiro wqe->inl.eseg.rsvd1 = 0; 3101d88ba17SNélio Laranjeiro wqe->inl.eseg.mss = 0; 3111d88ba17SNélio Laranjeiro wqe->inl.eseg.rsvd2 = 0; 3121d88ba17SNélio Laranjeiro wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE); 3131d88ba17SNélio Laranjeiro /* Copy the first 16 bytes into inline header. */ 3141d88ba17SNélio Laranjeiro rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start, 3151d88ba17SNélio Laranjeiro (uint8_t *)(uintptr_t)addr, 3161d88ba17SNélio Laranjeiro MLX5_ETH_INLINE_HEADER_SIZE); 3171d88ba17SNélio Laranjeiro addr += MLX5_ETH_INLINE_HEADER_SIZE; 3181d88ba17SNélio Laranjeiro length -= MLX5_ETH_INLINE_HEADER_SIZE; 3191d88ba17SNélio Laranjeiro /* Store remaining data in data segment. */ 3201d88ba17SNélio Laranjeiro wqe->wqe.dseg.byte_count = htonl(length); 3211d88ba17SNélio Laranjeiro wqe->wqe.dseg.lkey = lkey; 3221d88ba17SNélio Laranjeiro wqe->wqe.dseg.addr = htonll(addr); 3231d88ba17SNélio Laranjeiro /* Increment consumer index. */ 3241d88ba17SNélio Laranjeiro ++txq->wqe_ci; 3251d88ba17SNélio Laranjeiro } 326e192ef80SYaacov Hazan 3271d88ba17SNélio Laranjeiro /** 3281d88ba17SNélio Laranjeiro * Write a regular WQE with VLAN. 3291d88ba17SNélio Laranjeiro * 3301d88ba17SNélio Laranjeiro * @param txq 3311d88ba17SNélio Laranjeiro * Pointer to TX queue structure. 3321d88ba17SNélio Laranjeiro * @param wqe 3331d88ba17SNélio Laranjeiro * Pointer to the WQE to fill. 3341d88ba17SNélio Laranjeiro * @param addr 3351d88ba17SNélio Laranjeiro * Buffer data address. 3361d88ba17SNélio Laranjeiro * @param length 3371d88ba17SNélio Laranjeiro * Packet length. 3381d88ba17SNélio Laranjeiro * @param lkey 3391d88ba17SNélio Laranjeiro * Memory region lkey. 3401d88ba17SNélio Laranjeiro * @param vlan_tci 3411d88ba17SNélio Laranjeiro * VLAN field to insert in packet. 3421d88ba17SNélio Laranjeiro */ 3431d88ba17SNélio Laranjeiro static inline void 3441d88ba17SNélio Laranjeiro mlx5_wqe_write_vlan(struct txq *txq, volatile union mlx5_wqe *wqe, 3451d88ba17SNélio Laranjeiro uintptr_t addr, uint32_t length, uint32_t lkey, 3461d88ba17SNélio Laranjeiro uint16_t vlan_tci) 3471d88ba17SNélio Laranjeiro { 3481d88ba17SNélio Laranjeiro uint32_t vlan = htonl(0x81000000 | vlan_tci); 349e192ef80SYaacov Hazan 3501d88ba17SNélio Laranjeiro wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); 3511d88ba17SNélio Laranjeiro wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4); 352*a5bf6af9SAdrien Mazarguil wqe->wqe.ctrl.data[2] = 0; 3531d88ba17SNélio Laranjeiro wqe->wqe.ctrl.data[3] = 0; 3541d88ba17SNélio Laranjeiro wqe->inl.eseg.rsvd0 = 0; 3551d88ba17SNélio Laranjeiro wqe->inl.eseg.rsvd1 = 0; 3561d88ba17SNélio Laranjeiro wqe->inl.eseg.mss = 0; 3571d88ba17SNélio Laranjeiro wqe->inl.eseg.rsvd2 = 0; 3581d88ba17SNélio Laranjeiro wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE); 3591d88ba17SNélio Laranjeiro /* 3601d88ba17SNélio Laranjeiro * Copy 12 bytes of source & destination MAC address. 3611d88ba17SNélio Laranjeiro * Copy 4 bytes of VLAN. 3621d88ba17SNélio Laranjeiro * Copy 2 bytes of Ether type. 3631d88ba17SNélio Laranjeiro */ 3641d88ba17SNélio Laranjeiro rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start, 3651d88ba17SNélio Laranjeiro (uint8_t *)(uintptr_t)addr, 12); 3661d88ba17SNélio Laranjeiro rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 12), 3671d88ba17SNélio Laranjeiro &vlan, sizeof(vlan)); 3681d88ba17SNélio Laranjeiro rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 16), 3691d88ba17SNélio Laranjeiro (uint8_t *)((uintptr_t)addr + 12), 2); 3701d88ba17SNélio Laranjeiro addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); 3711d88ba17SNélio Laranjeiro length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); 3721d88ba17SNélio Laranjeiro /* Store remaining data in data segment. */ 3731d88ba17SNélio Laranjeiro wqe->wqe.dseg.byte_count = htonl(length); 3741d88ba17SNélio Laranjeiro wqe->wqe.dseg.lkey = lkey; 3751d88ba17SNélio Laranjeiro wqe->wqe.dseg.addr = htonll(addr); 3761d88ba17SNélio Laranjeiro /* Increment consumer index. */ 3771d88ba17SNélio Laranjeiro ++txq->wqe_ci; 3781d88ba17SNélio Laranjeiro } 379e192ef80SYaacov Hazan 3801d88ba17SNélio Laranjeiro /** 3812a66cf37SYaacov Hazan * Write a inline WQE. 3822a66cf37SYaacov Hazan * 3832a66cf37SYaacov Hazan * @param txq 3842a66cf37SYaacov Hazan * Pointer to TX queue structure. 3852a66cf37SYaacov Hazan * @param wqe 3862a66cf37SYaacov Hazan * Pointer to the WQE to fill. 3872a66cf37SYaacov Hazan * @param addr 3882a66cf37SYaacov Hazan * Buffer data address. 3892a66cf37SYaacov Hazan * @param length 3902a66cf37SYaacov Hazan * Packet length. 3912a66cf37SYaacov Hazan * @param lkey 3922a66cf37SYaacov Hazan * Memory region lkey. 3932a66cf37SYaacov Hazan */ 3942a66cf37SYaacov Hazan static inline void 3952a66cf37SYaacov Hazan mlx5_wqe_write_inline(struct txq *txq, volatile union mlx5_wqe *wqe, 3962a66cf37SYaacov Hazan uintptr_t addr, uint32_t length) 3972a66cf37SYaacov Hazan { 3982a66cf37SYaacov Hazan uint32_t size; 3992a66cf37SYaacov Hazan uint16_t wqe_cnt = txq->wqe_n - 1; 4002a66cf37SYaacov Hazan uint16_t wqe_ci = txq->wqe_ci + 1; 4012a66cf37SYaacov Hazan 4022a66cf37SYaacov Hazan /* Copy the first 16 bytes into inline header. */ 4032a66cf37SYaacov Hazan rte_memcpy((void *)(uintptr_t)wqe->inl.eseg.inline_hdr_start, 4042a66cf37SYaacov Hazan (void *)(uintptr_t)addr, 4052a66cf37SYaacov Hazan MLX5_ETH_INLINE_HEADER_SIZE); 4062a66cf37SYaacov Hazan addr += MLX5_ETH_INLINE_HEADER_SIZE; 4072a66cf37SYaacov Hazan length -= MLX5_ETH_INLINE_HEADER_SIZE; 4082a66cf37SYaacov Hazan size = 3 + ((4 + length + 15) / 16); 4092a66cf37SYaacov Hazan wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG); 4102a66cf37SYaacov Hazan rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0], 4112a66cf37SYaacov Hazan (void *)addr, MLX5_WQE64_INL_DATA); 4122a66cf37SYaacov Hazan addr += MLX5_WQE64_INL_DATA; 4132a66cf37SYaacov Hazan length -= MLX5_WQE64_INL_DATA; 4142a66cf37SYaacov Hazan while (length) { 4152a66cf37SYaacov Hazan volatile union mlx5_wqe *wqe_next = 4162a66cf37SYaacov Hazan &(*txq->wqes)[wqe_ci & wqe_cnt]; 4172a66cf37SYaacov Hazan uint32_t copy_bytes = (length > sizeof(*wqe)) ? 4182a66cf37SYaacov Hazan sizeof(*wqe) : 4192a66cf37SYaacov Hazan length; 4202a66cf37SYaacov Hazan 4212a66cf37SYaacov Hazan rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0], 4222a66cf37SYaacov Hazan (uint8_t *)addr); 4232a66cf37SYaacov Hazan addr += copy_bytes; 4242a66cf37SYaacov Hazan length -= copy_bytes; 4252a66cf37SYaacov Hazan ++wqe_ci; 4262a66cf37SYaacov Hazan } 4272a66cf37SYaacov Hazan assert(size < 64); 4282a66cf37SYaacov Hazan wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); 4292a66cf37SYaacov Hazan wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size); 430*a5bf6af9SAdrien Mazarguil wqe->inl.ctrl.data[2] = 0; 4312a66cf37SYaacov Hazan wqe->inl.ctrl.data[3] = 0; 4322a66cf37SYaacov Hazan wqe->inl.eseg.rsvd0 = 0; 4332a66cf37SYaacov Hazan wqe->inl.eseg.rsvd1 = 0; 4342a66cf37SYaacov Hazan wqe->inl.eseg.mss = 0; 4352a66cf37SYaacov Hazan wqe->inl.eseg.rsvd2 = 0; 4362a66cf37SYaacov Hazan wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE); 4372a66cf37SYaacov Hazan /* Increment consumer index. */ 4382a66cf37SYaacov Hazan txq->wqe_ci = wqe_ci; 4392a66cf37SYaacov Hazan } 4402a66cf37SYaacov Hazan 4412a66cf37SYaacov Hazan /** 4422a66cf37SYaacov Hazan * Write a inline WQE with VLAN. 4432a66cf37SYaacov Hazan * 4442a66cf37SYaacov Hazan * @param txq 4452a66cf37SYaacov Hazan * Pointer to TX queue structure. 4462a66cf37SYaacov Hazan * @param wqe 4472a66cf37SYaacov Hazan * Pointer to the WQE to fill. 4482a66cf37SYaacov Hazan * @param addr 4492a66cf37SYaacov Hazan * Buffer data address. 4502a66cf37SYaacov Hazan * @param length 4512a66cf37SYaacov Hazan * Packet length. 4522a66cf37SYaacov Hazan * @param lkey 4532a66cf37SYaacov Hazan * Memory region lkey. 4542a66cf37SYaacov Hazan * @param vlan_tci 4552a66cf37SYaacov Hazan * VLAN field to insert in packet. 4562a66cf37SYaacov Hazan */ 4572a66cf37SYaacov Hazan static inline void 4582a66cf37SYaacov Hazan mlx5_wqe_write_inline_vlan(struct txq *txq, volatile union mlx5_wqe *wqe, 4592a66cf37SYaacov Hazan uintptr_t addr, uint32_t length, uint16_t vlan_tci) 4602a66cf37SYaacov Hazan { 4612a66cf37SYaacov Hazan uint32_t size; 4622a66cf37SYaacov Hazan uint32_t wqe_cnt = txq->wqe_n - 1; 4632a66cf37SYaacov Hazan uint16_t wqe_ci = txq->wqe_ci + 1; 4642a66cf37SYaacov Hazan uint32_t vlan = htonl(0x81000000 | vlan_tci); 4652a66cf37SYaacov Hazan 4662a66cf37SYaacov Hazan /* 4672a66cf37SYaacov Hazan * Copy 12 bytes of source & destination MAC address. 4682a66cf37SYaacov Hazan * Copy 4 bytes of VLAN. 4692a66cf37SYaacov Hazan * Copy 2 bytes of Ether type. 4702a66cf37SYaacov Hazan */ 4712a66cf37SYaacov Hazan rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start, 4722a66cf37SYaacov Hazan (uint8_t *)addr, 12); 4732a66cf37SYaacov Hazan rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 12, 4742a66cf37SYaacov Hazan &vlan, sizeof(vlan)); 4752a66cf37SYaacov Hazan rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 16, 4762a66cf37SYaacov Hazan ((uint8_t *)addr + 12), 2); 4772a66cf37SYaacov Hazan addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); 4782a66cf37SYaacov Hazan length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); 4792a66cf37SYaacov Hazan size = (sizeof(wqe->inl.ctrl.ctrl) + 4802a66cf37SYaacov Hazan sizeof(wqe->inl.eseg) + 4812a66cf37SYaacov Hazan sizeof(wqe->inl.byte_cnt) + 4822a66cf37SYaacov Hazan length + 15) / 16; 4832a66cf37SYaacov Hazan wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG); 4842a66cf37SYaacov Hazan rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0], 4852a66cf37SYaacov Hazan (void *)addr, MLX5_WQE64_INL_DATA); 4862a66cf37SYaacov Hazan addr += MLX5_WQE64_INL_DATA; 4872a66cf37SYaacov Hazan length -= MLX5_WQE64_INL_DATA; 4882a66cf37SYaacov Hazan while (length) { 4892a66cf37SYaacov Hazan volatile union mlx5_wqe *wqe_next = 4902a66cf37SYaacov Hazan &(*txq->wqes)[wqe_ci & wqe_cnt]; 4912a66cf37SYaacov Hazan uint32_t copy_bytes = (length > sizeof(*wqe)) ? 4922a66cf37SYaacov Hazan sizeof(*wqe) : 4932a66cf37SYaacov Hazan length; 4942a66cf37SYaacov Hazan 4952a66cf37SYaacov Hazan rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0], 4962a66cf37SYaacov Hazan (uint8_t *)addr); 4972a66cf37SYaacov Hazan addr += copy_bytes; 4982a66cf37SYaacov Hazan length -= copy_bytes; 4992a66cf37SYaacov Hazan ++wqe_ci; 5002a66cf37SYaacov Hazan } 5012a66cf37SYaacov Hazan assert(size < 64); 5022a66cf37SYaacov Hazan wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); 5032a66cf37SYaacov Hazan wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size); 504*a5bf6af9SAdrien Mazarguil wqe->inl.ctrl.data[2] = 0; 5052a66cf37SYaacov Hazan wqe->inl.ctrl.data[3] = 0; 5062a66cf37SYaacov Hazan wqe->inl.eseg.rsvd0 = 0; 5072a66cf37SYaacov Hazan wqe->inl.eseg.rsvd1 = 0; 5082a66cf37SYaacov Hazan wqe->inl.eseg.mss = 0; 5092a66cf37SYaacov Hazan wqe->inl.eseg.rsvd2 = 0; 5102a66cf37SYaacov Hazan wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE); 5112a66cf37SYaacov Hazan /* Increment consumer index. */ 5122a66cf37SYaacov Hazan txq->wqe_ci = wqe_ci; 5132a66cf37SYaacov Hazan } 5142a66cf37SYaacov Hazan 5152a66cf37SYaacov Hazan /** 5161d88ba17SNélio Laranjeiro * Ring TX queue doorbell. 5171d88ba17SNélio Laranjeiro * 5181d88ba17SNélio Laranjeiro * @param txq 5191d88ba17SNélio Laranjeiro * Pointer to TX queue structure. 5201d88ba17SNélio Laranjeiro */ 5211d88ba17SNélio Laranjeiro static inline void 5221d88ba17SNélio Laranjeiro mlx5_tx_dbrec(struct txq *txq) 5231d88ba17SNélio Laranjeiro { 5241d88ba17SNélio Laranjeiro uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset); 5251d88ba17SNélio Laranjeiro uint32_t data[4] = { 5261d88ba17SNélio Laranjeiro htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), 5271d88ba17SNélio Laranjeiro htonl(txq->qp_num_8s), 5281d88ba17SNélio Laranjeiro 0, 5291d88ba17SNélio Laranjeiro 0, 5301d88ba17SNélio Laranjeiro }; 5311d88ba17SNélio Laranjeiro rte_wmb(); 5321d88ba17SNélio Laranjeiro *txq->qp_db = htonl(txq->wqe_ci); 5331d88ba17SNélio Laranjeiro /* Ensure ordering between DB record and BF copy. */ 5341d88ba17SNélio Laranjeiro rte_wmb(); 5351d88ba17SNélio Laranjeiro rte_mov16(dst, (uint8_t *)data); 5361d88ba17SNélio Laranjeiro txq->bf_offset ^= txq->bf_buf_size; 5371d88ba17SNélio Laranjeiro } 538e192ef80SYaacov Hazan 5391d88ba17SNélio Laranjeiro /** 5401d88ba17SNélio Laranjeiro * Prefetch a CQE. 5411d88ba17SNélio Laranjeiro * 5421d88ba17SNélio Laranjeiro * @param txq 5431d88ba17SNélio Laranjeiro * Pointer to TX queue structure. 5441d88ba17SNélio Laranjeiro * @param cqe_ci 5451d88ba17SNélio Laranjeiro * CQE consumer index. 5461d88ba17SNélio Laranjeiro */ 5471d88ba17SNélio Laranjeiro static inline void 5481d88ba17SNélio Laranjeiro tx_prefetch_cqe(struct txq *txq, uint16_t ci) 5491d88ba17SNélio Laranjeiro { 5501d88ba17SNélio Laranjeiro volatile struct mlx5_cqe64 *cqe; 5511d88ba17SNélio Laranjeiro 5521d88ba17SNélio Laranjeiro cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64; 5531d88ba17SNélio Laranjeiro rte_prefetch0(cqe); 554e192ef80SYaacov Hazan } 555e192ef80SYaacov Hazan 5562e22920bSAdrien Mazarguil /** 5572a66cf37SYaacov Hazan * Prefetch a WQE. 5582a66cf37SYaacov Hazan * 5592a66cf37SYaacov Hazan * @param txq 5602a66cf37SYaacov Hazan * Pointer to TX queue structure. 5612a66cf37SYaacov Hazan * @param wqe_ci 5622a66cf37SYaacov Hazan * WQE consumer index. 5632a66cf37SYaacov Hazan */ 5642a66cf37SYaacov Hazan static inline void 5652a66cf37SYaacov Hazan tx_prefetch_wqe(struct txq *txq, uint16_t ci) 5662a66cf37SYaacov Hazan { 5672a66cf37SYaacov Hazan volatile union mlx5_wqe *wqe; 5682a66cf37SYaacov Hazan 5692a66cf37SYaacov Hazan wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)]; 5702a66cf37SYaacov Hazan rte_prefetch0(wqe); 5712a66cf37SYaacov Hazan } 5722a66cf37SYaacov Hazan 5732a66cf37SYaacov Hazan /** 5742e22920bSAdrien Mazarguil * DPDK callback for TX. 5752e22920bSAdrien Mazarguil * 5762e22920bSAdrien Mazarguil * @param dpdk_txq 5772e22920bSAdrien Mazarguil * Generic pointer to TX queue structure. 5782e22920bSAdrien Mazarguil * @param[in] pkts 5792e22920bSAdrien Mazarguil * Packets to transmit. 5802e22920bSAdrien Mazarguil * @param pkts_n 5812e22920bSAdrien Mazarguil * Number of packets in array. 5822e22920bSAdrien Mazarguil * 5832e22920bSAdrien Mazarguil * @return 5842e22920bSAdrien Mazarguil * Number of packets successfully transmitted (<= pkts_n). 5852e22920bSAdrien Mazarguil */ 5862e22920bSAdrien Mazarguil uint16_t 5872e22920bSAdrien Mazarguil mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 5882e22920bSAdrien Mazarguil { 5892e22920bSAdrien Mazarguil struct txq *txq = (struct txq *)dpdk_txq; 5901d88ba17SNélio Laranjeiro uint16_t elts_head = txq->elts_head; 5912e22920bSAdrien Mazarguil const unsigned int elts_n = txq->elts_n; 592c3d62cc9SAdrien Mazarguil unsigned int i = 0; 593*a5bf6af9SAdrien Mazarguil unsigned int j = 0; 5942e22920bSAdrien Mazarguil unsigned int max; 595c305090bSAdrien Mazarguil unsigned int comp; 596*a5bf6af9SAdrien Mazarguil volatile union mlx5_wqe *wqe = NULL; 5972e22920bSAdrien Mazarguil 5981d88ba17SNélio Laranjeiro if (unlikely(!pkts_n)) 5991d88ba17SNélio Laranjeiro return 0; 6005e1d11ecSNelio Laranjeiro /* Prefetch first packet cacheline. */ 6011d88ba17SNélio Laranjeiro tx_prefetch_cqe(txq, txq->cq_ci); 6021d88ba17SNélio Laranjeiro tx_prefetch_cqe(txq, txq->cq_ci + 1); 603c3d62cc9SAdrien Mazarguil rte_prefetch0(*pkts); 6041d88ba17SNélio Laranjeiro /* Start processing. */ 6052e22920bSAdrien Mazarguil txq_complete(txq); 6064f52bbfbSNelio Laranjeiro max = (elts_n - (elts_head - txq->elts_tail)); 6072e22920bSAdrien Mazarguil if (max > elts_n) 6082e22920bSAdrien Mazarguil max -= elts_n; 609c3d62cc9SAdrien Mazarguil do { 610*a5bf6af9SAdrien Mazarguil struct rte_mbuf *buf = *(pkts++); 611c3d62cc9SAdrien Mazarguil unsigned int elts_head_next; 612573f54afSNélio Laranjeiro uintptr_t addr; 613573f54afSNélio Laranjeiro uint32_t length; 614573f54afSNélio Laranjeiro uint32_t lkey; 615*a5bf6af9SAdrien Mazarguil unsigned int segs_n = buf->nb_segs; 616*a5bf6af9SAdrien Mazarguil volatile struct mlx5_wqe_data_seg *dseg; 617*a5bf6af9SAdrien Mazarguil unsigned int ds = sizeof(*wqe) / 16; 6182e22920bSAdrien Mazarguil 619c3d62cc9SAdrien Mazarguil /* 620c3d62cc9SAdrien Mazarguil * Make sure there is enough room to store this packet and 621c3d62cc9SAdrien Mazarguil * that one ring entry remains unused. 622c3d62cc9SAdrien Mazarguil */ 623*a5bf6af9SAdrien Mazarguil assert(segs_n); 624*a5bf6af9SAdrien Mazarguil if (max < segs_n + 1) 625c3d62cc9SAdrien Mazarguil break; 626*a5bf6af9SAdrien Mazarguil max -= segs_n; 627c3d62cc9SAdrien Mazarguil --pkts_n; 628c3d62cc9SAdrien Mazarguil elts_head_next = (elts_head + 1) & (elts_n - 1); 6291d88ba17SNélio Laranjeiro wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)]; 630*a5bf6af9SAdrien Mazarguil dseg = &wqe->wqe.dseg; 6311d88ba17SNélio Laranjeiro rte_prefetch0(wqe); 632c3d62cc9SAdrien Mazarguil if (pkts_n) 633c3d62cc9SAdrien Mazarguil rte_prefetch0(*pkts); 6342e22920bSAdrien Mazarguil /* Retrieve buffer information. */ 6352e22920bSAdrien Mazarguil addr = rte_pktmbuf_mtod(buf, uintptr_t); 6362e22920bSAdrien Mazarguil length = DATA_LEN(buf); 6372e22920bSAdrien Mazarguil /* Update element. */ 6381d88ba17SNélio Laranjeiro (*txq->elts)[elts_head] = buf; 6395e1d11ecSNelio Laranjeiro /* Prefetch next buffer data. */ 640c3d62cc9SAdrien Mazarguil if (pkts_n) 641c3d62cc9SAdrien Mazarguil rte_prefetch0(rte_pktmbuf_mtod(*pkts, 6421d88ba17SNélio Laranjeiro volatile void *)); 6430431c40fSNélio Laranjeiro /* Retrieve Memory Region key for this memory pool. */ 644d970e992SNelio Laranjeiro lkey = txq_mp2mr(txq, txq_mb2mp(buf)); 6451d88ba17SNélio Laranjeiro if (buf->ol_flags & PKT_TX_VLAN_PKT) 6461d88ba17SNélio Laranjeiro mlx5_wqe_write_vlan(txq, wqe, addr, length, lkey, 6471d88ba17SNélio Laranjeiro buf->vlan_tci); 648e192ef80SYaacov Hazan else 6491d88ba17SNélio Laranjeiro mlx5_wqe_write(txq, wqe, addr, length, lkey); 6501d88ba17SNélio Laranjeiro /* Should we enable HW CKSUM offload */ 6511d88ba17SNélio Laranjeiro if (buf->ol_flags & 6521d88ba17SNélio Laranjeiro (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { 6531d88ba17SNélio Laranjeiro wqe->wqe.eseg.cs_flags = 6541d88ba17SNélio Laranjeiro MLX5_ETH_WQE_L3_CSUM | 6551d88ba17SNélio Laranjeiro MLX5_ETH_WQE_L4_CSUM; 6561d88ba17SNélio Laranjeiro } else { 6571d88ba17SNélio Laranjeiro wqe->wqe.eseg.cs_flags = 0; 6581d88ba17SNélio Laranjeiro } 659*a5bf6af9SAdrien Mazarguil while (--segs_n) { 660*a5bf6af9SAdrien Mazarguil /* 661*a5bf6af9SAdrien Mazarguil * Spill on next WQE when the current one does not have 662*a5bf6af9SAdrien Mazarguil * enough room left. Size of WQE must a be a multiple 663*a5bf6af9SAdrien Mazarguil * of data segment size. 664*a5bf6af9SAdrien Mazarguil */ 665*a5bf6af9SAdrien Mazarguil assert(!(sizeof(*wqe) % sizeof(*dseg))); 666*a5bf6af9SAdrien Mazarguil if (!(ds % (sizeof(*wqe) / 16))) 667*a5bf6af9SAdrien Mazarguil dseg = (volatile void *) 668*a5bf6af9SAdrien Mazarguil &(*txq->wqes)[txq->wqe_ci++ & 669*a5bf6af9SAdrien Mazarguil (txq->wqe_n - 1)]; 670*a5bf6af9SAdrien Mazarguil else 671*a5bf6af9SAdrien Mazarguil ++dseg; 672*a5bf6af9SAdrien Mazarguil ++ds; 673*a5bf6af9SAdrien Mazarguil buf = buf->next; 674*a5bf6af9SAdrien Mazarguil assert(buf); 675*a5bf6af9SAdrien Mazarguil /* Store segment information. */ 676*a5bf6af9SAdrien Mazarguil dseg->byte_count = htonl(DATA_LEN(buf)); 677*a5bf6af9SAdrien Mazarguil dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf)); 678*a5bf6af9SAdrien Mazarguil dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); 679*a5bf6af9SAdrien Mazarguil (*txq->elts)[elts_head_next] = buf; 680*a5bf6af9SAdrien Mazarguil elts_head_next = (elts_head_next + 1) & (elts_n - 1); 681*a5bf6af9SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 682*a5bf6af9SAdrien Mazarguil length += DATA_LEN(buf); 683*a5bf6af9SAdrien Mazarguil #endif 684*a5bf6af9SAdrien Mazarguil ++j; 685*a5bf6af9SAdrien Mazarguil } 686*a5bf6af9SAdrien Mazarguil /* Update DS field in WQE. */ 687*a5bf6af9SAdrien Mazarguil wqe->wqe.ctrl.data[1] &= htonl(0xffffffc0); 688*a5bf6af9SAdrien Mazarguil wqe->wqe.ctrl.data[1] |= htonl(ds & 0x3f); 689*a5bf6af9SAdrien Mazarguil elts_head = elts_head_next; 69087011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 691573f54afSNélio Laranjeiro /* Increment sent bytes counter. */ 692573f54afSNélio Laranjeiro txq->stats.obytes += length; 69387011737SAdrien Mazarguil #endif 6942e22920bSAdrien Mazarguil elts_head = elts_head_next; 695c3d62cc9SAdrien Mazarguil ++i; 696c3d62cc9SAdrien Mazarguil } while (pkts_n); 6972e22920bSAdrien Mazarguil /* Take a shortcut if nothing must be sent. */ 6982e22920bSAdrien Mazarguil if (unlikely(i == 0)) 6992e22920bSAdrien Mazarguil return 0; 700c305090bSAdrien Mazarguil /* Check whether completion threshold has been reached. */ 701*a5bf6af9SAdrien Mazarguil comp = txq->elts_comp + i + j; 702c305090bSAdrien Mazarguil if (comp >= MLX5_TX_COMP_THRESH) { 703c305090bSAdrien Mazarguil /* Request completion on last WQE. */ 704c305090bSAdrien Mazarguil wqe->wqe.ctrl.data[2] = htonl(8); 705c305090bSAdrien Mazarguil /* Save elts_head in unused "immediate" field of WQE. */ 706c305090bSAdrien Mazarguil wqe->wqe.ctrl.data[3] = elts_head; 707c305090bSAdrien Mazarguil txq->elts_comp = 0; 708c305090bSAdrien Mazarguil } else { 709c305090bSAdrien Mazarguil txq->elts_comp = comp; 710c305090bSAdrien Mazarguil } 71187011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 71287011737SAdrien Mazarguil /* Increment sent packets counter. */ 71387011737SAdrien Mazarguil txq->stats.opackets += i; 71487011737SAdrien Mazarguil #endif 7152e22920bSAdrien Mazarguil /* Ring QP doorbell. */ 7161d88ba17SNélio Laranjeiro mlx5_tx_dbrec(txq); 7172e22920bSAdrien Mazarguil txq->elts_head = elts_head; 7182e22920bSAdrien Mazarguil return i; 7192e22920bSAdrien Mazarguil } 7202e22920bSAdrien Mazarguil 7212e22920bSAdrien Mazarguil /** 7222a66cf37SYaacov Hazan * DPDK callback for TX with inline support. 7232a66cf37SYaacov Hazan * 7242a66cf37SYaacov Hazan * @param dpdk_txq 7252a66cf37SYaacov Hazan * Generic pointer to TX queue structure. 7262a66cf37SYaacov Hazan * @param[in] pkts 7272a66cf37SYaacov Hazan * Packets to transmit. 7282a66cf37SYaacov Hazan * @param pkts_n 7292a66cf37SYaacov Hazan * Number of packets in array. 7302a66cf37SYaacov Hazan * 7312a66cf37SYaacov Hazan * @return 7322a66cf37SYaacov Hazan * Number of packets successfully transmitted (<= pkts_n). 7332a66cf37SYaacov Hazan */ 7342a66cf37SYaacov Hazan uint16_t 7352a66cf37SYaacov Hazan mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 7362a66cf37SYaacov Hazan { 7372a66cf37SYaacov Hazan struct txq *txq = (struct txq *)dpdk_txq; 7382a66cf37SYaacov Hazan uint16_t elts_head = txq->elts_head; 7392a66cf37SYaacov Hazan const unsigned int elts_n = txq->elts_n; 740c3d62cc9SAdrien Mazarguil unsigned int i = 0; 741*a5bf6af9SAdrien Mazarguil unsigned int j = 0; 7422a66cf37SYaacov Hazan unsigned int max; 7432a66cf37SYaacov Hazan unsigned int comp; 744*a5bf6af9SAdrien Mazarguil volatile union mlx5_wqe *wqe = NULL; 7452a66cf37SYaacov Hazan unsigned int max_inline = txq->max_inline; 7462a66cf37SYaacov Hazan 7472a66cf37SYaacov Hazan if (unlikely(!pkts_n)) 7482a66cf37SYaacov Hazan return 0; 7492a66cf37SYaacov Hazan /* Prefetch first packet cacheline. */ 7502a66cf37SYaacov Hazan tx_prefetch_cqe(txq, txq->cq_ci); 7512a66cf37SYaacov Hazan tx_prefetch_cqe(txq, txq->cq_ci + 1); 752c3d62cc9SAdrien Mazarguil rte_prefetch0(*pkts); 7532a66cf37SYaacov Hazan /* Start processing. */ 7542a66cf37SYaacov Hazan txq_complete(txq); 7552a66cf37SYaacov Hazan max = (elts_n - (elts_head - txq->elts_tail)); 7562a66cf37SYaacov Hazan if (max > elts_n) 7572a66cf37SYaacov Hazan max -= elts_n; 758c3d62cc9SAdrien Mazarguil do { 759*a5bf6af9SAdrien Mazarguil struct rte_mbuf *buf = *(pkts++); 760c3d62cc9SAdrien Mazarguil unsigned int elts_head_next; 7612a66cf37SYaacov Hazan uintptr_t addr; 7622a66cf37SYaacov Hazan uint32_t length; 7632a66cf37SYaacov Hazan uint32_t lkey; 764*a5bf6af9SAdrien Mazarguil unsigned int segs_n = buf->nb_segs; 765*a5bf6af9SAdrien Mazarguil volatile struct mlx5_wqe_data_seg *dseg; 766*a5bf6af9SAdrien Mazarguil unsigned int ds = sizeof(*wqe) / 16; 7672a66cf37SYaacov Hazan 768c3d62cc9SAdrien Mazarguil /* 769c3d62cc9SAdrien Mazarguil * Make sure there is enough room to store this packet and 770c3d62cc9SAdrien Mazarguil * that one ring entry remains unused. 771c3d62cc9SAdrien Mazarguil */ 772*a5bf6af9SAdrien Mazarguil assert(segs_n); 773*a5bf6af9SAdrien Mazarguil if (max < segs_n + 1) 774c3d62cc9SAdrien Mazarguil break; 775*a5bf6af9SAdrien Mazarguil max -= segs_n; 776c3d62cc9SAdrien Mazarguil --pkts_n; 777c3d62cc9SAdrien Mazarguil elts_head_next = (elts_head + 1) & (elts_n - 1); 7782a66cf37SYaacov Hazan wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)]; 779*a5bf6af9SAdrien Mazarguil dseg = &wqe->wqe.dseg; 7802a66cf37SYaacov Hazan tx_prefetch_wqe(txq, txq->wqe_ci); 7812a66cf37SYaacov Hazan tx_prefetch_wqe(txq, txq->wqe_ci + 1); 782c3d62cc9SAdrien Mazarguil if (pkts_n) 783c3d62cc9SAdrien Mazarguil rte_prefetch0(*pkts); 7842a66cf37SYaacov Hazan /* Should we enable HW CKSUM offload */ 7852a66cf37SYaacov Hazan if (buf->ol_flags & 7862a66cf37SYaacov Hazan (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { 7872a66cf37SYaacov Hazan wqe->inl.eseg.cs_flags = 7882a66cf37SYaacov Hazan MLX5_ETH_WQE_L3_CSUM | 7892a66cf37SYaacov Hazan MLX5_ETH_WQE_L4_CSUM; 7902a66cf37SYaacov Hazan } else { 7912a66cf37SYaacov Hazan wqe->inl.eseg.cs_flags = 0; 7922a66cf37SYaacov Hazan } 7932a66cf37SYaacov Hazan /* Retrieve buffer information. */ 7942a66cf37SYaacov Hazan addr = rte_pktmbuf_mtod(buf, uintptr_t); 7952a66cf37SYaacov Hazan length = DATA_LEN(buf); 7962a66cf37SYaacov Hazan /* Update element. */ 7972a66cf37SYaacov Hazan (*txq->elts)[elts_head] = buf; 7982a66cf37SYaacov Hazan /* Prefetch next buffer data. */ 799c3d62cc9SAdrien Mazarguil if (pkts_n) 800c3d62cc9SAdrien Mazarguil rte_prefetch0(rte_pktmbuf_mtod(*pkts, 8012a66cf37SYaacov Hazan volatile void *)); 802*a5bf6af9SAdrien Mazarguil if ((length <= max_inline) && (segs_n == 1)) { 8032a66cf37SYaacov Hazan if (buf->ol_flags & PKT_TX_VLAN_PKT) 8042a66cf37SYaacov Hazan mlx5_wqe_write_inline_vlan(txq, wqe, 8052a66cf37SYaacov Hazan addr, length, 8062a66cf37SYaacov Hazan buf->vlan_tci); 8072a66cf37SYaacov Hazan else 8082a66cf37SYaacov Hazan mlx5_wqe_write_inline(txq, wqe, addr, length); 809*a5bf6af9SAdrien Mazarguil goto skip_segs; 8102a66cf37SYaacov Hazan } else { 8112a66cf37SYaacov Hazan /* Retrieve Memory Region key for this memory pool. */ 8122a66cf37SYaacov Hazan lkey = txq_mp2mr(txq, txq_mb2mp(buf)); 8132a66cf37SYaacov Hazan if (buf->ol_flags & PKT_TX_VLAN_PKT) 8142a66cf37SYaacov Hazan mlx5_wqe_write_vlan(txq, wqe, addr, length, 8152a66cf37SYaacov Hazan lkey, buf->vlan_tci); 8162a66cf37SYaacov Hazan else 8172a66cf37SYaacov Hazan mlx5_wqe_write(txq, wqe, addr, length, lkey); 8182a66cf37SYaacov Hazan } 819*a5bf6af9SAdrien Mazarguil while (--segs_n) { 820*a5bf6af9SAdrien Mazarguil /* 821*a5bf6af9SAdrien Mazarguil * Spill on next WQE when the current one does not have 822*a5bf6af9SAdrien Mazarguil * enough room left. Size of WQE must a be a multiple 823*a5bf6af9SAdrien Mazarguil * of data segment size. 824*a5bf6af9SAdrien Mazarguil */ 825*a5bf6af9SAdrien Mazarguil assert(!(sizeof(*wqe) % sizeof(*dseg))); 826*a5bf6af9SAdrien Mazarguil if (!(ds % (sizeof(*wqe) / 16))) 827*a5bf6af9SAdrien Mazarguil dseg = (volatile void *) 828*a5bf6af9SAdrien Mazarguil &(*txq->wqes)[txq->wqe_ci++ & 829*a5bf6af9SAdrien Mazarguil (txq->wqe_n - 1)]; 830*a5bf6af9SAdrien Mazarguil else 831*a5bf6af9SAdrien Mazarguil ++dseg; 832*a5bf6af9SAdrien Mazarguil ++ds; 833*a5bf6af9SAdrien Mazarguil buf = buf->next; 834*a5bf6af9SAdrien Mazarguil assert(buf); 835*a5bf6af9SAdrien Mazarguil /* Store segment information. */ 836*a5bf6af9SAdrien Mazarguil dseg->byte_count = htonl(DATA_LEN(buf)); 837*a5bf6af9SAdrien Mazarguil dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf)); 838*a5bf6af9SAdrien Mazarguil dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); 839*a5bf6af9SAdrien Mazarguil (*txq->elts)[elts_head_next] = buf; 840*a5bf6af9SAdrien Mazarguil elts_head_next = (elts_head_next + 1) & (elts_n - 1); 841*a5bf6af9SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 842*a5bf6af9SAdrien Mazarguil length += DATA_LEN(buf); 843*a5bf6af9SAdrien Mazarguil #endif 844*a5bf6af9SAdrien Mazarguil ++j; 845*a5bf6af9SAdrien Mazarguil } 846*a5bf6af9SAdrien Mazarguil /* Update DS field in WQE. */ 847*a5bf6af9SAdrien Mazarguil wqe->inl.ctrl.data[1] &= htonl(0xffffffc0); 848*a5bf6af9SAdrien Mazarguil wqe->inl.ctrl.data[1] |= htonl(ds & 0x3f); 849*a5bf6af9SAdrien Mazarguil skip_segs: 8502a66cf37SYaacov Hazan elts_head = elts_head_next; 8512a66cf37SYaacov Hazan #ifdef MLX5_PMD_SOFT_COUNTERS 8522a66cf37SYaacov Hazan /* Increment sent bytes counter. */ 8532a66cf37SYaacov Hazan txq->stats.obytes += length; 8542a66cf37SYaacov Hazan #endif 855c3d62cc9SAdrien Mazarguil ++i; 856c3d62cc9SAdrien Mazarguil } while (pkts_n); 8572a66cf37SYaacov Hazan /* Take a shortcut if nothing must be sent. */ 8582a66cf37SYaacov Hazan if (unlikely(i == 0)) 8592a66cf37SYaacov Hazan return 0; 8602a66cf37SYaacov Hazan /* Check whether completion threshold has been reached. */ 861*a5bf6af9SAdrien Mazarguil comp = txq->elts_comp + i + j; 8622a66cf37SYaacov Hazan if (comp >= MLX5_TX_COMP_THRESH) { 8632a66cf37SYaacov Hazan /* Request completion on last WQE. */ 8642a66cf37SYaacov Hazan wqe->inl.ctrl.data[2] = htonl(8); 8652a66cf37SYaacov Hazan /* Save elts_head in unused "immediate" field of WQE. */ 8662a66cf37SYaacov Hazan wqe->inl.ctrl.data[3] = elts_head; 8672a66cf37SYaacov Hazan txq->elts_comp = 0; 8682a66cf37SYaacov Hazan } else { 8692a66cf37SYaacov Hazan txq->elts_comp = comp; 8702a66cf37SYaacov Hazan } 8712a66cf37SYaacov Hazan #ifdef MLX5_PMD_SOFT_COUNTERS 8722a66cf37SYaacov Hazan /* Increment sent packets counter. */ 8732a66cf37SYaacov Hazan txq->stats.opackets += i; 8742a66cf37SYaacov Hazan #endif 8752a66cf37SYaacov Hazan /* Ring QP doorbell. */ 8762a66cf37SYaacov Hazan mlx5_tx_dbrec(txq); 8772a66cf37SYaacov Hazan txq->elts_head = elts_head; 8782a66cf37SYaacov Hazan return i; 8792a66cf37SYaacov Hazan } 8802a66cf37SYaacov Hazan 8812a66cf37SYaacov Hazan /** 882230189d9SNélio Laranjeiro * Open a MPW session. 883230189d9SNélio Laranjeiro * 884230189d9SNélio Laranjeiro * @param txq 885230189d9SNélio Laranjeiro * Pointer to TX queue structure. 886230189d9SNélio Laranjeiro * @param mpw 887230189d9SNélio Laranjeiro * Pointer to MPW session structure. 888230189d9SNélio Laranjeiro * @param length 889230189d9SNélio Laranjeiro * Packet length. 890230189d9SNélio Laranjeiro */ 891230189d9SNélio Laranjeiro static inline void 892230189d9SNélio Laranjeiro mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) 893230189d9SNélio Laranjeiro { 894230189d9SNélio Laranjeiro uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1); 895230189d9SNélio Laranjeiro volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] = 896230189d9SNélio Laranjeiro (volatile struct mlx5_wqe_data_seg (*)[]) 897230189d9SNélio Laranjeiro (uintptr_t)&(*txq->wqes)[(idx + 1) & (txq->wqe_n - 1)]; 898230189d9SNélio Laranjeiro 899230189d9SNélio Laranjeiro mpw->state = MLX5_MPW_STATE_OPENED; 900230189d9SNélio Laranjeiro mpw->pkts_n = 0; 901230189d9SNélio Laranjeiro mpw->len = length; 902230189d9SNélio Laranjeiro mpw->total_len = 0; 903230189d9SNélio Laranjeiro mpw->wqe = &(*txq->wqes)[idx]; 904230189d9SNélio Laranjeiro mpw->wqe->mpw.eseg.mss = htons(length); 905230189d9SNélio Laranjeiro mpw->wqe->mpw.eseg.inline_hdr_sz = 0; 906230189d9SNélio Laranjeiro mpw->wqe->mpw.eseg.rsvd0 = 0; 907230189d9SNélio Laranjeiro mpw->wqe->mpw.eseg.rsvd1 = 0; 908230189d9SNélio Laranjeiro mpw->wqe->mpw.eseg.rsvd2 = 0; 909230189d9SNélio Laranjeiro mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) | 910230189d9SNélio Laranjeiro (txq->wqe_ci << 8) | 911230189d9SNélio Laranjeiro MLX5_OPCODE_LSO_MPW); 912230189d9SNélio Laranjeiro mpw->wqe->mpw.ctrl.data[2] = 0; 913230189d9SNélio Laranjeiro mpw->wqe->mpw.ctrl.data[3] = 0; 914230189d9SNélio Laranjeiro mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0]; 915230189d9SNélio Laranjeiro mpw->data.dseg[1] = &mpw->wqe->mpw.dseg[1]; 916230189d9SNélio Laranjeiro mpw->data.dseg[2] = &(*dseg)[0]; 917230189d9SNélio Laranjeiro mpw->data.dseg[3] = &(*dseg)[1]; 918230189d9SNélio Laranjeiro mpw->data.dseg[4] = &(*dseg)[2]; 919230189d9SNélio Laranjeiro } 920230189d9SNélio Laranjeiro 921230189d9SNélio Laranjeiro /** 922230189d9SNélio Laranjeiro * Close a MPW session. 923230189d9SNélio Laranjeiro * 924230189d9SNélio Laranjeiro * @param txq 925230189d9SNélio Laranjeiro * Pointer to TX queue structure. 926230189d9SNélio Laranjeiro * @param mpw 927230189d9SNélio Laranjeiro * Pointer to MPW session structure. 928230189d9SNélio Laranjeiro */ 929230189d9SNélio Laranjeiro static inline void 930230189d9SNélio Laranjeiro mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw) 931230189d9SNélio Laranjeiro { 932230189d9SNélio Laranjeiro unsigned int num = mpw->pkts_n; 933230189d9SNélio Laranjeiro 934230189d9SNélio Laranjeiro /* 935230189d9SNélio Laranjeiro * Store size in multiple of 16 bytes. Control and Ethernet segments 936230189d9SNélio Laranjeiro * count as 2. 937230189d9SNélio Laranjeiro */ 938230189d9SNélio Laranjeiro mpw->wqe->mpw.ctrl.data[1] = htonl(txq->qp_num_8s | (2 + num)); 939230189d9SNélio Laranjeiro mpw->state = MLX5_MPW_STATE_CLOSED; 940230189d9SNélio Laranjeiro if (num < 3) 941230189d9SNélio Laranjeiro ++txq->wqe_ci; 942230189d9SNélio Laranjeiro else 943230189d9SNélio Laranjeiro txq->wqe_ci += 2; 944230189d9SNélio Laranjeiro tx_prefetch_wqe(txq, txq->wqe_ci); 945230189d9SNélio Laranjeiro tx_prefetch_wqe(txq, txq->wqe_ci + 1); 946230189d9SNélio Laranjeiro } 947230189d9SNélio Laranjeiro 948230189d9SNélio Laranjeiro /** 949230189d9SNélio Laranjeiro * DPDK callback for TX with MPW support. 950230189d9SNélio Laranjeiro * 951230189d9SNélio Laranjeiro * @param dpdk_txq 952230189d9SNélio Laranjeiro * Generic pointer to TX queue structure. 953230189d9SNélio Laranjeiro * @param[in] pkts 954230189d9SNélio Laranjeiro * Packets to transmit. 955230189d9SNélio Laranjeiro * @param pkts_n 956230189d9SNélio Laranjeiro * Number of packets in array. 957230189d9SNélio Laranjeiro * 958230189d9SNélio Laranjeiro * @return 959230189d9SNélio Laranjeiro * Number of packets successfully transmitted (<= pkts_n). 960230189d9SNélio Laranjeiro */ 961230189d9SNélio Laranjeiro uint16_t 962230189d9SNélio Laranjeiro mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 963230189d9SNélio Laranjeiro { 964230189d9SNélio Laranjeiro struct txq *txq = (struct txq *)dpdk_txq; 965230189d9SNélio Laranjeiro uint16_t elts_head = txq->elts_head; 966230189d9SNélio Laranjeiro const unsigned int elts_n = txq->elts_n; 967c3d62cc9SAdrien Mazarguil unsigned int i = 0; 968*a5bf6af9SAdrien Mazarguil unsigned int j = 0; 969230189d9SNélio Laranjeiro unsigned int max; 970230189d9SNélio Laranjeiro unsigned int comp; 971230189d9SNélio Laranjeiro struct mlx5_mpw mpw = { 972230189d9SNélio Laranjeiro .state = MLX5_MPW_STATE_CLOSED, 973230189d9SNélio Laranjeiro }; 974230189d9SNélio Laranjeiro 975c3d62cc9SAdrien Mazarguil if (unlikely(!pkts_n)) 976c3d62cc9SAdrien Mazarguil return 0; 977230189d9SNélio Laranjeiro /* Prefetch first packet cacheline. */ 978230189d9SNélio Laranjeiro tx_prefetch_cqe(txq, txq->cq_ci); 979230189d9SNélio Laranjeiro tx_prefetch_wqe(txq, txq->wqe_ci); 980230189d9SNélio Laranjeiro tx_prefetch_wqe(txq, txq->wqe_ci + 1); 981230189d9SNélio Laranjeiro /* Start processing. */ 982230189d9SNélio Laranjeiro txq_complete(txq); 983230189d9SNélio Laranjeiro max = (elts_n - (elts_head - txq->elts_tail)); 984230189d9SNélio Laranjeiro if (max > elts_n) 985230189d9SNélio Laranjeiro max -= elts_n; 986c3d62cc9SAdrien Mazarguil do { 987*a5bf6af9SAdrien Mazarguil struct rte_mbuf *buf = *(pkts++); 988c3d62cc9SAdrien Mazarguil unsigned int elts_head_next; 989230189d9SNélio Laranjeiro uint32_t length; 990*a5bf6af9SAdrien Mazarguil unsigned int segs_n = buf->nb_segs; 991230189d9SNélio Laranjeiro uint32_t cs_flags = 0; 992230189d9SNélio Laranjeiro 993c3d62cc9SAdrien Mazarguil /* 994c3d62cc9SAdrien Mazarguil * Make sure there is enough room to store this packet and 995c3d62cc9SAdrien Mazarguil * that one ring entry remains unused. 996c3d62cc9SAdrien Mazarguil */ 997*a5bf6af9SAdrien Mazarguil assert(segs_n); 998*a5bf6af9SAdrien Mazarguil if (max < segs_n + 1) 999c3d62cc9SAdrien Mazarguil break; 1000*a5bf6af9SAdrien Mazarguil /* Do not bother with large packets MPW cannot handle. */ 1001*a5bf6af9SAdrien Mazarguil if (segs_n > MLX5_MPW_DSEG_MAX) 1002*a5bf6af9SAdrien Mazarguil break; 1003*a5bf6af9SAdrien Mazarguil max -= segs_n; 1004c3d62cc9SAdrien Mazarguil --pkts_n; 1005230189d9SNélio Laranjeiro /* Should we enable HW CKSUM offload */ 1006230189d9SNélio Laranjeiro if (buf->ol_flags & 1007230189d9SNélio Laranjeiro (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) 1008230189d9SNélio Laranjeiro cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; 1009*a5bf6af9SAdrien Mazarguil /* Retrieve packet information. */ 1010*a5bf6af9SAdrien Mazarguil length = PKT_LEN(buf); 1011*a5bf6af9SAdrien Mazarguil assert(length); 1012230189d9SNélio Laranjeiro /* Start new session if packet differs. */ 1013230189d9SNélio Laranjeiro if ((mpw.state == MLX5_MPW_STATE_OPENED) && 1014230189d9SNélio Laranjeiro ((mpw.len != length) || 1015*a5bf6af9SAdrien Mazarguil (segs_n != 1) || 1016230189d9SNélio Laranjeiro (mpw.wqe->mpw.eseg.cs_flags != cs_flags))) 1017230189d9SNélio Laranjeiro mlx5_mpw_close(txq, &mpw); 1018230189d9SNélio Laranjeiro if (mpw.state == MLX5_MPW_STATE_CLOSED) { 1019230189d9SNélio Laranjeiro mlx5_mpw_new(txq, &mpw, length); 1020230189d9SNélio Laranjeiro mpw.wqe->mpw.eseg.cs_flags = cs_flags; 1021230189d9SNélio Laranjeiro } 1022*a5bf6af9SAdrien Mazarguil /* Multi-segment packets must be alone in their MPW. */ 1023*a5bf6af9SAdrien Mazarguil assert((segs_n == 1) || (mpw.pkts_n == 0)); 1024*a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) 1025*a5bf6af9SAdrien Mazarguil length = 0; 1026*a5bf6af9SAdrien Mazarguil #endif 1027*a5bf6af9SAdrien Mazarguil do { 1028*a5bf6af9SAdrien Mazarguil volatile struct mlx5_wqe_data_seg *dseg; 1029*a5bf6af9SAdrien Mazarguil uintptr_t addr; 1030*a5bf6af9SAdrien Mazarguil 1031*a5bf6af9SAdrien Mazarguil elts_head_next = (elts_head + 1) & (elts_n - 1); 1032*a5bf6af9SAdrien Mazarguil assert(buf); 1033*a5bf6af9SAdrien Mazarguil (*txq->elts)[elts_head] = buf; 1034230189d9SNélio Laranjeiro dseg = mpw.data.dseg[mpw.pkts_n]; 1035*a5bf6af9SAdrien Mazarguil addr = rte_pktmbuf_mtod(buf, uintptr_t); 1036230189d9SNélio Laranjeiro *dseg = (struct mlx5_wqe_data_seg){ 1037*a5bf6af9SAdrien Mazarguil .byte_count = htonl(DATA_LEN(buf)), 1038230189d9SNélio Laranjeiro .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), 1039230189d9SNélio Laranjeiro .addr = htonll(addr), 1040230189d9SNélio Laranjeiro }; 1041*a5bf6af9SAdrien Mazarguil elts_head = elts_head_next; 1042*a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) 1043*a5bf6af9SAdrien Mazarguil length += DATA_LEN(buf); 1044*a5bf6af9SAdrien Mazarguil #endif 1045*a5bf6af9SAdrien Mazarguil buf = buf->next; 1046230189d9SNélio Laranjeiro ++mpw.pkts_n; 1047*a5bf6af9SAdrien Mazarguil ++j; 1048*a5bf6af9SAdrien Mazarguil } while (--segs_n); 1049*a5bf6af9SAdrien Mazarguil assert(length == mpw.len); 1050230189d9SNélio Laranjeiro if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) 1051230189d9SNélio Laranjeiro mlx5_mpw_close(txq, &mpw); 1052230189d9SNélio Laranjeiro elts_head = elts_head_next; 1053230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS 1054230189d9SNélio Laranjeiro /* Increment sent bytes counter. */ 1055230189d9SNélio Laranjeiro txq->stats.obytes += length; 1056230189d9SNélio Laranjeiro #endif 1057c3d62cc9SAdrien Mazarguil ++i; 1058c3d62cc9SAdrien Mazarguil } while (pkts_n); 1059230189d9SNélio Laranjeiro /* Take a shortcut if nothing must be sent. */ 1060230189d9SNélio Laranjeiro if (unlikely(i == 0)) 1061230189d9SNélio Laranjeiro return 0; 1062230189d9SNélio Laranjeiro /* Check whether completion threshold has been reached. */ 1063*a5bf6af9SAdrien Mazarguil /* "j" includes both packets and segments. */ 1064*a5bf6af9SAdrien Mazarguil comp = txq->elts_comp + j; 1065230189d9SNélio Laranjeiro if (comp >= MLX5_TX_COMP_THRESH) { 1066230189d9SNélio Laranjeiro volatile union mlx5_wqe *wqe = mpw.wqe; 1067230189d9SNélio Laranjeiro 1068230189d9SNélio Laranjeiro /* Request completion on last WQE. */ 1069230189d9SNélio Laranjeiro wqe->mpw.ctrl.data[2] = htonl(8); 1070230189d9SNélio Laranjeiro /* Save elts_head in unused "immediate" field of WQE. */ 1071230189d9SNélio Laranjeiro wqe->mpw.ctrl.data[3] = elts_head; 1072230189d9SNélio Laranjeiro txq->elts_comp = 0; 1073230189d9SNélio Laranjeiro } else { 1074230189d9SNélio Laranjeiro txq->elts_comp = comp; 1075230189d9SNélio Laranjeiro } 1076230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS 1077230189d9SNélio Laranjeiro /* Increment sent packets counter. */ 1078230189d9SNélio Laranjeiro txq->stats.opackets += i; 1079230189d9SNélio Laranjeiro #endif 1080230189d9SNélio Laranjeiro /* Ring QP doorbell. */ 1081230189d9SNélio Laranjeiro if (mpw.state == MLX5_MPW_STATE_OPENED) 1082230189d9SNélio Laranjeiro mlx5_mpw_close(txq, &mpw); 1083230189d9SNélio Laranjeiro mlx5_tx_dbrec(txq); 1084230189d9SNélio Laranjeiro txq->elts_head = elts_head; 1085230189d9SNélio Laranjeiro return i; 1086230189d9SNélio Laranjeiro } 1087230189d9SNélio Laranjeiro 1088230189d9SNélio Laranjeiro /** 1089230189d9SNélio Laranjeiro * Open a MPW inline session. 1090230189d9SNélio Laranjeiro * 1091230189d9SNélio Laranjeiro * @param txq 1092230189d9SNélio Laranjeiro * Pointer to TX queue structure. 1093230189d9SNélio Laranjeiro * @param mpw 1094230189d9SNélio Laranjeiro * Pointer to MPW session structure. 1095230189d9SNélio Laranjeiro * @param length 1096230189d9SNélio Laranjeiro * Packet length. 1097230189d9SNélio Laranjeiro */ 1098230189d9SNélio Laranjeiro static inline void 1099230189d9SNélio Laranjeiro mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) 1100230189d9SNélio Laranjeiro { 1101230189d9SNélio Laranjeiro uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1); 1102230189d9SNélio Laranjeiro 1103230189d9SNélio Laranjeiro mpw->state = MLX5_MPW_INL_STATE_OPENED; 1104230189d9SNélio Laranjeiro mpw->pkts_n = 0; 1105230189d9SNélio Laranjeiro mpw->len = length; 1106230189d9SNélio Laranjeiro mpw->total_len = 0; 1107230189d9SNélio Laranjeiro mpw->wqe = &(*txq->wqes)[idx]; 1108230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) | 1109230189d9SNélio Laranjeiro (txq->wqe_ci << 8) | 1110230189d9SNélio Laranjeiro MLX5_OPCODE_LSO_MPW); 1111230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.ctrl.data[2] = 0; 1112230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.ctrl.data[3] = 0; 1113230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.eseg.mss = htons(length); 1114230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.eseg.inline_hdr_sz = 0; 1115230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.eseg.cs_flags = 0; 1116230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.eseg.rsvd0 = 0; 1117230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.eseg.rsvd1 = 0; 1118230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.eseg.rsvd2 = 0; 1119230189d9SNélio Laranjeiro mpw->data.raw = &mpw->wqe->mpw_inl.data[0]; 1120230189d9SNélio Laranjeiro } 1121230189d9SNélio Laranjeiro 1122230189d9SNélio Laranjeiro /** 1123230189d9SNélio Laranjeiro * Close a MPW inline session. 1124230189d9SNélio Laranjeiro * 1125230189d9SNélio Laranjeiro * @param txq 1126230189d9SNélio Laranjeiro * Pointer to TX queue structure. 1127230189d9SNélio Laranjeiro * @param mpw 1128230189d9SNélio Laranjeiro * Pointer to MPW session structure. 1129230189d9SNélio Laranjeiro */ 1130230189d9SNélio Laranjeiro static inline void 1131230189d9SNélio Laranjeiro mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw) 1132230189d9SNélio Laranjeiro { 1133230189d9SNélio Laranjeiro unsigned int size; 1134230189d9SNélio Laranjeiro 1135230189d9SNélio Laranjeiro size = sizeof(*mpw->wqe) - MLX5_MWQE64_INL_DATA + mpw->total_len; 1136230189d9SNélio Laranjeiro /* 1137230189d9SNélio Laranjeiro * Store size in multiple of 16 bytes. Control and Ethernet segments 1138230189d9SNélio Laranjeiro * count as 2. 1139230189d9SNélio Laranjeiro */ 1140230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.ctrl.data[1] = 1141230189d9SNélio Laranjeiro htonl(txq->qp_num_8s | ((size + 15) / 16)); 1142230189d9SNélio Laranjeiro mpw->state = MLX5_MPW_STATE_CLOSED; 1143230189d9SNélio Laranjeiro mpw->wqe->mpw_inl.byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); 1144230189d9SNélio Laranjeiro txq->wqe_ci += (size + (sizeof(*mpw->wqe) - 1)) / sizeof(*mpw->wqe); 1145230189d9SNélio Laranjeiro } 1146230189d9SNélio Laranjeiro 1147230189d9SNélio Laranjeiro /** 1148230189d9SNélio Laranjeiro * DPDK callback for TX with MPW inline support. 1149230189d9SNélio Laranjeiro * 1150230189d9SNélio Laranjeiro * @param dpdk_txq 1151230189d9SNélio Laranjeiro * Generic pointer to TX queue structure. 1152230189d9SNélio Laranjeiro * @param[in] pkts 1153230189d9SNélio Laranjeiro * Packets to transmit. 1154230189d9SNélio Laranjeiro * @param pkts_n 1155230189d9SNélio Laranjeiro * Number of packets in array. 1156230189d9SNélio Laranjeiro * 1157230189d9SNélio Laranjeiro * @return 1158230189d9SNélio Laranjeiro * Number of packets successfully transmitted (<= pkts_n). 1159230189d9SNélio Laranjeiro */ 1160230189d9SNélio Laranjeiro uint16_t 1161230189d9SNélio Laranjeiro mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, 1162230189d9SNélio Laranjeiro uint16_t pkts_n) 1163230189d9SNélio Laranjeiro { 1164230189d9SNélio Laranjeiro struct txq *txq = (struct txq *)dpdk_txq; 1165230189d9SNélio Laranjeiro uint16_t elts_head = txq->elts_head; 1166230189d9SNélio Laranjeiro const unsigned int elts_n = txq->elts_n; 1167c3d62cc9SAdrien Mazarguil unsigned int i = 0; 1168*a5bf6af9SAdrien Mazarguil unsigned int j = 0; 1169230189d9SNélio Laranjeiro unsigned int max; 1170230189d9SNélio Laranjeiro unsigned int comp; 1171230189d9SNélio Laranjeiro unsigned int inline_room = txq->max_inline; 1172230189d9SNélio Laranjeiro struct mlx5_mpw mpw = { 1173230189d9SNélio Laranjeiro .state = MLX5_MPW_STATE_CLOSED, 1174230189d9SNélio Laranjeiro }; 1175230189d9SNélio Laranjeiro 1176c3d62cc9SAdrien Mazarguil if (unlikely(!pkts_n)) 1177c3d62cc9SAdrien Mazarguil return 0; 1178230189d9SNélio Laranjeiro /* Prefetch first packet cacheline. */ 1179230189d9SNélio Laranjeiro tx_prefetch_cqe(txq, txq->cq_ci); 1180230189d9SNélio Laranjeiro tx_prefetch_wqe(txq, txq->wqe_ci); 1181230189d9SNélio Laranjeiro tx_prefetch_wqe(txq, txq->wqe_ci + 1); 1182230189d9SNélio Laranjeiro /* Start processing. */ 1183230189d9SNélio Laranjeiro txq_complete(txq); 1184230189d9SNélio Laranjeiro max = (elts_n - (elts_head - txq->elts_tail)); 1185230189d9SNélio Laranjeiro if (max > elts_n) 1186230189d9SNélio Laranjeiro max -= elts_n; 1187c3d62cc9SAdrien Mazarguil do { 1188*a5bf6af9SAdrien Mazarguil struct rte_mbuf *buf = *(pkts++); 1189c3d62cc9SAdrien Mazarguil unsigned int elts_head_next; 1190230189d9SNélio Laranjeiro uintptr_t addr; 1191230189d9SNélio Laranjeiro uint32_t length; 1192*a5bf6af9SAdrien Mazarguil unsigned int segs_n = buf->nb_segs; 1193230189d9SNélio Laranjeiro uint32_t cs_flags = 0; 1194230189d9SNélio Laranjeiro 1195c3d62cc9SAdrien Mazarguil /* 1196c3d62cc9SAdrien Mazarguil * Make sure there is enough room to store this packet and 1197c3d62cc9SAdrien Mazarguil * that one ring entry remains unused. 1198c3d62cc9SAdrien Mazarguil */ 1199*a5bf6af9SAdrien Mazarguil assert(segs_n); 1200*a5bf6af9SAdrien Mazarguil if (max < segs_n + 1) 1201c3d62cc9SAdrien Mazarguil break; 1202*a5bf6af9SAdrien Mazarguil /* Do not bother with large packets MPW cannot handle. */ 1203*a5bf6af9SAdrien Mazarguil if (segs_n > MLX5_MPW_DSEG_MAX) 1204*a5bf6af9SAdrien Mazarguil break; 1205*a5bf6af9SAdrien Mazarguil max -= segs_n; 1206c3d62cc9SAdrien Mazarguil --pkts_n; 1207230189d9SNélio Laranjeiro /* Should we enable HW CKSUM offload */ 1208230189d9SNélio Laranjeiro if (buf->ol_flags & 1209230189d9SNélio Laranjeiro (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) 1210230189d9SNélio Laranjeiro cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; 1211*a5bf6af9SAdrien Mazarguil /* Retrieve packet information. */ 1212*a5bf6af9SAdrien Mazarguil length = PKT_LEN(buf); 1213230189d9SNélio Laranjeiro /* Start new session if packet differs. */ 1214230189d9SNélio Laranjeiro if (mpw.state == MLX5_MPW_STATE_OPENED) { 1215230189d9SNélio Laranjeiro if ((mpw.len != length) || 1216*a5bf6af9SAdrien Mazarguil (segs_n != 1) || 1217230189d9SNélio Laranjeiro (mpw.wqe->mpw.eseg.cs_flags != cs_flags)) 1218230189d9SNélio Laranjeiro mlx5_mpw_close(txq, &mpw); 1219230189d9SNélio Laranjeiro } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) { 1220230189d9SNélio Laranjeiro if ((mpw.len != length) || 1221*a5bf6af9SAdrien Mazarguil (segs_n != 1) || 1222230189d9SNélio Laranjeiro (length > inline_room) || 1223230189d9SNélio Laranjeiro (mpw.wqe->mpw_inl.eseg.cs_flags != cs_flags)) { 1224230189d9SNélio Laranjeiro mlx5_mpw_inline_close(txq, &mpw); 1225230189d9SNélio Laranjeiro inline_room = txq->max_inline; 1226230189d9SNélio Laranjeiro } 1227230189d9SNélio Laranjeiro } 1228230189d9SNélio Laranjeiro if (mpw.state == MLX5_MPW_STATE_CLOSED) { 1229*a5bf6af9SAdrien Mazarguil if ((segs_n != 1) || 1230*a5bf6af9SAdrien Mazarguil (length > inline_room)) { 1231230189d9SNélio Laranjeiro mlx5_mpw_new(txq, &mpw, length); 1232230189d9SNélio Laranjeiro mpw.wqe->mpw.eseg.cs_flags = cs_flags; 1233230189d9SNélio Laranjeiro } else { 1234230189d9SNélio Laranjeiro mlx5_mpw_inline_new(txq, &mpw, length); 1235230189d9SNélio Laranjeiro mpw.wqe->mpw_inl.eseg.cs_flags = cs_flags; 1236230189d9SNélio Laranjeiro } 1237230189d9SNélio Laranjeiro } 1238*a5bf6af9SAdrien Mazarguil /* Multi-segment packets must be alone in their MPW. */ 1239*a5bf6af9SAdrien Mazarguil assert((segs_n == 1) || (mpw.pkts_n == 0)); 1240230189d9SNélio Laranjeiro if (mpw.state == MLX5_MPW_STATE_OPENED) { 1241*a5bf6af9SAdrien Mazarguil assert(inline_room == txq->max_inline); 1242*a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) 1243*a5bf6af9SAdrien Mazarguil length = 0; 1244*a5bf6af9SAdrien Mazarguil #endif 1245*a5bf6af9SAdrien Mazarguil do { 1246230189d9SNélio Laranjeiro volatile struct mlx5_wqe_data_seg *dseg; 1247230189d9SNélio Laranjeiro 1248*a5bf6af9SAdrien Mazarguil elts_head_next = 1249*a5bf6af9SAdrien Mazarguil (elts_head + 1) & (elts_n - 1); 1250*a5bf6af9SAdrien Mazarguil assert(buf); 1251*a5bf6af9SAdrien Mazarguil (*txq->elts)[elts_head] = buf; 1252230189d9SNélio Laranjeiro dseg = mpw.data.dseg[mpw.pkts_n]; 1253*a5bf6af9SAdrien Mazarguil addr = rte_pktmbuf_mtod(buf, uintptr_t); 1254230189d9SNélio Laranjeiro *dseg = (struct mlx5_wqe_data_seg){ 1255*a5bf6af9SAdrien Mazarguil .byte_count = htonl(DATA_LEN(buf)), 1256230189d9SNélio Laranjeiro .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), 1257230189d9SNélio Laranjeiro .addr = htonll(addr), 1258230189d9SNélio Laranjeiro }; 1259*a5bf6af9SAdrien Mazarguil elts_head = elts_head_next; 1260*a5bf6af9SAdrien Mazarguil #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) 1261*a5bf6af9SAdrien Mazarguil length += DATA_LEN(buf); 1262*a5bf6af9SAdrien Mazarguil #endif 1263*a5bf6af9SAdrien Mazarguil buf = buf->next; 1264230189d9SNélio Laranjeiro ++mpw.pkts_n; 1265*a5bf6af9SAdrien Mazarguil ++j; 1266*a5bf6af9SAdrien Mazarguil } while (--segs_n); 1267*a5bf6af9SAdrien Mazarguil assert(length == mpw.len); 1268230189d9SNélio Laranjeiro if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) 1269230189d9SNélio Laranjeiro mlx5_mpw_close(txq, &mpw); 1270230189d9SNélio Laranjeiro } else { 1271230189d9SNélio Laranjeiro unsigned int max; 1272230189d9SNélio Laranjeiro 1273230189d9SNélio Laranjeiro assert(mpw.state == MLX5_MPW_INL_STATE_OPENED); 1274230189d9SNélio Laranjeiro assert(length <= inline_room); 1275*a5bf6af9SAdrien Mazarguil assert(length == DATA_LEN(buf)); 1276*a5bf6af9SAdrien Mazarguil elts_head_next = (elts_head + 1) & (elts_n - 1); 1277*a5bf6af9SAdrien Mazarguil addr = rte_pktmbuf_mtod(buf, uintptr_t); 1278*a5bf6af9SAdrien Mazarguil (*txq->elts)[elts_head] = buf; 1279230189d9SNélio Laranjeiro /* Maximum number of bytes before wrapping. */ 1280230189d9SNélio Laranjeiro max = ((uintptr_t)&(*txq->wqes)[txq->wqe_n] - 1281230189d9SNélio Laranjeiro (uintptr_t)mpw.data.raw); 1282230189d9SNélio Laranjeiro if (length > max) { 1283230189d9SNélio Laranjeiro rte_memcpy((void *)(uintptr_t)mpw.data.raw, 1284230189d9SNélio Laranjeiro (void *)addr, 1285230189d9SNélio Laranjeiro max); 1286230189d9SNélio Laranjeiro mpw.data.raw = 1287230189d9SNélio Laranjeiro (volatile void *)&(*txq->wqes)[0]; 1288230189d9SNélio Laranjeiro rte_memcpy((void *)(uintptr_t)mpw.data.raw, 1289230189d9SNélio Laranjeiro (void *)(addr + max), 1290230189d9SNélio Laranjeiro length - max); 1291230189d9SNélio Laranjeiro mpw.data.raw += length - max; 1292230189d9SNélio Laranjeiro } else { 1293230189d9SNélio Laranjeiro rte_memcpy((void *)(uintptr_t)mpw.data.raw, 1294230189d9SNélio Laranjeiro (void *)addr, 1295230189d9SNélio Laranjeiro length); 1296230189d9SNélio Laranjeiro mpw.data.raw += length; 1297230189d9SNélio Laranjeiro } 1298230189d9SNélio Laranjeiro if ((uintptr_t)mpw.data.raw == 1299230189d9SNélio Laranjeiro (uintptr_t)&(*txq->wqes)[txq->wqe_n]) 1300230189d9SNélio Laranjeiro mpw.data.raw = 1301230189d9SNélio Laranjeiro (volatile void *)&(*txq->wqes)[0]; 1302230189d9SNélio Laranjeiro ++mpw.pkts_n; 1303*a5bf6af9SAdrien Mazarguil ++j; 1304230189d9SNélio Laranjeiro if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) { 1305230189d9SNélio Laranjeiro mlx5_mpw_inline_close(txq, &mpw); 1306230189d9SNélio Laranjeiro inline_room = txq->max_inline; 1307230189d9SNélio Laranjeiro } else { 1308230189d9SNélio Laranjeiro inline_room -= length; 1309230189d9SNélio Laranjeiro } 1310230189d9SNélio Laranjeiro } 1311230189d9SNélio Laranjeiro mpw.total_len += length; 1312230189d9SNélio Laranjeiro elts_head = elts_head_next; 1313230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS 1314230189d9SNélio Laranjeiro /* Increment sent bytes counter. */ 1315230189d9SNélio Laranjeiro txq->stats.obytes += length; 1316230189d9SNélio Laranjeiro #endif 1317c3d62cc9SAdrien Mazarguil ++i; 1318c3d62cc9SAdrien Mazarguil } while (pkts_n); 1319230189d9SNélio Laranjeiro /* Take a shortcut if nothing must be sent. */ 1320230189d9SNélio Laranjeiro if (unlikely(i == 0)) 1321230189d9SNélio Laranjeiro return 0; 1322230189d9SNélio Laranjeiro /* Check whether completion threshold has been reached. */ 1323*a5bf6af9SAdrien Mazarguil /* "j" includes both packets and segments. */ 1324*a5bf6af9SAdrien Mazarguil comp = txq->elts_comp + j; 1325230189d9SNélio Laranjeiro if (comp >= MLX5_TX_COMP_THRESH) { 1326230189d9SNélio Laranjeiro volatile union mlx5_wqe *wqe = mpw.wqe; 1327230189d9SNélio Laranjeiro 1328230189d9SNélio Laranjeiro /* Request completion on last WQE. */ 1329230189d9SNélio Laranjeiro wqe->mpw_inl.ctrl.data[2] = htonl(8); 1330230189d9SNélio Laranjeiro /* Save elts_head in unused "immediate" field of WQE. */ 1331230189d9SNélio Laranjeiro wqe->mpw_inl.ctrl.data[3] = elts_head; 1332230189d9SNélio Laranjeiro txq->elts_comp = 0; 1333230189d9SNélio Laranjeiro } else { 1334230189d9SNélio Laranjeiro txq->elts_comp = comp; 1335230189d9SNélio Laranjeiro } 1336230189d9SNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS 1337230189d9SNélio Laranjeiro /* Increment sent packets counter. */ 1338230189d9SNélio Laranjeiro txq->stats.opackets += i; 1339230189d9SNélio Laranjeiro #endif 1340230189d9SNélio Laranjeiro /* Ring QP doorbell. */ 1341230189d9SNélio Laranjeiro if (mpw.state == MLX5_MPW_INL_STATE_OPENED) 1342230189d9SNélio Laranjeiro mlx5_mpw_inline_close(txq, &mpw); 1343230189d9SNélio Laranjeiro else if (mpw.state == MLX5_MPW_STATE_OPENED) 1344230189d9SNélio Laranjeiro mlx5_mpw_close(txq, &mpw); 1345230189d9SNélio Laranjeiro mlx5_tx_dbrec(txq); 1346230189d9SNélio Laranjeiro txq->elts_head = elts_head; 1347230189d9SNélio Laranjeiro return i; 1348230189d9SNélio Laranjeiro } 1349230189d9SNélio Laranjeiro 1350230189d9SNélio Laranjeiro /** 135167fa62bcSAdrien Mazarguil * Translate RX completion flags to packet type. 135267fa62bcSAdrien Mazarguil * 13536218063bSNélio Laranjeiro * @param[in] cqe 13546218063bSNélio Laranjeiro * Pointer to CQE. 135567fa62bcSAdrien Mazarguil * 135678a38edfSJianfeng Tan * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 135778a38edfSJianfeng Tan * 135867fa62bcSAdrien Mazarguil * @return 135967fa62bcSAdrien Mazarguil * Packet type for struct rte_mbuf. 136067fa62bcSAdrien Mazarguil */ 136167fa62bcSAdrien Mazarguil static inline uint32_t 13626218063bSNélio Laranjeiro rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe) 136367fa62bcSAdrien Mazarguil { 136467fa62bcSAdrien Mazarguil uint32_t pkt_type; 13656218063bSNélio Laranjeiro uint8_t flags = cqe->l4_hdr_type_etc; 13666218063bSNélio Laranjeiro uint8_t info = cqe->rsvd0[0]; 136767fa62bcSAdrien Mazarguil 13686218063bSNélio Laranjeiro if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET) 136967fa62bcSAdrien Mazarguil pkt_type = 137067fa62bcSAdrien Mazarguil TRANSPOSE(flags, 137167fa62bcSAdrien Mazarguil IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, 137267fa62bcSAdrien Mazarguil RTE_PTYPE_L3_IPV4) | 137367fa62bcSAdrien Mazarguil TRANSPOSE(flags, 137467fa62bcSAdrien Mazarguil IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, 137567fa62bcSAdrien Mazarguil RTE_PTYPE_L3_IPV6) | 137667fa62bcSAdrien Mazarguil TRANSPOSE(flags, 137767fa62bcSAdrien Mazarguil IBV_EXP_CQ_RX_IPV4_PACKET, 137867fa62bcSAdrien Mazarguil RTE_PTYPE_INNER_L3_IPV4) | 137967fa62bcSAdrien Mazarguil TRANSPOSE(flags, 138067fa62bcSAdrien Mazarguil IBV_EXP_CQ_RX_IPV6_PACKET, 138167fa62bcSAdrien Mazarguil RTE_PTYPE_INNER_L3_IPV6); 138267fa62bcSAdrien Mazarguil else 138367fa62bcSAdrien Mazarguil pkt_type = 138467fa62bcSAdrien Mazarguil TRANSPOSE(flags, 13856218063bSNélio Laranjeiro MLX5_CQE_L3_HDR_TYPE_IPV6, 13866218063bSNélio Laranjeiro RTE_PTYPE_L3_IPV6) | 138767fa62bcSAdrien Mazarguil TRANSPOSE(flags, 13886218063bSNélio Laranjeiro MLX5_CQE_L3_HDR_TYPE_IPV4, 13896218063bSNélio Laranjeiro RTE_PTYPE_L3_IPV4); 139067fa62bcSAdrien Mazarguil return pkt_type; 139167fa62bcSAdrien Mazarguil } 139267fa62bcSAdrien Mazarguil 139367fa62bcSAdrien Mazarguil /** 139499c12dccSNélio Laranjeiro * Get size of the next packet for a given CQE. For compressed CQEs, the 139599c12dccSNélio Laranjeiro * consumer index is updated only once all packets of the current one have 139699c12dccSNélio Laranjeiro * been processed. 139799c12dccSNélio Laranjeiro * 139899c12dccSNélio Laranjeiro * @param rxq 139999c12dccSNélio Laranjeiro * Pointer to RX queue. 140099c12dccSNélio Laranjeiro * @param cqe 140199c12dccSNélio Laranjeiro * CQE to process. 140299c12dccSNélio Laranjeiro * 140399c12dccSNélio Laranjeiro * @return 140499c12dccSNélio Laranjeiro * Packet size in bytes (0 if there is none), -1 in case of completion 140599c12dccSNélio Laranjeiro * with error. 140699c12dccSNélio Laranjeiro */ 140799c12dccSNélio Laranjeiro static inline int 140899c12dccSNélio Laranjeiro mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe, 140999c12dccSNélio Laranjeiro uint16_t cqe_cnt) 141099c12dccSNélio Laranjeiro { 141199c12dccSNélio Laranjeiro struct rxq_zip *zip = &rxq->zip; 141299c12dccSNélio Laranjeiro uint16_t cqe_n = cqe_cnt + 1; 141399c12dccSNélio Laranjeiro int len = 0; 141499c12dccSNélio Laranjeiro 141599c12dccSNélio Laranjeiro /* Process compressed data in the CQE and mini arrays. */ 141699c12dccSNélio Laranjeiro if (zip->ai) { 141799c12dccSNélio Laranjeiro volatile struct mlx5_mini_cqe8 (*mc)[8] = 141899c12dccSNélio Laranjeiro (volatile struct mlx5_mini_cqe8 (*)[8]) 141999c12dccSNélio Laranjeiro (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64); 142099c12dccSNélio Laranjeiro 142199c12dccSNélio Laranjeiro len = ntohl((*mc)[zip->ai & 7].byte_cnt); 142299c12dccSNélio Laranjeiro if ((++zip->ai & 7) == 0) { 142399c12dccSNélio Laranjeiro /* 142499c12dccSNélio Laranjeiro * Increment consumer index to skip the number of 142599c12dccSNélio Laranjeiro * CQEs consumed. Hardware leaves holes in the CQ 142699c12dccSNélio Laranjeiro * ring for software use. 142799c12dccSNélio Laranjeiro */ 142899c12dccSNélio Laranjeiro zip->ca = zip->na; 142999c12dccSNélio Laranjeiro zip->na += 8; 143099c12dccSNélio Laranjeiro } 143199c12dccSNélio Laranjeiro if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { 143299c12dccSNélio Laranjeiro uint16_t idx = rxq->cq_ci; 143399c12dccSNélio Laranjeiro uint16_t end = zip->cq_ci; 143499c12dccSNélio Laranjeiro 143599c12dccSNélio Laranjeiro while (idx != end) { 143699c12dccSNélio Laranjeiro (*rxq->cqes)[idx & cqe_cnt].cqe64.op_own = 143799c12dccSNélio Laranjeiro MLX5_CQE_INVALIDATE; 143899c12dccSNélio Laranjeiro ++idx; 143999c12dccSNélio Laranjeiro } 144099c12dccSNélio Laranjeiro rxq->cq_ci = zip->cq_ci; 144199c12dccSNélio Laranjeiro zip->ai = 0; 144299c12dccSNélio Laranjeiro } 144399c12dccSNélio Laranjeiro /* No compressed data, get next CQE and verify if it is compressed. */ 144499c12dccSNélio Laranjeiro } else { 144599c12dccSNélio Laranjeiro int ret; 144699c12dccSNélio Laranjeiro int8_t op_own; 144799c12dccSNélio Laranjeiro 144899c12dccSNélio Laranjeiro ret = check_cqe64(cqe, cqe_n, rxq->cq_ci); 144999c12dccSNélio Laranjeiro if (unlikely(ret == 1)) 145099c12dccSNélio Laranjeiro return 0; 145199c12dccSNélio Laranjeiro ++rxq->cq_ci; 145299c12dccSNélio Laranjeiro op_own = cqe->op_own; 145399c12dccSNélio Laranjeiro if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { 145499c12dccSNélio Laranjeiro volatile struct mlx5_mini_cqe8 (*mc)[8] = 145599c12dccSNélio Laranjeiro (volatile struct mlx5_mini_cqe8 (*)[8]) 145699c12dccSNélio Laranjeiro (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci & 145799c12dccSNélio Laranjeiro cqe_cnt].cqe64); 145899c12dccSNélio Laranjeiro 145999c12dccSNélio Laranjeiro /* Fix endianness. */ 146099c12dccSNélio Laranjeiro zip->cqe_cnt = ntohl(cqe->byte_cnt); 146199c12dccSNélio Laranjeiro /* 146299c12dccSNélio Laranjeiro * Current mini array position is the one returned by 146399c12dccSNélio Laranjeiro * check_cqe64(). 146499c12dccSNélio Laranjeiro * 146599c12dccSNélio Laranjeiro * If completion comprises several mini arrays, as a 146699c12dccSNélio Laranjeiro * special case the second one is located 7 CQEs after 146799c12dccSNélio Laranjeiro * the initial CQE instead of 8 for subsequent ones. 146899c12dccSNélio Laranjeiro */ 146999c12dccSNélio Laranjeiro zip->ca = rxq->cq_ci & cqe_cnt; 147099c12dccSNélio Laranjeiro zip->na = zip->ca + 7; 147199c12dccSNélio Laranjeiro /* Compute the next non compressed CQE. */ 147299c12dccSNélio Laranjeiro --rxq->cq_ci; 147399c12dccSNélio Laranjeiro zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; 147499c12dccSNélio Laranjeiro /* Get packet size to return. */ 147599c12dccSNélio Laranjeiro len = ntohl((*mc)[0].byte_cnt); 147699c12dccSNélio Laranjeiro zip->ai = 1; 147799c12dccSNélio Laranjeiro } else { 147899c12dccSNélio Laranjeiro len = ntohl(cqe->byte_cnt); 147999c12dccSNélio Laranjeiro } 148099c12dccSNélio Laranjeiro /* Error while receiving packet. */ 148199c12dccSNélio Laranjeiro if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) 148299c12dccSNélio Laranjeiro return -1; 148399c12dccSNélio Laranjeiro } 148499c12dccSNélio Laranjeiro return len; 148599c12dccSNélio Laranjeiro } 148699c12dccSNélio Laranjeiro 148799c12dccSNélio Laranjeiro /** 148867fa62bcSAdrien Mazarguil * Translate RX completion flags to offload flags. 148967fa62bcSAdrien Mazarguil * 149067fa62bcSAdrien Mazarguil * @param[in] rxq 149167fa62bcSAdrien Mazarguil * Pointer to RX queue structure. 14926218063bSNélio Laranjeiro * @param[in] cqe 14936218063bSNélio Laranjeiro * Pointer to CQE. 149467fa62bcSAdrien Mazarguil * 149567fa62bcSAdrien Mazarguil * @return 149667fa62bcSAdrien Mazarguil * Offload flags (ol_flags) for struct rte_mbuf. 149767fa62bcSAdrien Mazarguil */ 149867fa62bcSAdrien Mazarguil static inline uint32_t 14996218063bSNélio Laranjeiro rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe) 150067fa62bcSAdrien Mazarguil { 150167fa62bcSAdrien Mazarguil uint32_t ol_flags = 0; 15026218063bSNélio Laranjeiro uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK; 15036218063bSNélio Laranjeiro uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK; 15046218063bSNélio Laranjeiro uint8_t info = cqe->rsvd0[0]; 150567fa62bcSAdrien Mazarguil 15066218063bSNélio Laranjeiro if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) || 15076218063bSNélio Laranjeiro (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6)) 150867fa62bcSAdrien Mazarguil ol_flags |= 15096218063bSNélio Laranjeiro (!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) * 1510d0087d76SYaacov Hazan PKT_RX_IP_CKSUM_BAD); 15116218063bSNélio Laranjeiro if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) || 15126218063bSNélio Laranjeiro (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) || 15136218063bSNélio Laranjeiro (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) || 15146218063bSNélio Laranjeiro (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP)) 1515d0087d76SYaacov Hazan ol_flags |= 15166218063bSNélio Laranjeiro (!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) * 151767fa62bcSAdrien Mazarguil PKT_RX_L4_CKSUM_BAD); 151867fa62bcSAdrien Mazarguil /* 151967fa62bcSAdrien Mazarguil * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place 152067fa62bcSAdrien Mazarguil * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional 152167fa62bcSAdrien Mazarguil * (its value is 0). 152267fa62bcSAdrien Mazarguil */ 15236218063bSNélio Laranjeiro if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) 152467fa62bcSAdrien Mazarguil ol_flags |= 15256218063bSNélio Laranjeiro TRANSPOSE(~cqe->l4_hdr_type_etc, 152667fa62bcSAdrien Mazarguil IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK, 152767fa62bcSAdrien Mazarguil PKT_RX_IP_CKSUM_BAD) | 15286218063bSNélio Laranjeiro TRANSPOSE(~cqe->l4_hdr_type_etc, 152967fa62bcSAdrien Mazarguil IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK, 153067fa62bcSAdrien Mazarguil PKT_RX_L4_CKSUM_BAD); 153167fa62bcSAdrien Mazarguil return ol_flags; 153267fa62bcSAdrien Mazarguil } 153367fa62bcSAdrien Mazarguil 153467fa62bcSAdrien Mazarguil /** 15352e22920bSAdrien Mazarguil * DPDK callback for RX. 15362e22920bSAdrien Mazarguil * 15372e22920bSAdrien Mazarguil * @param dpdk_rxq 15382e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 15392e22920bSAdrien Mazarguil * @param[out] pkts 15402e22920bSAdrien Mazarguil * Array to store received packets. 15412e22920bSAdrien Mazarguil * @param pkts_n 15422e22920bSAdrien Mazarguil * Maximum number of packets in array. 15432e22920bSAdrien Mazarguil * 15442e22920bSAdrien Mazarguil * @return 15452e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 15462e22920bSAdrien Mazarguil */ 15472e22920bSAdrien Mazarguil uint16_t 15482e22920bSAdrien Mazarguil mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 15492e22920bSAdrien Mazarguil { 15506218063bSNélio Laranjeiro struct rxq *rxq = dpdk_rxq; 15512e22920bSAdrien Mazarguil unsigned int pkts_ret = 0; 15526218063bSNélio Laranjeiro unsigned int i; 15536218063bSNélio Laranjeiro unsigned int rq_ci = rxq->rq_ci; 15546218063bSNélio Laranjeiro const unsigned int elts_n = rxq->elts_n; 15556218063bSNélio Laranjeiro const unsigned int wqe_cnt = elts_n - 1; 155699c12dccSNélio Laranjeiro const unsigned int cqe_cnt = rxq->cqe_n - 1; 15572e22920bSAdrien Mazarguil 15582e22920bSAdrien Mazarguil for (i = 0; (i != pkts_n); ++i) { 15596218063bSNélio Laranjeiro unsigned int idx = rq_ci & wqe_cnt; 156099c12dccSNélio Laranjeiro int len; 15612e22920bSAdrien Mazarguil struct rte_mbuf *rep; 15626218063bSNélio Laranjeiro struct rte_mbuf *pkt; 15636218063bSNélio Laranjeiro volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx]; 15646218063bSNélio Laranjeiro volatile struct mlx5_cqe64 *cqe = 156599c12dccSNélio Laranjeiro &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64; 15662e22920bSAdrien Mazarguil 15676218063bSNélio Laranjeiro pkt = (*rxq->elts)[idx]; 15686218063bSNélio Laranjeiro rte_prefetch0(cqe); 1569fbfd9955SOlivier Matz rep = rte_mbuf_raw_alloc(rxq->mp); 15702e22920bSAdrien Mazarguil if (unlikely(rep == NULL)) { 157187011737SAdrien Mazarguil ++rxq->stats.rx_nombuf; 15726218063bSNélio Laranjeiro break; 15732e22920bSAdrien Mazarguil } 15746218063bSNélio Laranjeiro SET_DATA_OFF(rep, RTE_PKTMBUF_HEADROOM); 15756218063bSNélio Laranjeiro NB_SEGS(rep) = 1; 15766218063bSNélio Laranjeiro PORT(rep) = rxq->port_id; 15776218063bSNélio Laranjeiro NEXT(rep) = NULL; 157899c12dccSNélio Laranjeiro len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt); 15796218063bSNélio Laranjeiro if (unlikely(len == 0)) { 15806218063bSNélio Laranjeiro rte_mbuf_refcnt_set(rep, 0); 15816218063bSNélio Laranjeiro __rte_mbuf_raw_free(rep); 15826218063bSNélio Laranjeiro break; 15836218063bSNélio Laranjeiro } 158499c12dccSNélio Laranjeiro if (unlikely(len == -1)) { 158599c12dccSNélio Laranjeiro /* RX error, packet is likely too large. */ 158699c12dccSNélio Laranjeiro rte_mbuf_refcnt_set(rep, 0); 158799c12dccSNélio Laranjeiro __rte_mbuf_raw_free(rep); 158899c12dccSNélio Laranjeiro ++rxq->stats.idropped; 158999c12dccSNélio Laranjeiro --i; 159099c12dccSNélio Laranjeiro goto skip; 159199c12dccSNélio Laranjeiro } 15926218063bSNélio Laranjeiro /* 15936218063bSNélio Laranjeiro * Fill NIC descriptor with the new buffer. The lkey and size 15946218063bSNélio Laranjeiro * of the buffers are already known, only the buffer address 15956218063bSNélio Laranjeiro * changes. 15966218063bSNélio Laranjeiro */ 15976218063bSNélio Laranjeiro wqe->addr = htonll((uintptr_t)rep->buf_addr + 15986218063bSNélio Laranjeiro RTE_PKTMBUF_HEADROOM); 15996218063bSNélio Laranjeiro (*rxq->elts)[idx] = rep; 16006218063bSNélio Laranjeiro /* Update pkt information. */ 16016218063bSNélio Laranjeiro if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip | 16026218063bSNélio Laranjeiro rxq->crc_present) { 16036218063bSNélio Laranjeiro if (rxq->csum) { 16046218063bSNélio Laranjeiro pkt->packet_type = rxq_cq_to_pkt_type(cqe); 16056218063bSNélio Laranjeiro pkt->ol_flags = rxq_cq_to_ol_flags(rxq, cqe); 16066218063bSNélio Laranjeiro } 16076218063bSNélio Laranjeiro if (cqe->l4_hdr_type_etc & MLX5_CQE_VLAN_STRIPPED) { 16086218063bSNélio Laranjeiro pkt->ol_flags |= PKT_RX_VLAN_PKT | 1609b37b528dSOlivier Matz PKT_RX_VLAN_STRIPPED; 16106218063bSNélio Laranjeiro pkt->vlan_tci = ntohs(cqe->vlan_info); 1611f3db9489SYaacov Hazan } 16126218063bSNélio Laranjeiro if (rxq->crc_present) 16136218063bSNélio Laranjeiro len -= ETHER_CRC_LEN; 1614081f7eaeSNelio Laranjeiro } 16156218063bSNélio Laranjeiro PKT_LEN(pkt) = len; 16166218063bSNélio Laranjeiro DATA_LEN(pkt) = len; 161787011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 161887011737SAdrien Mazarguil /* Increment bytes counter. */ 161987011737SAdrien Mazarguil rxq->stats.ibytes += len; 162087011737SAdrien Mazarguil #endif 16216218063bSNélio Laranjeiro /* Return packet. */ 16226218063bSNélio Laranjeiro *(pkts++) = pkt; 16236218063bSNélio Laranjeiro ++pkts_ret; 162499c12dccSNélio Laranjeiro skip: 16256218063bSNélio Laranjeiro ++rq_ci; 16262e22920bSAdrien Mazarguil } 16276218063bSNélio Laranjeiro if (unlikely((i == 0) && (rq_ci == rxq->rq_ci))) 16282e22920bSAdrien Mazarguil return 0; 16292e22920bSAdrien Mazarguil /* Repost WRs. */ 16302e22920bSAdrien Mazarguil #ifdef DEBUG_RECV 16312e22920bSAdrien Mazarguil DEBUG("%p: reposting %u WRs", (void *)rxq, i); 16322e22920bSAdrien Mazarguil #endif 16336218063bSNélio Laranjeiro /* Update the consumer index. */ 16346218063bSNélio Laranjeiro rxq->rq_ci = rq_ci; 16356218063bSNélio Laranjeiro rte_wmb(); 16366218063bSNélio Laranjeiro *rxq->cq_db = htonl(rxq->cq_ci); 16376218063bSNélio Laranjeiro rte_wmb(); 16386218063bSNélio Laranjeiro *rxq->rq_db = htonl(rxq->rq_ci); 163987011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 164087011737SAdrien Mazarguil /* Increment packets counter. */ 164187011737SAdrien Mazarguil rxq->stats.ipackets += pkts_ret; 164287011737SAdrien Mazarguil #endif 16432e22920bSAdrien Mazarguil return pkts_ret; 16442e22920bSAdrien Mazarguil } 16452e22920bSAdrien Mazarguil 16462e22920bSAdrien Mazarguil /** 16472e22920bSAdrien Mazarguil * Dummy DPDK callback for TX. 16482e22920bSAdrien Mazarguil * 16492e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 16502e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 16512e22920bSAdrien Mazarguil * 16522e22920bSAdrien Mazarguil * @param dpdk_txq 16532e22920bSAdrien Mazarguil * Generic pointer to TX queue structure. 16542e22920bSAdrien Mazarguil * @param[in] pkts 16552e22920bSAdrien Mazarguil * Packets to transmit. 16562e22920bSAdrien Mazarguil * @param pkts_n 16572e22920bSAdrien Mazarguil * Number of packets in array. 16582e22920bSAdrien Mazarguil * 16592e22920bSAdrien Mazarguil * @return 16602e22920bSAdrien Mazarguil * Number of packets successfully transmitted (<= pkts_n). 16612e22920bSAdrien Mazarguil */ 16622e22920bSAdrien Mazarguil uint16_t 16632e22920bSAdrien Mazarguil removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 16642e22920bSAdrien Mazarguil { 16652e22920bSAdrien Mazarguil (void)dpdk_txq; 16662e22920bSAdrien Mazarguil (void)pkts; 16672e22920bSAdrien Mazarguil (void)pkts_n; 16682e22920bSAdrien Mazarguil return 0; 16692e22920bSAdrien Mazarguil } 16702e22920bSAdrien Mazarguil 16712e22920bSAdrien Mazarguil /** 16722e22920bSAdrien Mazarguil * Dummy DPDK callback for RX. 16732e22920bSAdrien Mazarguil * 16742e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 16752e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 16762e22920bSAdrien Mazarguil * 16772e22920bSAdrien Mazarguil * @param dpdk_rxq 16782e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 16792e22920bSAdrien Mazarguil * @param[out] pkts 16802e22920bSAdrien Mazarguil * Array to store received packets. 16812e22920bSAdrien Mazarguil * @param pkts_n 16822e22920bSAdrien Mazarguil * Maximum number of packets in array. 16832e22920bSAdrien Mazarguil * 16842e22920bSAdrien Mazarguil * @return 16852e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 16862e22920bSAdrien Mazarguil */ 16872e22920bSAdrien Mazarguil uint16_t 16882e22920bSAdrien Mazarguil removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 16892e22920bSAdrien Mazarguil { 16902e22920bSAdrien Mazarguil (void)dpdk_rxq; 16912e22920bSAdrien Mazarguil (void)pkts; 16922e22920bSAdrien Mazarguil (void)pkts_n; 16932e22920bSAdrien Mazarguil return 0; 16942e22920bSAdrien Mazarguil } 1695