18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 22e22920bSAdrien Mazarguil * Copyright 2015 6WIND S.A. 32e22920bSAdrien Mazarguil * Copyright 2015 Mellanox. 42e22920bSAdrien Mazarguil */ 52e22920bSAdrien Mazarguil 62e22920bSAdrien Mazarguil #include <assert.h> 72e22920bSAdrien Mazarguil #include <stdint.h> 82e22920bSAdrien Mazarguil #include <string.h> 92e22920bSAdrien Mazarguil #include <stdlib.h> 102e22920bSAdrien Mazarguil 112e22920bSAdrien Mazarguil /* Verbs header. */ 122e22920bSAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 132e22920bSAdrien Mazarguil #ifdef PEDANTIC 14fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 152e22920bSAdrien Mazarguil #endif 162e22920bSAdrien Mazarguil #include <infiniband/verbs.h> 1743e9d979SShachar Beiser #include <infiniband/mlx5dv.h> 182e22920bSAdrien Mazarguil #ifdef PEDANTIC 19fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 202e22920bSAdrien Mazarguil #endif 212e22920bSAdrien Mazarguil 222e22920bSAdrien Mazarguil #include <rte_mbuf.h> 232e22920bSAdrien Mazarguil #include <rte_mempool.h> 242e22920bSAdrien Mazarguil #include <rte_prefetch.h> 252e22920bSAdrien Mazarguil #include <rte_common.h> 262e22920bSAdrien Mazarguil #include <rte_branch_prediction.h> 276218063bSNélio Laranjeiro #include <rte_ether.h> 282e22920bSAdrien Mazarguil 292e22920bSAdrien Mazarguil #include "mlx5.h" 302e22920bSAdrien Mazarguil #include "mlx5_utils.h" 312e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 32f3db9489SYaacov Hazan #include "mlx5_autoconf.h" 332e22920bSAdrien Mazarguil #include "mlx5_defs.h" 346218063bSNélio Laranjeiro #include "mlx5_prm.h" 356218063bSNélio Laranjeiro 36c0583d98SJerin Jacob static __rte_always_inline uint32_t 37c0583d98SJerin Jacob rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe); 38ff1807a3SNélio Laranjeiro 39c0583d98SJerin Jacob static __rte_always_inline int 4078142aacSNélio Laranjeiro mlx5_rx_poll_len(struct mlx5_rxq_data 
*rxq, volatile struct mlx5_cqe *cqe, 41c0583d98SJerin Jacob uint16_t cqe_cnt, uint32_t *rss_hash); 42ff1807a3SNélio Laranjeiro 43c0583d98SJerin Jacob static __rte_always_inline uint32_t 4478142aacSNélio Laranjeiro rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); 45ff1807a3SNélio Laranjeiro 46ea16068cSYongseok Koh uint32_t mlx5_ptype_table[] __rte_cache_aligned = { 47ea16068cSYongseok Koh [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */ 48ea16068cSYongseok Koh }; 49ea16068cSYongseok Koh 50ea16068cSYongseok Koh /** 51ea16068cSYongseok Koh * Build a table to translate Rx completion flags to packet type. 52ea16068cSYongseok Koh * 53ea16068cSYongseok Koh * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 54ea16068cSYongseok Koh */ 55ea16068cSYongseok Koh void 56ea16068cSYongseok Koh mlx5_set_ptype_table(void) 57ea16068cSYongseok Koh { 58ea16068cSYongseok Koh unsigned int i; 59ea16068cSYongseok Koh uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table; 60ea16068cSYongseok Koh 619807f113SYongseok Koh /* Last entry must not be overwritten, reserved for errored packet. 
*/ 629807f113SYongseok Koh for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i) 63ea16068cSYongseok Koh (*p)[i] = RTE_PTYPE_UNKNOWN; 646cb559d6SYongseok Koh /* 656cb559d6SYongseok Koh * The index to the array should have: 66ea16068cSYongseok Koh * bit[1:0] = l3_hdr_type 67ea16068cSYongseok Koh * bit[4:2] = l4_hdr_type 68ea16068cSYongseok Koh * bit[5] = ip_frag 69ea16068cSYongseok Koh * bit[6] = tunneled 70ea16068cSYongseok Koh * bit[7] = outer_l3_type 7199c12dccSNélio Laranjeiro */ 723ca63b88SShahaf Shuler /* L2 */ 733ca63b88SShahaf Shuler (*p)[0x00] = RTE_PTYPE_L2_ETHER; 74ea16068cSYongseok Koh /* L3 */ 75ea16068cSYongseok Koh (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 76ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 77ea16068cSYongseok Koh (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 78ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 79ea16068cSYongseok Koh /* Fragmented */ 80ea16068cSYongseok Koh (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 81ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 82ea16068cSYongseok Koh (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 83ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 84ea16068cSYongseok Koh /* TCP */ 85ea16068cSYongseok Koh (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 86ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 87ea16068cSYongseok Koh (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 88ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 89ea16068cSYongseok Koh /* UDP */ 90ea16068cSYongseok Koh (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 91ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 92ea16068cSYongseok Koh (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 93ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 94ea16068cSYongseok Koh /* Repeat with outer_l3_type being set. Just in case. 
*/ 95ea16068cSYongseok Koh (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 96ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 97ea16068cSYongseok Koh (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 98ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 99ea16068cSYongseok Koh (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 100ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 101ea16068cSYongseok Koh (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 102ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 103ea16068cSYongseok Koh (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 104ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 105ea16068cSYongseok Koh (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 106ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 107ea16068cSYongseok Koh (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 108ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 109ea16068cSYongseok Koh (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 110ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 111ea16068cSYongseok Koh /* Tunneled - L3 */ 112ea16068cSYongseok Koh (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 113ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 114ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 115ea16068cSYongseok Koh (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 116ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 117ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 118ea16068cSYongseok Koh (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 119ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 120ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 121ea16068cSYongseok Koh (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 122ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 123ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 124ea16068cSYongseok Koh /* Tunneled - 
Fragmented */ 125ea16068cSYongseok Koh (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 126ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 127ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 128ea16068cSYongseok Koh (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 129ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 130ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 131ea16068cSYongseok Koh (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 132ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 133ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 134ea16068cSYongseok Koh (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 135ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 136ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 137ea16068cSYongseok Koh /* Tunneled - TCP */ 138ea16068cSYongseok Koh (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 139ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 1406c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 141ea16068cSYongseok Koh (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 142ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 1436c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 144ea16068cSYongseok Koh (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 145ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 1466c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 147ea16068cSYongseok Koh (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 148ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 1496c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 150ea16068cSYongseok Koh /* Tunneled - UDP */ 151ea16068cSYongseok Koh (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 152ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 1536c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 154ea16068cSYongseok Koh (*p)[0x4a] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 155ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 1566c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 157ea16068cSYongseok Koh (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 158ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 1596c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 160ea16068cSYongseok Koh (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 161ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 1626c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 163ea16068cSYongseok Koh } 164fdcb0f53SNélio Laranjeiro 1652e22920bSAdrien Mazarguil /** 1666ce84bd8SYongseok Koh * Return the size of tailroom of WQ. 1676ce84bd8SYongseok Koh * 1686ce84bd8SYongseok Koh * @param txq 1696ce84bd8SYongseok Koh * Pointer to TX queue structure. 1706ce84bd8SYongseok Koh * @param addr 1716ce84bd8SYongseok Koh * Pointer to tail of WQ. 1726ce84bd8SYongseok Koh * 1736ce84bd8SYongseok Koh * @return 1746ce84bd8SYongseok Koh * Size of tailroom. 1756ce84bd8SYongseok Koh */ 1766ce84bd8SYongseok Koh static inline size_t 177991b04f6SNélio Laranjeiro tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr) 1786ce84bd8SYongseok Koh { 1796ce84bd8SYongseok Koh size_t tailroom; 1806ce84bd8SYongseok Koh tailroom = (uintptr_t)(txq->wqes) + 1816ce84bd8SYongseok Koh (1 << txq->wqe_n) * MLX5_WQE_SIZE - 1826ce84bd8SYongseok Koh (uintptr_t)addr; 1836ce84bd8SYongseok Koh return tailroom; 1846ce84bd8SYongseok Koh } 1856ce84bd8SYongseok Koh 1866ce84bd8SYongseok Koh /** 1876ce84bd8SYongseok Koh * Copy data to tailroom of circular queue. 1886ce84bd8SYongseok Koh * 1896ce84bd8SYongseok Koh * @param dst 1906ce84bd8SYongseok Koh * Pointer to destination. 1916ce84bd8SYongseok Koh * @param src 1926ce84bd8SYongseok Koh * Pointer to source. 1936ce84bd8SYongseok Koh * @param n 1946ce84bd8SYongseok Koh * Number of bytes to copy. 1956ce84bd8SYongseok Koh * @param base 1966ce84bd8SYongseok Koh * Pointer to head of queue. 
1976ce84bd8SYongseok Koh * @param tailroom 1986ce84bd8SYongseok Koh * Size of tailroom from dst. 1996ce84bd8SYongseok Koh * 2006ce84bd8SYongseok Koh * @return 2016ce84bd8SYongseok Koh * Pointer after copied data. 2026ce84bd8SYongseok Koh */ 2036ce84bd8SYongseok Koh static inline void * 2046ce84bd8SYongseok Koh mlx5_copy_to_wq(void *dst, const void *src, size_t n, 2056ce84bd8SYongseok Koh void *base, size_t tailroom) 2066ce84bd8SYongseok Koh { 2076ce84bd8SYongseok Koh void *ret; 2086ce84bd8SYongseok Koh 2096ce84bd8SYongseok Koh if (n > tailroom) { 2106ce84bd8SYongseok Koh rte_memcpy(dst, src, tailroom); 2116ce84bd8SYongseok Koh rte_memcpy(base, (void *)((uintptr_t)src + tailroom), 2126ce84bd8SYongseok Koh n - tailroom); 2136ce84bd8SYongseok Koh ret = (uint8_t *)base + n - tailroom; 2146ce84bd8SYongseok Koh } else { 2156ce84bd8SYongseok Koh rte_memcpy(dst, src, n); 2166ce84bd8SYongseok Koh ret = (n == tailroom) ? base : (uint8_t *)dst + n; 2176ce84bd8SYongseok Koh } 2186ce84bd8SYongseok Koh return ret; 2196ce84bd8SYongseok Koh } 2206ce84bd8SYongseok Koh 2216ce84bd8SYongseok Koh /** 2228788fec1SOlivier Matz * DPDK callback to check the status of a tx descriptor. 2238788fec1SOlivier Matz * 2248788fec1SOlivier Matz * @param tx_queue 2258788fec1SOlivier Matz * The tx queue. 2268788fec1SOlivier Matz * @param[in] offset 2278788fec1SOlivier Matz * The index of the descriptor in the ring. 2288788fec1SOlivier Matz * 2298788fec1SOlivier Matz * @return 2308788fec1SOlivier Matz * The status of the tx descriptor. 
2318788fec1SOlivier Matz */ 2328788fec1SOlivier Matz int 2338788fec1SOlivier Matz mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset) 2348788fec1SOlivier Matz { 235991b04f6SNélio Laranjeiro struct mlx5_txq_data *txq = tx_queue; 2368c819a69SYongseok Koh uint16_t used; 2378788fec1SOlivier Matz 2386cb559d6SYongseok Koh mlx5_tx_complete(txq); 2398c819a69SYongseok Koh used = txq->elts_head - txq->elts_tail; 2408788fec1SOlivier Matz if (offset < used) 2418788fec1SOlivier Matz return RTE_ETH_TX_DESC_FULL; 2428788fec1SOlivier Matz return RTE_ETH_TX_DESC_DONE; 2438788fec1SOlivier Matz } 2448788fec1SOlivier Matz 2458788fec1SOlivier Matz /** 2468788fec1SOlivier Matz * DPDK callback to check the status of a rx descriptor. 2478788fec1SOlivier Matz * 2488788fec1SOlivier Matz * @param rx_queue 2498788fec1SOlivier Matz * The rx queue. 2508788fec1SOlivier Matz * @param[in] offset 2518788fec1SOlivier Matz * The index of the descriptor in the ring. 2528788fec1SOlivier Matz * 2538788fec1SOlivier Matz * @return 2548788fec1SOlivier Matz * The status of the tx descriptor. 
2558788fec1SOlivier Matz */ 2568788fec1SOlivier Matz int 2578788fec1SOlivier Matz mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) 2588788fec1SOlivier Matz { 25978142aacSNélio Laranjeiro struct mlx5_rxq_data *rxq = rx_queue; 2608788fec1SOlivier Matz struct rxq_zip *zip = &rxq->zip; 2618788fec1SOlivier Matz volatile struct mlx5_cqe *cqe; 2628788fec1SOlivier Matz const unsigned int cqe_n = (1 << rxq->cqe_n); 2638788fec1SOlivier Matz const unsigned int cqe_cnt = cqe_n - 1; 2648788fec1SOlivier Matz unsigned int cq_ci; 2658788fec1SOlivier Matz unsigned int used; 2668788fec1SOlivier Matz 2678788fec1SOlivier Matz /* if we are processing a compressed cqe */ 2688788fec1SOlivier Matz if (zip->ai) { 2698788fec1SOlivier Matz used = zip->cqe_cnt - zip->ca; 2708788fec1SOlivier Matz cq_ci = zip->cq_ci; 2718788fec1SOlivier Matz } else { 2728788fec1SOlivier Matz used = 0; 2738788fec1SOlivier Matz cq_ci = rxq->cq_ci; 2748788fec1SOlivier Matz } 2758788fec1SOlivier Matz cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; 2768788fec1SOlivier Matz while (check_cqe(cqe, cqe_n, cq_ci) == 0) { 2778788fec1SOlivier Matz int8_t op_own; 2788788fec1SOlivier Matz unsigned int n; 2798788fec1SOlivier Matz 2808788fec1SOlivier Matz op_own = cqe->op_own; 2818788fec1SOlivier Matz if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) 2826b30a6a8SShachar Beiser n = rte_be_to_cpu_32(cqe->byte_cnt); 2838788fec1SOlivier Matz else 2848788fec1SOlivier Matz n = 1; 2858788fec1SOlivier Matz cq_ci += n; 2868788fec1SOlivier Matz used += n; 2878788fec1SOlivier Matz cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; 2888788fec1SOlivier Matz } 2898788fec1SOlivier Matz used = RTE_MIN(used, (1U << rxq->elts_n) - 1); 2908788fec1SOlivier Matz if (offset < used) 2918788fec1SOlivier Matz return RTE_ETH_RX_DESC_DONE; 2928788fec1SOlivier Matz return RTE_ETH_RX_DESC_AVAIL; 2938788fec1SOlivier Matz } 2948788fec1SOlivier Matz 2958788fec1SOlivier Matz /** 2962e22920bSAdrien Mazarguil * DPDK callback for TX. 
2972e22920bSAdrien Mazarguil * 2982e22920bSAdrien Mazarguil * @param dpdk_txq 2992e22920bSAdrien Mazarguil * Generic pointer to TX queue structure. 3002e22920bSAdrien Mazarguil * @param[in] pkts 3012e22920bSAdrien Mazarguil * Packets to transmit. 3022e22920bSAdrien Mazarguil * @param pkts_n 3032e22920bSAdrien Mazarguil * Number of packets in array. 3042e22920bSAdrien Mazarguil * 3052e22920bSAdrien Mazarguil * @return 3062e22920bSAdrien Mazarguil * Number of packets successfully transmitted (<= pkts_n). 3072e22920bSAdrien Mazarguil */ 3082e22920bSAdrien Mazarguil uint16_t 3092e22920bSAdrien Mazarguil mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 3102e22920bSAdrien Mazarguil { 311991b04f6SNélio Laranjeiro struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq; 3121d88ba17SNélio Laranjeiro uint16_t elts_head = txq->elts_head; 3138c819a69SYongseok Koh const uint16_t elts_n = 1 << txq->elts_n; 3148c819a69SYongseok Koh const uint16_t elts_m = elts_n - 1; 315c3d62cc9SAdrien Mazarguil unsigned int i = 0; 316a5bf6af9SAdrien Mazarguil unsigned int j = 0; 3173f13f8c2SShahaf Shuler unsigned int k = 0; 3188c819a69SYongseok Koh uint16_t max_elts; 319f04f1d51SNélio Laranjeiro uint16_t max_wqe; 320c305090bSAdrien Mazarguil unsigned int comp; 321ac180a21SYongseok Koh volatile struct mlx5_wqe_ctrl *last_wqe = NULL; 3226579c27cSNélio Laranjeiro unsigned int segs_n = 0; 32327a6b2d6SNélio Laranjeiro const unsigned int max_inline = txq->max_inline; 3242e22920bSAdrien Mazarguil 3251d88ba17SNélio Laranjeiro if (unlikely(!pkts_n)) 3261d88ba17SNélio Laranjeiro return 0; 3275e1d11ecSNelio Laranjeiro /* Prefetch first packet cacheline. */ 328c3d62cc9SAdrien Mazarguil rte_prefetch0(*pkts); 3291d88ba17SNélio Laranjeiro /* Start processing. */ 3306cb559d6SYongseok Koh mlx5_tx_complete(txq); 3318c819a69SYongseok Koh max_elts = (elts_n - (elts_head - txq->elts_tail)); 3322eefbec5SYongseok Koh /* A CQE slot must always be available. 
*/ 3332eefbec5SYongseok Koh assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); 334f04f1d51SNélio Laranjeiro max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); 335f04f1d51SNélio Laranjeiro if (unlikely(!max_wqe)) 336f04f1d51SNélio Laranjeiro return 0; 337c3d62cc9SAdrien Mazarguil do { 3383bbae1ebSNélio Laranjeiro struct rte_mbuf *buf = NULL; 3393bbae1ebSNélio Laranjeiro uint8_t *raw; 3403bbae1ebSNélio Laranjeiro volatile struct mlx5_wqe_v *wqe = NULL; 3419a7fa9f7SNélio Laranjeiro volatile rte_v128u32_t *dseg = NULL; 342573f54afSNélio Laranjeiro uint32_t length; 3438688b2f8SNélio Laranjeiro unsigned int ds = 0; 344ac180a21SYongseok Koh unsigned int sg = 0; /* counter of additional segs attached. */ 3456579c27cSNélio Laranjeiro uintptr_t addr; 3460d637a34SNélio Laranjeiro uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2; 3473f13f8c2SShahaf Shuler uint16_t tso_header_sz = 0; 348eef822ddSNélio Laranjeiro uint16_t ehdr; 3494aa15eb1SNélio Laranjeiro uint8_t cs_flags; 3503f13f8c2SShahaf Shuler uint64_t tso = 0; 35183daf156SShahaf Shuler uint16_t tso_segsz = 0; 3526579c27cSNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS 3536579c27cSNélio Laranjeiro uint32_t total_length = 0; 3546579c27cSNélio Laranjeiro #endif 3552e22920bSAdrien Mazarguil 3566579c27cSNélio Laranjeiro /* first_seg */ 3573730e6c6SYongseok Koh buf = *pkts; 3586579c27cSNélio Laranjeiro segs_n = buf->nb_segs; 359c3d62cc9SAdrien Mazarguil /* 360c3d62cc9SAdrien Mazarguil * Make sure there is enough room to store this packet and 361c3d62cc9SAdrien Mazarguil * that one ring entry remains unused. 
362c3d62cc9SAdrien Mazarguil */ 363a5bf6af9SAdrien Mazarguil assert(segs_n); 3648c819a69SYongseok Koh if (max_elts < segs_n) 365c3d62cc9SAdrien Mazarguil break; 3668c819a69SYongseok Koh max_elts -= segs_n; 367f895536bSYongseok Koh sg = --segs_n; 368f04f1d51SNélio Laranjeiro if (unlikely(--max_wqe == 0)) 369f04f1d51SNélio Laranjeiro break; 3709a7fa9f7SNélio Laranjeiro wqe = (volatile struct mlx5_wqe_v *) 371fdcb0f53SNélio Laranjeiro tx_mlx5_wqe(txq, txq->wqe_ci); 372fdcb0f53SNélio Laranjeiro rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); 3733730e6c6SYongseok Koh if (pkts_n - i > 1) 3743730e6c6SYongseok Koh rte_prefetch0(*(pkts + 1)); 3756579c27cSNélio Laranjeiro addr = rte_pktmbuf_mtod(buf, uintptr_t); 3762e22920bSAdrien Mazarguil length = DATA_LEN(buf); 377eef822ddSNélio Laranjeiro ehdr = (((uint8_t *)addr)[1] << 8) | 378eef822ddSNélio Laranjeiro ((uint8_t *)addr)[0]; 3796579c27cSNélio Laranjeiro #ifdef MLX5_PMD_SOFT_COUNTERS 3806579c27cSNélio Laranjeiro total_length = length; 3816579c27cSNélio Laranjeiro #endif 38224c14430SShahaf Shuler if (length < (MLX5_WQE_DWORD_SIZE + 2)) { 38324c14430SShahaf Shuler txq->stats.oerrors++; 384959be52eSNélio Laranjeiro break; 38524c14430SShahaf Shuler } 3862e22920bSAdrien Mazarguil /* Update element. */ 3878c819a69SYongseok Koh (*txq->elts)[elts_head & elts_m] = buf; 3885e1d11ecSNelio Laranjeiro /* Prefetch next buffer data. */ 3893730e6c6SYongseok Koh if (pkts_n - i > 1) 3903730e6c6SYongseok Koh rte_prefetch0( 3913730e6c6SYongseok Koh rte_pktmbuf_mtod(*(pkts + 1), volatile void *)); 3924aa15eb1SNélio Laranjeiro cs_flags = txq_ol_cksum_to_cs(txq, buf); 393b8fe952eSNélio Laranjeiro raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; 3946579c27cSNélio Laranjeiro /* Replace the Ethernet type by the VLAN if necessary. 
*/ 3956579c27cSNélio Laranjeiro if (buf->ol_flags & PKT_TX_VLAN_PKT) { 3966b30a6a8SShachar Beiser uint32_t vlan = rte_cpu_to_be_32(0x81000000 | 3976b30a6a8SShachar Beiser buf->vlan_tci); 3980d637a34SNélio Laranjeiro unsigned int len = 2 * ETHER_ADDR_LEN - 2; 3996579c27cSNélio Laranjeiro 4000d637a34SNélio Laranjeiro addr += 2; 4010d637a34SNélio Laranjeiro length -= 2; 4020d637a34SNélio Laranjeiro /* Copy Destination and source mac address. */ 4030d637a34SNélio Laranjeiro memcpy((uint8_t *)raw, ((uint8_t *)addr), len); 4040d637a34SNélio Laranjeiro /* Copy VLAN. */ 4050d637a34SNélio Laranjeiro memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan)); 4060d637a34SNélio Laranjeiro /* Copy missing two bytes to end the DSeg. */ 4070d637a34SNélio Laranjeiro memcpy((uint8_t *)raw + len + sizeof(vlan), 4080d637a34SNélio Laranjeiro ((uint8_t *)addr) + len, 2); 4090d637a34SNélio Laranjeiro addr += len + 2; 4100d637a34SNélio Laranjeiro length -= (len + 2); 4110d637a34SNélio Laranjeiro } else { 4120d637a34SNélio Laranjeiro memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, 4130d637a34SNélio Laranjeiro MLX5_WQE_DWORD_SIZE); 4140d637a34SNélio Laranjeiro length -= pkt_inline_sz; 4150d637a34SNélio Laranjeiro addr += pkt_inline_sz; 4166579c27cSNélio Laranjeiro } 417d8292497SYongseok Koh raw += MLX5_WQE_DWORD_SIZE; 41836aa55eaSYongseok Koh tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG); 4193f13f8c2SShahaf Shuler if (tso) { 42036aa55eaSYongseok Koh uintptr_t end = 42136aa55eaSYongseok Koh (uintptr_t)(((uintptr_t)txq->wqes) + 42236aa55eaSYongseok Koh (1 << txq->wqe_n) * MLX5_WQE_SIZE); 4233f13f8c2SShahaf Shuler unsigned int copy_b; 42436aa55eaSYongseok Koh uint8_t vlan_sz = 42536aa55eaSYongseok Koh (buf->ol_flags & PKT_TX_VLAN_PKT) ? 
4 : 0; 426b247f346SShahaf Shuler const uint64_t is_tunneled = 42736aa55eaSYongseok Koh buf->ol_flags & (PKT_TX_TUNNEL_GRE | 428b247f346SShahaf Shuler PKT_TX_TUNNEL_VXLAN); 4293f13f8c2SShahaf Shuler 4303f13f8c2SShahaf Shuler tso_header_sz = buf->l2_len + vlan_sz + 4313f13f8c2SShahaf Shuler buf->l3_len + buf->l4_len; 43283daf156SShahaf Shuler tso_segsz = buf->tso_segsz; 43396fc8d65SShahaf Shuler if (unlikely(tso_segsz == 0)) { 43496fc8d65SShahaf Shuler txq->stats.oerrors++; 43596fc8d65SShahaf Shuler break; 43696fc8d65SShahaf Shuler } 437b247f346SShahaf Shuler if (is_tunneled && txq->tunnel_en) { 438b247f346SShahaf Shuler tso_header_sz += buf->outer_l2_len + 439b247f346SShahaf Shuler buf->outer_l3_len; 4402a6c96beSShahaf Shuler cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; 4412a6c96beSShahaf Shuler } else { 4422a6c96beSShahaf Shuler cs_flags |= MLX5_ETH_WQE_L4_CSUM; 443b247f346SShahaf Shuler } 44436aa55eaSYongseok Koh if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) { 44524c14430SShahaf Shuler txq->stats.oerrors++; 4463f13f8c2SShahaf Shuler break; 44724c14430SShahaf Shuler } 4483f13f8c2SShahaf Shuler copy_b = tso_header_sz - pkt_inline_sz; 4493f13f8c2SShahaf Shuler /* First seg must contain all headers. */ 4503f13f8c2SShahaf Shuler assert(copy_b <= length); 45136aa55eaSYongseok Koh if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { 45236aa55eaSYongseok Koh uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; 4533f13f8c2SShahaf Shuler 4543f13f8c2SShahaf Shuler if (unlikely(max_wqe < n)) 4553f13f8c2SShahaf Shuler break; 4563f13f8c2SShahaf Shuler max_wqe -= n; 45736aa55eaSYongseok Koh rte_memcpy((void *)raw, (void *)addr, copy_b); 4583f13f8c2SShahaf Shuler addr += copy_b; 4593f13f8c2SShahaf Shuler length -= copy_b; 460d8292497SYongseok Koh /* Include padding for TSO header. 
*/ 461d8292497SYongseok Koh copy_b = MLX5_WQE_DS(copy_b) * 4623f13f8c2SShahaf Shuler MLX5_WQE_DWORD_SIZE; 463d8292497SYongseok Koh pkt_inline_sz += copy_b; 464d8292497SYongseok Koh raw += copy_b; 4653f13f8c2SShahaf Shuler } else { 4663f13f8c2SShahaf Shuler /* NOP WQE. */ 4673f13f8c2SShahaf Shuler wqe->ctrl = (rte_v128u32_t){ 46836aa55eaSYongseok Koh rte_cpu_to_be_32(txq->wqe_ci << 8), 46936aa55eaSYongseok Koh rte_cpu_to_be_32(txq->qp_num_8s | 1), 4703f13f8c2SShahaf Shuler 0, 4713f13f8c2SShahaf Shuler 0, 4723f13f8c2SShahaf Shuler }; 4733f13f8c2SShahaf Shuler ds = 1; 474cb98affeSThierry Herbelot #ifdef MLX5_PMD_SOFT_COUNTERS 4753f13f8c2SShahaf Shuler total_length = 0; 476cb98affeSThierry Herbelot #endif 4773f13f8c2SShahaf Shuler k++; 4783f13f8c2SShahaf Shuler goto next_wqe; 4793f13f8c2SShahaf Shuler } 4803f13f8c2SShahaf Shuler } 4816579c27cSNélio Laranjeiro /* Inline if enough room. */ 48227a6b2d6SNélio Laranjeiro if (max_inline || tso) { 483f895536bSYongseok Koh uint32_t inl = 0; 484fdcb0f53SNélio Laranjeiro uintptr_t end = (uintptr_t) 485fdcb0f53SNélio Laranjeiro (((uintptr_t)txq->wqes) + 486fdcb0f53SNélio Laranjeiro (1 << txq->wqe_n) * MLX5_WQE_SIZE); 487ab76eab3SYongseok Koh unsigned int inline_room = max_inline * 4888fcd6c2cSNélio Laranjeiro RTE_CACHE_LINE_SIZE - 489d8292497SYongseok Koh (pkt_inline_sz - 2) - 490d8292497SYongseok Koh !!tso * sizeof(inl); 491f895536bSYongseok Koh uintptr_t addr_end; 492f895536bSYongseok Koh unsigned int copy_b; 4936579c27cSNélio Laranjeiro 494f895536bSYongseok Koh pkt_inline: 495f895536bSYongseok Koh addr_end = RTE_ALIGN_FLOOR(addr + inline_room, 496f895536bSYongseok Koh RTE_CACHE_LINE_SIZE); 497f895536bSYongseok Koh copy_b = (addr_end > addr) ? 498f895536bSYongseok Koh RTE_MIN((addr_end - addr), length) : 0; 4998fcd6c2cSNélio Laranjeiro if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { 500f04f1d51SNélio Laranjeiro /* 501f04f1d51SNélio Laranjeiro * One Dseg remains in the current WQE. 
To 502f04f1d51SNélio Laranjeiro * keep the computation positive, it is 503f04f1d51SNélio Laranjeiro * removed after the bytes to Dseg conversion. 504f04f1d51SNélio Laranjeiro */ 5058fcd6c2cSNélio Laranjeiro uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; 5068fcd6c2cSNélio Laranjeiro 507f04f1d51SNélio Laranjeiro if (unlikely(max_wqe < n)) 508f04f1d51SNélio Laranjeiro break; 509f04f1d51SNélio Laranjeiro max_wqe -= n; 510f895536bSYongseok Koh if (tso && !inl) { 5116963ae8bSYongseok Koh inl = rte_cpu_to_be_32(copy_b | 5126b30a6a8SShachar Beiser MLX5_INLINE_SEG); 5133f13f8c2SShahaf Shuler rte_memcpy((void *)raw, 5143f13f8c2SShahaf Shuler (void *)&inl, sizeof(inl)); 5153f13f8c2SShahaf Shuler raw += sizeof(inl); 5163f13f8c2SShahaf Shuler pkt_inline_sz += sizeof(inl); 5173f13f8c2SShahaf Shuler } 5186579c27cSNélio Laranjeiro rte_memcpy((void *)raw, (void *)addr, copy_b); 5196579c27cSNélio Laranjeiro addr += copy_b; 5206579c27cSNélio Laranjeiro length -= copy_b; 5216579c27cSNélio Laranjeiro pkt_inline_sz += copy_b; 5226579c27cSNélio Laranjeiro } 5236579c27cSNélio Laranjeiro /* 524786b5c2dSShahaf Shuler * 2 DWORDs consumed by the WQE header + ETH segment + 5256579c27cSNélio Laranjeiro * the size of the inline part of the packet. 
5266579c27cSNélio Laranjeiro */ 5276579c27cSNélio Laranjeiro ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2); 5286579c27cSNélio Laranjeiro if (length > 0) { 529f04f1d51SNélio Laranjeiro if (ds % (MLX5_WQE_SIZE / 530f04f1d51SNélio Laranjeiro MLX5_WQE_DWORD_SIZE) == 0) { 531f04f1d51SNélio Laranjeiro if (unlikely(--max_wqe == 0)) 532f04f1d51SNélio Laranjeiro break; 533f04f1d51SNélio Laranjeiro dseg = (volatile rte_v128u32_t *) 534f04f1d51SNélio Laranjeiro tx_mlx5_wqe(txq, txq->wqe_ci + 535f04f1d51SNélio Laranjeiro ds / 4); 536f04f1d51SNélio Laranjeiro } else { 5379a7fa9f7SNélio Laranjeiro dseg = (volatile rte_v128u32_t *) 5386579c27cSNélio Laranjeiro ((uintptr_t)wqe + 5396579c27cSNélio Laranjeiro (ds * MLX5_WQE_DWORD_SIZE)); 540f04f1d51SNélio Laranjeiro } 5416579c27cSNélio Laranjeiro goto use_dseg; 5426579c27cSNélio Laranjeiro } else if (!segs_n) { 5436579c27cSNélio Laranjeiro goto next_pkt; 5446579c27cSNélio Laranjeiro } else { 545f895536bSYongseok Koh raw += copy_b; 546f895536bSYongseok Koh inline_room -= copy_b; 547f895536bSYongseok Koh --segs_n; 548f895536bSYongseok Koh buf = buf->next; 549f895536bSYongseok Koh assert(buf); 550f895536bSYongseok Koh addr = rte_pktmbuf_mtod(buf, uintptr_t); 551f895536bSYongseok Koh length = DATA_LEN(buf); 552f895536bSYongseok Koh #ifdef MLX5_PMD_SOFT_COUNTERS 553f895536bSYongseok Koh total_length += length; 554f895536bSYongseok Koh #endif 555f895536bSYongseok Koh (*txq->elts)[++elts_head & elts_m] = buf; 556f895536bSYongseok Koh goto pkt_inline; 5576579c27cSNélio Laranjeiro } 5586579c27cSNélio Laranjeiro } else { 5596579c27cSNélio Laranjeiro /* 5606579c27cSNélio Laranjeiro * No inline has been done in the packet, only the 5616579c27cSNélio Laranjeiro * Ethernet Header as been stored. 
5626579c27cSNélio Laranjeiro */ 5639a7fa9f7SNélio Laranjeiro dseg = (volatile rte_v128u32_t *) 5646579c27cSNélio Laranjeiro ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE)); 5656579c27cSNélio Laranjeiro ds = 3; 5666579c27cSNélio Laranjeiro use_dseg: 5676579c27cSNélio Laranjeiro /* Add the remaining packet as a simple ds. */ 568ebbb81ebSNélio Laranjeiro addr = rte_cpu_to_be_64(addr); 5699a7fa9f7SNélio Laranjeiro *dseg = (rte_v128u32_t){ 5706b30a6a8SShachar Beiser rte_cpu_to_be_32(length), 5716cb559d6SYongseok Koh mlx5_tx_mb2mr(txq, buf), 572ebbb81ebSNélio Laranjeiro addr, 573ebbb81ebSNélio Laranjeiro addr >> 32, 5746579c27cSNélio Laranjeiro }; 5756579c27cSNélio Laranjeiro ++ds; 5766579c27cSNélio Laranjeiro if (!segs_n) 5776579c27cSNélio Laranjeiro goto next_pkt; 5786579c27cSNélio Laranjeiro } 5796579c27cSNélio Laranjeiro next_seg: 5806579c27cSNélio Laranjeiro assert(buf); 5816579c27cSNélio Laranjeiro assert(ds); 5826579c27cSNélio Laranjeiro assert(wqe); 583a5bf6af9SAdrien Mazarguil /* 584a5bf6af9SAdrien Mazarguil * Spill on next WQE when the current one does not have 585a5bf6af9SAdrien Mazarguil * enough room left. Size of WQE must a be a multiple 586a5bf6af9SAdrien Mazarguil * of data segment size. 
587a5bf6af9SAdrien Mazarguil */ 5888688b2f8SNélio Laranjeiro assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE)); 5896579c27cSNélio Laranjeiro if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) { 590f04f1d51SNélio Laranjeiro if (unlikely(--max_wqe == 0)) 591f04f1d51SNélio Laranjeiro break; 5929a7fa9f7SNélio Laranjeiro dseg = (volatile rte_v128u32_t *) 593f04f1d51SNélio Laranjeiro tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4); 594f04f1d51SNélio Laranjeiro rte_prefetch0(tx_mlx5_wqe(txq, 595f04f1d51SNélio Laranjeiro txq->wqe_ci + ds / 4 + 1)); 5966579c27cSNélio Laranjeiro } else { 597a5bf6af9SAdrien Mazarguil ++dseg; 5986579c27cSNélio Laranjeiro } 599a5bf6af9SAdrien Mazarguil ++ds; 600a5bf6af9SAdrien Mazarguil buf = buf->next; 601a5bf6af9SAdrien Mazarguil assert(buf); 6026579c27cSNélio Laranjeiro length = DATA_LEN(buf); 603a5bf6af9SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 6046579c27cSNélio Laranjeiro total_length += length; 605a5bf6af9SAdrien Mazarguil #endif 6066579c27cSNélio Laranjeiro /* Store segment information. */ 607ebbb81ebSNélio Laranjeiro addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); 6089a7fa9f7SNélio Laranjeiro *dseg = (rte_v128u32_t){ 6096b30a6a8SShachar Beiser rte_cpu_to_be_32(length), 6106cb559d6SYongseok Koh mlx5_tx_mb2mr(txq, buf), 611ebbb81ebSNélio Laranjeiro addr, 612ebbb81ebSNélio Laranjeiro addr >> 32, 6136579c27cSNélio Laranjeiro }; 6148c819a69SYongseok Koh (*txq->elts)[++elts_head & elts_m] = buf; 615f895536bSYongseok Koh if (--segs_n) 6166579c27cSNélio Laranjeiro goto next_seg; 6176579c27cSNélio Laranjeiro next_pkt: 618883ce172SShahaf Shuler if (ds > MLX5_DSEG_MAX) { 619883ce172SShahaf Shuler txq->stats.oerrors++; 620883ce172SShahaf Shuler break; 621883ce172SShahaf Shuler } 6228c819a69SYongseok Koh ++elts_head; 6233730e6c6SYongseok Koh ++pkts; 6246579c27cSNélio Laranjeiro ++i; 625f895536bSYongseok Koh j += sg; 626b8fe952eSNélio Laranjeiro /* Initialize known and common part of the WQE structure. 
*/ 6273f13f8c2SShahaf Shuler if (tso) { 6283f13f8c2SShahaf Shuler wqe->ctrl = (rte_v128u32_t){ 6296b30a6a8SShachar Beiser rte_cpu_to_be_32((txq->wqe_ci << 8) | 6306b30a6a8SShachar Beiser MLX5_OPCODE_TSO), 6316b30a6a8SShachar Beiser rte_cpu_to_be_32(txq->qp_num_8s | ds), 6323f13f8c2SShahaf Shuler 0, 6333f13f8c2SShahaf Shuler 0, 6343f13f8c2SShahaf Shuler }; 6353f13f8c2SShahaf Shuler wqe->eseg = (rte_v128u32_t){ 6363f13f8c2SShahaf Shuler 0, 6376b30a6a8SShachar Beiser cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), 6383f13f8c2SShahaf Shuler 0, 6396b30a6a8SShachar Beiser (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), 6403f13f8c2SShahaf Shuler }; 6413f13f8c2SShahaf Shuler } else { 6429a7fa9f7SNélio Laranjeiro wqe->ctrl = (rte_v128u32_t){ 6436b30a6a8SShachar Beiser rte_cpu_to_be_32((txq->wqe_ci << 8) | 6446b30a6a8SShachar Beiser MLX5_OPCODE_SEND), 6456b30a6a8SShachar Beiser rte_cpu_to_be_32(txq->qp_num_8s | ds), 6469a7fa9f7SNélio Laranjeiro 0, 6479a7fa9f7SNélio Laranjeiro 0, 6489a7fa9f7SNélio Laranjeiro }; 6499a7fa9f7SNélio Laranjeiro wqe->eseg = (rte_v128u32_t){ 6509a7fa9f7SNélio Laranjeiro 0, 6519a7fa9f7SNélio Laranjeiro cs_flags, 6529a7fa9f7SNélio Laranjeiro 0, 6536b30a6a8SShachar Beiser (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), 6549a7fa9f7SNélio Laranjeiro }; 6553f13f8c2SShahaf Shuler } 6563f13f8c2SShahaf Shuler next_wqe: 6576579c27cSNélio Laranjeiro txq->wqe_ci += (ds + 3) / 4; 658ac180a21SYongseok Koh /* Save the last successful WQE for completion request */ 659ac180a21SYongseok Koh last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe; 66087011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 661573f54afSNélio Laranjeiro /* Increment sent bytes counter. */ 6626579c27cSNélio Laranjeiro txq->stats.obytes += total_length; 66387011737SAdrien Mazarguil #endif 6643730e6c6SYongseok Koh } while (i < pkts_n); 6652e22920bSAdrien Mazarguil /* Take a shortcut if nothing must be sent. 
*/ 6663f13f8c2SShahaf Shuler if (unlikely((i + k) == 0)) 6672e22920bSAdrien Mazarguil return 0; 6688c819a69SYongseok Koh txq->elts_head += (i + j); 669c305090bSAdrien Mazarguil /* Check whether completion threshold has been reached. */ 6703f13f8c2SShahaf Shuler comp = txq->elts_comp + i + j + k; 671c305090bSAdrien Mazarguil if (comp >= MLX5_TX_COMP_THRESH) { 672c305090bSAdrien Mazarguil /* Request completion on last WQE. */ 6736b30a6a8SShachar Beiser last_wqe->ctrl2 = rte_cpu_to_be_32(8); 674c305090bSAdrien Mazarguil /* Save elts_head in unused "immediate" field of WQE. */ 675ac180a21SYongseok Koh last_wqe->ctrl3 = txq->elts_head; 676c305090bSAdrien Mazarguil txq->elts_comp = 0; 6772eefbec5SYongseok Koh #ifndef NDEBUG 6782eefbec5SYongseok Koh ++txq->cq_pi; 6792eefbec5SYongseok Koh #endif 680c305090bSAdrien Mazarguil } else { 681c305090bSAdrien Mazarguil txq->elts_comp = comp; 682c305090bSAdrien Mazarguil } 68387011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 68487011737SAdrien Mazarguil /* Increment sent packets counter. */ 68587011737SAdrien Mazarguil txq->stats.opackets += i; 68687011737SAdrien Mazarguil #endif 6872e22920bSAdrien Mazarguil /* Ring QP doorbell. */ 688ac180a21SYongseok Koh mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe); 6892e22920bSAdrien Mazarguil return i; 6902e22920bSAdrien Mazarguil } 6912e22920bSAdrien Mazarguil 6922e22920bSAdrien Mazarguil /** 693230189d9SNélio Laranjeiro * Open a MPW session. 694230189d9SNélio Laranjeiro * 695230189d9SNélio Laranjeiro * @param txq 696230189d9SNélio Laranjeiro * Pointer to TX queue structure. 697230189d9SNélio Laranjeiro * @param mpw 698230189d9SNélio Laranjeiro * Pointer to MPW session structure. 699230189d9SNélio Laranjeiro * @param length 700230189d9SNélio Laranjeiro * Packet length. 
 */
static inline void
mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
{
	/* Mask the producer index to the WQE ring size (power of two). */
	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
	/*
	 * Data segments 2..4 spill into the next WQEBB; keep a typed pointer
	 * to that area so they can be addressed below.
	 */
	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
		(volatile struct mlx5_wqe_data_seg (*)[])
		tx_mlx5_wqe(txq, idx + 1);

	mpw->state = MLX5_MPW_STATE_OPENED;
	mpw->pkts_n = 0;
	/*
	 * All packets of a MPW session share this length; callers close the
	 * session when a packet of a different length arrives.
	 */
	mpw->len = length;
	mpw->total_len = 0;
	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
	/* The common packet length is advertised through the MSS field. */
	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
	mpw->wqe->eseg.inline_hdr_sz = 0;
	mpw->wqe->eseg.rsvd0 = 0;
	mpw->wqe->eseg.rsvd1 = 0;
	mpw->wqe->eseg.rsvd2 = 0;
	/* MPW reuses the TSO opcode combined with the MPW opcode modifier. */
	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
					     (txq->wqe_ci << 8) |
					     MLX5_OPCODE_TSO);
	mpw->wqe->ctrl[2] = 0;
	mpw->wqe->ctrl[3] = 0;
	/*
	 * First two data segments live in the opening WQEBB right after the
	 * control and Ethernet segments; the remaining three are in the
	 * following WQEBB (see dseg above).
	 */
	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
	mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
		(((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
	mpw->data.dseg[2] = &(*dseg)[0];
	mpw->data.dseg[3] = &(*dseg)[1];
	mpw->data.dseg[4] = &(*dseg)[2];
}
733230189d9SNélio Laranjeiro 734230189d9SNélio Laranjeiro /** 735230189d9SNélio Laranjeiro * Close a MPW session. 736230189d9SNélio Laranjeiro * 737230189d9SNélio Laranjeiro * @param txq 738230189d9SNélio Laranjeiro * Pointer to TX queue structure. 739230189d9SNélio Laranjeiro * @param mpw 740230189d9SNélio Laranjeiro * Pointer to MPW session structure. 741230189d9SNélio Laranjeiro */ 742230189d9SNélio Laranjeiro static inline void 743991b04f6SNélio Laranjeiro mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw) 744230189d9SNélio Laranjeiro { 745230189d9SNélio Laranjeiro unsigned int num = mpw->pkts_n; 746230189d9SNélio Laranjeiro 747230189d9SNélio Laranjeiro /* 748230189d9SNélio Laranjeiro * Store size in multiple of 16 bytes. Control and Ethernet segments 749230189d9SNélio Laranjeiro * count as 2. 750230189d9SNélio Laranjeiro */ 7516b30a6a8SShachar Beiser mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num)); 752230189d9SNélio Laranjeiro mpw->state = MLX5_MPW_STATE_CLOSED; 753230189d9SNélio Laranjeiro if (num < 3) 754230189d9SNélio Laranjeiro ++txq->wqe_ci; 755230189d9SNélio Laranjeiro else 756230189d9SNélio Laranjeiro txq->wqe_ci += 2; 757fdcb0f53SNélio Laranjeiro rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); 758fdcb0f53SNélio Laranjeiro rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); 759230189d9SNélio Laranjeiro } 760230189d9SNélio Laranjeiro 761230189d9SNélio Laranjeiro /** 762230189d9SNélio Laranjeiro * DPDK callback for TX with MPW support. 763230189d9SNélio Laranjeiro * 764230189d9SNélio Laranjeiro * @param dpdk_txq 765230189d9SNélio Laranjeiro * Generic pointer to TX queue structure. 766230189d9SNélio Laranjeiro * @param[in] pkts 767230189d9SNélio Laranjeiro * Packets to transmit. 768230189d9SNélio Laranjeiro * @param pkts_n 769230189d9SNélio Laranjeiro * Number of packets in array. 
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	unsigned int i = 0;	/* Number of packets sent. */
	unsigned int j = 0;	/* Number of mbuf segments enqueued. */
	uint16_t max_elts;	/* Free entries in the elts ring. */
	uint16_t max_wqe;	/* Free WQEBBs in the WQE ring. */
	unsigned int comp;
	struct mlx5_mpw mpw = {
		.state = MLX5_MPW_STATE_CLOSED,
	};

	if (unlikely(!pkts_n))
		return 0;
	/* Prefetch first packet cacheline. */
	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
	/* Start processing: reclaim mbufs of completed transmissions. */
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	/* A CQE slot must always be available. */
	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	if (unlikely(!max_wqe))
		return 0;
	do {
		struct rte_mbuf *buf = *(pkts++);
		uint32_t length;
		unsigned int segs_n = buf->nb_segs;
		uint32_t cs_flags;

		/*
		 * Make sure there is enough room to store this packet and
		 * that one ring entry remains unused.
		 */
		assert(segs_n);
		if (max_elts < segs_n)
			break;
		/* Do not bother with large packets MPW cannot handle. */
		if (segs_n > MLX5_MPW_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		max_elts -= segs_n;
		--pkts_n;
		/* Translate mbuf offload flags into checksum request bits. */
		cs_flags = txq_ol_cksum_to_cs(txq, buf);
		/* Retrieve packet information. */
		length = PKT_LEN(buf);
		assert(length);
		/*
		 * Start new session if packet differs: MPW batches only
		 * equal-length, single-segment packets with identical
		 * checksum flags.
		 */
		if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
		    ((mpw.len != length) ||
		     (segs_n != 1) ||
		     (mpw.wqe->eseg.cs_flags != cs_flags)))
			mlx5_mpw_close(txq, &mpw);
		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
			/*
			 * Multi-Packet WQE consumes at most two WQE.
			 * mlx5_mpw_new() expects to be able to use such
			 * resources.
			 */
			if (unlikely(max_wqe < 2))
				break;
			max_wqe -= 2;
			mlx5_mpw_new(txq, &mpw, length);
			mpw.wqe->eseg.cs_flags = cs_flags;
		}
		/* Multi-segment packets must be alone in their MPW. */
		assert((segs_n == 1) || (mpw.pkts_n == 0));
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
		/* Recompute length from segments for stats/assertions. */
		length = 0;
#endif
		/* Post one data segment per mbuf segment. */
		do {
			volatile struct mlx5_wqe_data_seg *dseg;
			uintptr_t addr;

			assert(buf);
			(*txq->elts)[elts_head++ & elts_m] = buf;
			dseg = mpw.data.dseg[mpw.pkts_n];
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			*dseg = (struct mlx5_wqe_data_seg){
				.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
				.lkey = mlx5_tx_mb2mr(txq, buf),
				.addr = rte_cpu_to_be_64(addr),
			};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
			length += DATA_LEN(buf);
#endif
			buf = buf->next;
			++mpw.pkts_n;
			++j;
		} while (--segs_n);
		assert(length == mpw.len);
		/* Session is full, close it. */
		if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
			mlx5_mpw_close(txq, &mpw);
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += length;
#endif
		++i;
	} while (pkts_n);
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
	/* Check whether completion threshold has been reached. */
	/* "j" includes both packets and segments. */
	comp = txq->elts_comp + j;
	if (comp >= MLX5_TX_COMP_THRESH) {
		volatile struct mlx5_wqe *wqe = mpw.wqe;

		/* Request completion on last WQE. */
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		/* Save elts_head in unused "immediate" field of WQE. */
		wqe->ctrl[3] = elts_head;
		txq->elts_comp = 0;
#ifndef NDEBUG
		++txq->cq_pi;
#endif
	} else {
		txq->elts_comp = comp;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell with the last posted WQE. */
	if (mpw.state == MLX5_MPW_STATE_OPENED)
		mlx5_mpw_close(txq, &mpw);
	mlx5_tx_dbrec(txq, mpw.wqe);
	txq->elts_head = elts_head;
	return i;
}

/**
 * Open a MPW inline session.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mpw
 *   Pointer to MPW session structure.
 * @param length
 *   Packet length.
 */
static inline void
mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
		    uint32_t length)
{
	/* Mask the producer index to the WQE ring size (power of two). */
	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
	struct mlx5_wqe_inl_small *inl;

	mpw->state = MLX5_MPW_INL_STATE_OPENED;
	mpw->pkts_n = 0;
	/* Common packet length shared by the whole session. */
	mpw->len = length;
	/* Running count of inlined bytes, updated by the caller. */
	mpw->total_len = 0;
	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
	/* Like regular MPW, inline MPW reuses the TSO opcode. */
	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
					     (txq->wqe_ci << 8) |
					     MLX5_OPCODE_TSO);
	mpw->wqe->ctrl[2] = 0;
	mpw->wqe->ctrl[3] = 0;
	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
	mpw->wqe->eseg.inline_hdr_sz = 0;
	mpw->wqe->eseg.cs_flags = 0;
	mpw->wqe->eseg.rsvd0 = 0;
	mpw->wqe->eseg.rsvd1 = 0;
	mpw->wqe->eseg.rsvd2 = 0;
	/*
	 * Inline data starts right after the control and Ethernet segments;
	 * mpw->data.raw is the write cursor used by the burst routine.
	 */
	inl = (struct mlx5_wqe_inl_small *)
		(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
	mpw->data.raw = (uint8_t *)&inl->raw;
}

/**
 * Close a MPW inline session.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mpw
 *   Pointer to MPW session structure.
 */
static inline void
mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
	unsigned int size;
	/* Inline segment sits after the control and Ethernet segments. */
	struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));

	/* Total WQE footprint: fixed header part plus inlined bytes. */
	size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
	/*
	 * Store size in multiple of 16 bytes. Control and Ethernet segments
	 * count as 2.
	 */
	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
					     MLX5_WQE_DS(size));
	mpw->state = MLX5_MPW_STATE_CLOSED;
	/* Record the inline byte count with the inline marker bit set. */
	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
	/* Advance by the number of WQEBBs consumed (rounded up). */
	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
}

/**
 * DPDK callback for TX with MPW inline support.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	unsigned int i = 0;	/* Number of packets sent. */
	unsigned int j = 0;	/* Number of mbuf segments enqueued. */
	uint16_t max_elts;	/* Free entries in the elts ring. */
	uint16_t max_wqe;	/* Free WQEBBs in the WQE ring. */
	unsigned int comp;
	/* Remaining inline budget of the currently open inline session. */
	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
	struct mlx5_mpw mpw = {
		.state = MLX5_MPW_STATE_CLOSED,
	};
	/*
	 * Compute the maximum number of WQE which can be consumed by inline
	 * code.
	 * - 2 DSEG for:
	 *   - 1 control segment,
	 *   - 1 Ethernet segment,
	 * - N Dseg from the inline request.
	 */
	const unsigned int wqe_inl_n =
		((2 * MLX5_WQE_DWORD_SIZE +
		  txq->max_inline * RTE_CACHE_LINE_SIZE) +
		 RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;

	if (unlikely(!pkts_n))
		return 0;
	/* Prefetch first packet cacheline. */
	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
	/* Start processing: reclaim mbufs of completed transmissions. */
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	/* A CQE slot must always be available. */
	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
	do {
		struct rte_mbuf *buf = *(pkts++);
		uintptr_t addr;
		uint32_t length;
		unsigned int segs_n = buf->nb_segs;
		uint8_t cs_flags;

		/*
		 * Make sure there is enough room to store this packet and
		 * that one ring entry remains unused.
		 */
		assert(segs_n);
		if (max_elts < segs_n)
			break;
		/* Do not bother with large packets MPW cannot handle. */
		if (segs_n > MLX5_MPW_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		max_elts -= segs_n;
		--pkts_n;
		/*
		 * Compute max_wqe in case less WQE were consumed in previous
		 * iteration.
		 */
		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
		/* Translate mbuf offload flags into checksum request bits. */
		cs_flags = txq_ol_cksum_to_cs(txq, buf);
		/* Retrieve packet information. */
		length = PKT_LEN(buf);
		/*
		 * Start new session if packet differs: both session kinds
		 * require equal-length, single-segment packets with
		 * identical checksum flags; an inline session additionally
		 * requires the packet to fit in the remaining inline room.
		 */
		if (mpw.state == MLX5_MPW_STATE_OPENED) {
			if ((mpw.len != length) ||
			    (segs_n != 1) ||
			    (mpw.wqe->eseg.cs_flags != cs_flags))
				mlx5_mpw_close(txq, &mpw);
		} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
			if ((mpw.len != length) ||
			    (segs_n != 1) ||
			    (length > inline_room) ||
			    (mpw.wqe->eseg.cs_flags != cs_flags)) {
				mlx5_mpw_inline_close(txq, &mpw);
				inline_room =
					txq->max_inline * RTE_CACHE_LINE_SIZE;
			}
		}
		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
			/*
			 * Packets too large to inline (or multi-segment)
			 * fall back to a regular MPW session; others open
			 * an inline session.
			 */
			if ((segs_n != 1) ||
			    (length > inline_room)) {
				/*
				 * Multi-Packet WQE consumes at most two WQE.
				 * mlx5_mpw_new() expects to be able to use
				 * such resources.
				 */
				if (unlikely(max_wqe < 2))
					break;
				max_wqe -= 2;
				mlx5_mpw_new(txq, &mpw, length);
				mpw.wqe->eseg.cs_flags = cs_flags;
			} else {
				if (unlikely(max_wqe < wqe_inl_n))
					break;
				max_wqe -= wqe_inl_n;
				mlx5_mpw_inline_new(txq, &mpw, length);
				mpw.wqe->eseg.cs_flags = cs_flags;
			}
		}
		/* Multi-segment packets must be alone in their MPW. */
		assert((segs_n == 1) || (mpw.pkts_n == 0));
		if (mpw.state == MLX5_MPW_STATE_OPENED) {
			/* Regular MPW path: post one dseg per segment. */
			assert(inline_room ==
			       txq->max_inline * RTE_CACHE_LINE_SIZE);
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
			length = 0;
#endif
			do {
				volatile struct mlx5_wqe_data_seg *dseg;

				assert(buf);
				(*txq->elts)[elts_head++ & elts_m] = buf;
				dseg = mpw.data.dseg[mpw.pkts_n];
				addr = rte_pktmbuf_mtod(buf, uintptr_t);
				*dseg = (struct mlx5_wqe_data_seg){
					.byte_count =
					       rte_cpu_to_be_32(DATA_LEN(buf)),
					.lkey = mlx5_tx_mb2mr(txq, buf),
					.addr = rte_cpu_to_be_64(addr),
				};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
				length += DATA_LEN(buf);
#endif
				buf = buf->next;
				++mpw.pkts_n;
				++j;
			} while (--segs_n);
			assert(length == mpw.len);
			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
				mlx5_mpw_close(txq, &mpw);
		} else {
			/* Inline path: copy packet data into the WQE ring. */
			unsigned int max;

			assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
			assert(length <= inline_room);
			assert(length == DATA_LEN(buf));
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			(*txq->elts)[elts_head++ & elts_m] = buf;
			/* Maximum number of bytes before wrapping. */
			max = ((((uintptr_t)(txq->wqes)) +
				(1 << txq->wqe_n) *
				MLX5_WQE_SIZE) -
			       (uintptr_t)mpw.data.raw);
			if (length > max) {
				/* Copy wraps around the end of the ring. */
				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
					   (void *)addr,
					   max);
				mpw.data.raw = (volatile void *)txq->wqes;
				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
					   (void *)(addr + max),
					   length - max);
				mpw.data.raw += length - max;
			} else {
				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
					   (void *)addr,
					   length);

				if (length == max)
					mpw.data.raw =
						(volatile void *)txq->wqes;
				else
					mpw.data.raw += length;
			}
			++mpw.pkts_n;
			mpw.total_len += length;
			++j;
			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
				mlx5_mpw_inline_close(txq, &mpw);
				inline_room =
					txq->max_inline * RTE_CACHE_LINE_SIZE;
			} else {
				inline_room -= length;
			}
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += length;
#endif
		++i;
	} while (pkts_n);
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
	/* Check whether completion threshold has been reached. */
	/* "j" includes both packets and segments. */
	comp = txq->elts_comp + j;
	if (comp >= MLX5_TX_COMP_THRESH) {
		volatile struct mlx5_wqe *wqe = mpw.wqe;

		/* Request completion on last WQE. */
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		/* Save elts_head in unused "immediate" field of WQE. */
		wqe->ctrl[3] = elts_head;
		txq->elts_comp = 0;
#ifndef NDEBUG
		++txq->cq_pi;
#endif
	} else {
		txq->elts_comp = comp;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell with the last posted WQE. */
	if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
		mlx5_mpw_inline_close(txq, &mpw);
	else if (mpw.state == MLX5_MPW_STATE_OPENED)
		mlx5_mpw_close(txq, &mpw);
	mlx5_tx_dbrec(txq, mpw.wqe);
	txq->elts_head = elts_head;
	return i;
}

/**
 * Open an Enhanced MPW session.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mpw
 *   Pointer to MPW session structure.
 * @param padding
 *   When non-zero, pad the first two DWORDs of the session with a
 *   zero-length inline header.
 */
static inline void
mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
{
	/* Mask the producer index to the WQE ring size (power of two). */
	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);

	mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
	mpw->pkts_n = 0;
	/* total_len tracks the WQE footprint, starting with its header. */
	mpw->total_len = sizeof(struct mlx5_wqe);
	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
	mpw->wqe->ctrl[0] =
		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
				 (txq->wqe_ci << 8) |
				 MLX5_OPCODE_ENHANCED_MPSW);
	mpw->wqe->ctrl[2] = 0;
	mpw->wqe->ctrl[3] = 0;
	/* Enhanced MPW leaves the Ethernet segment zeroed. */
	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
	if (unlikely(padding)) {
		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);

		/* Pad the first 2 DWORDs with zero-length inline header. */
		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
			rte_cpu_to_be_32(MLX5_INLINE_SEG);
		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
		/* Start from the next WQEBB. */
		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
	} else {
		/* Payload starts right after the WQE header. */
		mpw->data.raw = (volatile void *)(mpw->wqe + 1);
	}
}

/**
 * Close an Enhanced MPW session.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mpw
 *   Pointer to MPW session structure.
 *
 * @return
 *   Number of consumed WQEs.
 */
static inline uint16_t
mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
	uint16_t ret;

	/* Store size in multiple of 16 bytes. Control and Ethernet segments
	 * count as 2.
	 */
	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
					     MLX5_WQE_DS(mpw->total_len));
	mpw->state = MLX5_MPW_STATE_CLOSED;
	/* Number of whole WQEBBs consumed, rounding the total up. */
	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
	/* Advance the producer index past this session. */
	txq->wqe_ci += ret;
	return ret;
}

/**
 * TX with Enhanced MPW support.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	       uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	unsigned int i = 0; /* Packets consumed from pkts[]. */
	unsigned int j = 0; /* Packets referenced (not inlined) in elts[]. */
	uint16_t max_elts;
	uint16_t max_wqe;
	/* Per-session inline budget in bytes (0 disables inlining). */
	unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
	unsigned int mpw_room = 0; /* Bytes left in the current session. */
	unsigned int inl_pad = 0;  /* Alignment padding before next data. */
	uint32_t inl_hdr;
	struct mlx5_mpw mpw = {
		.state = MLX5_MPW_STATE_CLOSED,
	};

	if (unlikely(!pkts_n))
		return 0;
	/* Start processing: reap completions to free elts ring room. */
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	/* A CQE slot must always be available. */
	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	if (unlikely(!max_wqe))
		return 0;
	do {
		struct rte_mbuf *buf = *(pkts++);
		uintptr_t addr;
		unsigned int n;
		unsigned int do_inline = 0; /* Whether inline is possible. */
		uint32_t length;
		uint8_t cs_flags;

		/* Multi-segmented packet is handled in slow-path outside. */
		assert(NB_SEGS(buf) == 1);
		/* Make sure there is enough room to store this packet. */
		if (max_elts - j == 0)
			break;
		cs_flags = txq_ol_cksum_to_cs(txq, buf);
		/* Retrieve packet information. */
		length = PKT_LEN(buf);
		/* Start new session if:
		 * - multi-segment packet
		 * - no space left even for a dseg
		 * - next packet can be inlined with a new WQE
		 * - cs_flag differs
		 */
		if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
			if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
			     mpw_room) ||
			    (length <= txq->inline_max_packet_sz &&
			     inl_pad + sizeof(inl_hdr) + length >
			     mpw_room) ||
			    (mpw.wqe->eseg.cs_flags != cs_flags))
				max_wqe -= mlx5_empw_close(txq, &mpw);
		}
		if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
			/* In Enhanced MPW, inline as much as the budget is
			 * allowed. The remaining space is to be filled with
			 * dsegs. If the title WQEBB isn't padded, it will have
			 * 2 dsegs there.
			 */
			mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
					   (max_inline ? max_inline :
					    pkts_n * MLX5_WQE_DWORD_SIZE) +
					   MLX5_WQE_SIZE);
			if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
				break;
			/* Don't pad the title WQEBB to not waste WQ. */
			mlx5_empw_new(txq, &mpw, 0);
			mpw_room -= mpw.total_len;
			inl_pad = 0;
			do_inline = length <= txq->inline_max_packet_sz &&
				    sizeof(inl_hdr) + length <= mpw_room &&
				    !txq->mpw_hdr_dseg;
			mpw.wqe->eseg.cs_flags = cs_flags;
		} else {
			/* Evaluate whether the next packet can be inlined.
			 * Inlininig is possible when:
			 * - length is less than configured value
			 * - length fits for remaining space
			 * - not required to fill the title WQEBB with dsegs
			 */
			do_inline =
				length <= txq->inline_max_packet_sz &&
				inl_pad + sizeof(inl_hdr) + length <=
				 mpw_room &&
				(!txq->mpw_hdr_dseg ||
				 mpw.total_len >= MLX5_WQE_SIZE);
		}
		if (max_inline && do_inline) {
			/* Inline packet into WQE. */
			unsigned int max;

			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
			assert(length == DATA_LEN(buf));
			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			/* Skip the alignment padding left by the previous
			 * packet before writing new data.
			 */
			mpw.data.raw = (volatile void *)
				((uintptr_t)mpw.data.raw + inl_pad);
			max = tx_mlx5_wq_tailroom(txq,
					(void *)(uintptr_t)mpw.data.raw);
			/* Copy inline header. */
			mpw.data.raw = (volatile void *)
				mlx5_copy_to_wq(
					  (void *)(uintptr_t)mpw.data.raw,
					  &inl_hdr,
					  sizeof(inl_hdr),
					  (void *)(uintptr_t)txq->wqes,
					  max);
			max = tx_mlx5_wq_tailroom(txq,
					(void *)(uintptr_t)mpw.data.raw);
			/* Copy packet data. */
			mpw.data.raw = (volatile void *)
				mlx5_copy_to_wq(
					  (void *)(uintptr_t)mpw.data.raw,
					  (void *)addr,
					  length,
					  (void *)(uintptr_t)txq->wqes,
					  max);
			++mpw.pkts_n;
			mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
			/* No need to get completion as the entire packet is
			 * copied to WQ. Free the buf right away.
			 */
			rte_pktmbuf_free_seg(buf);
			mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
			/* Add pad in the next packet if any. */
			inl_pad = (((uintptr_t)mpw.data.raw +
					(MLX5_WQE_DWORD_SIZE - 1)) &
					~(MLX5_WQE_DWORD_SIZE - 1)) -
				  (uintptr_t)mpw.data.raw;
		} else {
			/* No inline. Load a dseg of packet pointer. */
			volatile rte_v128u32_t *dseg;

			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
			assert((inl_pad + sizeof(*dseg)) <= mpw_room);
			assert(length == DATA_LEN(buf));
			/* Wrap to the start of the WQ when there is no
			 * tailroom left for a full dseg.
			 */
			if (!tx_mlx5_wq_tailroom(txq,
					(void *)((uintptr_t)mpw.data.raw
						+ inl_pad)))
				dseg = (volatile void *)txq->wqes;
			else
				dseg = (volatile void *)
					((uintptr_t)mpw.data.raw +
					 inl_pad);
			/* Keep the mbuf referenced until its completion. */
			(*txq->elts)[elts_head++ & elts_m] = buf;
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
				rte_prefetch2((void *)(addr +
						n * RTE_CACHE_LINE_SIZE));
			addr = rte_cpu_to_be_64(addr);
			/* Fill dseg: byte count, lkey, address (big endian). */
			*dseg = (rte_v128u32_t) {
				rte_cpu_to_be_32(length),
				mlx5_tx_mb2mr(txq, buf),
				addr,
				addr >> 32,
			};
			mpw.data.raw = (volatile void *)(dseg + 1);
			mpw.total_len += (inl_pad + sizeof(*dseg));
			++j;
			++mpw.pkts_n;
			mpw_room -= (inl_pad + sizeof(*dseg));
			inl_pad = 0;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += length;
#endif
		++i;
	} while (i < pkts_n);
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
	/* Check whether completion threshold has been reached. */
	if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
			(uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
			 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
		volatile struct mlx5_wqe *wqe = mpw.wqe;

		/* Request completion on last WQE. */
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		/* Save elts_head in unused "immediate" field of WQE. */
		wqe->ctrl[3] = elts_head;
		txq->elts_comp = 0;
		txq->mpw_comp = txq->wqe_ci;
		/* Track outstanding completion requests (debug builds only). */
#ifndef NDEBUG
		++txq->cq_pi;
#endif
	} else {
		txq->elts_comp += j;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Close the session before ringing the doorbell. */
	if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
		mlx5_empw_close(txq, &mpw);
	/* Ring QP doorbell. */
	mlx5_tx_dbrec(txq, mpw.wqe);
	txq->elts_head = elts_head;
	return i;
}

/**
 * DPDK callback for TX with Enhanced MPW support.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	uint16_t nb_tx = 0;

	/* Split the burst into runs of multi-segment packets (slow path)
	 * and runs of single-segment packets (Enhanced MPW fast path).
	 */
	while (pkts_n > nb_tx) {
		uint16_t n;
		uint16_t ret;

		n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
		if (n) {
			ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
			if (!ret)
				break;
			nb_tx += ret;
		}
		n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
		if (n) {
			ret = txq_burst_empw(txq, &pkts[nb_tx], n);
			if (!ret)
				break;
			nb_tx += ret;
		}
	}
	return nb_tx;
}

/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
{
	uint8_t idx;
	uint8_t pinfo = cqe->pkt_info;
	uint16_t ptype = cqe->hdr_type_etc;

	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
	/* Lookup table maps the CQE bit layout to RTE_PTYPE_* values. */
	return mlx5_ptype_table[idx];
}

/**
 * Get size of the next packet for a given CQE. For compressed CQEs, the
 * consumer index is updated only once all packets of the current one have
 * been processed.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param cqe
 *   CQE to process.
 * @param[out] rss_hash
 *   Packet RSS Hash result.
 *
 * @return
 *   Packet size in bytes (0 if there is none), -1 in case of completion
 *   with error.
 */
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, uint32_t *rss_hash)
{
	/* Persistent CQE-compression state kept across calls. */
	struct rxq_zip *zip = &rxq->zip;
	uint16_t cqe_n = cqe_cnt + 1;
	int len = 0;
	uint16_t idx, end;

	/* Process compressed data in the CQE and mini arrays. */
	if (zip->ai) {
		/* A compressed session is in progress: mini-CQEs overlay the
		 * CQE ring starting at zip->ca.
		 */
		volatile struct mlx5_mini_cqe8 (*mc)[8] =
			(volatile struct mlx5_mini_cqe8 (*)[8])
			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);

		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
		if ((++zip->ai & 7) == 0) {
			/* Invalidate consumed CQEs */
			idx = zip->ca;
			end = zip->na;
			while (idx != end) {
				(*rxq->cqes)[idx & cqe_cnt].op_own =
					MLX5_CQE_INVALIDATE;
				++idx;
			}
			/*
			 * Increment consumer index to skip the number of
			 * CQEs consumed. Hardware leaves holes in the CQ
			 * ring for software use.
			 */
			zip->ca = zip->na;
			zip->na += 8;
		}
		if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
			/* Invalidate the rest */
			idx = zip->ca;
			end = zip->cq_ci;

			while (idx != end) {
				(*rxq->cqes)[idx & cqe_cnt].op_own =
					MLX5_CQE_INVALIDATE;
				++idx;
			}
			/* Session fully consumed: resume normal CQE polling. */
			rxq->cq_ci = zip->cq_ci;
			zip->ai = 0;
		}
	/* No compressed data, get next CQE and verify if it is compressed. */
	} else {
		int ret;
		int8_t op_own;

		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
		if (unlikely(ret == 1))
			return 0;
		++rxq->cq_ci;
		op_own = cqe->op_own;
		/* Read the ownership byte before the rest of the CQE. */
		rte_cio_rmb();
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
			volatile struct mlx5_mini_cqe8 (*mc)[8] =
				(volatile struct mlx5_mini_cqe8 (*)[8])
				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
							  cqe_cnt].pkt_info);

			/* Fix endianness. */
			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
			/*
			 * Current mini array position is the one returned by
			 * check_cqe64().
			 *
			 * If completion comprises several mini arrays, as a
			 * special case the second one is located 7 CQEs after
			 * the initial CQE instead of 8 for subsequent ones.
			 */
			zip->ca = rxq->cq_ci;
			zip->na = zip->ca + 7;
			/* Compute the next non compressed CQE. */
			--rxq->cq_ci;
			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
			/* Get packet size to return. */
			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
			zip->ai = 1;
			/* Prefetch all the entries to be invalidated */
			idx = zip->ca;
			end = zip->cq_ci;
			while (idx != end) {
				rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
				++idx;
			}
		} else {
			/* Plain CQE: take length and hash directly from it. */
			len = rte_be_to_cpu_32(cqe->byte_cnt);
			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
		}
		/* Error while receiving packet. */
		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
			return -1;
	}
	return len;
}

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
	uint32_t ol_flags = 0;
	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

	/* Map the HW L3/L4 checksum-valid bits to mbuf offload flags. */
	ol_flags =
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L3_HDR_VALID,
			  PKT_RX_IP_CKSUM_GOOD) |
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L4_HDR_VALID,
			  PKT_RX_L4_CKSUM_GOOD);
	/* NOTE(review): for tunneled packets the same *_HDR_VALID bits are
	 * OR'ed in again — presumably they then describe the inner headers;
	 * confirm against the NIC CQE format documentation.
	 */
	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
		ol_flags |=
			TRANSPOSE(flags,
				  MLX5_CQE_RX_L3_HDR_VALID,
				  PKT_RX_IP_CKSUM_GOOD) |
			TRANSPOSE(flags,
				  MLX5_CQE_RX_L4_HDR_VALID,
				  PKT_RX_L4_CKSUM_GOOD);
	return ol_flags;
}

/**
 * DPDK callback for RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
	const unsigned int sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL; /* Head of the packet being assembled. */
	struct rte_mbuf *seg = NULL; /* Last segment chained onto pkt. */
	volatile struct mlx5_cqe *cqe =
		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
	unsigned int i = 0;
	/* rq_ci counts strides of 2^sges_n buffers; work in buffer units. */
	unsigned int rq_ci = rxq->rq_ci << sges_n;
	int len = 0; /* keep its value across iterations. */

	while (pkts_n) {
		unsigned int idx = rq_ci & wqe_cnt;
		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		uint32_t rss_hash_res = 0;

		if (pkt)
			NEXT(seg) = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(cqe);
		rte_prefetch0(wqe);
		/* Allocate a replacement buffer for the ring slot. */
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * no buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			/* Unwind the partially assembled chain. */
			while (pkt != seg) {
				assert(pkt != (*rxq->elts)[idx]);
				rep = NEXT(pkt);
				NEXT(pkt) = NULL;
				NB_SEGS(pkt) = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
			break;
		}
		if (!pkt) {
			/* First segment: poll the CQ for the packet length. */
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
					       &rss_hash_res);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			if (unlikely(len == -1)) {
				/* RX error, packet is likely too large. */
				rte_mbuf_raw_free(rep);
				++rxq->stats.idropped;
				goto skip;
			}
			pkt = seg;
			assert(len >= (rxq->crc_present << 2));
			/* Update packet information. */
			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
			pkt->ol_flags = 0;
			if (rss_hash_res && rxq->rss_hash) {
				pkt->hash.rss = rss_hash_res;
				pkt->ol_flags = PKT_RX_RSS_HASH;
			}
			if (rxq->mark &&
			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
				pkt->ol_flags |= PKT_RX_FDIR;
				if (cqe->sop_drop_qpn !=
				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
					uint32_t mark = cqe->sop_drop_qpn;

					pkt->ol_flags |= PKT_RX_FDIR_ID;
					pkt->hash.fdir.hi =
						mlx5_flow_mark_get(mark);
				}
			}
			if (rxq->csum | rxq->csum_l2tun)
				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
			if (rxq->vlan_strip &&
			    (cqe->hdr_type_etc &
			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
				pkt->ol_flags |= PKT_RX_VLAN |
					PKT_RX_VLAN_STRIPPED;
				pkt->vlan_tci =
					rte_be_to_cpu_16(cqe->vlan_info);
			}
			if (rxq->hw_timestamp) {
				pkt->timestamp =
					rte_be_to_cpu_64(cqe->timestamp);
				pkt->ol_flags |= PKT_RX_TIMESTAMP;
			}
			/* Strip the CRC when HW leaves it in the buffer. */
			if (rxq->crc_present)
				len -= ETHER_CRC_LEN;
			PKT_LEN(pkt) = len;
		}
		/* Seed the replacement mbuf from the consumed one. */
		DATA_LEN(rep) = DATA_LEN(seg);
		PKT_LEN(rep) = PKT_LEN(seg);
		SET_DATA_OFF(rep, DATA_OFF(seg));
		PORT(rep) = PORT(seg);
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		if (len > DATA_LEN(seg)) {
			/* More data in further segments of this stride. */
			len -= DATA_LEN(seg);
			++NB_SEGS(pkt);
			++rq_ci;
			continue;
		}
		DATA_LEN(seg) = len;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
skip:
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	/* Make descriptor/CQ writes visible before each doorbell update. */
	rte_cio_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
{
	/* Always report zero packets sent while the queue is disabled. */
	return 0;
}

/**
 * Dummy DPDK callback for RX.
19112e22920bSAdrien Mazarguil * 19122e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 19132e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 19142e22920bSAdrien Mazarguil * 19152e22920bSAdrien Mazarguil * @param dpdk_rxq 19162e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 19172e22920bSAdrien Mazarguil * @param[out] pkts 19182e22920bSAdrien Mazarguil * Array to store received packets. 19192e22920bSAdrien Mazarguil * @param pkts_n 19202e22920bSAdrien Mazarguil * Maximum number of packets in array. 19212e22920bSAdrien Mazarguil * 19222e22920bSAdrien Mazarguil * @return 19232e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 19242e22920bSAdrien Mazarguil */ 19252e22920bSAdrien Mazarguil uint16_t 192656f08e16SNélio Laranjeiro removed_rx_burst(void *dpdk_txq __rte_unused, 192756f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 192856f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 19292e22920bSAdrien Mazarguil { 19302e22920bSAdrien Mazarguil return 0; 19312e22920bSAdrien Mazarguil } 19326cb559d6SYongseok Koh 19336cb559d6SYongseok Koh /* 19346cb559d6SYongseok Koh * Vectorized Rx/Tx routines are not compiled in when required vector 19356cb559d6SYongseok Koh * instructions are not supported on a target architecture. The following null 19366cb559d6SYongseok Koh * stubs are needed for linkage when those are not included outside of this file 19376cb559d6SYongseok Koh * (e.g. mlx5_rxtx_vec_sse.c for x86). 
19386cb559d6SYongseok Koh */ 19396cb559d6SYongseok Koh 19406cb559d6SYongseok Koh uint16_t __attribute__((weak)) 194156f08e16SNélio Laranjeiro mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused, 194256f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 194356f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 19446cb559d6SYongseok Koh { 19456cb559d6SYongseok Koh return 0; 19466cb559d6SYongseok Koh } 19476cb559d6SYongseok Koh 19486cb559d6SYongseok Koh uint16_t __attribute__((weak)) 194956f08e16SNélio Laranjeiro mlx5_tx_burst_vec(void *dpdk_txq __rte_unused, 195056f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 195156f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 19526cb559d6SYongseok Koh { 19536cb559d6SYongseok Koh return 0; 19546cb559d6SYongseok Koh } 19556cb559d6SYongseok Koh 19566cb559d6SYongseok Koh uint16_t __attribute__((weak)) 195756f08e16SNélio Laranjeiro mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, 195856f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 195956f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 19606cb559d6SYongseok Koh { 19616cb559d6SYongseok Koh return 0; 19626cb559d6SYongseok Koh } 19636cb559d6SYongseok Koh 19646cb559d6SYongseok Koh int __attribute__((weak)) 1965*af4f09f2SNélio Laranjeiro mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused) 19666cb559d6SYongseok Koh { 19676cb559d6SYongseok Koh return -ENOTSUP; 19686cb559d6SYongseok Koh } 19696cb559d6SYongseok Koh 19706cb559d6SYongseok Koh int __attribute__((weak)) 1971*af4f09f2SNélio Laranjeiro mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused) 19726cb559d6SYongseok Koh { 19736cb559d6SYongseok Koh return -ENOTSUP; 19746cb559d6SYongseok Koh } 19756cb559d6SYongseok Koh 19766cb559d6SYongseok Koh int __attribute__((weak)) 1977*af4f09f2SNélio Laranjeiro mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) 19786cb559d6SYongseok Koh { 19796cb559d6SYongseok Koh return -ENOTSUP; 19806cb559d6SYongseok Koh } 
19816cb559d6SYongseok Koh 19826cb559d6SYongseok Koh int __attribute__((weak)) 1983*af4f09f2SNélio Laranjeiro mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) 19846cb559d6SYongseok Koh { 19856cb559d6SYongseok Koh return -ENOTSUP; 19866cb559d6SYongseok Koh } 1987