/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015-2019 Mellanox Technologies, Ltd
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
	MLX5_TXCMP_CODE_EXIT = 0,
	MLX5_TXCMP_CODE_ERROR,
	MLX5_TXCMP_CODE_SINGLE,
	MLX5_TXCMP_CODE_MULTI,
	MLX5_TXCMP_CODE_TSO,
	MLX5_TXCMP_CODE_EMPW,
};

/*
 * These defines are used to configure the Tx burst routine option set
 * supported at compile time. Options that are not specified are optimized
 * out because the related if conditions can be evaluated at compile time.
 * Offloads with a bigger runtime check overhead (more CPU cycles needed
 * to skip them) should have a bigger index - this is needed to select
 * the best matching routine when there is no exact match and some
 * offloads are not actually requested.
 */
#define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets. */
#define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported. */
#define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads. */
#define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
#define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
#define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported. */
#define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
#define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported. */

/* The most common offloads groups. */
#define MLX5_TXOFF_CONFIG_NONE 0
#define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
				MLX5_TXOFF_CONFIG_TSO | \
				MLX5_TXOFF_CONFIG_SWP | \
				MLX5_TXOFF_CONFIG_CSUM | \
				MLX5_TXOFF_CONFIG_INLINE | \
				MLX5_TXOFF_CONFIG_VLAN | \
				MLX5_TXOFF_CONFIG_METADATA)

#define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)

#define MLX5_TXOFF_DECL(func, olx) \
static uint16_t mlx5_tx_burst_##func(void *txq, \
				     struct rte_mbuf **pkts, \
				     uint16_t pkts_n) \
{ \
	return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
				  pkts, pkts_n, (olx)); \
}

#define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
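/*
 * Illustrative sketch (the routine name and offload combination below are
 * hypothetical, not a declaration from this file): the macros above expand
 * to specialized burst routines and routine-table entries like
 *
 *	MLX5_TXOFF_DECL(full_empw,
 *			MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
 *
 *	MLX5_TXOFF_INFO(full_empw,
 *			MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
 *
 * Because "olx" is a compile-time constant in each expansion, checks such
 * as "if (MLX5_TXOFF_CONFIG(TSO))" inside mlx5_tx_burst_tmpl() are
 * resolved at compile time and the unused branches are optimized away.
 */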
static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);

static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);

static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);

static __rte_always_inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);

static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
		 const unsigned int strd_n);

static int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
			struct mlx5_mp_arg_queue_state_modify *sm);

static inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
			volatile struct mlx5_cqe *restrict cqe,
			uint32_t phcsum);

static inline void
mlx5_lro_update_hdr(uint8_t *restrict padd,
		    volatile struct mlx5_cqe *restrict cqe,
		    uint32_t len);

uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};

uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;

/**
 * Build a table to translate Rx completion flags to packet type.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 */
void
mlx5_set_ptype_table(void)
{
	unsigned int i;
	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;

	/* Last entry must not be overwritten, reserved for errored packet. */
	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
		(*p)[i] = RTE_PTYPE_UNKNOWN;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
	/* L2 */
	(*p)[0x00] = RTE_PTYPE_L2_ETHER;
	/* L3 */
	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	/* Fragmented */
	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	/* TCP */
	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	/* UDP */
	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	/* Repeat with outer_l3_type being set. Just in case. */
	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	/* Tunneled - L3 */
	(*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	/* Tunneled - Fragmented */
	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	/* Tunneled - TCP */
	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	/* Tunneled - UDP */
	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
}
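/*
 * Worked example for the table above: a completion reporting plain TCP
 * over IPv4 has l3_hdr_type = 2 and l4_hdr_type = 1, giving index
 * (1 << 2) | 2 = 0x06, which the initializer fills with
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 * Setting bit[6] (tunneled) selects the inner variants instead, e.g.
 * index 0x46 maps the same headers to RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN
 * and RTE_PTYPE_INNER_L4_TCP under the outer IPv4 header.
 */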
/**
 * Build a table to translate packet to checksum type of Verbs.
 */
void
mlx5_set_cksum_table(void)
{
	unsigned int i;
	uint8_t v;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
	for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
		v = 0;
		if (i & (1 << 9)) {
			/* Tunneled packet. */
			if (i & (1 << 8)) /* Outer IP. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (1 << 4)) /* Inner IP. */
				v |= MLX5_ETH_WQE_L3_INNER_CSUM;
			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
				v |= MLX5_ETH_WQE_L4_INNER_CSUM;
		} else {
			/* No tunnel. */
			if (i & (1 << 4)) /* IP. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
				v |= MLX5_ETH_WQE_L4_CSUM;
		}
		mlx5_cksum_table[i] = v;
	}
}

/**
 * Build a table to translate packet type of mbuf to SWP type of Verbs.
 */
void
mlx5_set_swp_types_table(void)
{
	unsigned int i;
	uint8_t v;

	/*
	 * The index should have:
	 * bit[0:1] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
		v = 0;
		if (i & (1 << 8))
			v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
		if (i & (1 << 9))
			v |= MLX5_ETH_WQE_L4_OUTER_UDP;
		if (i & (1 << 4))
			v |= MLX5_ETH_WQE_L3_INNER_IPV6;
		if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
			v |= MLX5_ETH_WQE_L4_INNER_UDP;
		mlx5_swp_types_table[i] = v;
	}
}
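/*
 * Worked example for the table above: an inner UDP datagram carried in an
 * outer IPv6/UDP tunnel contributes (PKT_TX_UDP_CKSUM >> 52) in bit[0:1],
 * bit[8] for PKT_TX_OUTER_IPV6 and bit[9] for the outer UDP header, so
 * the resulting entry is MLX5_ETH_WQE_L3_OUTER_IPV6 |
 * MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L4_INNER_UDP.
 */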
/**
 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
 * Flags must be preliminarily initialized to zero.
 *
 * @param loc
 *   Pointer to burst routine local context.
 * @param swp_flags
 *   Pointer to store Software Parser flags.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Software Parser offsets packed in dword.
 *   Software Parser flags are set by pointer.
 */
static __rte_always_inline uint32_t
txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
		uint8_t *swp_flags,
		unsigned int olx)
{
	uint64_t ol, tunnel;
	unsigned int idx, off;
	uint32_t set;

	if (!MLX5_TXOFF_CONFIG(SWP))
		return 0;
	ol = loc->mbuf->ol_flags;
	tunnel = ol & PKT_TX_TUNNEL_MASK;
	/*
	 * Check whether Software Parser is required.
	 * Only customized tunnels may ask for it.
	 */
	if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
		return 0;
	/*
	 * The index should have:
	 * bit[0:1] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
	idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
	*swp_flags = mlx5_swp_types_table[idx];
	/*
	 * Set offsets for SW parser. Since ConnectX-5, SW parser just
	 * complements HW parser. SW parser starts to engage only if HW parser
	 * can't reach a header. For the older devices, HW parser will not kick
	 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
	 * should be set regardless of HW offload.
	 */
	off = loc->mbuf->outer_l2_len;
	if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
		off += sizeof(struct rte_vlan_hdr);
	set = (off >> 1) << 8; /* Outer L3 offset. */
	off += loc->mbuf->outer_l3_len;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		set |= off >> 1; /* Outer L4 offset. */
	if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
		const uint64_t csum = ol & PKT_TX_L4_MASK;

		off += loc->mbuf->l2_len;
		set |= (off >> 1) << 24; /* Inner L3 offset. */
		if (csum == PKT_TX_TCP_CKSUM ||
		    csum == PKT_TX_UDP_CKSUM ||
		    (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
			off += loc->mbuf->l3_len;
			set |= (off >> 1) << 16; /* Inner L4 offset. */
		}
	}
	set = rte_cpu_to_le_32(set);
	return set;
}

/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Converted checksum flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
	uint32_t idx;
	uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
	const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
				       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
	idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
	return mlx5_cksum_table[idx];
}
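/*
 * Worked example for txq_ol_cksum_to_cs(): a non-tunneled mbuf requesting
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM is shifted right by 50 bits, placing
 * the L4 request in bit[2:3] and the IP request in bit[4], so
 * mlx5_cksum_table[] (built above) returns
 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
 */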
/**
 * Internal function to compute the number of used descriptors in an Rx queue.
 *
 * @param rxq
 *   The Rx queue.
 *
 * @return
 *   The number of used Rx descriptors.
 */
static uint32_t
rx_queue_count(struct mlx5_rxq_data *rxq)
{
	struct rxq_zip *zip = &rxq->zip;
	volatile struct mlx5_cqe *cqe;
	const unsigned int cqe_n = (1 << rxq->cqe_n);
	const unsigned int cqe_cnt = cqe_n - 1;
	unsigned int cq_ci;
	unsigned int used;

	/* If we are processing a compressed CQE. */
	if (zip->ai) {
		used = zip->cqe_cnt - zip->ca;
		cq_ci = zip->cq_ci;
	} else {
		used = 0;
		cq_ci = rxq->cq_ci;
	}
	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
		int8_t op_own;
		unsigned int n;

		op_own = cqe->op_own;
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
			n = rte_be_to_cpu_32(cqe->byte_cnt);
		else
			n = 1;
		cq_ci += n;
		used += n;
		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	}
	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
	return used;
}

/**
 * DPDK callback to check the status of an Rx descriptor.
 *
 * @param rx_queue
 *   The Rx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Rx descriptor.
 */
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);

	if (dev->rx_pkt_burst != mlx5_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (offset >= (1 << rxq->elts_n)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (offset < rx_queue_count(rxq))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
}
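/*
 * Usage sketch (application side; port and queue numbers are
 * hypothetical):
 *
 *	int st = rte_eth_rx_descriptor_status(port_id, rx_queue_id, 0);
 *
 *	if (st == RTE_ETH_RX_DESC_DONE)
 *		... at least one received packet is pending ...
 *
 * rte_eth_rx_descriptor_status() dispatches to the callback above, so
 * the ENOTSUP/EINVAL cases surface to the application as negative
 * return values.
 */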
/**
 * DPDK callback to get the number of used descriptors in an Rx queue.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param rx_queue_id
 *   The Rx queue.
 *
 * @return
 *   The number of used Rx descriptors, or -EINVAL if the queue is invalid.
 */
uint32_t
mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq;

	if (dev->rx_pkt_burst != mlx5_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	rxq = (*priv->rxqs)[rx_queue_id];
	if (!rxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return rx_queue_count(rxq);
}

#define MLX5_SYSTEM_LOG_DIR "/var/log"
/**
 * Dump debug information to a log file.
 *
 * @param fname
 *   The file name.
 * @param hex_title
 *   If not NULL this string is printed as a header to the output
 *   and the output will be in hexadecimal view.
 * @param buf
 *   This is the buffer address to print out.
 * @param hex_len
 *   The number of bytes to dump out.
 */
void
mlx5_dump_debug_information(const char *fname, const char *hex_title,
			    const void *buf, unsigned int hex_len)
{
	FILE *fd;

	MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
	fd = fopen(path, "a+");
	if (!fd) {
		DRV_LOG(WARNING, "cannot open %s for debug dump\n",
			path);
		MKSTR(path2, "./%s", fname);
		fd = fopen(path2, "a+");
		if (!fd) {
			DRV_LOG(ERR, "cannot open %s for debug dump\n",
				path2);
			return;
		}
		DRV_LOG(INFO, "New debug dump in file %s\n", path2);
	} else {
		DRV_LOG(INFO, "New debug dump in file %s\n", path);
	}
	if (hex_title)
		rte_hexdump(fd, hex_title, buf, hex_len);
	else
		fprintf(fd, "%s", (const char *)buf);
	fprintf(fd, "\n\n\n");
	fclose(fd);
}
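/*
 * Usage sketch: the Tx/Rx error handlers below invoke this helper roughly
 * as follows (the file name, title and buffer variables are illustrative):
 *
 *	mlx5_dump_debug_information("dpdk_mlx5_port_0_rxq_0_0",
 *				    "MLX5 Error CQ:", cq_buf, cq_len);
 *
 * The dump is appended to /var/log/<fname>; when that file cannot be
 * opened the helper falls back to ./<fname> in the current directory.
 */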
/**
 * Move QP from error state to running state and initialize indexes.
 *
 * @param txq_ctrl
 *   Pointer to TX queue control structure.
 *
 * @return
 *   0 on success, else -1.
 */
static int
tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_mp_arg_queue_state_modify sm = {
		.is_wq = 0,
		.queue_id = txq_ctrl->txq.idx,
	};

	if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
		return -1;
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	return 0;
}

/*
 * Return 1 if the error CQE was already signed; otherwise sign it and
 * return 0.
 */
static int
check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
{
	static const uint8_t magic[] = "seen";
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic); ++i)
		if (!ret || err_cqe->rsvd1[i] != magic[i]) {
			ret = 0;
			err_cqe->rsvd1[i] = magic[i];
		}
	return ret;
}

/**
 * Handle error CQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param error_cqe
 *   Pointer to the error CQE.
 *
 * @return
 *   Negative value if queue recovery failed,
 *   the last Tx buffer element to free otherwise.
 */
int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
			 volatile struct mlx5_err_cqe *err_cqe)
{
	if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
		const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
		struct mlx5_txq_ctrl *txq_ctrl =
				container_of(txq, struct mlx5_txq_ctrl, txq);
		uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
		int seen = check_err_cqe_seen(err_cqe);

		if (!seen && txq_ctrl->dump_file_n <
		    txq_ctrl->priv->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u SQN = %u wqe_counter = %u "
			      "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
			      txq->cqe_s, txq->qp_num_8s >> 8,
			      rte_be_to_cpu_16(err_cqe->wqe_counter),
			      txq->wqe_ci, txq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
			      PORT_ID(txq_ctrl->priv), txq->idx,
			      txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
						    txq->cqes),
						    sizeof(*err_cqe) *
						    (1 << txq->cqe_n));
			mlx5_dump_debug_information(name, "MLX5 Error SQ:",
						    (const void *)((uintptr_t)
						    txq->wqes),
						    MLX5_WQE_SIZE *
						    (1 << txq->wqe_n));
			txq_ctrl->dump_file_n++;
		}
		if (!seen)
			/*
			 * Count errors in WQE units.
			 * Later it can be improved to count error packets,
			 * for example, by SQ parsing to find how many packets
			 * should be counted for each WQE.
			 */
			txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
					       new_wqe_pi) & wqe_m;
		if (tx_recover_qp(txq_ctrl) == 0) {
			txq->cq_ci++;
			/* Release all the remaining buffers. */
			return txq->elts_head;
		}
		/* Recovering failed - try again later on the same WQE. */
		return -1;
	} else {
		txq->cq_ci++;
	}
	/* Do not release buffers. */
	return txq->elts_tail;
}
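/*
 * Caller-side sketch (simplified; "tail" and the surrounding completion
 * loop are illustrative, not code from this file):
 *
 *	int tail = mlx5_tx_error_cqe_handle(txq, err_cqe);
 *
 *	if (unlikely(tail < 0))
 *		return;	... recovery failed, retry on the same WQE later ...
 *	... free the buffers between txq->elts_tail and tail, if any ...
 *
 * A return of txq->elts_head releases everything in flight, while
 * txq->elts_tail (the flush-error case) releases nothing.
 */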
/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
	uint8_t idx;
	uint8_t pinfo = cqe->pkt_info;
	uint16_t ptype = cqe->hdr_type_etc;

	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
	return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}

/**
 * Initialize Rx WQ and indexes.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 */
void
mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
{
	const unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;

	for (i = 0; (i != wqe_n); ++i) {
		volatile struct mlx5_wqe_data_seg *scat;
		uintptr_t addr;
		uint32_t byte_count;

		if (mlx5_rxq_mprq_enabled(rxq)) {
			struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];

			scat = &((volatile struct mlx5_wqe_mprq *)
				rxq->wqes)[i].dseg;
			addr = (uintptr_t)mlx5_mprq_buf_addr
					(buf, 1 << rxq->strd_num_n);
			byte_count = (1 << rxq->strd_sz_n) *
				     (1 << rxq->strd_num_n);
		} else {
			struct rte_mbuf *buf = (*rxq->elts)[i];

			scat = &((volatile struct mlx5_wqe_data_seg *)
					rxq->wqes)[i];
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			byte_count = DATA_LEN(buf);
		}
		/* scat->addr must be able to store a pointer. */
		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(addr),
			.byte_count = rte_cpu_to_be_32(byte_count),
			.lkey = mlx5_rx_addr2mr(rxq, addr),
		};
	}
	rxq->consumed_strd = 0;
	rxq->decompressed = 0;
	rxq->rq_pi = 0;
	rxq->zip = (struct rxq_zip){
		.ai = 0,
	};
	/* Update doorbell counter. */
	rxq->rq_ci = wqe_n >> rxq->sges_n;
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
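/*
 * Note on the doorbell value above: rq_ci counts descriptors of
 * (1 << sges_n) WQEs each, so, for example (illustrative numbers), a ring
 * of wqe_n = 512 elements with sges_n = 2 (four segments per descriptor)
 * is posted with rq_ci = 512 >> 2 = 128.
 */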
/**
 * Modify a Verbs/DevX queue state.
 * This must be called from the primary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 in case of success else non-zero value and rte_errno is set.
 */
int
mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
			const struct mlx5_mp_arg_queue_state_modify *sm)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (sm->is_wq) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
			struct ibv_wq_attr mod = {
				.attr_mask = IBV_WQ_ATTR_STATE,
				.wq_state = sm->state,
			};

			ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
		} else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
			struct mlx5_devx_modify_rq_attr rq_attr;

			memset(&rq_attr, 0, sizeof(rq_attr));
			if (sm->state == IBV_WQS_RESET) {
				rq_attr.rq_state = MLX5_RQC_STATE_ERR;
				rq_attr.state = MLX5_RQC_STATE_RST;
			} else if (sm->state == IBV_WQS_RDY) {
				rq_attr.rq_state = MLX5_RQC_STATE_RST;
				rq_attr.state = MLX5_RQC_STATE_RDY;
			} else if (sm->state == IBV_WQS_ERR) {
				rq_attr.rq_state = MLX5_RQC_STATE_RDY;
				rq_attr.state = MLX5_RQC_STATE_ERR;
			}
			ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
						      &rq_attr);
		}
		if (ret) {
			DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
				sm->state, strerror(errno));
			rte_errno = errno;
			return ret;
		}
	} else {
		struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
		struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
		struct ibv_qp_attr mod = {
			.qp_state = IBV_QPS_RESET,
			.port_num = (uint8_t)priv->ibv_port,
		};
		struct ibv_qp *qp = txq_ctrl->ibv->qp;

		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
				"%s\n", strerror(errno));
			rte_errno = errno;
			return ret;
		}
		mod.qp_state = IBV_QPS_INIT;
		ret = mlx5_glue->modify_qp(qp, &mod,
					   (IBV_QP_STATE | IBV_QP_PORT));
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		mod.qp_state = IBV_QPS_RTR;
		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		mod.qp_state = IBV_QPS_RTS;
		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	return 0;
}
/**
 * Modify a Verbs queue state.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 in case of success else non-zero value.
 */
static int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
			struct mlx5_mp_arg_queue_state_modify *sm)
{
	int ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_queue_state_modify_primary(dev, sm);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mp_req_queue_state_modify(dev, sm);
		break;
	default:
		break;
	}
	return ret;
}
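/*
 * Illustrative call (this is the pattern used by the recovery paths
 * below): moving an RQ back to the ready state works from any process
 * type:
 *
 *	struct mlx5_mp_arg_queue_state_modify sm = {
 *		.is_wq = 1,
 *		.queue_id = rxq->idx,
 *		.state = IBV_WQS_RDY,
 *	};
 *
 *	if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
 *		... recovery failed ...
 *
 * In a secondary process the request is relayed over the multi-process
 * channel to the primary, which owns the Verbs/DevX objects.
 */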
/**
 * Handle an Rx error.
 * The function moves the RQ to the reset state when the first error CQE is
 * seen, then drains the CQ by the caller function loop. When the CQ is empty,
 * it moves the RQ state to ready and initializes the RQ.
 * Next CQE identification and error counting are the caller's responsibility.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] vec
 *   1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
 *   0 when called from non-vectorized Rx burst.
 *
 * @return
 *   -1 in case of recovery error, otherwise the CQE status.
 */
int
mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	const unsigned int wqe_n = 1 << rxq->elts_n;
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	union {
		volatile struct mlx5_cqe *cqe;
		volatile struct mlx5_err_cqe *err_cqe;
	} u = {
		.cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
	};
	struct mlx5_mp_arg_queue_state_modify sm;
	int ret;

	switch (rxq->err_state) {
	case MLX5_RXQ_ERR_STATE_NO_ERROR:
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_RESET:
		sm.is_wq = 1;
		sm.queue_id = rxq->idx;
		sm.state = IBV_WQS_RESET;
		if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
			return -1;
		if (rxq_ctrl->dump_file_n <
		    rxq_ctrl->priv->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u RQN = %u wqe_counter = %u"
			      " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
			      rxq->cqn, rxq_ctrl->wqn,
			      rte_be_to_cpu_16(u.err_cqe->wqe_counter),
			      rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
			      rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
								   rxq->cqes),
						    sizeof(*u.cqe) * cqe_n);
			mlx5_dump_debug_information(name, "MLX5 Error RQ:",
						    (const void *)((uintptr_t)
								   rxq->wqes),
						    16 * wqe_n);
			rxq_ctrl->dump_file_n++;
		}
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_READY:
		ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN) {
			rte_cio_wmb();
			*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
			rte_cio_wmb();
			/*
			 * The RQ consumer index must be zeroed while moving
			 * from RESET state to RDY state.
			 */
			*rxq->rq_db = rte_cpu_to_be_32(0);
			rte_cio_wmb();
			sm.is_wq = 1;
			sm.queue_id = rxq->idx;
			sm.state = IBV_WQS_RDY;
			if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
						    &sm))
				return -1;
			if (vec) {
				const uint16_t q_mask = wqe_n - 1;
				uint16_t elt_idx;
				struct rte_mbuf **elt;
				int i;
				unsigned int n = wqe_n - (rxq->rq_ci -
							  rxq->rq_pi);

				for (i = 0; i < (int)n; ++i) {
					elt_idx = (rxq->rq_ci + i) & q_mask;
					elt = &(*rxq->elts)[elt_idx];
					*elt = rte_mbuf_raw_alloc(rxq->mp);
					if (!*elt) {
						for (i--; i >= 0; --i) {
							elt_idx = (rxq->rq_ci +
								   i) & q_mask;
							elt = &(*rxq->elts)
								[elt_idx];
							rte_pktmbuf_free_seg
								(*elt);
						}
						return -1;
					}
				}
				for (i = 0; i < (int)wqe_n; ++i) {
					elt = &(*rxq->elts)[i];
					DATA_LEN(*elt) =
						(uint16_t)((*elt)->buf_len -
						rte_pktmbuf_headroom(*elt));
				}
				/* Padding with a fake mbuf for vec Rx. */
				for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
					(*rxq->elts)[wqe_n + i] =
								&rxq->fake_mbuf;
			}
			mlx5_rxq_initialize(rxq);
			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
		}
		return ret;
	default:
		return -1;
	}
}
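/*
 * Recovery walk-through (sketch): on the first error CQE the RQ is moved
 * to RESET; the burst routine keeps calling this handler while it drains
 * the CQ. Once check_cqe() reports MLX5_CQE_STATUS_HW_OWN the CQ is
 * empty, the RQ is switched back to ready, fresh mbufs are allocated for
 * the vectorized path when requested, and mlx5_rxq_initialize() reposts
 * the ring.
 */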
/**
 * Get size of the next packet for a given CQE. For compressed CQEs, the
 * consumer index is updated only once all packets of the current one have
 * been processed.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param cqe
 *   CQE to process.
 * @param[out] mcqe
 *   Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
 *   written.
 *
 * @return
 *   0 in case of empty CQE, otherwise the packet size in bytes.
 */
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
	struct rxq_zip *zip = &rxq->zip;
	uint16_t cqe_n = cqe_cnt + 1;
	int len;
	uint16_t idx, end;

	do {
		len = 0;
		/* Process compressed data in the CQE and mini arrays. */
		if (zip->ai) {
			volatile struct mlx5_mini_cqe8 (*mc)[8] =
				(volatile struct mlx5_mini_cqe8 (*)[8])
				(uintptr_t)(&(*rxq->cqes)[zip->ca &
							  cqe_cnt].pkt_info);

			len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
			*mcqe = &(*mc)[zip->ai & 7];
			if ((++zip->ai & 7) == 0) {
				/* Invalidate consumed CQEs. */
				idx = zip->ca;
				end = zip->na;
				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				/*
				 * Increment consumer index to skip the number
				 * of CQEs consumed. Hardware leaves holes in
				 * the CQ ring for software use.
				 */
				zip->ca = zip->na;
				zip->na += 8;
			}
			if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
				/* Invalidate the rest. */
				idx = zip->ca;
				end = zip->cq_ci;

				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				rxq->cq_ci = zip->cq_ci;
				zip->ai = 0;
			}
		/*
		 * No compressed data, get next CQE and verify if it is
		 * compressed.
113188c07335SMatan Azrad */ 113299c12dccSNélio Laranjeiro } else { 113399c12dccSNélio Laranjeiro int ret; 113499c12dccSNélio Laranjeiro int8_t op_own; 113599c12dccSNélio Laranjeiro 113697267b8eSNelio Laranjeiro ret = check_cqe(cqe, cqe_n, rxq->cq_ci); 113788c07335SMatan Azrad if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { 113888c07335SMatan Azrad if (unlikely(ret == MLX5_CQE_STATUS_ERR || 113988c07335SMatan Azrad rxq->err_state)) { 114088c07335SMatan Azrad ret = mlx5_rx_err_handle(rxq, 0); 114188c07335SMatan Azrad if (ret == MLX5_CQE_STATUS_HW_OWN || 114288c07335SMatan Azrad ret == -1) 114399c12dccSNélio Laranjeiro return 0; 114488c07335SMatan Azrad } else { 114588c07335SMatan Azrad return 0; 114688c07335SMatan Azrad } 114788c07335SMatan Azrad } 114899c12dccSNélio Laranjeiro ++rxq->cq_ci; 114999c12dccSNélio Laranjeiro op_own = cqe->op_own; 115099c12dccSNélio Laranjeiro if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { 115199c12dccSNélio Laranjeiro volatile struct mlx5_mini_cqe8 (*mc)[8] = 115299c12dccSNélio Laranjeiro (volatile struct mlx5_mini_cqe8 (*)[8]) 115388c07335SMatan Azrad (uintptr_t)(&(*rxq->cqes) 115488c07335SMatan Azrad [rxq->cq_ci & 11554aff4bcbSYongseok Koh cqe_cnt].pkt_info); 115699c12dccSNélio Laranjeiro 115799c12dccSNélio Laranjeiro /* Fix endianness. */ 11586b30a6a8SShachar Beiser zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); 115999c12dccSNélio Laranjeiro /* 116088c07335SMatan Azrad * Current mini array position is the one 116188c07335SMatan Azrad * returned by check_cqe(). 116299c12dccSNélio Laranjeiro * 116388c07335SMatan Azrad * If completion comprises several mini arrays, 116488c07335SMatan Azrad * as a special case the second one is located 116588c07335SMatan Azrad * 7 CQEs after the initial CQE instead of 8 116688c07335SMatan Azrad * for subsequent ones. 116799c12dccSNélio Laranjeiro */ 1168d2e842d0SYongseok Koh zip->ca = rxq->cq_ci; 116999c12dccSNélio Laranjeiro zip->na = zip->ca + 7; 117099c12dccSNélio Laranjeiro /* Compute the next non-compressed CQE. */ 117199c12dccSNélio Laranjeiro --rxq->cq_ci; 117299c12dccSNélio Laranjeiro zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; 117399c12dccSNélio Laranjeiro /* Get packet size to return. */ 11746b30a6a8SShachar Beiser len = rte_be_to_cpu_32((*mc)[0].byte_cnt); 11752e633f1fSYongseok Koh *mcqe = &(*mc)[0]; 117699c12dccSNélio Laranjeiro zip->ai = 1; 117788c07335SMatan Azrad /* Prefetch all CQEs to be invalidated. */ 1178d2e842d0SYongseok Koh idx = zip->ca; 1179d2e842d0SYongseok Koh end = zip->cq_ci; 1180d2e842d0SYongseok Koh while (idx != end) { 118188c07335SMatan Azrad rte_prefetch0(&(*rxq->cqes)[(idx) & 118288c07335SMatan Azrad cqe_cnt]); 1183d2e842d0SYongseok Koh ++idx; 1184d2e842d0SYongseok Koh } 118599c12dccSNélio Laranjeiro } else { 11866b30a6a8SShachar Beiser len = rte_be_to_cpu_32(cqe->byte_cnt); 118799c12dccSNélio Laranjeiro } 118899c12dccSNélio Laranjeiro } 118988c07335SMatan Azrad if (unlikely(rxq->err_state)) { 119088c07335SMatan Azrad cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 119188c07335SMatan Azrad ++rxq->stats.idropped; 119288c07335SMatan Azrad } else { 119399c12dccSNélio Laranjeiro return len; 119499c12dccSNélio Laranjeiro } 119588c07335SMatan Azrad } while (1); 119688c07335SMatan Azrad } 119799c12dccSNélio Laranjeiro 119899c12dccSNélio Laranjeiro /** 119967fa62bcSAdrien Mazarguil * Translate RX completion flags to offload flags. 120067fa62bcSAdrien Mazarguil * 12016218063bSNélio Laranjeiro * @param[in] cqe 12026218063bSNélio Laranjeiro * Pointer to CQE.
120367fa62bcSAdrien Mazarguil * 120467fa62bcSAdrien Mazarguil * @return 120567fa62bcSAdrien Mazarguil * Offload flags (ol_flags) for struct rte_mbuf. 120667fa62bcSAdrien Mazarguil */ 120767fa62bcSAdrien Mazarguil static inline uint32_t 12086ba07449SXueming Li rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe) 120967fa62bcSAdrien Mazarguil { 121067fa62bcSAdrien Mazarguil uint32_t ol_flags = 0; 12116b30a6a8SShachar Beiser uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); 121267fa62bcSAdrien Mazarguil 12130603df73SNélio Laranjeiro ol_flags = 12140603df73SNélio Laranjeiro TRANSPOSE(flags, 12150603df73SNélio Laranjeiro MLX5_CQE_RX_L3_HDR_VALID, 12160603df73SNélio Laranjeiro PKT_RX_IP_CKSUM_GOOD) | 12170603df73SNélio Laranjeiro TRANSPOSE(flags, 12180603df73SNélio Laranjeiro MLX5_CQE_RX_L4_HDR_VALID, 121983e9d9a3SNelio Laranjeiro PKT_RX_L4_CKSUM_GOOD); 122067fa62bcSAdrien Mazarguil return ol_flags; 122167fa62bcSAdrien Mazarguil } 122267fa62bcSAdrien Mazarguil 122367fa62bcSAdrien Mazarguil /** 12243e1f82a1SYongseok Koh * Fill in mbuf fields from RX completion flags. 12253e1f82a1SYongseok Koh * Note that pkt->ol_flags should be initialized outside of this function. 12263e1f82a1SYongseok Koh * 12273e1f82a1SYongseok Koh * @param rxq 12283e1f82a1SYongseok Koh * Pointer to RX queue. 12293e1f82a1SYongseok Koh * @param pkt 12303e1f82a1SYongseok Koh * mbuf to fill. 12313e1f82a1SYongseok Koh * @param cqe 12323e1f82a1SYongseok Koh * CQE to process. 12333e1f82a1SYongseok Koh * @param rss_hash_res 12343e1f82a1SYongseok Koh * Packet RSS Hash result. 12353e1f82a1SYongseok Koh */ 12363e1f82a1SYongseok Koh static inline void 12373e1f82a1SYongseok Koh rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, 12383e1f82a1SYongseok Koh volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res) 12393e1f82a1SYongseok Koh { 12403e1f82a1SYongseok Koh /* Update packet information. */ 12413e1f82a1SYongseok Koh pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe); 12423e1f82a1SYongseok Koh if (rss_hash_res && rxq->rss_hash) { 12433e1f82a1SYongseok Koh pkt->hash.rss = rss_hash_res; 12443e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_RSS_HASH; 12453e1f82a1SYongseok Koh } 12463e1f82a1SYongseok Koh if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { 12473e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_FDIR; 12483e1f82a1SYongseok Koh if (cqe->sop_drop_qpn != 12493e1f82a1SYongseok Koh rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { 12503e1f82a1SYongseok Koh uint32_t mark = cqe->sop_drop_qpn; 12513e1f82a1SYongseok Koh 12523e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_FDIR_ID; 12533e1f82a1SYongseok Koh pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); 12543e1f82a1SYongseok Koh } 12553e1f82a1SYongseok Koh } 12563e1f82a1SYongseok Koh if (rxq->csum) 12573e1f82a1SYongseok Koh pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); 12583e1f82a1SYongseok Koh if (rxq->vlan_strip && 12593e1f82a1SYongseok Koh (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { 12603e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 12613e1f82a1SYongseok Koh pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info); 12623e1f82a1SYongseok Koh } 12633e1f82a1SYongseok Koh if (rxq->hw_timestamp) { 12643e1f82a1SYongseok Koh pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp); 12653e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_TIMESTAMP; 12663e1f82a1SYongseok Koh } 12673e1f82a1SYongseok Koh } 12683e1f82a1SYongseok Koh 12693e1f82a1SYongseok Koh /** 12702e22920bSAdrien Mazarguil * DPDK callback for RX. 
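 * This is the scalar datapath supporting multi-segment packets. A
 * hedged sketch of how the PMD installs it (the actual selection
 * logic lives in the Rx burst function selector):
 *
 *   dev->rx_pkt_burst = mlx5_rx_burst;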
12712e22920bSAdrien Mazarguil * 12722e22920bSAdrien Mazarguil * @param dpdk_rxq 12732e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 12742e22920bSAdrien Mazarguil * @param[out] pkts 12752e22920bSAdrien Mazarguil * Array to store received packets. 12762e22920bSAdrien Mazarguil * @param pkts_n 12772e22920bSAdrien Mazarguil * Maximum number of packets in array. 12782e22920bSAdrien Mazarguil * 12792e22920bSAdrien Mazarguil * @return 12802e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 12812e22920bSAdrien Mazarguil */ 12822e22920bSAdrien Mazarguil uint16_t 12832e22920bSAdrien Mazarguil mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 12842e22920bSAdrien Mazarguil { 128578142aacSNélio Laranjeiro struct mlx5_rxq_data *rxq = dpdk_rxq; 1286b4b12e55SNélio Laranjeiro const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1; 1287e2f116eeSNélio Laranjeiro const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1; 12889964b965SNélio Laranjeiro const unsigned int sges_n = rxq->sges_n; 12899964b965SNélio Laranjeiro struct rte_mbuf *pkt = NULL; 12909964b965SNélio Laranjeiro struct rte_mbuf *seg = NULL; 129197267b8eSNelio Laranjeiro volatile struct mlx5_cqe *cqe = 129297267b8eSNelio Laranjeiro &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 12939964b965SNélio Laranjeiro unsigned int i = 0; 12949964b965SNélio Laranjeiro unsigned int rq_ci = rxq->rq_ci << sges_n; 12954e66a6feSNelio Laranjeiro int len = 0; /* keep its value across iterations. */ 12962e22920bSAdrien Mazarguil 12979964b965SNélio Laranjeiro while (pkts_n) { 12989964b965SNélio Laranjeiro unsigned int idx = rq_ci & wqe_cnt; 12997d6bf6b8SYongseok Koh volatile struct mlx5_wqe_data_seg *wqe = 13007d6bf6b8SYongseok Koh &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; 13019964b965SNélio Laranjeiro struct rte_mbuf *rep = (*rxq->elts)[idx]; 13022e633f1fSYongseok Koh volatile struct mlx5_mini_cqe8 *mcqe = NULL; 13032e633f1fSYongseok Koh uint32_t rss_hash_res; 13049964b965SNélio Laranjeiro 13059964b965SNélio Laranjeiro if (pkt) 13069964b965SNélio Laranjeiro NEXT(seg) = rep; 13079964b965SNélio Laranjeiro seg = rep; 13089964b965SNélio Laranjeiro rte_prefetch0(seg); 13096218063bSNélio Laranjeiro rte_prefetch0(cqe); 13109964b965SNélio Laranjeiro rte_prefetch0(wqe); 1311fbfd9955SOlivier Matz rep = rte_mbuf_raw_alloc(rxq->mp); 13122e22920bSAdrien Mazarguil if (unlikely(rep == NULL)) { 131315a756b6SSagi Grimberg ++rxq->stats.rx_nombuf; 131415a756b6SSagi Grimberg if (!pkt) { 131515a756b6SSagi Grimberg /* 131615a756b6SSagi Grimberg * no buffers before we even started, 131715a756b6SSagi Grimberg * bail out silently. 
131815a756b6SSagi Grimberg */ 131915a756b6SSagi Grimberg break; 132015a756b6SSagi Grimberg } 1321a1bdb71aSNélio Laranjeiro while (pkt != seg) { 1322a1bdb71aSNélio Laranjeiro assert(pkt != (*rxq->elts)[idx]); 1323fe5fe382SNélio Laranjeiro rep = NEXT(pkt); 13248f094a9aSOlivier Matz NEXT(pkt) = NULL; 13258f094a9aSOlivier Matz NB_SEGS(pkt) = 1; 13261f88c0a2SOlivier Matz rte_mbuf_raw_free(pkt); 1327fe5fe382SNélio Laranjeiro pkt = rep; 13289964b965SNélio Laranjeiro } 13296218063bSNélio Laranjeiro break; 13302e22920bSAdrien Mazarguil } 13319964b965SNélio Laranjeiro if (!pkt) { 133297267b8eSNelio Laranjeiro cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 13332e633f1fSYongseok Koh len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); 1334ecf60761SNélio Laranjeiro if (!len) { 13351f88c0a2SOlivier Matz rte_mbuf_raw_free(rep); 13366218063bSNélio Laranjeiro break; 13376218063bSNélio Laranjeiro } 13389964b965SNélio Laranjeiro pkt = seg; 13399964b965SNélio Laranjeiro assert(len >= (rxq->crc_present << 2)); 13400ac64846SMaxime Leroy pkt->ol_flags = 0; 13412e633f1fSYongseok Koh /* If compressed, take hash result from mini-CQE. */ 13422e633f1fSYongseok Koh rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? 13432e633f1fSYongseok Koh cqe->rx_hash_res : 13442e633f1fSYongseok Koh mcqe->rx_hash_result); 13453e1f82a1SYongseok Koh rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); 13466218063bSNélio Laranjeiro if (rxq->crc_present) 134735b2d13fSOlivier Matz len -= RTE_ETHER_CRC_LEN; 13486218063bSNélio Laranjeiro PKT_LEN(pkt) = len; 13492579543fSMatan Azrad if (cqe->lro_num_seg > 1) { 13502579543fSMatan Azrad mlx5_lro_update_hdr 13512579543fSMatan Azrad (rte_pktmbuf_mtod(pkt, uint8_t *), cqe, 13522579543fSMatan Azrad len); 13532579543fSMatan Azrad pkt->ol_flags |= PKT_RX_LRO; 13542579543fSMatan Azrad pkt->tso_segsz = len / cqe->lro_num_seg; 13552579543fSMatan Azrad } 13569964b965SNélio Laranjeiro } 13579964b965SNélio Laranjeiro DATA_LEN(rep) = DATA_LEN(seg); 13589964b965SNélio Laranjeiro PKT_LEN(rep) = PKT_LEN(seg); 13599964b965SNélio Laranjeiro SET_DATA_OFF(rep, DATA_OFF(seg)); 13609964b965SNélio Laranjeiro PORT(rep) = PORT(seg); 13619964b965SNélio Laranjeiro (*rxq->elts)[idx] = rep; 13629964b965SNélio Laranjeiro /* 13639964b965SNélio Laranjeiro * Fill NIC descriptor with the new buffer. The lkey and size 13649964b965SNélio Laranjeiro * of the buffers are already known, only the buffer address 13659964b965SNélio Laranjeiro * changes. 13669964b965SNélio Laranjeiro */ 13676b30a6a8SShachar Beiser wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); 1368974f1e7eSYongseok Koh /* If there's only one MR, no need to replace LKey in WQE. */ 1369974f1e7eSYongseok Koh if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) 1370974f1e7eSYongseok Koh wqe->lkey = mlx5_rx_mb2mr(rxq, rep); 13719964b965SNélio Laranjeiro if (len > DATA_LEN(seg)) { 13729964b965SNélio Laranjeiro len -= DATA_LEN(seg); 13739964b965SNélio Laranjeiro ++NB_SEGS(pkt); 13749964b965SNélio Laranjeiro ++rq_ci; 13759964b965SNélio Laranjeiro continue; 13769964b965SNélio Laranjeiro } 13779964b965SNélio Laranjeiro DATA_LEN(seg) = len; 137887011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 137987011737SAdrien Mazarguil /* Increment bytes counter. */ 13809964b965SNélio Laranjeiro rxq->stats.ibytes += PKT_LEN(pkt); 138187011737SAdrien Mazarguil #endif 13826218063bSNélio Laranjeiro /* Return packet. 
*/ 13836218063bSNélio Laranjeiro *(pkts++) = pkt; 13849964b965SNélio Laranjeiro pkt = NULL; 13859964b965SNélio Laranjeiro --pkts_n; 13869964b965SNélio Laranjeiro ++i; 13879964b965SNélio Laranjeiro /* Align consumer index to the next stride. */ 13889964b965SNélio Laranjeiro rq_ci >>= sges_n; 13896218063bSNélio Laranjeiro ++rq_ci; 13909964b965SNélio Laranjeiro rq_ci <<= sges_n; 13912e22920bSAdrien Mazarguil } 13929964b965SNélio Laranjeiro if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci))) 13932e22920bSAdrien Mazarguil return 0; 13946218063bSNélio Laranjeiro /* Update the consumer index. */ 13959964b965SNélio Laranjeiro rxq->rq_ci = rq_ci >> sges_n; 13964fe7f662SYongseok Koh rte_cio_wmb(); 13976b30a6a8SShachar Beiser *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 13984fe7f662SYongseok Koh rte_cio_wmb(); 13996b30a6a8SShachar Beiser *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 140087011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 140187011737SAdrien Mazarguil /* Increment packets counter. */ 14029964b965SNélio Laranjeiro rxq->stats.ipackets += i; 140387011737SAdrien Mazarguil #endif 14049964b965SNélio Laranjeiro return i; 14052e22920bSAdrien Mazarguil } 14062e22920bSAdrien Mazarguil 1407e4c2a16eSMatan Azrad /** 1408e4c2a16eSMatan Azrad * Update LRO packet TCP header. 1409e4c2a16eSMatan Azrad * The HW LRO feature doesn't update the TCP header after coalescing the 1410e4c2a16eSMatan Azrad * TCP segments but supplies information in the CQE for SW to fill it in. 1411e4c2a16eSMatan Azrad * 1412e4c2a16eSMatan Azrad * @param tcp 1413e4c2a16eSMatan Azrad * Pointer to the TCP header. 1414e4c2a16eSMatan Azrad * @param cqe 1415e4c2a16eSMatan Azrad * Pointer to the completion entry. 1416e4c2a16eSMatan Azrad * @param phcsum 1417e4c2a16eSMatan Azrad * The L3 pseudo-header checksum. 1418e4c2a16eSMatan Azrad */ 1419e4c2a16eSMatan Azrad static inline void 1420e4c2a16eSMatan Azrad mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp, 1421e4c2a16eSMatan Azrad volatile struct mlx5_cqe *restrict cqe, 1422e4c2a16eSMatan Azrad uint32_t phcsum) 1423e4c2a16eSMatan Azrad { 1424e4c2a16eSMatan Azrad uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) & 1425e4c2a16eSMatan Azrad MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT; 1426e4c2a16eSMatan Azrad /* 1427e4c2a16eSMatan Azrad * The HW calculates only the TCP payload checksum; SW needs to complete 1428e4c2a16eSMatan Azrad * the TCP header checksum and the L3 pseudo-header checksum.
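 *
 * Worked example of the folding done below: if phcsum plus the raw
 * header sum accumulates to 0x1fffe, the fold gives 0x1 + 0xfffe =
 * 0xffff, the one's complement yields 0x0000, and the zero result is
 * then replaced with 0xffff before being stored into tcp->cksum.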
1429e4c2a16eSMatan Azrad */ 1430e4c2a16eSMatan Azrad uint32_t csum = phcsum + cqe->csum; 1431e4c2a16eSMatan Azrad 1432e4c2a16eSMatan Azrad if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK || 1433e4c2a16eSMatan Azrad l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) { 1434e4c2a16eSMatan Azrad tcp->tcp_flags |= RTE_TCP_ACK_FLAG; 1435e4c2a16eSMatan Azrad tcp->recv_ack = cqe->lro_ack_seq_num; 1436e4c2a16eSMatan Azrad tcp->rx_win = cqe->lro_tcp_win; 1437e4c2a16eSMatan Azrad } 1438e4c2a16eSMatan Azrad if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK) 1439e4c2a16eSMatan Azrad tcp->tcp_flags |= RTE_TCP_PSH_FLAG; 1440e4c2a16eSMatan Azrad tcp->cksum = 0; 1441e4c2a16eSMatan Azrad csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4); 1442e4c2a16eSMatan Azrad csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); 1443e4c2a16eSMatan Azrad csum = (~csum) & 0xffff; 1444e4c2a16eSMatan Azrad if (csum == 0) 1445e4c2a16eSMatan Azrad csum = 0xffff; 1446e4c2a16eSMatan Azrad tcp->cksum = csum; 1447e4c2a16eSMatan Azrad } 1448e4c2a16eSMatan Azrad 1449e4c2a16eSMatan Azrad /** 1450e4c2a16eSMatan Azrad * Update LRO packet headers. 1451e4c2a16eSMatan Azrad * The HW LRO feature doesn't update the L3/TCP headers after coalescing the 1452e4c2a16eSMatan Azrad * TCP segments but supplies information in the CQE for SW to fill them in. 1453e4c2a16eSMatan Azrad * 1454e4c2a16eSMatan Azrad * @param padd 1455e4c2a16eSMatan Azrad * The packet address. 1456e4c2a16eSMatan Azrad * @param cqe 1457e4c2a16eSMatan Azrad * Pointer to the completion entry. 1458e4c2a16eSMatan Azrad * @param len 1459e4c2a16eSMatan Azrad * The packet length. 1460e4c2a16eSMatan Azrad */ 1461e4c2a16eSMatan Azrad static inline void 1462e4c2a16eSMatan Azrad mlx5_lro_update_hdr(uint8_t *restrict padd, 1463e4c2a16eSMatan Azrad volatile struct mlx5_cqe *restrict cqe, 1464e4c2a16eSMatan Azrad uint32_t len) 1465e4c2a16eSMatan Azrad { 1466e4c2a16eSMatan Azrad union { 1467e4c2a16eSMatan Azrad struct rte_ether_hdr *eth; 1468e4c2a16eSMatan Azrad struct rte_vlan_hdr *vlan; 1469e4c2a16eSMatan Azrad struct rte_ipv4_hdr *ipv4; 1470e4c2a16eSMatan Azrad struct rte_ipv6_hdr *ipv6; 1471e4c2a16eSMatan Azrad struct rte_tcp_hdr *tcp; 1472e4c2a16eSMatan Azrad uint8_t *hdr; 1473e4c2a16eSMatan Azrad } h = { 1474e4c2a16eSMatan Azrad .hdr = padd, 1475e4c2a16eSMatan Azrad }; 1476e4c2a16eSMatan Azrad uint16_t proto = h.eth->ether_type; 1477e4c2a16eSMatan Azrad uint32_t phcsum; 1478e4c2a16eSMatan Azrad 1479e4c2a16eSMatan Azrad h.eth++; 1480e4c2a16eSMatan Azrad while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) || 1481e4c2a16eSMatan Azrad proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { 1482e4c2a16eSMatan Azrad proto = h.vlan->eth_proto; 1483e4c2a16eSMatan Azrad h.vlan++; 1484e4c2a16eSMatan Azrad } 1485e4c2a16eSMatan Azrad if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) { 1486e4c2a16eSMatan Azrad h.ipv4->time_to_live = cqe->lro_min_ttl; 1487e4c2a16eSMatan Azrad h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd)); 1488e4c2a16eSMatan Azrad h.ipv4->hdr_checksum = 0; 1489e4c2a16eSMatan Azrad h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4); 1490e4c2a16eSMatan Azrad phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0); 1491e4c2a16eSMatan Azrad h.ipv4++; 1492e4c2a16eSMatan Azrad } else { 1493e4c2a16eSMatan Azrad h.ipv6->hop_limits = cqe->lro_min_ttl; 1494e4c2a16eSMatan Azrad h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) - 1495e4c2a16eSMatan Azrad sizeof(*h.ipv6)); 1496e4c2a16eSMatan Azrad phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0); 1497e4c2a16eSMatan Azrad h.ipv6++; 1498e4c2a16eSMatan Azrad } 1499e4c2a16eSMatan Azrad
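	/*
	 * At this point h.tcp points just past the rewritten L3 header,
	 * i.e. at the TCP header, and phcsum holds the matching L3
	 * pseudo-header checksum used to complete the TCP checksum.
	 */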
mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum); 1500e4c2a16eSMatan Azrad } 1501e4c2a16eSMatan Azrad 15027d6bf6b8SYongseok Koh void 15037d6bf6b8SYongseok Koh mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) 15047d6bf6b8SYongseok Koh { 15057d6bf6b8SYongseok Koh struct mlx5_mprq_buf *buf = opaque; 15067d6bf6b8SYongseok Koh 15077d6bf6b8SYongseok Koh if (rte_atomic16_read(&buf->refcnt) == 1) { 15087d6bf6b8SYongseok Koh rte_mempool_put(buf->mp, buf); 15097d6bf6b8SYongseok Koh } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) { 15107d6bf6b8SYongseok Koh rte_atomic16_set(&buf->refcnt, 1); 15117d6bf6b8SYongseok Koh rte_mempool_put(buf->mp, buf); 15127d6bf6b8SYongseok Koh } 15137d6bf6b8SYongseok Koh } 15147d6bf6b8SYongseok Koh 15157d6bf6b8SYongseok Koh void 15167d6bf6b8SYongseok Koh mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf) 15177d6bf6b8SYongseok Koh { 15187d6bf6b8SYongseok Koh mlx5_mprq_buf_free_cb(NULL, buf); 15197d6bf6b8SYongseok Koh } 15207d6bf6b8SYongseok Koh 15217d6bf6b8SYongseok Koh static inline void 15223a22f387SMatan Azrad mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx, 15233a22f387SMatan Azrad const unsigned int strd_n) 15247d6bf6b8SYongseok Koh { 15257d6bf6b8SYongseok Koh struct mlx5_mprq_buf *rep = rxq->mprq_repl; 15267d6bf6b8SYongseok Koh volatile struct mlx5_wqe_data_seg *wqe = 15277d6bf6b8SYongseok Koh &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; 15287d6bf6b8SYongseok Koh void *addr; 15297d6bf6b8SYongseok Koh 15307d6bf6b8SYongseok Koh assert(rep != NULL); 15317d6bf6b8SYongseok Koh /* Replace MPRQ buf. */ 15327d6bf6b8SYongseok Koh (*rxq->mprq_bufs)[rq_idx] = rep; 15337d6bf6b8SYongseok Koh /* Replace WQE. */ 15343a22f387SMatan Azrad addr = mlx5_mprq_buf_addr(rep, strd_n); 15357d6bf6b8SYongseok Koh wqe->addr = rte_cpu_to_be_64((uintptr_t)addr); 15367d6bf6b8SYongseok Koh /* If there's only one MR, no need to replace LKey in WQE. */ 15377d6bf6b8SYongseok Koh if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) 15387d6bf6b8SYongseok Koh wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr); 15397d6bf6b8SYongseok Koh /* Stash a mbuf for next replacement. */ 15407d6bf6b8SYongseok Koh if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep))) 15417d6bf6b8SYongseok Koh rxq->mprq_repl = rep; 15427d6bf6b8SYongseok Koh else 15437d6bf6b8SYongseok Koh rxq->mprq_repl = NULL; 15447d6bf6b8SYongseok Koh } 15457d6bf6b8SYongseok Koh 15467d6bf6b8SYongseok Koh /** 15477d6bf6b8SYongseok Koh * DPDK callback for RX with Multi-Packet RQ support. 15487d6bf6b8SYongseok Koh * 15497d6bf6b8SYongseok Koh * @param dpdk_rxq 15507d6bf6b8SYongseok Koh * Generic pointer to RX queue structure. 15517d6bf6b8SYongseok Koh * @param[out] pkts 15527d6bf6b8SYongseok Koh * Array to store received packets. 15537d6bf6b8SYongseok Koh * @param pkts_n 15547d6bf6b8SYongseok Koh * Maximum number of packets in array. 15557d6bf6b8SYongseok Koh * 15567d6bf6b8SYongseok Koh * @return 15577d6bf6b8SYongseok Koh * Number of packets successfully received (<= pkts_n). 
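 *
 * Stride addressing sketch (values purely illustrative): with
 * strd_sz == 2048, strd_shift == 0 and a packet starting at stride
 * index 3, its payload begins at
 *
 *   addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), 3 * 2048);
 *
 * which mirrors the offset computation in the loop below.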
15587d6bf6b8SYongseok Koh */ 15597d6bf6b8SYongseok Koh uint16_t 15607d6bf6b8SYongseok Koh mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 15617d6bf6b8SYongseok Koh { 15627d6bf6b8SYongseok Koh struct mlx5_rxq_data *rxq = dpdk_rxq; 15637d6bf6b8SYongseok Koh const unsigned int strd_n = 1 << rxq->strd_num_n; 15647d6bf6b8SYongseok Koh const unsigned int strd_sz = 1 << rxq->strd_sz_n; 15657d6bf6b8SYongseok Koh const unsigned int strd_shift = 15667d6bf6b8SYongseok Koh MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en; 15677d6bf6b8SYongseok Koh const unsigned int cq_mask = (1 << rxq->cqe_n) - 1; 15687d6bf6b8SYongseok Koh const unsigned int wq_mask = (1 << rxq->elts_n) - 1; 15697d6bf6b8SYongseok Koh volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; 15707d6bf6b8SYongseok Koh unsigned int i = 0; 15713afdf157SXueming Li uint32_t rq_ci = rxq->rq_ci; 15721787eb7bSYongseok Koh uint16_t consumed_strd = rxq->consumed_strd; 1573a496e093SMatan Azrad uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM; 15747d6bf6b8SYongseok Koh struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; 15757d6bf6b8SYongseok Koh 15767d6bf6b8SYongseok Koh while (i < pkts_n) { 15777d6bf6b8SYongseok Koh struct rte_mbuf *pkt; 15787d6bf6b8SYongseok Koh void *addr; 15797d6bf6b8SYongseok Koh int ret; 15807d6bf6b8SYongseok Koh unsigned int len; 15811787eb7bSYongseok Koh uint16_t strd_cnt; 15821787eb7bSYongseok Koh uint16_t strd_idx; 15837d6bf6b8SYongseok Koh uint32_t offset; 15847d6bf6b8SYongseok Koh uint32_t byte_cnt; 15852e633f1fSYongseok Koh volatile struct mlx5_mini_cqe8 *mcqe = NULL; 15861787eb7bSYongseok Koh uint32_t rss_hash_res = 0; 1587e4c2a16eSMatan Azrad uint8_t lro_num_seg; 15887d6bf6b8SYongseok Koh 15891787eb7bSYongseok Koh if (consumed_strd == strd_n) { 15907d6bf6b8SYongseok Koh /* Replace WQE only if the buffer is still in use. */ 15917d6bf6b8SYongseok Koh if (rte_atomic16_read(&buf->refcnt) > 1) { 15923a22f387SMatan Azrad mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n); 15937d6bf6b8SYongseok Koh /* Release the old buffer. */ 15947d6bf6b8SYongseok Koh mlx5_mprq_buf_free(buf); 15957d6bf6b8SYongseok Koh } else if (unlikely(rxq->mprq_repl == NULL)) { 15967d6bf6b8SYongseok Koh struct mlx5_mprq_buf *rep; 15977d6bf6b8SYongseok Koh 15987d6bf6b8SYongseok Koh /* 15997d6bf6b8SYongseok Koh * The MPRQ mempool is currently out of buffers 16007d6bf6b8SYongseok Koh * and memcpy is being done regardless of the Rx 16017d6bf6b8SYongseok Koh * packet size. Retry the allocation to get back 16027d6bf6b8SYongseok Koh * to normal operation. 16037d6bf6b8SYongseok Koh */ 16047d6bf6b8SYongseok Koh if (!rte_mempool_get(rxq->mprq_mp, 16057d6bf6b8SYongseok Koh (void **)&rep)) 16067d6bf6b8SYongseok Koh rxq->mprq_repl = rep; 16077d6bf6b8SYongseok Koh } 16087d6bf6b8SYongseok Koh /* Advance to the next WQE. 
*/ 16091787eb7bSYongseok Koh consumed_strd = 0; 16107d6bf6b8SYongseok Koh ++rq_ci; 16117d6bf6b8SYongseok Koh buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; 16127d6bf6b8SYongseok Koh } 16137d6bf6b8SYongseok Koh cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; 16142e633f1fSYongseok Koh ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); 16157d6bf6b8SYongseok Koh if (!ret) 16167d6bf6b8SYongseok Koh break; 16177d6bf6b8SYongseok Koh byte_cnt = ret; 16181787eb7bSYongseok Koh strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> 16197d6bf6b8SYongseok Koh MLX5_MPRQ_STRIDE_NUM_SHIFT; 16201787eb7bSYongseok Koh assert(strd_cnt); 16211787eb7bSYongseok Koh consumed_strd += strd_cnt; 16227d6bf6b8SYongseok Koh if (byte_cnt & MLX5_MPRQ_FILLER_MASK) 16237d6bf6b8SYongseok Koh continue; 16241787eb7bSYongseok Koh if (mcqe == NULL) { 16251787eb7bSYongseok Koh rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res); 16261787eb7bSYongseok Koh strd_idx = rte_be_to_cpu_16(cqe->wqe_counter); 16271787eb7bSYongseok Koh } else { 16281787eb7bSYongseok Koh /* mini-CQE for MPRQ doesn't have hash result. */ 16291787eb7bSYongseok Koh strd_idx = rte_be_to_cpu_16(mcqe->stride_idx); 16301787eb7bSYongseok Koh } 16311787eb7bSYongseok Koh assert(strd_idx < strd_n); 16321787eb7bSYongseok Koh assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask)); 1633e4c2a16eSMatan Azrad lro_num_seg = cqe->lro_num_seg; 16347d6bf6b8SYongseok Koh /* 16357d6bf6b8SYongseok Koh * Currently configured to receive a packet per stride. But if 16367d6bf6b8SYongseok Koh * MTU is adjusted through the kernel interface, the device could 16377d6bf6b8SYongseok Koh * consume multiple strides without raising an error. In this 16387d6bf6b8SYongseok Koh * case, the packet should be dropped because it is bigger than 16397d6bf6b8SYongseok Koh * the max_rx_pkt_len. 16407d6bf6b8SYongseok Koh */ 1641e4c2a16eSMatan Azrad if (unlikely(!lro_num_seg && strd_cnt > 1)) { 16427d6bf6b8SYongseok Koh ++rxq->stats.idropped; 16437d6bf6b8SYongseok Koh continue; 16447d6bf6b8SYongseok Koh } 16457d6bf6b8SYongseok Koh pkt = rte_pktmbuf_alloc(rxq->mp); 16467d6bf6b8SYongseok Koh if (unlikely(pkt == NULL)) { 16477d6bf6b8SYongseok Koh ++rxq->stats.rx_nombuf; 16487d6bf6b8SYongseok Koh break; 16497d6bf6b8SYongseok Koh } 16507d6bf6b8SYongseok Koh len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; 16517d6bf6b8SYongseok Koh assert((int)len >= (rxq->crc_present << 2)); 16527d6bf6b8SYongseok Koh if (rxq->crc_present) 165335b2d13fSOlivier Matz len -= RTE_ETHER_CRC_LEN; 16541787eb7bSYongseok Koh offset = strd_idx * strd_sz + strd_shift; 16553a22f387SMatan Azrad addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset); 16567d6bf6b8SYongseok Koh /* 16577d6bf6b8SYongseok Koh * Memcpy packets to the target mbuf if: 16587d6bf6b8SYongseok Koh * - The size of packet is smaller than mprq_max_memcpy_len. 16597d6bf6b8SYongseok Koh * - Out of buffer in the Mempool for Multi-Packet RQ. 16607d6bf6b8SYongseok Koh */ 16617d6bf6b8SYongseok Koh if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) { 16627d6bf6b8SYongseok Koh /* 16637d6bf6b8SYongseok Koh * When memcpy'ing packet due to out-of-buffer, the 16647d6bf6b8SYongseok Koh * packet must be smaller than the target mbuf. 
16657d6bf6b8SYongseok Koh */ 16667d6bf6b8SYongseok Koh if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { 16677d6bf6b8SYongseok Koh rte_pktmbuf_free_seg(pkt); 16687d6bf6b8SYongseok Koh ++rxq->stats.idropped; 16697d6bf6b8SYongseok Koh continue; 16707d6bf6b8SYongseok Koh } 16717d6bf6b8SYongseok Koh rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len); 1672be39124eSMatan Azrad DATA_LEN(pkt) = len; 16737d6bf6b8SYongseok Koh } else { 16747d6bf6b8SYongseok Koh rte_iova_t buf_iova; 16757d6bf6b8SYongseok Koh struct rte_mbuf_ext_shared_info *shinfo; 16761787eb7bSYongseok Koh uint16_t buf_len = strd_cnt * strd_sz; 1677e4c2a16eSMatan Azrad void *buf_addr; 16787d6bf6b8SYongseok Koh 16797d6bf6b8SYongseok Koh /* Increment the refcnt of the whole chunk. */ 16807d6bf6b8SYongseok Koh rte_atomic16_add_return(&buf->refcnt, 1); 16817d6bf6b8SYongseok Koh assert((uint16_t)rte_atomic16_read(&buf->refcnt) <= 16827d6bf6b8SYongseok Koh strd_n + 1); 1683a496e093SMatan Azrad buf_addr = RTE_PTR_SUB(addr, headroom_sz); 16847d6bf6b8SYongseok Koh /* 16857d6bf6b8SYongseok Koh * The MLX5 device doesn't use the iova, but it is 16867d6bf6b8SYongseok Koh * necessary when the Rx packet is transmitted via a 16877d6bf6b8SYongseok Koh * different PMD. 16887d6bf6b8SYongseok Koh */ 16897d6bf6b8SYongseok Koh buf_iova = rte_mempool_virt2iova(buf) + 1690e4c2a16eSMatan Azrad RTE_PTR_DIFF(buf_addr, buf); 16913a22f387SMatan Azrad shinfo = &buf->shinfos[strd_idx]; 16923a22f387SMatan Azrad rte_mbuf_ext_refcnt_set(shinfo, 1); 16937d6bf6b8SYongseok Koh /* 16947d6bf6b8SYongseok Koh * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when 16957d6bf6b8SYongseok Koh * attaching the stride to mbuf and more offload flags 16967d6bf6b8SYongseok Koh * will be added below by calling rxq_cq_to_mbuf(). 16977d6bf6b8SYongseok Koh * Other fields will be overwritten. 16987d6bf6b8SYongseok Koh */ 1699e4c2a16eSMatan Azrad rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova, 1700e4c2a16eSMatan Azrad buf_len, shinfo); 1701a496e093SMatan Azrad /* Set mbuf head-room. */ 1702a496e093SMatan Azrad pkt->data_off = headroom_sz; 17037d6bf6b8SYongseok Koh assert(pkt->ol_flags == EXT_ATTACHED_MBUF); 17047d6bf6b8SYongseok Koh /* 17057d6bf6b8SYongseok Koh * Prevent potential overflow due to MTU change through 17067d6bf6b8SYongseok Koh * kernel interface. 17077d6bf6b8SYongseok Koh */ 17087d6bf6b8SYongseok Koh if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { 17097d6bf6b8SYongseok Koh rte_pktmbuf_free_seg(pkt); 17107d6bf6b8SYongseok Koh ++rxq->stats.idropped; 17117d6bf6b8SYongseok Koh continue; 17127d6bf6b8SYongseok Koh } 1713be39124eSMatan Azrad DATA_LEN(pkt) = len; 1714be39124eSMatan Azrad /* 1715be39124eSMatan Azrad * An LRO packet may consume all the stride memory; in 1716be39124eSMatan Azrad * this case head-room space is not guaranteed, so an 1717be39124eSMatan Azrad * empty mbuf must be added for the head-room. 
1718be39124eSMatan Azrad */ 1719be39124eSMatan Azrad if (!rxq->strd_headroom_en) { 1720be39124eSMatan Azrad struct rte_mbuf *headroom_mbuf = 1721be39124eSMatan Azrad rte_pktmbuf_alloc(rxq->mp); 1722be39124eSMatan Azrad 1723be39124eSMatan Azrad if (unlikely(headroom_mbuf == NULL)) { 1724be39124eSMatan Azrad rte_pktmbuf_free_seg(pkt); 1725be39124eSMatan Azrad ++rxq->stats.rx_nombuf; 1726be39124eSMatan Azrad break; 1727be39124eSMatan Azrad } 1728be39124eSMatan Azrad PORT(pkt) = rxq->port_id; 1729be39124eSMatan Azrad NEXT(headroom_mbuf) = pkt; 1730be39124eSMatan Azrad pkt = headroom_mbuf; 1731be39124eSMatan Azrad NB_SEGS(pkt) = 2; 1732be39124eSMatan Azrad } 17337d6bf6b8SYongseok Koh } 17347d6bf6b8SYongseok Koh rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); 1735e4c2a16eSMatan Azrad if (lro_num_seg > 1) { 1736e4c2a16eSMatan Azrad mlx5_lro_update_hdr(addr, cqe, len); 1737e4c2a16eSMatan Azrad pkt->ol_flags |= PKT_RX_LRO; 1738e4c2a16eSMatan Azrad pkt->tso_segsz = strd_sz; 1739e4c2a16eSMatan Azrad } 17407d6bf6b8SYongseok Koh PKT_LEN(pkt) = len; 17417d6bf6b8SYongseok Koh PORT(pkt) = rxq->port_id; 17427d6bf6b8SYongseok Koh #ifdef MLX5_PMD_SOFT_COUNTERS 17437d6bf6b8SYongseok Koh /* Increment bytes counter. */ 17447d6bf6b8SYongseok Koh rxq->stats.ibytes += PKT_LEN(pkt); 17457d6bf6b8SYongseok Koh #endif 17467d6bf6b8SYongseok Koh /* Return packet. */ 17477d6bf6b8SYongseok Koh *(pkts++) = pkt; 17487d6bf6b8SYongseok Koh ++i; 17497d6bf6b8SYongseok Koh } 17507d6bf6b8SYongseok Koh /* Update the consumer indexes. */ 17511787eb7bSYongseok Koh rxq->consumed_strd = consumed_strd; 17520cfdc180SYongseok Koh rte_cio_wmb(); 17537d6bf6b8SYongseok Koh *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 17547d6bf6b8SYongseok Koh if (rq_ci != rxq->rq_ci) { 17557d6bf6b8SYongseok Koh rxq->rq_ci = rq_ci; 17560cfdc180SYongseok Koh rte_cio_wmb(); 17577d6bf6b8SYongseok Koh *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 17587d6bf6b8SYongseok Koh } 17597d6bf6b8SYongseok Koh #ifdef MLX5_PMD_SOFT_COUNTERS 17607d6bf6b8SYongseok Koh /* Increment packets counter. */ 17617d6bf6b8SYongseok Koh rxq->stats.ipackets += i; 17627d6bf6b8SYongseok Koh #endif 17637d6bf6b8SYongseok Koh return i; 17647d6bf6b8SYongseok Koh } 17657d6bf6b8SYongseok Koh 17662e22920bSAdrien Mazarguil /** 17672e22920bSAdrien Mazarguil * Dummy DPDK callback for TX. 17682e22920bSAdrien Mazarguil * 17692e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 17702e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 17712e22920bSAdrien Mazarguil * 17722e22920bSAdrien Mazarguil * @param dpdk_txq 17732e22920bSAdrien Mazarguil * Generic pointer to TX queue structure. 17742e22920bSAdrien Mazarguil * @param[in] pkts 17752e22920bSAdrien Mazarguil * Packets to transmit. 17762e22920bSAdrien Mazarguil * @param pkts_n 17772e22920bSAdrien Mazarguil * Number of packets in array. 17782e22920bSAdrien Mazarguil * 17792e22920bSAdrien Mazarguil * @return 17802e22920bSAdrien Mazarguil * Number of packets successfully transmitted (<= pkts_n). 
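 *
 * Hedged swap-in sketch (the PMD's control path performs the actual
 * sequencing; mlx5_select_tx_function() is assumed here only for
 * illustration):
 *
 *   dev->tx_pkt_burst = removed_tx_burst;
 *   rte_wmb();
 *   ... perform the unsafe queue operation ...
 *   dev->tx_pkt_burst = mlx5_select_tx_function(dev);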
17812e22920bSAdrien Mazarguil */ 17822e22920bSAdrien Mazarguil uint16_t 178356f08e16SNélio Laranjeiro removed_tx_burst(void *dpdk_txq __rte_unused, 178456f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 178556f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 17862e22920bSAdrien Mazarguil { 17872aac5b5dSYongseok Koh rte_mb(); 17882e22920bSAdrien Mazarguil return 0; 17892e22920bSAdrien Mazarguil } 17902e22920bSAdrien Mazarguil 17912e22920bSAdrien Mazarguil /** 17922e22920bSAdrien Mazarguil * Dummy DPDK callback for RX. 17932e22920bSAdrien Mazarguil * 17942e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 17952e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 17962e22920bSAdrien Mazarguil * 17972e22920bSAdrien Mazarguil * @param dpdk_rxq 17982e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 17992e22920bSAdrien Mazarguil * @param[out] pkts 18002e22920bSAdrien Mazarguil * Array to store received packets. 18012e22920bSAdrien Mazarguil * @param pkts_n 18022e22920bSAdrien Mazarguil * Maximum number of packets in array. 18032e22920bSAdrien Mazarguil * 18042e22920bSAdrien Mazarguil * @return 18052e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 18062e22920bSAdrien Mazarguil */ 18072e22920bSAdrien Mazarguil uint16_t 180856f08e16SNélio Laranjeiro removed_rx_burst(void *dpdk_rxq __rte_unused, 180956f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 181056f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 18112e22920bSAdrien Mazarguil { 18122aac5b5dSYongseok Koh rte_mb(); 18132e22920bSAdrien Mazarguil return 0; 18142e22920bSAdrien Mazarguil } 18156cb559d6SYongseok Koh 18166cb559d6SYongseok Koh /* 18176cb559d6SYongseok Koh * Vectorized Rx/Tx routines are not compiled in when required vector 18186cb559d6SYongseok Koh * instructions are not supported on a target architecture. The following null 18196cb559d6SYongseok Koh * stubs are needed for linkage when those are not included outside of this file 18206cb559d6SYongseok Koh * (e.g. mlx5_rxtx_vec_sse.c for x86). 18216cb559d6SYongseok Koh */ 18226cb559d6SYongseok Koh 182381bede55SKeith Wiles __rte_weak uint16_t 182456f08e16SNélio Laranjeiro mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused, 182556f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 182656f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 18276cb559d6SYongseok Koh { 18286cb559d6SYongseok Koh return 0; 18296cb559d6SYongseok Koh } 18306cb559d6SYongseok Koh 183181bede55SKeith Wiles __rte_weak int 1832af4f09f2SNélio Laranjeiro mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) 18336cb559d6SYongseok Koh { 18346cb559d6SYongseok Koh return -ENOTSUP; 18356cb559d6SYongseok Koh } 18366cb559d6SYongseok Koh 183781bede55SKeith Wiles __rte_weak int 1838af4f09f2SNélio Laranjeiro mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) 18396cb559d6SYongseok Koh { 18406cb559d6SYongseok Koh return -ENOTSUP; 18416cb559d6SYongseok Koh } 1842a6bd4911SViacheslav Ovsiienko 1843a6bd4911SViacheslav Ovsiienko /** 184418a1c200SViacheslav Ovsiienko * Free the mbufs from the linear array of pointers. 184518a1c200SViacheslav Ovsiienko * 184618a1c200SViacheslav Ovsiienko * @param pkts 184718a1c200SViacheslav Ovsiienko * Pointer to array of packets to be freed. 184818a1c200SViacheslav Ovsiienko * @param pkts_n 184918a1c200SViacheslav Ovsiienko * Number of packets to be freed.
185018a1c200SViacheslav Ovsiienko * @param olx 185118a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 185218a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 185318a1c200SViacheslav Ovsiienko */ 185418a1c200SViacheslav Ovsiienko static __rte_always_inline void 185518a1c200SViacheslav Ovsiienko mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts, 185618a1c200SViacheslav Ovsiienko unsigned int pkts_n, 185718a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 185818a1c200SViacheslav Ovsiienko { 185918a1c200SViacheslav Ovsiienko struct rte_mempool *pool = NULL; 186018a1c200SViacheslav Ovsiienko struct rte_mbuf **p_free = NULL; 186118a1c200SViacheslav Ovsiienko struct rte_mbuf *mbuf; 186218a1c200SViacheslav Ovsiienko unsigned int n_free = 0; 186318a1c200SViacheslav Ovsiienko 186418a1c200SViacheslav Ovsiienko /* 186518a1c200SViacheslav Ovsiienko * The implemented algorithm eliminates 186618a1c200SViacheslav Ovsiienko * copying pointers to temporary array 186718a1c200SViacheslav Ovsiienko * for rte_mempool_put_bulk() calls. 186818a1c200SViacheslav Ovsiienko */ 186918a1c200SViacheslav Ovsiienko assert(pkts); 187018a1c200SViacheslav Ovsiienko assert(pkts_n); 187118a1c200SViacheslav Ovsiienko for (;;) { 187218a1c200SViacheslav Ovsiienko for (;;) { 187318a1c200SViacheslav Ovsiienko /* 187418a1c200SViacheslav Ovsiienko * Decrement mbuf reference counter, detach 187518a1c200SViacheslav Ovsiienko * indirect and external buffers if needed. 187618a1c200SViacheslav Ovsiienko */ 187718a1c200SViacheslav Ovsiienko mbuf = rte_pktmbuf_prefree_seg(*pkts); 187818a1c200SViacheslav Ovsiienko if (likely(mbuf != NULL)) { 187918a1c200SViacheslav Ovsiienko assert(mbuf == *pkts); 188018a1c200SViacheslav Ovsiienko if (likely(n_free != 0)) { 188118a1c200SViacheslav Ovsiienko if (unlikely(pool != mbuf->pool)) 188218a1c200SViacheslav Ovsiienko /* From different pool. */ 188318a1c200SViacheslav Ovsiienko break; 188418a1c200SViacheslav Ovsiienko } else { 188518a1c200SViacheslav Ovsiienko /* Start new scan array. */ 188618a1c200SViacheslav Ovsiienko pool = mbuf->pool; 188718a1c200SViacheslav Ovsiienko p_free = pkts; 188818a1c200SViacheslav Ovsiienko } 188918a1c200SViacheslav Ovsiienko ++n_free; 189018a1c200SViacheslav Ovsiienko ++pkts; 189118a1c200SViacheslav Ovsiienko --pkts_n; 189218a1c200SViacheslav Ovsiienko if (unlikely(pkts_n == 0)) { 189318a1c200SViacheslav Ovsiienko mbuf = NULL; 189418a1c200SViacheslav Ovsiienko break; 189518a1c200SViacheslav Ovsiienko } 189618a1c200SViacheslav Ovsiienko } else { 189718a1c200SViacheslav Ovsiienko /* 189818a1c200SViacheslav Ovsiienko * This happens if mbuf is still referenced. 189918a1c200SViacheslav Ovsiienko * We can't put it back to the pool, skip. 190018a1c200SViacheslav Ovsiienko */ 190118a1c200SViacheslav Ovsiienko ++pkts; 190218a1c200SViacheslav Ovsiienko --pkts_n; 190318a1c200SViacheslav Ovsiienko if (unlikely(n_free != 0)) 190418a1c200SViacheslav Ovsiienko /* There is some array to free.*/ 190518a1c200SViacheslav Ovsiienko break; 190618a1c200SViacheslav Ovsiienko if (unlikely(pkts_n == 0)) 190718a1c200SViacheslav Ovsiienko /* Last mbuf, nothing to free. */ 190818a1c200SViacheslav Ovsiienko return; 190918a1c200SViacheslav Ovsiienko } 191018a1c200SViacheslav Ovsiienko } 191118a1c200SViacheslav Ovsiienko for (;;) { 191218a1c200SViacheslav Ovsiienko /* 191318a1c200SViacheslav Ovsiienko * This loop is implemented to avoid multiple 191418a1c200SViacheslav Ovsiienko * inlining of rte_mempool_put_bulk(). 
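 *
 * Worked scenario: for pkts drawn from pools {P1, P1, P2, P2} the
 * scan loop above collects the two P1 mbufs, this loop flushes them
 * with a single rte_mempool_put_bulk(P1, p_free, 2), and scanning
 * then restarts from the first P2 mbuf.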
191518a1c200SViacheslav Ovsiienko */ 191618a1c200SViacheslav Ovsiienko assert(pool); 191718a1c200SViacheslav Ovsiienko assert(p_free); 191818a1c200SViacheslav Ovsiienko assert(n_free); 191918a1c200SViacheslav Ovsiienko /* 192018a1c200SViacheslav Ovsiienko * Free the array of pre-freed mbufs 192118a1c200SViacheslav Ovsiienko * belonging to the same memory pool. 192218a1c200SViacheslav Ovsiienko */ 192318a1c200SViacheslav Ovsiienko rte_mempool_put_bulk(pool, (void *)p_free, n_free); 192418a1c200SViacheslav Ovsiienko if (unlikely(mbuf != NULL)) { 192518a1c200SViacheslav Ovsiienko /* There is a request to start a new scan. */ 192618a1c200SViacheslav Ovsiienko pool = mbuf->pool; 192718a1c200SViacheslav Ovsiienko p_free = pkts++; 192818a1c200SViacheslav Ovsiienko n_free = 1; 192918a1c200SViacheslav Ovsiienko --pkts_n; 193018a1c200SViacheslav Ovsiienko if (likely(pkts_n != 0)) 193118a1c200SViacheslav Ovsiienko break; 193218a1c200SViacheslav Ovsiienko /* 193318a1c200SViacheslav Ovsiienko * This is the last mbuf to be freed. 193418a1c200SViacheslav Ovsiienko * Do one more loop iteration to complete. 193518a1c200SViacheslav Ovsiienko * This is a rare case of the last unique mbuf. 193618a1c200SViacheslav Ovsiienko */ 193718a1c200SViacheslav Ovsiienko mbuf = NULL; 193818a1c200SViacheslav Ovsiienko continue; 193918a1c200SViacheslav Ovsiienko } 194018a1c200SViacheslav Ovsiienko if (likely(pkts_n == 0)) 194118a1c200SViacheslav Ovsiienko return; 194218a1c200SViacheslav Ovsiienko n_free = 0; 194318a1c200SViacheslav Ovsiienko break; 194418a1c200SViacheslav Ovsiienko } 194518a1c200SViacheslav Ovsiienko } 194618a1c200SViacheslav Ovsiienko } 194718a1c200SViacheslav Ovsiienko 194818a1c200SViacheslav Ovsiienko /** 194918a1c200SViacheslav Ovsiienko * Free the mbufs from the elts ring buffer up to the new tail. 195018a1c200SViacheslav Ovsiienko * 195118a1c200SViacheslav Ovsiienko * @param txq 195218a1c200SViacheslav Ovsiienko * Pointer to Tx queue structure. 195318a1c200SViacheslav Ovsiienko * @param tail 195418a1c200SViacheslav Ovsiienko * Index in elts to free up to, becomes new elts tail. 195518a1c200SViacheslav Ovsiienko * @param olx 195618a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 195718a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 195818a1c200SViacheslav Ovsiienko */ 195918a1c200SViacheslav Ovsiienko static __rte_always_inline void 196018a1c200SViacheslav Ovsiienko mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq, 196118a1c200SViacheslav Ovsiienko uint16_t tail, 196218a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 196318a1c200SViacheslav Ovsiienko { 196418a1c200SViacheslav Ovsiienko uint16_t n_elts = tail - txq->elts_tail; 196518a1c200SViacheslav Ovsiienko 196618a1c200SViacheslav Ovsiienko assert(n_elts); 196718a1c200SViacheslav Ovsiienko assert(n_elts <= txq->elts_s); 196818a1c200SViacheslav Ovsiienko /* 196918a1c200SViacheslav Ovsiienko * Implement a loop to support ring buffer wraparound 197018a1c200SViacheslav Ovsiienko * with single inlining of mlx5_tx_free_mbuf(). 
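 *
 * Wraparound example: with elts_s == 256, elts_tail == 250 and
 * tail == 260 (n_elts == 10), the first iteration frees entries
 * 250..255 (part == 6) and the second frees the remaining 4 entries
 * starting at index 0.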
197118a1c200SViacheslav Ovsiienko */ 197218a1c200SViacheslav Ovsiienko do { 197318a1c200SViacheslav Ovsiienko unsigned int part; 197418a1c200SViacheslav Ovsiienko 197518a1c200SViacheslav Ovsiienko part = txq->elts_s - (txq->elts_tail & txq->elts_m); 197618a1c200SViacheslav Ovsiienko part = RTE_MIN(part, n_elts); 197718a1c200SViacheslav Ovsiienko assert(part); 197818a1c200SViacheslav Ovsiienko assert(part <= txq->elts_s); 197918a1c200SViacheslav Ovsiienko mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m], 198018a1c200SViacheslav Ovsiienko part, olx); 198118a1c200SViacheslav Ovsiienko txq->elts_tail += part; 198218a1c200SViacheslav Ovsiienko n_elts -= part; 198318a1c200SViacheslav Ovsiienko } while (n_elts); 198418a1c200SViacheslav Ovsiienko } 198518a1c200SViacheslav Ovsiienko 198618a1c200SViacheslav Ovsiienko /** 198718a1c200SViacheslav Ovsiienko * Store the mbuf being sent into elts ring buffer. 198818a1c200SViacheslav Ovsiienko * On Tx completion these mbufs will be freed. 198918a1c200SViacheslav Ovsiienko * 199018a1c200SViacheslav Ovsiienko * @param txq 199118a1c200SViacheslav Ovsiienko * Pointer to Tx queue structure. 199218a1c200SViacheslav Ovsiienko * @param pkts 199318a1c200SViacheslav Ovsiienko * Pointer to array of packets to be stored. 199418a1c200SViacheslav Ovsiienko * @param pkts_n 199518a1c200SViacheslav Ovsiienko * Number of packets to be stored. 199618a1c200SViacheslav Ovsiienko * @param olx 199718a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 199818a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 199918a1c200SViacheslav Ovsiienko */ 200018a1c200SViacheslav Ovsiienko static __rte_always_inline void 200118a1c200SViacheslav Ovsiienko mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, 200218a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 200318a1c200SViacheslav Ovsiienko unsigned int pkts_n, 200418a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 200518a1c200SViacheslav Ovsiienko { 200618a1c200SViacheslav Ovsiienko unsigned int part; 200718a1c200SViacheslav Ovsiienko struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts; 200818a1c200SViacheslav Ovsiienko 200918a1c200SViacheslav Ovsiienko assert(pkts); 201018a1c200SViacheslav Ovsiienko assert(pkts_n); 201118a1c200SViacheslav Ovsiienko part = txq->elts_s - (txq->elts_head & txq->elts_m); 201218a1c200SViacheslav Ovsiienko assert(part); 201318a1c200SViacheslav Ovsiienko assert(part <= txq->elts_s); 201418a1c200SViacheslav Ovsiienko /* This code is a good candidate for vectorizing with SIMD. */ 201518a1c200SViacheslav Ovsiienko rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)), 201618a1c200SViacheslav Ovsiienko (void *)pkts, 201718a1c200SViacheslav Ovsiienko RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *)); 201818a1c200SViacheslav Ovsiienko txq->elts_head += pkts_n; 201918a1c200SViacheslav Ovsiienko if (unlikely(part < pkts_n)) 202018a1c200SViacheslav Ovsiienko /* The copy is wrapping around the elts array. */ 202118a1c200SViacheslav Ovsiienko rte_memcpy((void *)elts, (void *)(pkts + part), 202218a1c200SViacheslav Ovsiienko (pkts_n - part) * sizeof(struct rte_mbuf *)); 202318a1c200SViacheslav Ovsiienko } 202418a1c200SViacheslav Ovsiienko 202518a1c200SViacheslav Ovsiienko /** 2026da1df1ccSViacheslav Ovsiienko * Update completion queue consuming index via doorbell 2027da1df1ccSViacheslav Ovsiienko * and flush the completed data buffers. 
2028da1df1ccSViacheslav Ovsiienko * 2029da1df1ccSViacheslav Ovsiienko * @param txq 2030da1df1ccSViacheslav Ovsiienko * Pointer to TX queue structure. 2031da1df1ccSViacheslav Ovsiienko * @param last_cqe 2032da1df1ccSViacheslav Ovsiienko * Valid CQE pointer; if not NULL, update txq->wqe_pi and flush the buffers. 2033da1df1ccSViacheslav Ovsiienko * @param itail 2034da1df1ccSViacheslav Ovsiienko * If non-negative, flush the buffers up to this index. 2035da1df1ccSViacheslav Ovsiienko * @param olx 2036da1df1ccSViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 2037da1df1ccSViacheslav Ovsiienko * compile time and may be used for optimization. 2038da1df1ccSViacheslav Ovsiienko */ 2039da1df1ccSViacheslav Ovsiienko static __rte_always_inline void 2040da1df1ccSViacheslav Ovsiienko mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq, 2041da1df1ccSViacheslav Ovsiienko volatile struct mlx5_cqe *last_cqe, 2042da1df1ccSViacheslav Ovsiienko int itail, 2043da1df1ccSViacheslav Ovsiienko unsigned int olx __rte_unused) 2044da1df1ccSViacheslav Ovsiienko { 2045da1df1ccSViacheslav Ovsiienko uint16_t tail; 2046da1df1ccSViacheslav Ovsiienko 2047da1df1ccSViacheslav Ovsiienko if (likely(last_cqe != NULL)) { 2048da1df1ccSViacheslav Ovsiienko txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter); 2049da1df1ccSViacheslav Ovsiienko tail = ((volatile struct mlx5_wqe_cseg *) 2050da1df1ccSViacheslav Ovsiienko (txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc; 2051da1df1ccSViacheslav Ovsiienko } else if (itail >= 0) { 2052da1df1ccSViacheslav Ovsiienko tail = (uint16_t)itail; 2053da1df1ccSViacheslav Ovsiienko } else { 2054da1df1ccSViacheslav Ovsiienko return; 2055da1df1ccSViacheslav Ovsiienko } 2056da1df1ccSViacheslav Ovsiienko rte_compiler_barrier(); 2057da1df1ccSViacheslav Ovsiienko *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci); 2058da1df1ccSViacheslav Ovsiienko if (likely(tail != txq->elts_tail)) { 2059da1df1ccSViacheslav Ovsiienko mlx5_tx_free_elts(txq, tail, olx); 2060da1df1ccSViacheslav Ovsiienko assert(tail == txq->elts_tail); 2061da1df1ccSViacheslav Ovsiienko } 2062da1df1ccSViacheslav Ovsiienko } 2063da1df1ccSViacheslav Ovsiienko 2064da1df1ccSViacheslav Ovsiienko /** 206518a1c200SViacheslav Ovsiienko * Manage TX completions. This routine checks the CQ for 206618a1c200SViacheslav Ovsiienko * arrived CQEs, deduces the last accomplished WQE in SQ, 206718a1c200SViacheslav Ovsiienko * updates SQ producing index and frees all completed mbufs. 206818a1c200SViacheslav Ovsiienko * 206918a1c200SViacheslav Ovsiienko * @param txq 207018a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 207118a1c200SViacheslav Ovsiienko * @param olx 207218a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 207318a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 207418a1c200SViacheslav Ovsiienko * 207518a1c200SViacheslav Ovsiienko * NOTE: not inlined intentionally, it makes tx_burst 207618a1c200SViacheslav Ovsiienko * routine smaller, simpler and faster - from experiments. 
207718a1c200SViacheslav Ovsiienko */ 207818a1c200SViacheslav Ovsiienko static void 207918a1c200SViacheslav Ovsiienko mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, 208018a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 208118a1c200SViacheslav Ovsiienko { 2082318ea4cfSViacheslav Ovsiienko unsigned int count = MLX5_TX_COMP_MAX_CQE; 2083da1df1ccSViacheslav Ovsiienko volatile struct mlx5_cqe *last_cqe = NULL; 208418a1c200SViacheslav Ovsiienko int ret; 208518a1c200SViacheslav Ovsiienko 2086da1df1ccSViacheslav Ovsiienko static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value"); 2087da1df1ccSViacheslav Ovsiienko static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value"); 208818a1c200SViacheslav Ovsiienko do { 208918a1c200SViacheslav Ovsiienko volatile struct mlx5_cqe *cqe; 209018a1c200SViacheslav Ovsiienko 209118a1c200SViacheslav Ovsiienko cqe = &txq->cqes[txq->cq_ci & txq->cqe_m]; 209218a1c200SViacheslav Ovsiienko ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci); 209318a1c200SViacheslav Ovsiienko if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { 209418a1c200SViacheslav Ovsiienko if (likely(ret != MLX5_CQE_STATUS_ERR)) { 209518a1c200SViacheslav Ovsiienko /* No new CQEs in completion queue. */ 209618a1c200SViacheslav Ovsiienko assert(ret == MLX5_CQE_STATUS_HW_OWN); 2097318ea4cfSViacheslav Ovsiienko break; 209818a1c200SViacheslav Ovsiienko } 2099da1df1ccSViacheslav Ovsiienko /* 2100da1df1ccSViacheslav Ovsiienko * Some error occurred, try to restart. 2101da1df1ccSViacheslav Ovsiienko * We have no barrier after WQE related Doorbell 2102da1df1ccSViacheslav Ovsiienko * written, make sure all writes are completed 2103da1df1ccSViacheslav Ovsiienko * here, before we might perform SQ reset. 2104da1df1ccSViacheslav Ovsiienko */ 210518a1c200SViacheslav Ovsiienko rte_wmb(); 2106da1df1ccSViacheslav Ovsiienko ret = mlx5_tx_error_cqe_handle 210718a1c200SViacheslav Ovsiienko (txq, (volatile struct mlx5_err_cqe *)cqe); 2108da1df1ccSViacheslav Ovsiienko /* 2109da1df1ccSViacheslav Ovsiienko * Flush buffers, update consuming index 2110da1df1ccSViacheslav Ovsiienko * if recovery succeeded. Otherwise 2111da1df1ccSViacheslav Ovsiienko * just try to recover later. 2112da1df1ccSViacheslav Ovsiienko */ 2113da1df1ccSViacheslav Ovsiienko last_cqe = NULL; 2114da1df1ccSViacheslav Ovsiienko break; 2115318ea4cfSViacheslav Ovsiienko } 211618a1c200SViacheslav Ovsiienko /* Normal transmit completion. */ 211718a1c200SViacheslav Ovsiienko ++txq->cq_ci; 2118da1df1ccSViacheslav Ovsiienko last_cqe = cqe; 211918a1c200SViacheslav Ovsiienko #ifndef NDEBUG 212018a1c200SViacheslav Ovsiienko if (txq->cq_pi) 212118a1c200SViacheslav Ovsiienko --txq->cq_pi; 212218a1c200SViacheslav Ovsiienko #endif 2123318ea4cfSViacheslav Ovsiienko /* 2124318ea4cfSViacheslav Ovsiienko * We have to restrict the amount of processed CQEs 2125318ea4cfSViacheslav Ovsiienko * in one tx_burst routine call. The CQ may be large 2126318ea4cfSViacheslav Ovsiienko * and many CQEs may be updated by the NIC in one 2127318ea4cfSViacheslav Ovsiienko * transaction. Buffers freeing is time consuming, 2128318ea4cfSViacheslav Ovsiienko * multiple iterations may introduce significant 2129318ea4cfSViacheslav Ovsiienko * latency. 
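 * The per-call budget is MLX5_TX_COMP_MAX_CQE; any CQEs left over
 * are simply picked up by a subsequent burst invocation.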
2130318ea4cfSViacheslav Ovsiienko */ 2131318ea4cfSViacheslav Ovsiienko } while (--count); 2132da1df1ccSViacheslav Ovsiienko mlx5_tx_comp_flush(txq, last_cqe, ret, olx); 213318a1c200SViacheslav Ovsiienko } 213418a1c200SViacheslav Ovsiienko 213518a1c200SViacheslav Ovsiienko /** 213618a1c200SViacheslav Ovsiienko * Check if the completion request flag should be set in the last WQE. 213718a1c200SViacheslav Ovsiienko * Both pushed mbufs and WQEs are monitored and the completion request 213818a1c200SViacheslav Ovsiienko * flag is set if any of the thresholds is reached. 213918a1c200SViacheslav Ovsiienko * 214018a1c200SViacheslav Ovsiienko * @param txq 214118a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 214218a1c200SViacheslav Ovsiienko * @param loc 214318a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 21444dec9c79SViacheslav Ovsiienko * @param multi 21454dec9c79SViacheslav Ovsiienko * Routine is called from the multi-segment sending loop; 21464dec9c79SViacheslav Ovsiienko * do not correct the elts_head according to the pkts_copy. 214718a1c200SViacheslav Ovsiienko * @param olx 214818a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 214918a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 215018a1c200SViacheslav Ovsiienko */ 215118a1c200SViacheslav Ovsiienko static __rte_always_inline void 215218a1c200SViacheslav Ovsiienko mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, 215318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 21544dec9c79SViacheslav Ovsiienko bool multi, 21555a93e173SViacheslav Ovsiienko unsigned int olx) 215618a1c200SViacheslav Ovsiienko { 21575a93e173SViacheslav Ovsiienko uint16_t head = txq->elts_head; 21585a93e173SViacheslav Ovsiienko unsigned int part; 215918a1c200SViacheslav Ovsiienko 21604dec9c79SViacheslav Ovsiienko part = (MLX5_TXOFF_CONFIG(INLINE) || multi) ? 21614dec9c79SViacheslav Ovsiienko 0 : loc->pkts_sent - loc->pkts_copy; 21625a93e173SViacheslav Ovsiienko head += part; 216318a1c200SViacheslav Ovsiienko if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH || 21645a93e173SViacheslav Ovsiienko (MLX5_TXOFF_CONFIG(INLINE) && 21655a93e173SViacheslav Ovsiienko (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) { 216618a1c200SViacheslav Ovsiienko volatile struct mlx5_wqe *last = loc->wqe_last; 216718a1c200SViacheslav Ovsiienko 216818a1c200SViacheslav Ovsiienko txq->elts_comp = head; 21695a93e173SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(INLINE)) 217018a1c200SViacheslav Ovsiienko txq->wqe_comp = txq->wqe_ci; 217118a1c200SViacheslav Ovsiienko /* Request unconditional completion on last WQE. */ 217218a1c200SViacheslav Ovsiienko last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS << 217318a1c200SViacheslav Ovsiienko MLX5_COMP_MODE_OFFSET); 217418a1c200SViacheslav Ovsiienko /* Save elts_head in unused "immediate" field of WQE. */ 217518a1c200SViacheslav Ovsiienko last->cseg.misc = head; 217618a1c200SViacheslav Ovsiienko /* 217718a1c200SViacheslav Ovsiienko * A CQE slot must always be available. Count the 217818a1c200SViacheslav Ovsiienko * issued CQE "always" requests instead of the 217918a1c200SViacheslav Ovsiienko * production index, because there can be CQEs with 218018a1c200SViacheslav Ovsiienko * errors and the difference with ci may become inconsistent. 
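 *
 * Threshold example (assuming MLX5_TX_COMP_THRESH == 32 purely for
 * illustration): with elts_comp == 100, a completion is requested
 * once head reaches 132, and elts_comp is then advanced to the new
 * head.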

/**
 * DPDK callback to check the status of a Tx descriptor.
 *
 * @param tx_queue
 *   The Tx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Tx descriptor.
 */
int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct mlx5_txq_data *restrict txq = tx_queue;
	uint16_t used;

	mlx5_tx_handle_completion(txq, 0);
	used = txq->elts_head - txq->elts_tail;
	if (offset < used)
		return RTE_ETH_TX_DESC_FULL;
	return RTE_ETH_TX_DESC_DONE;
}
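
/*
 * Applications reach the callback above through the generic ethdev
 * API rte_eth_tx_descriptor_status(). A minimal usage sketch; the
 * function tx_ring_usage_hint and the probed offset 512 are
 * arbitrary illustrations, not part of this driver.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
#include <stdio.h>
#include <rte_ethdev.h>

static void
tx_ring_usage_hint(uint16_t port_id, uint16_t queue_id)
{
	/* Probe the descriptor half-way into a 1024-entry ring. */
	int status = rte_eth_tx_descriptor_status(port_id, queue_id, 512);

	if (status == RTE_ETH_TX_DESC_FULL)
		printf("Tx ring is at least half full\n");
	else if (status == RTE_ETH_TX_DESC_DONE)
		printf("Descriptor 512 already completed\n");
}
#endif /* MLX5_TX_DOC_EXAMPLES */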

/**
 * Build the Control Segment with specified opcode:
 * - MLX5_OPCODE_SEND
 * - MLX5_OPCODE_ENHANCED_MPSW
 * - MLX5_OPCODE_TSO
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Control Segment.
 * @param ds
 *   Supposed length of WQE in segments.
 * @param opcode
 *   SQ WQE opcode to put into Control Segment.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc __rte_unused,
		  struct mlx5_wqe *restrict wqe,
		  unsigned int ds,
		  unsigned int opcode,
		  unsigned int olx __rte_unused)
{
	struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;

	cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
	cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
	cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
			     MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
}
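
/*
 * A standalone sketch of the first Control Segment dword built above:
 * the opcode byte sits in the low bits with the send queue WQE counter
 * packed above it, stored big-endian. The helper name is invented for
 * illustration only.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
static inline uint32_t
cseg_opcode_dword(uint16_t wqe_ci, uint8_t opcode)
{
	/* Low byte: opcode; next 16 bits: WQE index of the SQ. */
	return rte_cpu_to_be_32(((uint32_t)wqe_ci << 8) | opcode);
}
#endif /* MLX5_TX_DOC_EXAMPLES */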

/**
 * Build the Ethernet Segment without inlined data.
 * Supports Software Parser, Checksums and VLAN
 * insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
		  struct mlx5_txq_local *restrict loc,
		  struct mlx5_wqe *restrict wqe,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
	uint32_t csum;

	/*
	 * Calculate and set check sum flags first, the dword field
	 * in the segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	es->flags = rte_cpu_to_le_32(csum);
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_METADATA ?
		       loc->mbuf->tx_metadata : 0 : 0;
	/* Engage VLAN tag insertion feature if requested. */
	if (MLX5_TXOFF_CONFIG(VLAN) &&
	    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		/*
		 * We should get here only if the device supports
		 * this feature correctly.
		 */
		assert(txq->vlan_en);
		es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
						  loc->mbuf->vlan_tci);
	} else {
		es->inline_hdr = RTE_BE32(0);
	}
}

/**
 * Build the Ethernet Segment with minimal inlined data
 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
 * used to fill the gap in single WQEBB WQEs.
 * Supports Software Parser, Checksums and VLAN
 * insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param vlan
 *   Length of VLAN tag insertion if any.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
		  struct mlx5_txq_local *restrict loc,
		  struct mlx5_wqe *restrict wqe,
		  unsigned int vlan,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
	uint32_t csum;
	uint8_t *psrc, *pdst;

	/*
	 * Calculate and set check sum flags first, the dword field
	 * in the segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	es->flags = rte_cpu_to_le_32(csum);
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_METADATA ?
		       loc->mbuf->tx_metadata : 0 : 0;
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
				(sizeof(uint16_t) +
				 sizeof(rte_v128u32_t)),
		      "invalid Ethernet Segment data size");
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
				(sizeof(uint16_t) +
				 sizeof(struct rte_vlan_hdr) +
				 2 * RTE_ETHER_ADDR_LEN),
		      "invalid Ethernet Segment data size");
	psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
	es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
	es->inline_data = *(unaligned_uint16_t *)psrc;
	psrc += sizeof(uint16_t);
	pdst = (uint8_t *)(es + 1);
	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of inline data. */
		memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
		pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		/* Insert VLAN ethertype + VLAN tag. */
		*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
						((RTE_ETHER_TYPE_VLAN << 16) |
						 loc->mbuf->vlan_tci);
		pdst += sizeof(struct rte_vlan_hdr);
		/* Copy the remaining two bytes from packet data. */
		assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
		*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
	} else {
		/* Fill the gap in the title WQEBB with inline data. */
		rte_mov16(pdst, psrc);
	}
}
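
/*
 * A standalone sketch of the VLAN insertion done while inlining
 * above: copy the two MAC addresses first, write the 4-byte tag,
 * then continue with the original ethertype. The function name and
 * the plain-buffer interface are invented for illustration only.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
static void
inline_with_vlan(uint8_t *dst, const uint8_t *frame, uint16_t vlan_tci)
{
	/* 12 bytes: destination + source MAC addresses. */
	memcpy(dst, frame, 2 * RTE_ETHER_ADDR_LEN);
	dst += 2 * RTE_ETHER_ADDR_LEN;
	/* 4 bytes: 0x8100 ethertype followed by the VLAN TCI. */
	*(unaligned_uint32_t *)dst =
		rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) | vlan_tci);
	dst += sizeof(struct rte_vlan_hdr);
	/* Copy the original ethertype; payload would follow it. */
	memcpy(dst, frame + 2 * RTE_ETHER_ADDR_LEN, sizeof(uint16_t));
}
#endif /* MLX5_TX_DOC_EXAMPLES */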

/**
 * Build the Ethernet Segment with entire packet
 * data inlining. Checks the boundary of WQEBB and
 * ring buffer wrapping, supports Software Parser,
 * Checksums and VLAN insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param vlan
 *   Length of VLAN tag insertion if any.
 * @param inlen
 *   Length of data to inline (VLAN included, if any).
 * @param tso
 *   TSO flag, set mss field from the packet.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment (aligned and wrapped around).
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc,
		  struct mlx5_wqe *restrict wqe,
		  unsigned int vlan,
		  unsigned int inlen,
		  unsigned int tso,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
	uint32_t csum;
	uint8_t *psrc, *pdst;
	unsigned int part;

	/*
	 * Calculate and set check sum flags first, the dword field
	 * in the segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	if (tso) {
		csum <<= 24;
		csum |= loc->mbuf->tso_segsz;
		es->flags = rte_cpu_to_be_32(csum);
	} else {
		es->flags = rte_cpu_to_le_32(csum);
	}
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_METADATA ?
		       loc->mbuf->tx_metadata : 0 : 0;
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
				(sizeof(uint16_t) +
				 sizeof(rte_v128u32_t)),
		      "invalid Ethernet Segment data size");
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
				(sizeof(uint16_t) +
				 sizeof(struct rte_vlan_hdr) +
				 2 * RTE_ETHER_ADDR_LEN),
		      "invalid Ethernet Segment data size");
	psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
	es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
	es->inline_data = *(unaligned_uint16_t *)psrc;
	psrc += sizeof(uint16_t);
	pdst = (uint8_t *)(es + 1);
	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of inline data. */
		memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
		pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
		/* Insert VLAN ethertype + VLAN tag. */
		*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
						((RTE_ETHER_TYPE_VLAN << 16) |
						 loc->mbuf->vlan_tci);
		pdst += sizeof(struct rte_vlan_hdr);
		/* Copy the remaining two bytes from packet data. */
		assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
		*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
		psrc += sizeof(uint16_t);
	} else {
		/* Fill the gap in the title WQEBB with inline data. */
		rte_mov16(pdst, psrc);
		psrc += sizeof(rte_v128u32_t);
	}
	pdst = (uint8_t *)(es + 2);
	assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
	assert(pdst < (uint8_t *)txq->wqes_end);
	inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
	if (!inlen) {
		assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
		return (struct mlx5_wqe_dseg *)pdst;
	}
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, inlen);
	do {
		rte_memcpy(pdst, psrc, part);
		inlen -= part;
		if (likely(!inlen)) {
			/*
			 * If the return value is not used by the caller
			 * the code below will be optimized out.
			 */
			pdst += part;
			pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
				pdst = (uint8_t *)txq->wqes;
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		psrc += part;
		part = inlen;
	} while (true);
}
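
/*
 * A standalone sketch of the wraparound copy loop above: a write into
 * a circular buffer may be split in two pieces at the buffer end. The
 * ring_copy helper and its interface are invented for illustration
 * only.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
static void
ring_copy(uint8_t *ring, size_t ring_size, size_t off,
	  const uint8_t *src, size_t len)
{
	/* First chunk: up to the end of the ring storage. */
	size_t part = ring_size - off;

	part = part < len ? part : len;
	memcpy(ring + off, src, part);
	/* Second chunk, if any, restarts at the ring base. */
	if (len > part)
		memcpy(ring, src + part, len - part);
}
#endif /* MLX5_TX_DOC_EXAMPLES */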

/**
 * Copy data from a chain of mbufs to the specified linear buffer.
 * If the data from some mbuf is copied completely, this mbuf is
 * freed. The local structure is used to keep the byte stream state.
 *
 * @param pdst
 *   Pointer to the destination linear buffer.
 * @param loc
 *   Pointer to burst routine local context.
 * @param len
 *   Length of data to be copied.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_mseg_memcpy(uint8_t *pdst,
		    struct mlx5_txq_local *restrict loc,
		    unsigned int len,
		    unsigned int olx __rte_unused)
{
	struct rte_mbuf *mbuf;
	unsigned int part, dlen;
	uint8_t *psrc;

	assert(len);
	do {
		/* Allow zero length packets, must check first. */
		dlen = rte_pktmbuf_data_len(loc->mbuf);
		if (dlen <= loc->mbuf_off) {
			/* Exhausted packet, just free. */
			mbuf = loc->mbuf;
			loc->mbuf = mbuf->next;
			rte_pktmbuf_free_seg(mbuf);
			loc->mbuf_off = 0;
			assert(loc->mbuf_nseg > 1);
			assert(loc->mbuf);
			--loc->mbuf_nseg;
			continue;
		}
		dlen -= loc->mbuf_off;
		psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
					       loc->mbuf_off);
		part = RTE_MIN(len, dlen);
		rte_memcpy(pdst, psrc, part);
		loc->mbuf_off += part;
		len -= part;
		if (!len) {
			if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
				loc->mbuf_off = 0;
				/* Exhausted packet, just free. */
				mbuf = loc->mbuf;
				loc->mbuf = mbuf->next;
				rte_pktmbuf_free_seg(mbuf);
				loc->mbuf_off = 0;
				assert(loc->mbuf_nseg >= 1);
				--loc->mbuf_nseg;
			}
			return;
		}
		pdst += part;
	} while (true);
}
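
/*
 * A standalone sketch of the gather step performed above: walk a
 * linked list of buffer segments, copying from a running cursor into
 * a linear destination. struct buf_seg and chain_gather are invented
 * for illustration and do not free segments, unlike the real helper.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
struct buf_seg {
	const uint8_t *data;
	size_t len;
	struct buf_seg *next;
};

/* Copy 'len' bytes from the chain into 'dst', advancing the cursor. */
static void
chain_gather(uint8_t *dst, struct buf_seg **seg, size_t *off, size_t len)
{
	while (len) {
		size_t avail = (*seg)->len - *off;
		size_t part = avail < len ? avail : len;

		memcpy(dst, (*seg)->data + *off, part);
		dst += part;
		len -= part;
		*off += part;
		if (*off == (*seg)->len) {
			/* Segment exhausted, move to the next one. */
			*seg = (*seg)->next;
			*off = 0;
		}
	}
}
#endif /* MLX5_TX_DOC_EXAMPLES */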

/**
 * Build the Ethernet Segment with inlined data from a
 * multi-segment packet. Checks the boundary of WQEBB
 * and ring buffer wrapping, supports Software Parser,
 * Checksums and VLAN insertion Tx offload features.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet Segment.
 * @param vlan
 *   Length of VLAN tag insertion if any.
 * @param inlen
 *   Length of data to inline (VLAN included, if any).
 * @param tso
 *   TSO flag, set mss field from the packet.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment (aligned and
 *   possibly NOT wrapped around - the caller should do
 *   the wrapping check on its own).
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc,
		  struct mlx5_wqe *restrict wqe,
		  unsigned int vlan,
		  unsigned int inlen,
		  unsigned int tso,
		  unsigned int olx)
{
	struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
	uint32_t csum;
	uint8_t *pdst;
	unsigned int part;

	/*
	 * Calculate and set check sum flags first, the uint32_t field
	 * in the segment may be shared with Software Parser flags.
	 */
	csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
	if (tso) {
		csum <<= 24;
		csum |= loc->mbuf->tso_segsz;
		es->flags = rte_cpu_to_be_32(csum);
	} else {
		es->flags = rte_cpu_to_le_32(csum);
	}
	/*
	 * Calculate and set Software Parser offsets and flags.
	 * These flags are set for custom UDP and IP tunnel packets.
	 */
	es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
	/* Fill metadata field if needed. */
	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
		       loc->mbuf->ol_flags & PKT_TX_METADATA ?
		       loc->mbuf->tx_metadata : 0 : 0;
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
				(sizeof(uint16_t) +
				 sizeof(rte_v128u32_t)),
		      "invalid Ethernet Segment data size");
	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
				(sizeof(uint16_t) +
				 sizeof(struct rte_vlan_hdr) +
				 2 * RTE_ETHER_ADDR_LEN),
		      "invalid Ethernet Segment data size");
	assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
	es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
	pdst = (uint8_t *)&es->inline_data;
	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
		/* Implement VLAN tag insertion as part of inline data. */
		mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
		pdst += 2 * RTE_ETHER_ADDR_LEN;
		*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
						((RTE_ETHER_TYPE_VLAN << 16) |
						 loc->mbuf->vlan_tci);
		pdst += sizeof(struct rte_vlan_hdr);
		inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
	}
	assert(pdst < (uint8_t *)txq->wqes_end);
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, inlen);
	assert(part);
	do {
		mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
		inlen -= part;
		if (likely(!inlen)) {
			pdst += part;
			pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		part = inlen;
	} while (true);
}

/**
 * Build the Data Segment of pointer type.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to WQE to fill with built Data Segment.
 * @param buf
 *   Data buffer to point to.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
		 struct mlx5_txq_local *restrict loc,
		 struct mlx5_wqe_dseg *restrict dseg,
		 uint8_t *buf,
		 unsigned int len,
		 unsigned int olx __rte_unused)

{
	assert(len);
	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
	dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
}

/**
 * Build the Data Segment of pointer type, or of inline
 * type if the data length does not exceed the minimal
 * Data Segment size.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to WQE to fill with built Data Segment.
 * @param buf
 *   Data buffer to point to.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc,
		  struct mlx5_wqe_dseg *restrict dseg,
		  uint8_t *buf,
		  unsigned int len,
		  unsigned int olx __rte_unused)

{
	uintptr_t dst, src;

	assert(len);
	if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
		dseg->bcount = rte_cpu_to_be_32(len);
		dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
		dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);

		return;
	}
	dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
	/* Unrolled implementation of generic rte_memcpy. */
	dst = (uintptr_t)&dseg->inline_data[0];
	src = (uintptr_t)buf;
	if (len & 0x08) {
#ifdef RTE_ARCH_STRICT_ALIGN
		assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
#else
		*(uint64_t *)dst = *(unaligned_uint64_t *)src;
		dst += sizeof(uint64_t);
		src += sizeof(uint64_t);
#endif
	}
	if (len & 0x04) {
		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
	}
	if (len & 0x02) {
		*(uint16_t *)dst = *(unaligned_uint16_t *)src;
		dst += sizeof(uint16_t);
		src += sizeof(uint16_t);
	}
	if (len & 0x01)
		*(uint8_t *)dst = *(uint8_t *)src;
}
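
/*
 * A standalone sketch of the length-bit dispatch above: a copy of at
 * most 15 bytes is decomposed by the 8/4/2/1 bits of 'len', so it
 * needs at most four fixed-size copies and no loop. memcpy is used
 * here to sidestep the alignment handling; the helper name is
 * invented for illustration only.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
static inline void
copy_upto15(uint8_t *dst, const uint8_t *src, unsigned int len)
{
	if (len & 0x08) {
		memcpy(dst, src, 8);
		dst += 8;
		src += 8;
	}
	if (len & 0x04) {
		memcpy(dst, src, 4);
		dst += 4;
		src += 4;
	}
	if (len & 0x02) {
		memcpy(dst, src, 2);
		dst += 2;
		src += 2;
	}
	if (len & 0x01)
		*dst = *src;
}
#endif /* MLX5_TX_DOC_EXAMPLES */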

/**
 * Build the Data Segment of inlined data from a single
 * segment packet, no VLAN insertion.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to WQE to fill with built Data Segment.
 * @param buf
 *   Data buffer to point to.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment after inlined data.
 *   A ring buffer wraparound check is needed. We do not
 *   do it here because it may not be needed for the
 *   last packet in the eMPW session.
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc __rte_unused,
		  struct mlx5_wqe_dseg *restrict dseg,
		  uint8_t *buf,
		  unsigned int len,
		  unsigned int olx __rte_unused)
{
	unsigned int part;
	uint8_t *pdst;

	dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
	pdst = &dseg->inline_data[0];
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, len);
	do {
		rte_memcpy(pdst, buf, part);
		len -= part;
		if (likely(!len)) {
			pdst += part;
			pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			/* Note: no final wraparound check here. */
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		buf += part;
		part = len;
	} while (true);
}

/**
 * Build the Data Segment of inlined data from a single
 * segment packet with VLAN insertion.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param dseg
 *   Pointer to the dseg to fill with built Data Segment.
 * @param buf
 *   Data buffer to point to.
 * @param len
 *   Data buffer length.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Pointer to the next Data Segment after inlined data.
 *   A ring buffer wraparound check is needed.
 */
static __rte_always_inline struct mlx5_wqe_dseg *
mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
		  struct mlx5_txq_local *restrict loc __rte_unused,
		  struct mlx5_wqe_dseg *restrict dseg,
		  uint8_t *buf,
		  unsigned int len,
		  unsigned int olx __rte_unused)

{
	unsigned int part;
	uint8_t *pdst;

	assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
	static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
				 (2 * RTE_ETHER_ADDR_LEN),
		      "invalid Data Segment data size");
	dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) |
					MLX5_ETH_WQE_DATA_INLINE);
	pdst = &dseg->inline_data[0];
	memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
	buf += MLX5_DSEG_MIN_INLINE_SIZE;
	pdst += MLX5_DSEG_MIN_INLINE_SIZE;
	len -= MLX5_DSEG_MIN_INLINE_SIZE;
	/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
	assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
	if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
		pdst = (uint8_t *)txq->wqes;
	*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
					     loc->mbuf->vlan_tci);
	pdst += sizeof(struct rte_vlan_hdr);
	/*
	 * The WQEBB space availability is checked by caller.
	 * Here we should be aware of WQE ring buffer wraparound only.
	 */
	part = (uint8_t *)txq->wqes_end - pdst;
	part = RTE_MIN(part, len);
	do {
		rte_memcpy(pdst, buf, part);
		len -= part;
		if (likely(!len)) {
			pdst += part;
			pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
			/* Note: no final wraparound check here. */
			return (struct mlx5_wqe_dseg *)pdst;
		}
		pdst = (uint8_t *)txq->wqes;
		buf += part;
		part = len;
	} while (true);
}

/**
 * Build the Ethernet Segment with optionally inlined data with
 * VLAN insertion and following Data Segments (if any) from a
 * multi-segment packet. Used by ordinary send and TSO.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param wqe
 *   Pointer to WQE to fill with built Ethernet/Data Segments.
 * @param vlan
 *   Length of VLAN header to insert, 0 means no VLAN insertion.
 * @param inlen
 *   Data length to inline. For TSO this parameter specifies the
 *   exact value, for the ordinary send routine it can be aligned
 *   by the caller to provide better WQE space saving and data
 *   buffer start address alignment. This length includes the
 *   VLAN header being inserted.
 * @param tso
 *   Zero means ordinary send, inlined data can be extended,
 *   otherwise this is TSO, inlined data length is fixed.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   Actual size of built WQE in segments.
 */
static __rte_always_inline unsigned int
mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
		   struct mlx5_txq_local *restrict loc,
		   struct mlx5_wqe *restrict wqe,
		   unsigned int vlan,
		   unsigned int inlen,
		   unsigned int tso,
		   unsigned int olx __rte_unused)
{
	struct mlx5_wqe_dseg *restrict dseg;
	unsigned int ds;

	assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
	loc->mbuf_nseg = NB_SEGS(loc->mbuf);
	loc->mbuf_off = 0;

	dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
	if (!loc->mbuf_nseg)
		goto dseg_done;
	/*
	 * There are still some mbufs remaining, not inlined.
	 * The first mbuf may be partially inlined and we
	 * must process the possible non-zero data offset.
	 */
	if (loc->mbuf_off) {
		unsigned int dlen;
		uint8_t *dptr;

		/*
		 * Exhausted packets must be dropped before.
		 * Non-zero offset means there is some data
		 * remaining in the packet.
		 */
		assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
		assert(rte_pktmbuf_data_len(loc->mbuf));
		dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
					       loc->mbuf_off);
		dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
		/*
		 * Build the pointer/minimal data Data Segment.
		 * Do ring buffer wrapping check in advance.
		 */
		if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
			dseg = (struct mlx5_wqe_dseg *)txq->wqes;
		mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
		/* Store the mbuf to be freed on completion. */
		assert(loc->elts_free);
		txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
		--loc->elts_free;
		++dseg;
		if (--loc->mbuf_nseg == 0)
			goto dseg_done;
		loc->mbuf = loc->mbuf->next;
		loc->mbuf_off = 0;
	}
	do {
		if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
			struct rte_mbuf *mbuf;

			/* Zero length segment found, just skip. */
			mbuf = loc->mbuf;
			loc->mbuf = loc->mbuf->next;
			rte_pktmbuf_free_seg(mbuf);
			if (--loc->mbuf_nseg == 0)
				break;
		} else {
			if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
				dseg = (struct mlx5_wqe_dseg *)txq->wqes;
			mlx5_tx_dseg_iptr
				(txq, loc, dseg,
				 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
				 rte_pktmbuf_data_len(loc->mbuf), olx);
			assert(loc->elts_free);
			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
			--loc->elts_free;
			++dseg;
			if (--loc->mbuf_nseg == 0)
				break;
			loc->mbuf = loc->mbuf->next;
		}
	} while (true);

dseg_done:
	/* Calculate actual segments used from the dseg pointer. */
	if ((uintptr_t)wqe < (uintptr_t)dseg)
		ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
	else
		ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
		      txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
	return ds;
}
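
/*
 * A standalone sketch of the segment count at dseg_done above: the
 * pointer difference is taken modulo the ring size because 'dseg'
 * may have wrapped behind 'wqe'. The helper and its interface are
 * invented for illustration only.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
static inline unsigned int
ring_seg_count(uintptr_t start, uintptr_t end,
	       uintptr_t ring_bytes, uintptr_t seg_size)
{
	/* Add one full ring if 'end' wrapped around behind 'start'. */
	uintptr_t span = (start < end) ? end - start :
			 end - start + ring_bytes;

	return span / seg_size;
}
#endif /* MLX5_TX_DOC_EXAMPLES */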

/**
 * Tx one packet function for multi-segment TSO. Supports all
 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
 * sends one packet per WQE.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 *   Local context variables partially updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
			 struct mlx5_txq_local *restrict loc,
			 unsigned int olx)
{
	struct mlx5_wqe *restrict wqe;
	unsigned int ds, dlen, inlen, ntcp, vlan = 0;

	/*
	 * Calculate the data length to be inlined to estimate
	 * the required space in the WQE ring buffer.
	 */
	dlen = rte_pktmbuf_pkt_len(loc->mbuf);
	if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
		vlan = sizeof(struct rte_vlan_hdr);
	inlen = loc->mbuf->l2_len + vlan +
		loc->mbuf->l3_len + loc->mbuf->l4_len;
	if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
		return MLX5_TXCMP_CODE_ERROR;
	if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
		inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
	/* Packet must contain all TSO headers. */
	if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
		     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
		     inlen > (dlen + vlan)))
		return MLX5_TXCMP_CODE_ERROR;
	assert(inlen >= txq->inlen_mode);
	/*
	 * Check whether there are enough free WQEBBs:
	 * - Control Segment
	 * - Ethernet Segment
	 * - First Segment of inlined Ethernet data
	 * - ... data continued ...
	 * - Data Segments of pointer/min inline type
	 */
	ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
				       MLX5_ESEG_MIN_INLINE_SIZE +
				       MLX5_WSEG_SIZE +
				       MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
	if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
		return MLX5_TXCMP_CODE_EXIT;
	/* Check for maximal WQE size. */
	if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
		return MLX5_TXCMP_CODE_ERROR;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Update sent data bytes/packets counters. */
	ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
		loc->mbuf->tso_segsz;
	/*
	 * One more packet will be counted for the mbuf itself
	 * at the end of mlx5_tx_burst, from the loc->pkts_sent
	 * field.
	 */
	--ntcp;
	txq->stats.opackets += ntcp;
	txq->stats.obytes += dlen + vlan + ntcp * inlen;
#endif
	wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
	loc->wqe_last = wqe;
	mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
	ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
	wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
	txq->wqe_ci += (ds + 3) / 4;
	loc->wqe_free -= (ds + 3) / 4;
	/* Request CQE generation if limits are reached. */
	mlx5_tx_request_completion(txq, loc, true, olx);
	return MLX5_TXCMP_CODE_MULTI;
}
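
/*
 * A standalone sketch of the soft-counter arithmetic above: the NIC
 * splits the payload into MSS-sized TCP segments, rounded up, and
 * each emitted segment repeats the inlined headers on the wire. The
 * helper name and interface are invented for illustration only.
 */
#ifdef MLX5_TX_DOC_EXAMPLES
static inline unsigned int
tso_wire_bytes(unsigned int pkt_len, unsigned int hdr_len,
	       unsigned int mss)
{
	/* Round the payload up to whole MSS-sized segments. */
	unsigned int nseg = (pkt_len - hdr_len + mss - 1) / mss;

	/* Each emitted segment repeats the protocol headers. */
	return pkt_len + (nseg - 1) * hdr_len;
}
#endif /* MLX5_TX_DOC_EXAMPLES */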
/**
 * Tx one packet function for multi-segment SEND. Supports all
 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
 * sends one packet per WQE, without any data inlining in
 * Ethernet Segment.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 * Local context variables partially updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
			  struct mlx5_txq_local *restrict loc,
			  unsigned int olx)
{
	struct mlx5_wqe_dseg *restrict dseg;
	struct mlx5_wqe *restrict wqe;
	unsigned int ds, nseg;

	assert(NB_SEGS(loc->mbuf) > 1);
	/*
	 * No inlining at all: saving CPU cycles was prioritized
	 * at configuration time, so no packet data should be
	 * copied into the WQE.
	 */
	nseg = NB_SEGS(loc->mbuf);
	ds = 2 + nseg;
	if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
		return MLX5_TXCMP_CODE_EXIT;
	/* Check for maximal WQE size. */
	if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
		return MLX5_TXCMP_CODE_ERROR;
	/*
	 * Some Tx offloads may cause an error if the packet is not
	 * long enough; check against the assumed minimal length.
	 */
	if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
		return MLX5_TXCMP_CODE_ERROR;
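	/*
	 * For example (illustrative only): a 3-segment packet takes
	 * ds = 2 + 3 = 5 sixteen-byte segments (Control + Ethernet +
	 * three pointer Data Segments), i.e. (5 + 3) / 4 = 2 WQEBBs.
	 */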
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Update sent data bytes counter. */
	txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
	if (MLX5_TXOFF_CONFIG(VLAN) &&
	    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
		txq->stats.obytes += sizeof(struct rte_vlan_hdr);
#endif
	/*
	 * SEND WQE, one WQEBB:
	 * - Control Segment, SEND opcode
	 * - Ethernet Segment, optional VLAN, no inline
	 * - Data Segments, pointer only type
	 */
	wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
	loc->wqe_last = wqe;
	mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
	mlx5_tx_eseg_none(txq, loc, wqe, olx);
	dseg = &wqe->dseg[0];
	do {
		if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
			struct rte_mbuf *mbuf;

			/*
			 * Zero-length segment found, the total size
			 * of the WQE in segments has to be corrected.
			 * This is supposed to be a rare occasion, so
			 * in the normal case (no zero-length segments)
			 * the extra write to the Control Segment is
			 * avoided.
			 */
			--ds;
			wqe->cseg.sq_ds -= RTE_BE32(1);
			mbuf = loc->mbuf;
			loc->mbuf = mbuf->next;
			rte_pktmbuf_free_seg(mbuf);
			if (--nseg == 0)
				break;
		} else {
			mlx5_tx_dseg_ptr
				(txq, loc, dseg,
				 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
				 rte_pktmbuf_data_len(loc->mbuf), olx);
			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
			--loc->elts_free;
			if (--nseg == 0)
				break;
			++dseg;
			if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
				dseg = (struct mlx5_wqe_dseg *)txq->wqes;
			loc->mbuf = loc->mbuf->next;
		}
	} while (true);
	txq->wqe_ci += (ds + 3) / 4;
	loc->wqe_free -= (ds + 3) / 4;
	/* Request CQE generation if limits are reached. */
	mlx5_tx_request_completion(txq, loc, true, olx);
	return MLX5_TXCMP_CODE_MULTI;
}

/**
 * Tx one packet function for multi-segment SEND. Supports all
 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
 * sends one packet per WQE, with data inlining in
 * Ethernet Segment and minimal Data Segments.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 * Local context variables partially updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
			    struct mlx5_txq_local *restrict loc,
			    unsigned int olx)
{
	struct mlx5_wqe *restrict wqe;
	unsigned int ds, inlen, dlen, vlan = 0;

	assert(MLX5_TXOFF_CONFIG(INLINE));
	assert(NB_SEGS(loc->mbuf) > 1);
	/*
	 * First calculate the data length to be inlined
	 * to estimate the required space for the WQE.
	 */
	dlen = rte_pktmbuf_pkt_len(loc->mbuf);
	if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
		vlan = sizeof(struct rte_vlan_hdr);
	inlen = dlen + vlan;
	/* Check against minimal length. */
	if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
		return MLX5_TXCMP_CODE_ERROR;
	assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
	if (inlen > txq->inlen_send) {
		struct rte_mbuf *mbuf;
		unsigned int nxlen;
		uintptr_t start;

		/*
		 * Packet length exceeds the allowed inline
		 * data length, check whether the minimal
		 * inlining is required.
		 */
		if (txq->inlen_mode) {
			assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
			assert(txq->inlen_mode <= txq->inlen_send);
			inlen = txq->inlen_mode;
		} else {
			if (!vlan || txq->vlan_en) {
				/*
				 * VLAN insertion will be done by the HW.
				 * This is not the most efficient path -
				 * the VLAN flag is checked twice - but
				 * the inlining length must be computed
				 * correctly, taking the VLAN header
				 * being inserted into account.
				 */
				return mlx5_tx_packet_multi_send
							(txq, loc, olx);
			}
			inlen = MLX5_ESEG_MIN_INLINE_SIZE;
		}
		/*
		 * Now the minimal amount of data requested to be
		 * inlined is known. Check whether the buffers from
		 * the beginning of the chain should be inlined to
		 * eliminate some mbufs.
		 */
		mbuf = loc->mbuf;
		nxlen = rte_pktmbuf_data_len(mbuf);
		if (unlikely(nxlen <= txq->inlen_send)) {
			/* We can inline the first mbuf at least. */
			if (nxlen < inlen) {
				unsigned int smlen;

				/* Scan mbufs till inlen is filled. */
				do {
					smlen = nxlen;
					mbuf = NEXT(mbuf);
					assert(mbuf);
					nxlen = rte_pktmbuf_data_len(mbuf);
					nxlen += smlen;
				} while (unlikely(nxlen < inlen));
				if (unlikely(nxlen > txq->inlen_send)) {
					/* We cannot inline the entire mbuf. */
					smlen = inlen - smlen;
					start = rte_pktmbuf_mtod_offset
						    (mbuf, uintptr_t, smlen);
					goto do_align;
				}
			}
			do {
				inlen = nxlen;
				mbuf = NEXT(mbuf);
				/* This must not be the end of packet. */
				assert(mbuf);
				nxlen = inlen + rte_pktmbuf_data_len(mbuf);
			} while (unlikely(nxlen < txq->inlen_send));
		}
		start = rte_pktmbuf_mtod(mbuf, uintptr_t);
		/*
		 * Check whether we can extend the inline part to align
		 * the start address of the remaining data buffer to
		 * a cacheline.
		 */
do_align:
		start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
		/* 'start' is now the byte count to the next cacheline. */
		if (unlikely(start)) {
			start += inlen;
			if (start <= txq->inlen_send)
				inlen = start;
		}
	}
	/*
	 * Check whether there are enough free WQEBBs:
	 * - Control Segment
	 * - Ethernet Segment
	 * - First Segment of inlined Ethernet data
	 * - ... data continued ...
	 * - Data Segments of pointer/min inline type
	 *
	 * Estimate the number of Data Segments conservatively,
	 * supposing no mbufs are freed during inlining.
	 */
	assert(inlen <= txq->inlen_send);
	ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
				       MLX5_ESEG_MIN_INLINE_SIZE +
				       MLX5_WSEG_SIZE +
				       MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
	if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
		return MLX5_TXCMP_CODE_EXIT;
	/* Check for maximal WQE size. */
	if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
		return MLX5_TXCMP_CODE_ERROR;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Update sent data bytes/packets counters. */
	txq->stats.obytes += dlen + vlan;
#endif
	wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
	loc->wqe_last = wqe;
	mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
	ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
	wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
	txq->wqe_ci += (ds + 3) / 4;
	loc->wqe_free -= (ds + 3) / 4;
	/* Request CQE generation if limits are reached. */
	mlx5_tx_request_completion(txq, loc, true, olx);
	return MLX5_TXCMP_CODE_MULTI;
}
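/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * datapath): the cseg.sq_ds word written above combines the QP number,
 * kept pre-shifted by 8 bits in txq->qp_num_8s, with the DS count in
 * the low byte, converted to big-endian as the hardware expects.
 */
static __rte_always_inline uint32_t
mlx5_tx_mk_sq_ds(uint32_t qp_num_8s, unsigned int ds)
{
	return rte_cpu_to_be_32(qp_num_8s | ds);
}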
/**
 * Tx burst function for multi-segment packets. Supports all
 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
 * sends one packet per WQE. Function stops sending if it
 * encounters a single-segment packet.
 *
 * This routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 *   MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
 *   MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
 * Local context variables updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
		   struct rte_mbuf **restrict pkts,
		   unsigned int pkts_n,
		   struct mlx5_txq_local *restrict loc,
		   unsigned int olx)
{
	assert(loc->elts_free && loc->wqe_free);
	assert(pkts_n > loc->pkts_sent);
	pkts += loc->pkts_sent + 1;
	pkts_n -= loc->pkts_sent;
	for (;;) {
		enum mlx5_txcmp_code ret;

		assert(NB_SEGS(loc->mbuf) > 1);
		/*
		 * Estimate the number of free elts quickly but
		 * conservatively. Some segment may be fully inlined
		 * and freed, ignore this here - precise estimation
		 * is costly.
		 */
		if (loc->elts_free < NB_SEGS(loc->mbuf))
			return MLX5_TXCMP_CODE_EXIT;
		if (MLX5_TXOFF_CONFIG(TSO) &&
		    unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
			/* Proceed with multi-segment TSO. */
			ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
		} else if (MLX5_TXOFF_CONFIG(INLINE)) {
			/* Proceed with multi-segment SEND with inlining. */
			ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
		} else {
			/* Proceed with multi-segment SEND w/o inlining. */
			ret = mlx5_tx_packet_multi_send(txq, loc, olx);
		}
		if (ret == MLX5_TXCMP_CODE_EXIT)
			return MLX5_TXCMP_CODE_EXIT;
		if (ret == MLX5_TXCMP_CODE_ERROR)
			return MLX5_TXCMP_CODE_ERROR;
		/* WQE is built, go to the next packet. */
		++loc->pkts_sent;
		--pkts_n;
		if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
			return MLX5_TXCMP_CODE_EXIT;
		loc->mbuf = *pkts++;
		if (pkts_n > 1)
			rte_prefetch0(*pkts);
		if (likely(NB_SEGS(loc->mbuf) > 1))
			continue;
		/* Here ends the series of multi-segment packets. */
		if (MLX5_TXOFF_CONFIG(TSO) &&
		    unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
			return MLX5_TXCMP_CODE_TSO;
		return MLX5_TXCMP_CODE_SINGLE;
	}
	assert(false);
}
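/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * datapath): TSO emits the payload in tso_segsz chunks, rounded up.
 * For instance, a 9014-byte packet with 54 bytes of headers and
 * tso_segsz = 1460 yields (9014 - 54 + 1459) / 1460 = 7 segments,
 * each carrying a replicated header - this is what the soft counters
 * in the TSO routines account for.
 */
static __rte_always_inline unsigned int
mlx5_tx_tso_nsegs(unsigned int dlen, unsigned int hlen, unsigned int segsz)
{
	return (dlen - hlen + segsz - 1) / segsz;
}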
/**
 * Tx burst function for single-segment packets with TSO.
 * Supports all types of Tx offloads, except multi-segment
 * packets. Uses MLX5_OPCODE_TSO to build WQEs, sends one
 * packet per WQE. Function stops sending if it encounters
 * a multi-segment packet or a packet without TSO requested.
 *
 * The routine is responsible for storing the processed mbuf
 * into the elts ring buffer and updating elts_head if the
 * inline offload is requested, due to possible early freeing
 * of the inlined mbufs (the pkts array cannot be stored in
 * elts as a batch).
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 *   MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
 *   MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
 * Local context variables updated.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
		  struct rte_mbuf **restrict pkts,
		  unsigned int pkts_n,
		  struct mlx5_txq_local *restrict loc,
		  unsigned int olx)
{
	assert(loc->elts_free && loc->wqe_free);
	assert(pkts_n > loc->pkts_sent);
	pkts += loc->pkts_sent + 1;
	pkts_n -= loc->pkts_sent;
	for (;;) {
		struct mlx5_wqe_dseg *restrict dseg;
		struct mlx5_wqe *restrict wqe;
		unsigned int ds, dlen, hlen, ntcp, vlan = 0;
		uint8_t *dptr;

		assert(NB_SEGS(loc->mbuf) == 1);
		dlen = rte_pktmbuf_data_len(loc->mbuf);
		if (MLX5_TXOFF_CONFIG(VLAN) &&
		    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			vlan = sizeof(struct rte_vlan_hdr);
		}
		/*
		 * First calculate the WQE size to check
		 * whether we have enough space in ring buffer.
		 */
		hlen = loc->mbuf->l2_len + vlan +
		       loc->mbuf->l3_len + loc->mbuf->l4_len;
		if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
			return MLX5_TXCMP_CODE_ERROR;
		if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
			hlen += loc->mbuf->outer_l2_len +
				loc->mbuf->outer_l3_len;
		/* Segment must contain all TSO headers. */
		if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
			     hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
			     hlen > (dlen + vlan)))
			return MLX5_TXCMP_CODE_ERROR;
		/*
		 * Check whether there are enough free WQEBBs:
		 * - Control Segment
		 * - Ethernet Segment
		 * - First Segment of inlined Ethernet data
		 * - ... data continued ...
		 * - Finishing Data Segment of pointer type
		 */
		ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
			  MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
		if (loc->wqe_free < ((ds + 3) / 4))
			return MLX5_TXCMP_CODE_EXIT;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Update sent data bytes/packets counters. */
		ntcp = (dlen + vlan - hlen +
			loc->mbuf->tso_segsz - 1) /
			loc->mbuf->tso_segsz;
		/*
		 * One packet will be added for the mbuf itself at the
		 * end of mlx5_tx_burst() from the loc->pkts_sent field.
		 */
		--ntcp;
		txq->stats.opackets += ntcp;
		txq->stats.obytes += dlen + vlan + ntcp * hlen;
#endif
		/*
		 * Build the TSO WQE:
		 * - Control Segment
		 * - Ethernet Segment with hlen bytes inlined
		 * - Data Segment of pointer type
		 */
		wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
		loc->wqe_last = wqe;
		mlx5_tx_cseg_init(txq, loc, wqe, ds,
				  MLX5_OPCODE_TSO, olx);
		dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
		dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
		dlen -= hlen - vlan;
		mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
		/*
		 * WQE is built, update the loop parameters
		 * and go to the next packet.
		 */
		txq->wqe_ci += (ds + 3) / 4;
		loc->wqe_free -= (ds + 3) / 4;
		if (MLX5_TXOFF_CONFIG(INLINE))
			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
		--loc->elts_free;
		++loc->pkts_sent;
		--pkts_n;
		/* Request CQE generation if limits are reached. */
		mlx5_tx_request_completion(txq, loc, false, olx);
		if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
			return MLX5_TXCMP_CODE_EXIT;
		loc->mbuf = *pkts++;
		if (pkts_n > 1)
			rte_prefetch0(*pkts);
		if (MLX5_TXOFF_CONFIG(MULTI) &&
		    unlikely(NB_SEGS(loc->mbuf) > 1))
			return MLX5_TXCMP_CODE_MULTI;
		if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
			return MLX5_TXCMP_CODE_SINGLE;
		/* Continue with the next TSO packet. */
	}
	assert(false);
}
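/*
 * For example (illustrative numbers only): in the TSO routine above,
 * a 1514-byte frame with hlen = 54 inlines the 54 header bytes into
 * the Ethernet Segment and leaves dlen = 1460 payload bytes to be
 * referenced by the single pointer Data Segment.
 */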
/**
 * Analyze the packet and select the best method to send.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 * @param newp
 *   The predefined flag whether to do a complete check for
 *   multi-segment packets and TSO.
 *
 * @return
 *  MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
 *  MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
 *  MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
 *  MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
		     struct mlx5_txq_local *restrict loc,
		     unsigned int olx,
		     bool newp)
{
	/* Check for multi-segment packet. */
	if (newp &&
	    MLX5_TXOFF_CONFIG(MULTI) &&
	    unlikely(NB_SEGS(loc->mbuf) > 1))
		return MLX5_TXCMP_CODE_MULTI;
	/* Check for TSO packet. */
	if (newp &&
	    MLX5_TXOFF_CONFIG(TSO) &&
	    unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
		return MLX5_TXCMP_CODE_TSO;
	/* Check if eMPW is enabled at all. */
	if (!MLX5_TXOFF_CONFIG(EMPW))
		return MLX5_TXCMP_CODE_SINGLE;
	/* Check if eMPW can be engaged. */
	if (MLX5_TXOFF_CONFIG(VLAN) &&
	    unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
	    (!MLX5_TXOFF_CONFIG(INLINE) ||
	     unlikely((rte_pktmbuf_data_len(loc->mbuf) +
		       sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
		/*
		 * eMPW does not support VLAN insertion offload;
		 * we would have to inline the entire packet, but
		 * the packet is too long for inlining.
		 */
		return MLX5_TXCMP_CODE_SINGLE;
	}
	return MLX5_TXCMP_CODE_EMPW;
}
/**
 * Check the next packet attributes to match with the eMPW batch ones.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param es
 *   Pointer to Ethernet Segment of eMPW batch.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *  true - packet matches the eMPW batch attributes.
 *  false - no match, eMPW should be restarted.
 */
static __rte_always_inline bool
mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
		   struct mlx5_wqe_eseg *restrict es,
		   struct mlx5_txq_local *restrict loc,
		   unsigned int olx)
{
	uint8_t swp_flags = 0;

	/* Compare the checksum flags, if any. */
	if (MLX5_TXOFF_CONFIG(CSUM) &&
	    txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
		return false;
	/* Compare the Software Parser offsets and flags. */
	if (MLX5_TXOFF_CONFIG(SWP) &&
	    (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
	     es->swp_flags != swp_flags))
		return false;
	/* Compare the metadata field, if needed. */
	if (MLX5_TXOFF_CONFIG(METADATA) &&
	    es->metadata != (loc->mbuf->ol_flags & PKT_TX_METADATA ?
			     loc->mbuf->tx_metadata : 0))
		return false;
	/* There must be no VLAN packets in eMPW loop. */
	if (MLX5_TXOFF_CONFIG(VLAN))
		assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
	return true;
}
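/*
 * For example (illustrative only): a run of packets that all request
 * the same checksum offloads with the same metadata continues a single
 * eMPW batch; as soon as one packet requests different checksums or
 * metadata, mlx5_tx_match_empw() returns false and a new title WQE
 * is started for it.
 */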
/*
 * Update send loop variables and WQE for eMPW loop
 * without data inlining. Number of Data Segments is
 * equal to the number of sent packets.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param ds
 *   Number of packets/Data Segments (one Data Segment per packet).
 * @param slen
 *   Accumulated statistics, bytes sent.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
		   struct mlx5_txq_local *restrict loc,
		   unsigned int ds,
		   unsigned int slen,
		   unsigned int olx)
{
	assert(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Update sent data bytes counter. */
	txq->stats.obytes += slen;
#else
	(void)slen;
#endif
	loc->elts_free -= ds;
	loc->pkts_sent += ds;
	ds += 2;
	loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
	txq->wqe_ci += (ds + 3) / 4;
	loc->wqe_free -= (ds + 3) / 4;
	/* Request CQE generation if limits are reached. */
	mlx5_tx_request_completion(txq, loc, false, olx);
}
/*
 * Update send loop variables and WQE for eMPW loop
 * with data inlining. Gets the size of pushed descriptors
 * and data to the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param loc
 *   Pointer to burst routine local context.
 * @param len
 *   Total size of descriptor/data in bytes.
 * @param slen
 *   Accumulated statistics, data bytes sent.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
		   struct mlx5_txq_local *restrict loc,
		   unsigned int len,
		   unsigned int slen,
		   unsigned int olx __rte_unused)
{
	assert(MLX5_TXOFF_CONFIG(INLINE));
	assert((len % MLX5_WSEG_SIZE) == 0);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Update sent data bytes counter. */
	txq->stats.obytes += slen;
#else
	(void)slen;
#endif
	len = len / MLX5_WSEG_SIZE + 2;
	loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
	txq->wqe_ci += (len + 3) / 4;
	loc->wqe_free -= (len + 3) / 4;
	/* Request CQE generation if limits are reached. */
	mlx5_tx_request_completion(txq, loc, false, olx);
}
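/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * datapath): without inlining each eMPW packet consumes exactly one
 * 16-byte Data Segment, so wqe_free WQEBBs (4 segments each) can host
 * at most wqe_free * 4 - 2 packets once the Control and Ethernet
 * Segments of the title WQEBB are reserved - the bound applied in the
 * non-inlining eMPW routine below, in addition to the
 * MLX5_EMPW_MAX_PACKETS cap.
 */
static __rte_always_inline unsigned int
mlx5_tx_empw_cap(unsigned int wqe_free)
{
	return wqe_free * 4 - 2;
}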
/**
 * The set of Tx burst functions for single-segment packets
 * without TSO and with Multi-Packet Writing feature support.
 * Supports all types of Tx offloads, except multi-segment
 * packets and TSO.
 *
 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
 * as many packets per WQE as it can. If eMPW is not configured
 * or the packet can not be sent with eMPW (VLAN insertion),
 * the ordinary SEND opcode is used and only one packet is
 * placed in the WQE.
 *
 * Functions stop sending if they encounter a multi-segment
 * packet or a packet with TSO requested.
 *
 * The routines are responsible for storing processed mbufs
 * into the elts ring buffer and updating elts_head if the
 * inlining offload is requested. Otherwise copying mbufs to
 * elts can be postponed and completed at the end of the burst
 * routine.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 * @param loc
 *   Pointer to burst routine local context.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * @return
 *   MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
 *   MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
 *   MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
 *   MLX5_TXCMP_CODE_TSO - TSO packet encountered.
 *   MLX5_TXCMP_CODE_SINGLE - used inside functions set.
 *   MLX5_TXCMP_CODE_EMPW - used inside functions set.
 *
 * Local context variables updated.
 *
 *
 * The routine sends packets with MLX5_OPCODE_EMPW
 * without inlining, this is a dedicated optimized branch.
 * No VLAN insertion is supported.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
			  struct rte_mbuf **restrict pkts,
			  unsigned int pkts_n,
			  struct mlx5_txq_local *restrict loc,
			  unsigned int olx)
{
	/*
	 * Subroutine is the part of mlx5_tx_burst_single()
	 * and sends single-segment packet with eMPW opcode
	 * without data inlining.
	 */
	assert(!MLX5_TXOFF_CONFIG(INLINE));
	assert(MLX5_TXOFF_CONFIG(EMPW));
	assert(loc->elts_free && loc->wqe_free);
	assert(pkts_n > loc->pkts_sent);
	static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
	pkts += loc->pkts_sent + 1;
	pkts_n -= loc->pkts_sent;
	for (;;) {
		struct mlx5_wqe_dseg *restrict dseg;
		struct mlx5_wqe_eseg *restrict eseg;
		enum mlx5_txcmp_code ret;
		unsigned int part, loop;
		unsigned int slen = 0;

next_empw:
		part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
		if (unlikely(loc->elts_free < part)) {
			/* We do not have enough elts to store all mbufs. */
			if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
				return MLX5_TXCMP_CODE_EXIT;
			/* But we are still able to send at least a minimal eMPW. */
			part = loc->elts_free;
		}
		/* Check whether we have enough WQEs. */
		if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
			if (unlikely(loc->wqe_free <
				((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
				return MLX5_TXCMP_CODE_EXIT;
			part = (loc->wqe_free * 4) - 2;
		}
		if (likely(part > 1))
			rte_prefetch0(*pkts);
		loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
		/*
		 * Build eMPW title WQEBB:
		 * - Control Segment, eMPW opcode
		 * - Ethernet Segment, no inline
		 */
		mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
				  MLX5_OPCODE_ENHANCED_MPSW, olx);
		mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
				  olx & ~MLX5_TXOFF_CONFIG_VLAN);
		eseg = &loc->wqe_last->eseg;
		dseg = &loc->wqe_last->dseg[0];
		loop = part;
		for (;;) {
			uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
#ifdef MLX5_PMD_SOFT_COUNTERS
			/* Update sent data bytes counter. */
			slen += dlen;
#endif
			mlx5_tx_dseg_ptr
				(txq, loc, dseg,
				 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
				 dlen, olx);
			if (unlikely(--loop == 0))
				break;
			loc->mbuf = *pkts++;
			if (likely(loop > 1))
				rte_prefetch0(*pkts);
			ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
			/*
			 * Unroll the completion code to avoid
			 * returning a variable value - it results in
			 * unoptimized sequential checking in the caller.
			 */
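			/*
			 * For example (illustrative only): with a planned
			 * batch of part = 32, if the 5th packet turns out
			 * to be TSO, loop == 28 at that point, so
			 * part -= loop trims the batch to the 4 Data
			 * Segments already built before closing the WQE.
			 */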
			if (ret == MLX5_TXCMP_CODE_MULTI) {
				part -= loop;
				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
				if (unlikely(!loc->elts_free ||
					     !loc->wqe_free))
					return MLX5_TXCMP_CODE_EXIT;
				return MLX5_TXCMP_CODE_MULTI;
			}
			if (ret == MLX5_TXCMP_CODE_TSO) {
				part -= loop;
				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
				if (unlikely(!loc->elts_free ||
					     !loc->wqe_free))
					return MLX5_TXCMP_CODE_EXIT;
				return MLX5_TXCMP_CODE_TSO;
			}
			if (ret == MLX5_TXCMP_CODE_SINGLE) {
				part -= loop;
				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
				if (unlikely(!loc->elts_free ||
					     !loc->wqe_free))
					return MLX5_TXCMP_CODE_EXIT;
				return MLX5_TXCMP_CODE_SINGLE;
			}
			if (ret != MLX5_TXCMP_CODE_EMPW) {
				assert(false);
				part -= loop;
				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
				return MLX5_TXCMP_CODE_ERROR;
			}
			/*
			 * Check whether packet parameters coincide
			 * within the assumed eMPW batch:
			 * - checksum settings
			 * - metadata value
			 * - software parser settings
			 */
			if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) {
				assert(loop);
				part -= loop;
				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
				if (unlikely(!loc->elts_free ||
					     !loc->wqe_free))
					return MLX5_TXCMP_CODE_EXIT;
				pkts_n -= part;
				goto next_empw;
			}
			/* Packet attributes match, continue the same eMPW. */
			++dseg;
			if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
				dseg = (struct mlx5_wqe_dseg *)txq->wqes;
		}
		/* eMPW is built successfully, update loop parameters. */
		assert(!loop);
		assert(pkts_n >= part);
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Update sent data bytes counter. */
		txq->stats.obytes += slen;
#endif
		loc->elts_free -= part;
		loc->pkts_sent += part;
		txq->wqe_ci += (2 + part + 3) / 4;
		loc->wqe_free -= (2 + part + 3) / 4;
		pkts_n -= part;
		/* Request CQE generation if limits are reached. */
		mlx5_tx_request_completion(txq, loc, false, olx);
		if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
			return MLX5_TXCMP_CODE_EXIT;
		loc->mbuf = *pkts++;
		ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
		if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
			return ret;
		/* Continue sending eMPW batches. */
	}
	assert(false);
}

/**
 * The routine sends packets with MLX5_OPCODE_EMPW
 * with inlining, optionally supports VLAN insertion.
 */
static __rte_always_inline enum mlx5_txcmp_code
mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
			  struct rte_mbuf **restrict pkts,
			  unsigned int pkts_n,
			  struct mlx5_txq_local *restrict loc,
			  unsigned int olx)
{
	/*
	 * Subroutine is the part of mlx5_tx_burst_single()
	 * and sends single-segment packet with eMPW opcode
	 * with data inlining.
	 */
	assert(MLX5_TXOFF_CONFIG(INLINE));
	assert(MLX5_TXOFF_CONFIG(EMPW));
	assert(loc->elts_free && loc->wqe_free);
	assert(pkts_n > loc->pkts_sent);
	static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
	pkts += loc->pkts_sent + 1;
	pkts_n -= loc->pkts_sent;
	for (;;) {
		struct mlx5_wqe_dseg *restrict dseg;
		struct mlx5_wqe_eseg *restrict eseg;
		enum mlx5_txcmp_code ret;
		unsigned int room, part, nlim;
		unsigned int slen = 0;

		/*
		 * Limit the number of packets in one WQE
		 * to improve the CQE generation latency.
		 */
40365a93e173SViacheslav Ovsiienko */ 40375a93e173SViacheslav Ovsiienko nlim = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS); 403818a1c200SViacheslav Ovsiienko /* Check whether we have a minimal amount of WQEs. */ 403918a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < 404018a1c200SViacheslav Ovsiienko ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) 404118a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 404218a1c200SViacheslav Ovsiienko if (likely(pkts_n > 1)) 404318a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 404418a1c200SViacheslav Ovsiienko loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m); 404518a1c200SViacheslav Ovsiienko /* 404618a1c200SViacheslav Ovsiienko * Build eMPW title WQEBB: 404718a1c200SViacheslav Ovsiienko * - Control Segment, eMPW opcode, zero DS 404818a1c200SViacheslav Ovsiienko * - Ethernet Segment, no inline 404918a1c200SViacheslav Ovsiienko */ 405018a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0, 405118a1c200SViacheslav Ovsiienko MLX5_OPCODE_ENHANCED_MPSW, olx); 405218a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(txq, loc, loc->wqe_last, 405318a1c200SViacheslav Ovsiienko olx & ~MLX5_TXOFF_CONFIG_VLAN); 405418a1c200SViacheslav Ovsiienko eseg = &loc->wqe_last->eseg; 405518a1c200SViacheslav Ovsiienko dseg = &loc->wqe_last->dseg[0]; 405618a1c200SViacheslav Ovsiienko room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE, 405718a1c200SViacheslav Ovsiienko loc->wqe_free) * MLX5_WQE_SIZE - 405818a1c200SViacheslav Ovsiienko MLX5_WQE_CSEG_SIZE - 405918a1c200SViacheslav Ovsiienko MLX5_WQE_ESEG_SIZE; 406018a1c200SViacheslav Ovsiienko /* Build WQE till we have space, packets and resources. */ 406118a1c200SViacheslav Ovsiienko part = room; 406218a1c200SViacheslav Ovsiienko for (;;) { 406318a1c200SViacheslav Ovsiienko uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf); 406418a1c200SViacheslav Ovsiienko uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); 406518a1c200SViacheslav Ovsiienko unsigned int tlen; 406618a1c200SViacheslav Ovsiienko 406718a1c200SViacheslav Ovsiienko assert(room >= MLX5_WQE_DSEG_SIZE); 406818a1c200SViacheslav Ovsiienko assert((room % MLX5_WQE_DSEG_SIZE) == 0); 406918a1c200SViacheslav Ovsiienko assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end); 407018a1c200SViacheslav Ovsiienko /* 407118a1c200SViacheslav Ovsiienko * Some Tx offloads may cause an error if 407218a1c200SViacheslav Ovsiienko * packet is not long enough, check against 407318a1c200SViacheslav Ovsiienko * assumed minimal length. 407418a1c200SViacheslav Ovsiienko */ 407518a1c200SViacheslav Ovsiienko if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) { 407618a1c200SViacheslav Ovsiienko part -= room; 407718a1c200SViacheslav Ovsiienko if (unlikely(!part)) 407818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 407918a1c200SViacheslav Ovsiienko /* 408018a1c200SViacheslav Ovsiienko * We have some successfully built 408118a1c200SViacheslav Ovsiienko * packet Data Segments to send. 408218a1c200SViacheslav Ovsiienko */ 408318a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 408418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 408518a1c200SViacheslav Ovsiienko } 408618a1c200SViacheslav Ovsiienko /* Inline or not inline - that's the Question. */ 408718a1c200SViacheslav Ovsiienko if (dlen > txq->inlen_empw) 408818a1c200SViacheslav Ovsiienko goto pointer_empw; 408918a1c200SViacheslav Ovsiienko /* Inline entire packet, optional VLAN insertion. */
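/*
 * A sketch of the inline Data Segment layout built below,
 * assuming the usual 16-byte WSEG granularity and a 4-byte
 * bcount word: the segment starts with bcount followed by the
 * packet bytes, and the total is padded up to the next WSEG
 * boundary (numbers are illustrative only):
 *
 *   dlen = 60 -> tlen = 4 + 60 = 64, already aligned
 *   dlen = 66 -> tlen = 4 + 66 = 70, padded to 80 bytes
 *
 * The RTE_ALIGN(tlen, MLX5_WSEG_SIZE) statement further below
 * accounts for this padding in the "room" bookkeeping.
 */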
409018a1c200SViacheslav Ovsiienko tlen = sizeof(dseg->bcount) + dlen; 409118a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 409218a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { 409318a1c200SViacheslav Ovsiienko /* 409418a1c200SViacheslav Ovsiienko * The packet length was checked in 409518a1c200SViacheslav Ovsiienko * mlx5_tx_able_to_empw(), so the packet 409618a1c200SViacheslav Ovsiienko * is guaranteed to fit into the inline length. 409718a1c200SViacheslav Ovsiienko */ 409818a1c200SViacheslav Ovsiienko assert((dlen + sizeof(struct rte_vlan_hdr)) <= 409918a1c200SViacheslav Ovsiienko txq->inlen_empw); 410018a1c200SViacheslav Ovsiienko tlen += sizeof(struct rte_vlan_hdr); 410118a1c200SViacheslav Ovsiienko if (room < tlen) 410218a1c200SViacheslav Ovsiienko break; 410318a1c200SViacheslav Ovsiienko dseg = mlx5_tx_dseg_vlan(txq, loc, dseg, 410418a1c200SViacheslav Ovsiienko dptr, dlen, olx); 410518a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 410618a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 410718a1c200SViacheslav Ovsiienko slen += sizeof(struct rte_vlan_hdr); 410818a1c200SViacheslav Ovsiienko #endif 410918a1c200SViacheslav Ovsiienko } else { 411018a1c200SViacheslav Ovsiienko if (room < tlen) 411118a1c200SViacheslav Ovsiienko break; 411218a1c200SViacheslav Ovsiienko dseg = mlx5_tx_dseg_empw(txq, loc, dseg, 411318a1c200SViacheslav Ovsiienko dptr, dlen, olx); 411418a1c200SViacheslav Ovsiienko } 411518a1c200SViacheslav Ovsiienko tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE); 411618a1c200SViacheslav Ovsiienko assert(room >= tlen); 411718a1c200SViacheslav Ovsiienko room -= tlen; 411818a1c200SViacheslav Ovsiienko /* 411918a1c200SViacheslav Ovsiienko * Packet data are completely inlined, 412018a1c200SViacheslav Ovsiienko * free the packet immediately. 412118a1c200SViacheslav Ovsiienko */ 412218a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(loc->mbuf); 412318a1c200SViacheslav Ovsiienko goto next_mbuf; 412418a1c200SViacheslav Ovsiienko pointer_empw: 412518a1c200SViacheslav Ovsiienko /* 412618a1c200SViacheslav Ovsiienko * Not inlinable VLAN packets are 412718a1c200SViacheslav Ovsiienko * processed outside of this routine. 412818a1c200SViacheslav Ovsiienko */ 412918a1c200SViacheslav Ovsiienko assert(room >= MLX5_WQE_DSEG_SIZE); 413018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN)) 413118a1c200SViacheslav Ovsiienko assert(!(loc->mbuf->ol_flags & 413218a1c200SViacheslav Ovsiienko PKT_TX_VLAN_PKT)); 413318a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx); 413418a1c200SViacheslav Ovsiienko /* We have to store mbuf in elts. */ 413518a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; 413618a1c200SViacheslav Ovsiienko room -= MLX5_WQE_DSEG_SIZE; 413718a1c200SViacheslav Ovsiienko /* Ring buffer wraparound is checked at the loop end. */ 413818a1c200SViacheslav Ovsiienko ++dseg; 413918a1c200SViacheslav Ovsiienko next_mbuf: 414018a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 414118a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */
414218a1c200SViacheslav Ovsiienko slen += dlen; 414318a1c200SViacheslav Ovsiienko #endif 414418a1c200SViacheslav Ovsiienko loc->pkts_sent++; 414518a1c200SViacheslav Ovsiienko loc->elts_free--; 414618a1c200SViacheslav Ovsiienko pkts_n--; 414718a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free)) { 414818a1c200SViacheslav Ovsiienko /* 414918a1c200SViacheslav Ovsiienko * We have no resources/packets to 415018a1c200SViacheslav Ovsiienko * continue building descriptors. 415118a1c200SViacheslav Ovsiienko */ 415218a1c200SViacheslav Ovsiienko part -= room; 415318a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 415418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 415518a1c200SViacheslav Ovsiienko } 415618a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 415718a1c200SViacheslav Ovsiienko if (likely(pkts_n > 1)) 415818a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 415918a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, true); 416018a1c200SViacheslav Ovsiienko /* 416118a1c200SViacheslav Ovsiienko * Unroll the completion code to avoid 416218a1c200SViacheslav Ovsiienko * returning a variable value - it results in 416318a1c200SViacheslav Ovsiienko * unoptimized subsequent checking in the caller. 416418a1c200SViacheslav Ovsiienko */ 416518a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_MULTI) { 416618a1c200SViacheslav Ovsiienko part -= room; 416718a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 416818a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 416918a1c200SViacheslav Ovsiienko !loc->wqe_free)) 417018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 417118a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 417218a1c200SViacheslav Ovsiienko } 417318a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_TSO) { 417418a1c200SViacheslav Ovsiienko part -= room; 417518a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 417618a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 417718a1c200SViacheslav Ovsiienko !loc->wqe_free)) 417818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 417918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_TSO; 418018a1c200SViacheslav Ovsiienko } 418118a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) { 418218a1c200SViacheslav Ovsiienko part -= room; 418318a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 418418a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 418518a1c200SViacheslav Ovsiienko !loc->wqe_free)) 418618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 418718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 418818a1c200SViacheslav Ovsiienko } 418918a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_EMPW) { 419018a1c200SViacheslav Ovsiienko assert(false); 419118a1c200SViacheslav Ovsiienko part -= room; 419218a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 419318a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 419418a1c200SViacheslav Ovsiienko } 41955a93e173SViacheslav Ovsiienko /* Check if we have minimal room left.
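 *
 * Note that in this inlining flavor both "room" and "part" are
 * byte budgets rather than packet counts: "part" starts equal
 * to the initial "room", so "part -= room" at the exit points
 * above yields the number of Data Segment bytes actually
 * consumed, which mlx5_tx_idone_empw() uses to close the
 * session (this is a reading of the code above, not an
 * additional requirement).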
*/ 41965a93e173SViacheslav Ovsiienko nlim--; 41975a93e173SViacheslav Ovsiienko if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE)) 41985a93e173SViacheslav Ovsiienko break; 419918a1c200SViacheslav Ovsiienko /* 420018a1c200SViacheslav Ovsiienko * Check whether packet parameters coincide 420118a1c200SViacheslav Ovsiienko * within assumed eMPW batch: 420218a1c200SViacheslav Ovsiienko * - check sum settings 420318a1c200SViacheslav Ovsiienko * - metadata value 420418a1c200SViacheslav Ovsiienko * - software parser settings 420518a1c200SViacheslav Ovsiienko */ 420618a1c200SViacheslav Ovsiienko if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) 420718a1c200SViacheslav Ovsiienko break; 420818a1c200SViacheslav Ovsiienko /* Packet attributes match, continue the same eMPW. */ 420918a1c200SViacheslav Ovsiienko if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) 421018a1c200SViacheslav Ovsiienko dseg = (struct mlx5_wqe_dseg *)txq->wqes; 421118a1c200SViacheslav Ovsiienko } 421218a1c200SViacheslav Ovsiienko /* 421318a1c200SViacheslav Ovsiienko * We get here to close an existing eMPW 421418a1c200SViacheslav Ovsiienko * session and start the new one. 421518a1c200SViacheslav Ovsiienko */ 421618a1c200SViacheslav Ovsiienko assert(pkts_n); 421718a1c200SViacheslav Ovsiienko part -= room; 421818a1c200SViacheslav Ovsiienko if (unlikely(!part)) 421918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 422018a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 422118a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 422218a1c200SViacheslav Ovsiienko !loc->wqe_free)) 422318a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 42245a93e173SViacheslav Ovsiienko /* Continue the loop with new eMPW session. */ 422518a1c200SViacheslav Ovsiienko } 422618a1c200SViacheslav Ovsiienko assert(false); 422718a1c200SViacheslav Ovsiienko } 422818a1c200SViacheslav Ovsiienko 422918a1c200SViacheslav Ovsiienko /** 423018a1c200SViacheslav Ovsiienko * The routine sends packets with ordinary MLX5_OPCODE_SEND. 423118a1c200SViacheslav Ovsiienko * Data inlining and VLAN insertion are supported. 423218a1c200SViacheslav Ovsiienko */ 423318a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 423418a1c200SViacheslav Ovsiienko mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, 423518a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 423618a1c200SViacheslav Ovsiienko unsigned int pkts_n, 423718a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 423818a1c200SViacheslav Ovsiienko unsigned int olx) 423918a1c200SViacheslav Ovsiienko { 424018a1c200SViacheslav Ovsiienko /* 424118a1c200SViacheslav Ovsiienko * Subroutine is the part of mlx5_tx_burst_single() 424218a1c200SViacheslav Ovsiienko * and sends single-segment packet with SEND opcode. 
424318a1c200SViacheslav Ovsiienko */ 424418a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 424518a1c200SViacheslav Ovsiienko assert(pkts_n > loc->pkts_sent); 424618a1c200SViacheslav Ovsiienko pkts += loc->pkts_sent + 1; 424718a1c200SViacheslav Ovsiienko pkts_n -= loc->pkts_sent; 424818a1c200SViacheslav Ovsiienko for (;;) { 424918a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe; 425018a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 425118a1c200SViacheslav Ovsiienko 425218a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc->mbuf) == 1); 425318a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(INLINE)) { 425418a1c200SViacheslav Ovsiienko unsigned int inlen, vlan = 0; 425518a1c200SViacheslav Ovsiienko 425618a1c200SViacheslav Ovsiienko inlen = rte_pktmbuf_data_len(loc->mbuf); 425718a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 425818a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { 425918a1c200SViacheslav Ovsiienko vlan = sizeof(struct rte_vlan_hdr); 426018a1c200SViacheslav Ovsiienko inlen += vlan; 426118a1c200SViacheslav Ovsiienko static_assert((sizeof(struct rte_vlan_hdr) + 426218a1c200SViacheslav Ovsiienko sizeof(struct rte_ether_hdr)) == 426318a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE, 426418a1c200SViacheslav Ovsiienko "invalid min inline data size"); 426518a1c200SViacheslav Ovsiienko } 426618a1c200SViacheslav Ovsiienko /* 426718a1c200SViacheslav Ovsiienko * If inlining is enabled at configuration time 426818a1c200SViacheslav Ovsiienko * the limit must be not less than minimal size. 426918a1c200SViacheslav Ovsiienko * Otherwise we would do extra check for data 427018a1c200SViacheslav Ovsiienko * size to avoid crashes due to length overflow. 427118a1c200SViacheslav Ovsiienko */ 427218a1c200SViacheslav Ovsiienko assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); 427318a1c200SViacheslav Ovsiienko if (inlen <= txq->inlen_send) { 427418a1c200SViacheslav Ovsiienko unsigned int seg_n, wqe_n; 427518a1c200SViacheslav Ovsiienko 427618a1c200SViacheslav Ovsiienko rte_prefetch0(rte_pktmbuf_mtod 427718a1c200SViacheslav Ovsiienko (loc->mbuf, uint8_t *)); 427818a1c200SViacheslav Ovsiienko /* Check against minimal length. */ 427918a1c200SViacheslav Ovsiienko if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE) 428018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 428118a1c200SViacheslav Ovsiienko /* 428218a1c200SViacheslav Ovsiienko * Completely inlined packet data WQE: 428318a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 428418a1c200SViacheslav Ovsiienko * - Ethernet Segment, no VLAN insertion 428518a1c200SViacheslav Ovsiienko * - Data inlined, VLAN optionally inserted 428618a1c200SViacheslav Ovsiienko * - Alignment to MLX5_WSEG_SIZE 428718a1c200SViacheslav Ovsiienko * Have to estimate amount of WQEBBs 428818a1c200SViacheslav Ovsiienko */ 428918a1c200SViacheslav Ovsiienko seg_n = (inlen + 3 * MLX5_WSEG_SIZE - 429018a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE + 429118a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 429218a1c200SViacheslav Ovsiienko /* Check if there are enough WQEBBs. 
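 *
 * A worked example of the WQEBB estimate above, assuming the
 * usual 16-byte WSEG and the 18-byte MLX5_ESEG_MIN_INLINE_SIZE
 * (Ethernet header plus VLAN header, per the static_assert
 * above); the inlen value is illustrative only:
 *
 *   inlen = 128
 *   seg_n = (128 + 3 * 16 - 18 + 16 - 1) / 16 = 173 / 16 = 10
 *   wqe_n = (10 + 3) / 4 = 3 WQEBBs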
*/ 429318a1c200SViacheslav Ovsiienko wqe_n = (seg_n + 3) / 4; 429418a1c200SViacheslav Ovsiienko if (wqe_n > loc->wqe_free) 429518a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 429618a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 429718a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 429818a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, seg_n, 429918a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 430018a1c200SViacheslav Ovsiienko mlx5_tx_eseg_data(txq, loc, wqe, 430118a1c200SViacheslav Ovsiienko vlan, inlen, 0, olx); 430218a1c200SViacheslav Ovsiienko txq->wqe_ci += wqe_n; 430318a1c200SViacheslav Ovsiienko loc->wqe_free -= wqe_n; 430418a1c200SViacheslav Ovsiienko /* 430518a1c200SViacheslav Ovsiienko * Packet data are completely inlined, 430618a1c200SViacheslav Ovsiienko * free the packet immediately. 430718a1c200SViacheslav Ovsiienko */ 430818a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(loc->mbuf); 430918a1c200SViacheslav Ovsiienko } else if (!MLX5_TXOFF_CONFIG(EMPW) && 431018a1c200SViacheslav Ovsiienko txq->inlen_mode) { 431118a1c200SViacheslav Ovsiienko /* 431218a1c200SViacheslav Ovsiienko * If minimal inlining is requested, the eMPW 431318a1c200SViacheslav Ovsiienko * feature should be disabled because the data 431418a1c200SViacheslav Ovsiienko * are inlined into the Ethernet Segment, which 431518a1c200SViacheslav Ovsiienko * cannot contain inlined data for eMPW since 431618a1c200SViacheslav Ovsiienko * this segment is shared by all packets. 431718a1c200SViacheslav Ovsiienko */ 431818a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 431918a1c200SViacheslav Ovsiienko unsigned int ds; 432018a1c200SViacheslav Ovsiienko uint8_t *dptr; 432118a1c200SViacheslav Ovsiienko 432218a1c200SViacheslav Ovsiienko /* 432318a1c200SViacheslav Ovsiienko * The inline-mode settings require inlining 432418a1c200SViacheslav Ovsiienko * the specified amount of data bytes into 432518a1c200SViacheslav Ovsiienko * the Ethernet Segment. We should check the 432618a1c200SViacheslav Ovsiienko * free space in the WQE ring buffer to 432718a1c200SViacheslav Ovsiienko * inline partially. 432818a1c200SViacheslav Ovsiienko */ 432918a1c200SViacheslav Ovsiienko assert(txq->inlen_send >= txq->inlen_mode); 433018a1c200SViacheslav Ovsiienko assert(inlen > txq->inlen_mode); 433118a1c200SViacheslav Ovsiienko assert(txq->inlen_mode >= 433218a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE); 433318a1c200SViacheslav Ovsiienko /* 433418a1c200SViacheslav Ovsiienko * Check whether there are enough free WQEBBs: 433518a1c200SViacheslav Ovsiienko * - Control Segment 433618a1c200SViacheslav Ovsiienko * - Ethernet Segment 433718a1c200SViacheslav Ovsiienko * - First Segment of inlined Ethernet data 433818a1c200SViacheslav Ovsiienko * - ... data continued ...
433918a1c200SViacheslav Ovsiienko * - Finishing Data Segment of pointer type 434018a1c200SViacheslav Ovsiienko */ 434118a1c200SViacheslav Ovsiienko ds = (MLX5_WQE_CSEG_SIZE + 434218a1c200SViacheslav Ovsiienko MLX5_WQE_ESEG_SIZE + 434318a1c200SViacheslav Ovsiienko MLX5_WQE_DSEG_SIZE + 434418a1c200SViacheslav Ovsiienko txq->inlen_mode - 434518a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE + 434618a1c200SViacheslav Ovsiienko MLX5_WQE_DSEG_SIZE + 434718a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 434818a1c200SViacheslav Ovsiienko if (loc->wqe_free < ((ds + 3) / 4)) 434918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 435018a1c200SViacheslav Ovsiienko /* 435118a1c200SViacheslav Ovsiienko * Build the ordinary SEND WQE: 435218a1c200SViacheslav Ovsiienko * - Control Segment 435318a1c200SViacheslav Ovsiienko * - Ethernet Segment, inline inlen_mode bytes 435418a1c200SViacheslav Ovsiienko * - Data Segment of pointer type 435518a1c200SViacheslav Ovsiienko */ 435618a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 435718a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 435818a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, ds, 435918a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 436018a1c200SViacheslav Ovsiienko dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, 436118a1c200SViacheslav Ovsiienko txq->inlen_mode, 436218a1c200SViacheslav Ovsiienko 0, olx); 436318a1c200SViacheslav Ovsiienko dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + 436418a1c200SViacheslav Ovsiienko txq->inlen_mode - vlan; 436518a1c200SViacheslav Ovsiienko inlen -= txq->inlen_mode; 436618a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, dseg, 436718a1c200SViacheslav Ovsiienko dptr, inlen, olx); 436818a1c200SViacheslav Ovsiienko /* 436918a1c200SViacheslav Ovsiienko * WQE is built, update the loop parameters 437018a1c200SViacheslav Ovsiienko * and go to the next packet. 437118a1c200SViacheslav Ovsiienko */ 437218a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 437318a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 437418a1c200SViacheslav Ovsiienko /* We have to store mbuf in elts. */ 437518a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 437618a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = 437718a1c200SViacheslav Ovsiienko loc->mbuf; 437818a1c200SViacheslav Ovsiienko --loc->elts_free; 437918a1c200SViacheslav Ovsiienko } else { 438018a1c200SViacheslav Ovsiienko uint8_t *dptr; 438118a1c200SViacheslav Ovsiienko unsigned int dlen; 438218a1c200SViacheslav Ovsiienko 438318a1c200SViacheslav Ovsiienko /* 438418a1c200SViacheslav Ovsiienko * Partially inlined packet data WQE: we have 438518a1c200SViacheslav Ovsiienko * some space in the title WQEBB, we can fill it 438618a1c200SViacheslav Ovsiienko * with some packet data. It takes one WQEBB, 438718a1c200SViacheslav Ovsiienko * which is available - no extra space check: 438818a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 438918a1c200SViacheslav Ovsiienko * - Ethernet Segment, no VLAN insertion 439018a1c200SViacheslav Ovsiienko * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data 439118a1c200SViacheslav Ovsiienko * - Data Segment, pointer type 439218a1c200SViacheslav Ovsiienko * 439318a1c200SViacheslav Ovsiienko * We also get here if VLAN insertion is not 439418a1c200SViacheslav Ovsiienko * supported by HW and the inline is enabled. */
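/*
 * An illustrative split for the partially inlined WQE built
 * below (the numbers are examples only): with inlen = 200 and
 * no VLAN insertion, the first MLX5_ESEG_MIN_INLINE_SIZE = 18
 * bytes land in the Ethernet Segment and the remaining
 * 182 bytes are referenced by the pointer Data Segment in
 * wqe->dseg[1].
 */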
439618a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 439718a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 439818a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, 4, 439918a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 440018a1c200SViacheslav Ovsiienko mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx); 440118a1c200SViacheslav Ovsiienko dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + 440218a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE - vlan; 440318a1c200SViacheslav Ovsiienko /* 440418a1c200SViacheslav Ovsiienko * The length check is performed above, by 440518a1c200SViacheslav Ovsiienko * comparing with txq->inlen_send. We should 440618a1c200SViacheslav Ovsiienko * not get overflow here. 440718a1c200SViacheslav Ovsiienko */ 440818a1c200SViacheslav Ovsiienko assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE); 440918a1c200SViacheslav Ovsiienko dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE; 441018a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1], 441118a1c200SViacheslav Ovsiienko dptr, dlen, olx); 441218a1c200SViacheslav Ovsiienko ++txq->wqe_ci; 441318a1c200SViacheslav Ovsiienko --loc->wqe_free; 441418a1c200SViacheslav Ovsiienko /* We have to store mbuf in elts. */ 441518a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 441618a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = 441718a1c200SViacheslav Ovsiienko loc->mbuf; 441818a1c200SViacheslav Ovsiienko --loc->elts_free; 441918a1c200SViacheslav Ovsiienko } 442018a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 442118a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 442218a1c200SViacheslav Ovsiienko txq->stats.obytes += vlan + 442318a1c200SViacheslav Ovsiienko rte_pktmbuf_data_len(loc->mbuf); 442418a1c200SViacheslav Ovsiienko #endif 442518a1c200SViacheslav Ovsiienko } else { 442618a1c200SViacheslav Ovsiienko /* 442718a1c200SViacheslav Ovsiienko * No inlining at all - it means saving CPU cycles 442818a1c200SViacheslav Ovsiienko * is prioritized at configuration, and we should not 442918a1c200SViacheslav Ovsiienko * copy any packet data into the WQE. 443018a1c200SViacheslav Ovsiienko * 443118a1c200SViacheslav Ovsiienko * SEND WQE, one WQEBB: 443218a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 443318a1c200SViacheslav Ovsiienko * - Ethernet Segment, optional VLAN, no inline 443418a1c200SViacheslav Ovsiienko * - Data Segment, pointer type 443518a1c200SViacheslav Ovsiienko */ 443618a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 443718a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 443818a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, 3, 443918a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 444018a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(txq, loc, wqe, olx); 444118a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr 444218a1c200SViacheslav Ovsiienko (txq, loc, &wqe->dseg[0], 444318a1c200SViacheslav Ovsiienko rte_pktmbuf_mtod(loc->mbuf, uint8_t *), 444418a1c200SViacheslav Ovsiienko rte_pktmbuf_data_len(loc->mbuf), olx); 444518a1c200SViacheslav Ovsiienko ++txq->wqe_ci; 444618a1c200SViacheslav Ovsiienko --loc->wqe_free; 444718a1c200SViacheslav Ovsiienko /* 444818a1c200SViacheslav Ovsiienko * We should not store the mbuf pointer in elts 444918a1c200SViacheslav Ovsiienko * if no inlining is configured - this is done 445018a1c200SViacheslav Ovsiienko * by the calling routine in a batch copy.
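 * (A scalar sketch of such a batch copy, assuming a
 * power-of-two elts ring; the real mlx5_tx_copy_elts() is free
 * to use SIMD instead:
 *
 *   uint16_t head = txq->elts_head;
 *   unsigned int i;
 *
 *   for (i = 0; i < part; i++)
 *           txq->elts[head++ & txq->elts_m] = pkts[i];
 *   txq->elts_head = head;
 *
 * the loop only records mbuf pointers, no WQE data is touched.)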
445118a1c200SViacheslav Ovsiienko */ 445218a1c200SViacheslav Ovsiienko assert(!MLX5_TXOFF_CONFIG(INLINE)); 445318a1c200SViacheslav Ovsiienko --loc->elts_free; 445418a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 445518a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 445618a1c200SViacheslav Ovsiienko txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf); 445718a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 445818a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) 445918a1c200SViacheslav Ovsiienko txq->stats.obytes += 446018a1c200SViacheslav Ovsiienko sizeof(struct rte_vlan_hdr); 446118a1c200SViacheslav Ovsiienko #endif 446218a1c200SViacheslav Ovsiienko } 446318a1c200SViacheslav Ovsiienko ++loc->pkts_sent; 446418a1c200SViacheslav Ovsiienko --pkts_n; 44655a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 44664dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, false, olx); 446718a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) 446818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 446918a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 447018a1c200SViacheslav Ovsiienko if (pkts_n > 1) 447118a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 447218a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, true); 447318a1c200SViacheslav Ovsiienko if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE)) 447418a1c200SViacheslav Ovsiienko return ret; 447518a1c200SViacheslav Ovsiienko } 447618a1c200SViacheslav Ovsiienko assert(false); 447718a1c200SViacheslav Ovsiienko } 447818a1c200SViacheslav Ovsiienko 447918a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 448018a1c200SViacheslav Ovsiienko mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq, 448118a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 448218a1c200SViacheslav Ovsiienko unsigned int pkts_n, 448318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 448418a1c200SViacheslav Ovsiienko unsigned int olx) 448518a1c200SViacheslav Ovsiienko { 448618a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 448718a1c200SViacheslav Ovsiienko 448818a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, false); 448918a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) 449018a1c200SViacheslav Ovsiienko goto ordinary_send; 449118a1c200SViacheslav Ovsiienko assert(ret == MLX5_TXCMP_CODE_EMPW); 449218a1c200SViacheslav Ovsiienko for (;;) { 449318a1c200SViacheslav Ovsiienko /* Optimize for inline/no inline eMPW send. */ 449418a1c200SViacheslav Ovsiienko ret = (MLX5_TXOFF_CONFIG(INLINE)) ? 449518a1c200SViacheslav Ovsiienko mlx5_tx_burst_empw_inline 449618a1c200SViacheslav Ovsiienko (txq, pkts, pkts_n, loc, olx) : 449718a1c200SViacheslav Ovsiienko mlx5_tx_burst_empw_simple 449818a1c200SViacheslav Ovsiienko (txq, pkts, pkts_n, loc, olx); 449918a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_SINGLE) 450018a1c200SViacheslav Ovsiienko return ret; 450118a1c200SViacheslav Ovsiienko /* The resources to send one packet should remain. 
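 *
 * The loop below therefore ping-pongs between the two senders
 * on the codes they return:
 *
 *   eMPW builder  -> returns SINGLE -> fall into ordinary_send
 *   ordinary send -> returns EMPW   -> back to the eMPW builder
 *
 * and any other code is propagated to the caller as is.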
*/ 450218a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 450318a1c200SViacheslav Ovsiienko ordinary_send: 450418a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx); 450518a1c200SViacheslav Ovsiienko assert(ret != MLX5_TXCMP_CODE_SINGLE); 450618a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_EMPW) 450718a1c200SViacheslav Ovsiienko return ret; 450818a1c200SViacheslav Ovsiienko /* The resources to send one packet should remain. */ 450918a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 451018a1c200SViacheslav Ovsiienko } 4511a6bd4911SViacheslav Ovsiienko } 4512a6bd4911SViacheslav Ovsiienko 4513a6bd4911SViacheslav Ovsiienko /** 4514eb8121abSViacheslav Ovsiienko * DPDK Tx callback template. This is a configured template 4515eb8121abSViacheslav Ovsiienko * used to generate routines optimized for the specified offload setup. 4516eb8121abSViacheslav Ovsiienko * One of these generated functions is chosen at SQ configuration 4517eb8121abSViacheslav Ovsiienko * time. 4518eb8121abSViacheslav Ovsiienko * 4519eb8121abSViacheslav Ovsiienko * @param txq 4520eb8121abSViacheslav Ovsiienko * Generic pointer to TX queue structure. 4521eb8121abSViacheslav Ovsiienko * @param[in] pkts 4522eb8121abSViacheslav Ovsiienko * Packets to transmit. 4523eb8121abSViacheslav Ovsiienko * @param pkts_n 4524eb8121abSViacheslav Ovsiienko * Number of packets in array. 4525eb8121abSViacheslav Ovsiienko * @param olx 4526eb8121abSViacheslav Ovsiienko * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx 4527eb8121abSViacheslav Ovsiienko * values. Should be static to take advantage of the compile-time 4528eb8121abSViacheslav Ovsiienko * static configuration. 4529eb8121abSViacheslav Ovsiienko * 4530eb8121abSViacheslav Ovsiienko * @return 4531eb8121abSViacheslav Ovsiienko * Number of packets successfully transmitted (<= pkts_n).
4532eb8121abSViacheslav Ovsiienko */ 4533eb8121abSViacheslav Ovsiienko static __rte_always_inline uint16_t 4534eb8121abSViacheslav Ovsiienko mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, 4535eb8121abSViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 4536eb8121abSViacheslav Ovsiienko uint16_t pkts_n, 4537eb8121abSViacheslav Ovsiienko unsigned int olx) 4538eb8121abSViacheslav Ovsiienko { 453918a1c200SViacheslav Ovsiienko struct mlx5_txq_local loc; 454018a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 454118a1c200SViacheslav Ovsiienko unsigned int part; 454218a1c200SViacheslav Ovsiienko 454318a1c200SViacheslav Ovsiienko assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); 454418a1c200SViacheslav Ovsiienko assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); 4545f32a3f52SViacheslav Ovsiienko if (unlikely(!pkts_n)) 4546f32a3f52SViacheslav Ovsiienko return 0; 4547f32a3f52SViacheslav Ovsiienko loc.pkts_sent = 0; 4548f32a3f52SViacheslav Ovsiienko loc.pkts_copy = 0; 4549f32a3f52SViacheslav Ovsiienko loc.wqe_last = NULL; 4550f32a3f52SViacheslav Ovsiienko 4551f32a3f52SViacheslav Ovsiienko send_loop: 4552f32a3f52SViacheslav Ovsiienko loc.pkts_loop = loc.pkts_sent; 455318a1c200SViacheslav Ovsiienko /* 455418a1c200SViacheslav Ovsiienko * Check if there are some CQEs, if any: 455518a1c200SViacheslav Ovsiienko * - process encountered errors 455618a1c200SViacheslav Ovsiienko * - process the completed WQEs 455718a1c200SViacheslav Ovsiienko * - free related mbufs 455818a1c200SViacheslav Ovsiienko * - doorbell the NIC about processed CQEs 455918a1c200SViacheslav Ovsiienko */ 4560f32a3f52SViacheslav Ovsiienko rte_prefetch0(*(pkts + loc.pkts_sent)); 456118a1c200SViacheslav Ovsiienko mlx5_tx_handle_completion(txq, olx); 456218a1c200SViacheslav Ovsiienko /* 456318a1c200SViacheslav Ovsiienko * Calculate the number of available resources - elts and WQEs. 456418a1c200SViacheslav Ovsiienko * There are two possible different scenarios: 456518a1c200SViacheslav Ovsiienko * - no data inlining into WQEs, one WQEBB may contain up to 456618a1c200SViacheslav Ovsiienko * four packets, in this case elts become the scarce resource 456718a1c200SViacheslav Ovsiienko * - data inlining into WQEs, one packet may require multiple 456818a1c200SViacheslav Ovsiienko * WQEBBs, the WQEs become the limiting factor. 456918a1c200SViacheslav Ovsiienko */ 457018a1c200SViacheslav Ovsiienko assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); 457118a1c200SViacheslav Ovsiienko loc.elts_free = txq->elts_s - 457218a1c200SViacheslav Ovsiienko (uint16_t)(txq->elts_head - txq->elts_tail); 457318a1c200SViacheslav Ovsiienko assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); 457418a1c200SViacheslav Ovsiienko loc.wqe_free = txq->wqe_s - 457518a1c200SViacheslav Ovsiienko (uint16_t)(txq->wqe_ci - txq->wqe_pi); 457618a1c200SViacheslav Ovsiienko if (unlikely(!loc.elts_free || !loc.wqe_free)) 457785125863SViacheslav Ovsiienko goto burst_exit; 457818a1c200SViacheslav Ovsiienko for (;;) { 457918a1c200SViacheslav Ovsiienko /* 458018a1c200SViacheslav Ovsiienko * Fetch the packet from the array. Usually this is 458118a1c200SViacheslav Ovsiienko * the first packet in a series of multi/single 458218a1c200SViacheslav Ovsiienko * segment packets. 458318a1c200SViacheslav Ovsiienko */ 458418a1c200SViacheslav Ovsiienko loc.mbuf = *(pkts + loc.pkts_sent);
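/*
 * The free-resource computations above rely on unsigned 16-bit
 * wraparound, e.g. (illustrative numbers):
 *
 *   elts_head = 3, elts_tail = 65533
 *   (uint16_t)(elts_head - elts_tail) = 6 mbufs still in flight
 *   elts_free = elts_s - 6
 *
 * so the subtraction stays correct across index wraparound as
 * long as the ring sizes do not exceed 65536 entries.
 */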
458518a1c200SViacheslav Ovsiienko /* Dedicated branch for multi-segment packets. */ 458618a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 458718a1c200SViacheslav Ovsiienko unlikely(NB_SEGS(loc.mbuf) > 1)) { 458818a1c200SViacheslav Ovsiienko /* 458918a1c200SViacheslav Ovsiienko * Multi-segment packet encountered. 459018a1c200SViacheslav Ovsiienko * Hardware is able to process it only 459118a1c200SViacheslav Ovsiienko * with SEND/TSO opcodes, one packet 459218a1c200SViacheslav Ovsiienko * per WQE, do it in a dedicated routine. 459318a1c200SViacheslav Ovsiienko */ 459418a1c200SViacheslav Ovsiienko enter_send_multi: 459518a1c200SViacheslav Ovsiienko assert(loc.pkts_sent >= loc.pkts_copy); 459618a1c200SViacheslav Ovsiienko part = loc.pkts_sent - loc.pkts_copy; 459718a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(INLINE) && part) { 459818a1c200SViacheslav Ovsiienko /* 459918a1c200SViacheslav Ovsiienko * There are some single-segment mbufs not 460018a1c200SViacheslav Ovsiienko * stored in elts. The mbufs must be in the 460118a1c200SViacheslav Ovsiienko * same order as WQEs, so we must copy the 460218a1c200SViacheslav Ovsiienko * mbufs to elts here, before the mbufs of the 460318a1c200SViacheslav Ovsiienko * coming multi-segment packet are appended. 460418a1c200SViacheslav Ovsiienko */ 460518a1c200SViacheslav Ovsiienko mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, 460618a1c200SViacheslav Ovsiienko part, olx); 460718a1c200SViacheslav Ovsiienko loc.pkts_copy = loc.pkts_sent; 460818a1c200SViacheslav Ovsiienko } 460918a1c200SViacheslav Ovsiienko assert(pkts_n > loc.pkts_sent); 461018a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx); 461118a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(INLINE)) 461218a1c200SViacheslav Ovsiienko loc.pkts_copy = loc.pkts_sent; 461318a1c200SViacheslav Ovsiienko /* 461418a1c200SViacheslav Ovsiienko * These returned code checks are supposed 461518a1c200SViacheslav Ovsiienko * to be optimized out due to routine inlining. 461618a1c200SViacheslav Ovsiienko */ 461718a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) { 461818a1c200SViacheslav Ovsiienko /* 461918a1c200SViacheslav Ovsiienko * The routine returns this code when 462018a1c200SViacheslav Ovsiienko * all packets are sent or there are not 462118a1c200SViacheslav Ovsiienko * enough resources to complete the request. 462218a1c200SViacheslav Ovsiienko */ 462318a1c200SViacheslav Ovsiienko break; 462418a1c200SViacheslav Ovsiienko } 462518a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) { 462618a1c200SViacheslav Ovsiienko /* 462718a1c200SViacheslav Ovsiienko * The routine returns this code when 462818a1c200SViacheslav Ovsiienko * an error in the incoming packet 462918a1c200SViacheslav Ovsiienko * format occurred. 463018a1c200SViacheslav Ovsiienko */ 463118a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 463218a1c200SViacheslav Ovsiienko break; 463318a1c200SViacheslav Ovsiienko } 463418a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) { 463518a1c200SViacheslav Ovsiienko /* 463618a1c200SViacheslav Ovsiienko * The single-segment packet was encountered 463718a1c200SViacheslav Ovsiienko * in the array, try to send it in the 463818a1c200SViacheslav Ovsiienko * most optimized way, possibly engaging eMPW.
463918a1c200SViacheslav Ovsiienko */ 464018a1c200SViacheslav Ovsiienko goto enter_send_single; 464118a1c200SViacheslav Ovsiienko } 464218a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 464318a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_TSO) { 464418a1c200SViacheslav Ovsiienko /* 464518a1c200SViacheslav Ovsiienko * The single-segment TSO packet was 464618a1c200SViacheslav Ovsiienko * encountered in the array. 464718a1c200SViacheslav Ovsiienko */ 464818a1c200SViacheslav Ovsiienko goto enter_send_tso; 464918a1c200SViacheslav Ovsiienko } 465018a1c200SViacheslav Ovsiienko /* We must not get here. Something is going wrong. */ 465118a1c200SViacheslav Ovsiienko assert(false); 465218a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 465318a1c200SViacheslav Ovsiienko break; 465418a1c200SViacheslav Ovsiienko } 465518a1c200SViacheslav Ovsiienko /* Dedicated branch for single-segment TSO packets. */ 465618a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 465718a1c200SViacheslav Ovsiienko unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) { 465818a1c200SViacheslav Ovsiienko /* 465918a1c200SViacheslav Ovsiienko * TSO might require special way for inlining 466018a1c200SViacheslav Ovsiienko * (dedicated parameters) and is sent with 466118a1c200SViacheslav Ovsiienko * MLX5_OPCODE_TSO opcode only, provide this 466218a1c200SViacheslav Ovsiienko * in dedicated branch. 466318a1c200SViacheslav Ovsiienko */ 466418a1c200SViacheslav Ovsiienko enter_send_tso: 466518a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc.mbuf) == 1); 466618a1c200SViacheslav Ovsiienko assert(pkts_n > loc.pkts_sent); 466718a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx); 466818a1c200SViacheslav Ovsiienko /* 466918a1c200SViacheslav Ovsiienko * These returned code checks are supposed 467018a1c200SViacheslav Ovsiienko * to be optimized out due to routine inlining. 467118a1c200SViacheslav Ovsiienko */ 467218a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) 467318a1c200SViacheslav Ovsiienko break; 467418a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) { 467518a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 467618a1c200SViacheslav Ovsiienko break; 467718a1c200SViacheslav Ovsiienko } 467818a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) 467918a1c200SViacheslav Ovsiienko goto enter_send_single; 468018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 468118a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_MULTI) { 468218a1c200SViacheslav Ovsiienko /* 468318a1c200SViacheslav Ovsiienko * The multi-segment packet was 468418a1c200SViacheslav Ovsiienko * encountered in the array. 468518a1c200SViacheslav Ovsiienko */ 468618a1c200SViacheslav Ovsiienko goto enter_send_multi; 468718a1c200SViacheslav Ovsiienko } 468818a1c200SViacheslav Ovsiienko /* We must not get here. Something is going wrong. */ 468918a1c200SViacheslav Ovsiienko assert(false); 469018a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 469118a1c200SViacheslav Ovsiienko break; 469218a1c200SViacheslav Ovsiienko } 469318a1c200SViacheslav Ovsiienko /* 469418a1c200SViacheslav Ovsiienko * The dedicated branch for the single-segment packets 469518a1c200SViacheslav Ovsiienko * without TSO. Often these ones can be sent using 469618a1c200SViacheslav Ovsiienko * MLX5_OPCODE_EMPW with multiple packets in one WQE. 
469718a1c200SViacheslav Ovsiienko * The routine builds the WQEs till it encounters 469818a1c200SViacheslav Ovsiienko * the TSO or multi-segment packet (in case if these 469918a1c200SViacheslav Ovsiienko * offloads are requested at SQ configuration time). 470018a1c200SViacheslav Ovsiienko */ 470118a1c200SViacheslav Ovsiienko enter_send_single: 470218a1c200SViacheslav Ovsiienko assert(pkts_n > loc.pkts_sent); 470318a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx); 470418a1c200SViacheslav Ovsiienko /* 470518a1c200SViacheslav Ovsiienko * These returned code checks are supposed 470618a1c200SViacheslav Ovsiienko * to be optimized out due to routine inlining. 470718a1c200SViacheslav Ovsiienko */ 470818a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) 470918a1c200SViacheslav Ovsiienko break; 471018a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) { 471118a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 471218a1c200SViacheslav Ovsiienko break; 471318a1c200SViacheslav Ovsiienko } 471418a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 471518a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_MULTI) { 471618a1c200SViacheslav Ovsiienko /* 471718a1c200SViacheslav Ovsiienko * The multi-segment packet was 471818a1c200SViacheslav Ovsiienko * encountered in the array. 471918a1c200SViacheslav Ovsiienko */ 472018a1c200SViacheslav Ovsiienko goto enter_send_multi; 472118a1c200SViacheslav Ovsiienko } 472218a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 472318a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_TSO) { 472418a1c200SViacheslav Ovsiienko /* 472518a1c200SViacheslav Ovsiienko * The single-segment TSO packet was 472618a1c200SViacheslav Ovsiienko * encountered in the array. 472718a1c200SViacheslav Ovsiienko */ 472818a1c200SViacheslav Ovsiienko goto enter_send_tso; 472918a1c200SViacheslav Ovsiienko } 473018a1c200SViacheslav Ovsiienko /* We must not get here. Something is going wrong. */ 473118a1c200SViacheslav Ovsiienko assert(false); 473218a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 473318a1c200SViacheslav Ovsiienko break; 473418a1c200SViacheslav Ovsiienko } 473518a1c200SViacheslav Ovsiienko /* 473618a1c200SViacheslav Ovsiienko * Main Tx loop is completed, do the rest: 473718a1c200SViacheslav Ovsiienko * - set completion request if thresholds are reached 473818a1c200SViacheslav Ovsiienko * - doorbell the hardware 473918a1c200SViacheslav Ovsiienko * - copy the rest of mbufs to elts (if any) 474018a1c200SViacheslav Ovsiienko */ 474118a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy); 474218a1c200SViacheslav Ovsiienko /* Take a shortcut if nothing is sent. */ 4743f32a3f52SViacheslav Ovsiienko if (unlikely(loc.pkts_sent == loc.pkts_loop)) 474485125863SViacheslav Ovsiienko goto burst_exit; 474518a1c200SViacheslav Ovsiienko /* 474618a1c200SViacheslav Ovsiienko * Ring QP doorbell immediately after WQE building completion 474718a1c200SViacheslav Ovsiienko * to improve latencies. The pure software related data treatment 474818a1c200SViacheslav Ovsiienko * can be completed after doorbell. Tx CQEs for this SQ are 474918a1c200SViacheslav Ovsiienko * processed in this thread only by the polling. 475018a1c200SViacheslav Ovsiienko */ 475118a1c200SViacheslav Ovsiienko mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0); 47525a93e173SViacheslav Ovsiienko /* Not all of the mbufs may be stored into elts yet. */ 4753f32a3f52SViacheslav Ovsiienko part = MLX5_TXOFF_CONFIG(INLINE) ? 
0 : loc.pkts_sent - loc.pkts_copy; 475418a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(INLINE) && part) { 475518a1c200SViacheslav Ovsiienko /* 475618a1c200SViacheslav Ovsiienko * There are some single-segment mbufs not stored in elts. 47575a93e173SViacheslav Ovsiienko * This can happen only if the last packet was single-segment. 475818a1c200SViacheslav Ovsiienko * The copying is gathered into one place because it is 475918a1c200SViacheslav Ovsiienko * a good opportunity to optimize that with SIMD. 476018a1c200SViacheslav Ovsiienko * Unfortunately, if inlining is enabled, gaps in the 476118a1c200SViacheslav Ovsiienko * pointer array may happen due to early freeing of the 476218a1c200SViacheslav Ovsiienko * inlined mbufs. 476318a1c200SViacheslav Ovsiienko */ 476418a1c200SViacheslav Ovsiienko mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx); 4765f32a3f52SViacheslav Ovsiienko loc.pkts_copy = loc.pkts_sent; 476618a1c200SViacheslav Ovsiienko } 476718a1c200SViacheslav Ovsiienko assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); 476818a1c200SViacheslav Ovsiienko assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); 4769f32a3f52SViacheslav Ovsiienko if (pkts_n > loc.pkts_sent) { 4770f32a3f52SViacheslav Ovsiienko /* 4771f32a3f52SViacheslav Ovsiienko * If the burst size is large, there might not be enough CQEs 4772f32a3f52SViacheslav Ovsiienko * fetched from the completion queue and not enough resources 4773f32a3f52SViacheslav Ovsiienko * freed to send all the packets. 4774f32a3f52SViacheslav Ovsiienko */ 4775f32a3f52SViacheslav Ovsiienko goto send_loop; 4776f32a3f52SViacheslav Ovsiienko } 477785125863SViacheslav Ovsiienko burst_exit: 477885125863SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 477985125863SViacheslav Ovsiienko /* Increment sent packets counter. */ 478085125863SViacheslav Ovsiienko txq->stats.opackets += loc.pkts_sent; 478185125863SViacheslav Ovsiienko #endif 478218a1c200SViacheslav Ovsiienko return loc.pkts_sent; 4783eb8121abSViacheslav Ovsiienko } 4784eb8121abSViacheslav Ovsiienko 4785eb8121abSViacheslav Ovsiienko /* Generate routines with Enhanced Multi-Packet Write support.
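 *
 * Conceptually every declaration below produces a thin wrapper
 * of the following shape (a sketch, not the literal macro
 * expansion) with the listed flags passed as a compile-time
 * constant, so the always-inline template folds away every
 * MLX5_TXOFF_CONFIG() check that cannot match:
 *
 *   static uint16_t
 *   mlx5_tx_burst_md_empw(void *txq, struct rte_mbuf **pkts,
 *                         uint16_t pkts_n)
 *   {
 *           return mlx5_tx_burst_tmpl(txq, pkts, pkts_n,
 *                                     MLX5_TXOFF_CONFIG_METADATA |
 *                                     MLX5_TXOFF_CONFIG_EMPW);
 *   }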
*/ 4786eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(full_empw, 4787eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW) 4788eb8121abSViacheslav Ovsiienko 4789eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(none_empw, 4790eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW) 4791eb8121abSViacheslav Ovsiienko 4792eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(md_empw, 4793eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4794eb8121abSViacheslav Ovsiienko 4795eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mt_empw, 4796eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4797eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4798eb8121abSViacheslav Ovsiienko 4799eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtsc_empw, 4800eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4801eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4802eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4803eb8121abSViacheslav Ovsiienko 4804eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mti_empw, 4805eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4806eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4807eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4808eb8121abSViacheslav Ovsiienko 4809eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtv_empw, 4810eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4811eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4812eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4813eb8121abSViacheslav Ovsiienko 4814eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtiv_empw, 4815eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4816eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4817eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4818eb8121abSViacheslav Ovsiienko 4819eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sc_empw, 4820eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4821eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4822eb8121abSViacheslav Ovsiienko 4823eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sci_empw, 4824eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4825eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4826eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4827eb8121abSViacheslav Ovsiienko 4828eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(scv_empw, 4829eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4830eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4831eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4832eb8121abSViacheslav Ovsiienko 4833eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sciv_empw, 4834eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4835eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4836eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4837eb8121abSViacheslav Ovsiienko 4838eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(i_empw, 4839eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 
4840eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4841eb8121abSViacheslav Ovsiienko 4842eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(v_empw, 4843eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4844eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4845eb8121abSViacheslav Ovsiienko 4846eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(iv_empw, 4847eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4848eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4849eb8121abSViacheslav Ovsiienko 4850eb8121abSViacheslav Ovsiienko /* Generate routines without Enhanced Multi-Packet Write support. */ 4851eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(full, 4852eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_FULL) 4853eb8121abSViacheslav Ovsiienko 4854eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(none, 4855eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE) 4856eb8121abSViacheslav Ovsiienko 4857eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(md, 4858eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4859eb8121abSViacheslav Ovsiienko 4860eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mt, 4861eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4862eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4863eb8121abSViacheslav Ovsiienko 4864eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtsc, 4865eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4866eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4867eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4868eb8121abSViacheslav Ovsiienko 4869eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mti, 4870eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4871eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4872eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4873eb8121abSViacheslav Ovsiienko 4874eb8121abSViacheslav Ovsiienko 4875eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtv, 4876eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4877eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4878eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4879eb8121abSViacheslav Ovsiienko 4880eb8121abSViacheslav Ovsiienko 4881eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtiv, 4882eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4883eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4884eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4885eb8121abSViacheslav Ovsiienko 4886eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sc, 4887eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4888eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4889eb8121abSViacheslav Ovsiienko 4890eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sci, 4891eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4892eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4893eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4894eb8121abSViacheslav Ovsiienko 4895eb8121abSViacheslav Ovsiienko 4896eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(scv, 4897eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4898eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4899eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4900eb8121abSViacheslav Ovsiienko
4901eb8121abSViacheslav Ovsiienko 4902eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sciv, 4903eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4904eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4905eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4906eb8121abSViacheslav Ovsiienko 4907eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(i, 4908eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4909eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4910eb8121abSViacheslav Ovsiienko 4911eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(v, 4912eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4913eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4914eb8121abSViacheslav Ovsiienko 4915eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(iv, 4916eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4917eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4918eb8121abSViacheslav Ovsiienko 4919eb8121abSViacheslav Ovsiienko /* 4920eb8121abSViacheslav Ovsiienko * Array of declared and compiled Tx burst functions and the corresponding 4921eb8121abSViacheslav Ovsiienko * supported offloads sets. The array is used to select the Tx burst 4922eb8121abSViacheslav Ovsiienko * function for the specified offloads set at Tx queue configuration time. 4923eb8121abSViacheslav Ovsiienko */ 4924eb8121abSViacheslav Ovsiienko const struct { 4925eb8121abSViacheslav Ovsiienko eth_tx_burst_t func; 4926eb8121abSViacheslav Ovsiienko unsigned int olx; 4927eb8121abSViacheslav Ovsiienko } txoff_func[] = { 4928eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(full_empw, 4929eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4930eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4931eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4932eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4933eb8121abSViacheslav Ovsiienko 4934eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(none_empw, 4935eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW) 4936eb8121abSViacheslav Ovsiienko 4937eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(md_empw, 4938eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4939eb8121abSViacheslav Ovsiienko 4940eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mt_empw, 4941eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4942eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4943eb8121abSViacheslav Ovsiienko 4944eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtsc_empw, 4945eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4946eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4947eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4948eb8121abSViacheslav Ovsiienko 4949eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mti_empw, 4950eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4951eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4952eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4953eb8121abSViacheslav Ovsiienko 4954eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtv_empw, 4955eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4956eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN |
4957eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4958eb8121abSViacheslav Ovsiienko 4959eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtiv_empw, 4960eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4961eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4962eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4963eb8121abSViacheslav Ovsiienko 4964eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sc_empw, 4965eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4966eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4967eb8121abSViacheslav Ovsiienko 4968eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sci_empw, 4969eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4970eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4971eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4972eb8121abSViacheslav Ovsiienko 4973eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(scv_empw, 4974eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4975eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4976eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4977eb8121abSViacheslav Ovsiienko 4978eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sciv_empw, 4979eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4980eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4981eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4982eb8121abSViacheslav Ovsiienko 4983eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(i_empw, 4984eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4985eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4986eb8121abSViacheslav Ovsiienko 4987eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(v_empw, 4988eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4989eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4990eb8121abSViacheslav Ovsiienko 4991eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(iv_empw, 4992eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4993eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4994eb8121abSViacheslav Ovsiienko 4995eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(full, 4996eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4997eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4998eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4999eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5000eb8121abSViacheslav Ovsiienko 5001eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(none, 5002eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE) 5003eb8121abSViacheslav Ovsiienko 5004eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(md, 5005eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5006eb8121abSViacheslav Ovsiienko 5007eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mt, 5008eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 5009eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5010eb8121abSViacheslav Ovsiienko 5011eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtsc, 5012eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO 
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mti,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtiv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sc,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sci,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(scv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sciv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(i,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(v,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(iv,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)
};
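/*
 * Illustrative note: each MLX5_TXOFF_INFO(func, olx) entry above pairs
 * the template-generated burst routine mlx5_tx_burst_<func> with the
 * compile-time option mask it was specialized for. Resolved by hand,
 * the "sciv" entry is roughly:
 *
 *	{ mlx5_tx_burst_sciv,
 *	  MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
 *	  MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
 *	  MLX5_TXOFF_CONFIG_METADATA }
 *
 * The field layout is assumed from the initializer shape; see the
 * txoff_func[] declaration earlier in this file for the exact type.
 * mlx5_select_tx_function() below scans this table and returns the
 * routine whose mask best matches the offloads configured on the device.
 */
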
/**
 * Configure the Tx function to use. The routine checks configured
 * Tx offloads for the device and selects the appropriate Tx burst
 * routine. There are multiple Tx burst routines compiled from
 * the same template, each optimized for a dedicated set of
 * Tx offloads.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	unsigned int diff = 0, olx = 0, i, m;

	static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
		      MLX5_DSEG_MAX, "invalid WQE max size");
	static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
		      "invalid WQE Control Segment size");
	static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
		      "invalid WQE Ethernet Segment size");
	static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
		      "invalid WQE Data Segment size");
	static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
		      "invalid WQE size");
	assert(priv);
	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
		/* We should support Multi-Segment Packets. */
		olx |= MLX5_TXOFF_CONFIG_MULTI;
	}
	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
			   DEV_TX_OFFLOAD_IP_TNL_TSO |
			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
		/* We should support TCP Send Offload. */
		olx |= MLX5_TXOFF_CONFIG_TSO;
	}
	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		/* We should support Software Parser for Tunnels. */
		olx |= MLX5_TXOFF_CONFIG_SWP;
	}
	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		/* We should support IP/TCP/UDP Checksums. */
		olx |= MLX5_TXOFF_CONFIG_CSUM;
	}
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
		/* We should support VLAN insertion. */
		olx |= MLX5_TXOFF_CONFIG_VLAN;
	}
	if (priv->txqs_n && (*priv->txqs)[0]) {
		struct mlx5_txq_data *txd = (*priv->txqs)[0];

		if (txd->inlen_send) {
			/*
			 * Check the data inline requirements. Data inline
			 * is enabled on a per-device basis, so checking
			 * the first Tx queue only is sufficient.
			 *
			 * If the device does not support VLAN insertion
			 * in WQE and some queues are requested to perform
			 * VLAN insertion offload, then inlining must be
			 * enabled.
			 */
			olx |= MLX5_TXOFF_CONFIG_INLINE;
		}
	}
	if (config->mps == MLX5_MPW_ENHANCED &&
	    config->txq_inline_min <= 0) {
		/*
		 * The NIC supports Enhanced Multi-Packet Write.
		 * Legacy MPW is not supported due to its
		 * hardware-related problems, so legacy MLX5_MPW
		 * settings are just ignored. eMPW is engaged only
		 * if there is no minimal inline data requirement.
		 */
		olx |= MLX5_TXOFF_CONFIG_EMPW;
	}
	if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
		/* We should support Flow metadata. */
		olx |= MLX5_TXOFF_CONFIG_METADATA;
	}
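	/*
	 * Worked example (hypothetical configuration): if the application
	 * requests only DEV_TX_OFFLOAD_MULTI_SEGS, DEV_TX_OFFLOAD_TCP_TSO
	 * and DEV_TX_OFFLOAD_TCP_CKSUM, with no data inlining and no eMPW
	 * engaged, the checks above accumulate
	 *
	 *	olx = MLX5_TXOFF_CONFIG_MULTI |
	 *	      MLX5_TXOFF_CONFIG_TSO |
	 *	      MLX5_TXOFF_CONFIG_CSUM;
	 *
	 * and the scan below settles on the "mtsc" routine, the candidate
	 * that covers this set with the fewest not-requested options.
	 */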
	/*
	 * Scan the routines table to find the routine that satisfies
	 * the requested offloads with the fewest not-requested extras.
	 */
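	/*
	 * Selection rules applied by the loop below: a candidate must
	 * provide a superset of the requested options and must not add
	 * INLINE or EMPW when those were not requested. Among the
	 * remaining candidates the one with the fewest extra options
	 * wins; ties are intended to be broken in favor of the entry
	 * whose differing options occupy lower bit positions, i.e. the
	 * "lighter" runtime checks per the option ordering convention.
	 */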
	m = RTE_DIM(txoff_func);
	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		unsigned int tmp;

		tmp = txoff_func[i].olx;
		if (tmp == olx) {
			/* Meets requested offloads exactly. */
			m = i;
			break;
		}
		if ((tmp & olx) != olx) {
			/* Does not meet requested offloads at all. */
			continue;
		}
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
			/* Do not enable eMPW if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
			/* Do not enable inlining if not configured. */
			continue;
		/*
		 * Some routine meets the requirements.
		 * Check whether it has the minimal number
		 * of not-requested offloads.
		 */
		tmp = __builtin_popcountl(tmp & ~olx);
		if (m >= RTE_DIM(txoff_func) || tmp < diff) {
			/* First or better match, save and continue. */
			m = i;
			diff = tmp;
			continue;
		}
		if (tmp == diff) {
			tmp = txoff_func[i].olx ^ txoff_func[m].olx;
			if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
			    __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
				/* Lighter not-requested offload. */
				m = i;
			}
		}
	}
	if (m >= RTE_DIM(txoff_func)) {
		DRV_LOG(DEBUG, "port %u has no selected Tx function"
			       " for requested offloads %04X",
			       dev->data->port_id, olx);
		return NULL;
	}
	DRV_LOG(DEBUG, "port %u has selected Tx function"
		       " supporting offloads %04X/%04X",
		       dev->data->port_id, olx, txoff_func[m].olx);
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
		DRV_LOG(DEBUG, "\tMULTI (multi segment)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
		DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
		DRV_LOG(DEBUG, "\tSWP (software parser)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
		DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
		DRV_LOG(DEBUG, "\tINLIN (inline data)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
		DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
		DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
		DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
	return txoff_func[m].func;
}
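
/*
 * Usage sketch (illustrative; the actual call site is in the control
 * path outside this file): the selected routine is expected to be
 * installed as the device Tx burst callback, e.g.
 *
 *	eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
 *	if (eth_dev->tx_pkt_burst == NULL)
 *		(handle the unsupported offloads combination)
 */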