18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 22e22920bSAdrien Mazarguil * Copyright 2015 6WIND S.A. 3eb8121abSViacheslav Ovsiienko * Copyright 2015-2019 Mellanox Technologies, Ltd 42e22920bSAdrien Mazarguil */ 52e22920bSAdrien Mazarguil 62e22920bSAdrien Mazarguil #include <assert.h> 72e22920bSAdrien Mazarguil #include <stdint.h> 82e22920bSAdrien Mazarguil #include <string.h> 92e22920bSAdrien Mazarguil #include <stdlib.h> 102e22920bSAdrien Mazarguil 112e22920bSAdrien Mazarguil /* Verbs header. */ 122e22920bSAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 132e22920bSAdrien Mazarguil #ifdef PEDANTIC 14fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 152e22920bSAdrien Mazarguil #endif 162e22920bSAdrien Mazarguil #include <infiniband/verbs.h> 1743e9d979SShachar Beiser #include <infiniband/mlx5dv.h> 182e22920bSAdrien Mazarguil #ifdef PEDANTIC 19fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 202e22920bSAdrien Mazarguil #endif 212e22920bSAdrien Mazarguil 222e22920bSAdrien Mazarguil #include <rte_mbuf.h> 232e22920bSAdrien Mazarguil #include <rte_mempool.h> 242e22920bSAdrien Mazarguil #include <rte_prefetch.h> 252e22920bSAdrien Mazarguil #include <rte_common.h> 262e22920bSAdrien Mazarguil #include <rte_branch_prediction.h> 276218063bSNélio Laranjeiro #include <rte_ether.h> 2888c07335SMatan Azrad #include <rte_cycles.h> 292e22920bSAdrien Mazarguil 302e22920bSAdrien Mazarguil #include "mlx5.h" 312e22920bSAdrien Mazarguil #include "mlx5_utils.h" 322e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 33f3db9489SYaacov Hazan #include "mlx5_autoconf.h" 342e22920bSAdrien Mazarguil #include "mlx5_defs.h" 356218063bSNélio Laranjeiro #include "mlx5_prm.h" 366218063bSNélio Laranjeiro 37eb8121abSViacheslav Ovsiienko /* TX burst subroutines return codes. */ 38eb8121abSViacheslav Ovsiienko enum mlx5_txcmp_code { 39eb8121abSViacheslav Ovsiienko MLX5_TXCMP_CODE_EXIT = 0, 40eb8121abSViacheslav Ovsiienko MLX5_TXCMP_CODE_ERROR, 41eb8121abSViacheslav Ovsiienko MLX5_TXCMP_CODE_SINGLE, 42eb8121abSViacheslav Ovsiienko MLX5_TXCMP_CODE_MULTI, 43eb8121abSViacheslav Ovsiienko MLX5_TXCMP_CODE_TSO, 44eb8121abSViacheslav Ovsiienko MLX5_TXCMP_CODE_EMPW, 45eb8121abSViacheslav Ovsiienko }; 46eb8121abSViacheslav Ovsiienko 47eb8121abSViacheslav Ovsiienko /* 48eb8121abSViacheslav Ovsiienko * These defines are used to configure Tx burst routine option set 49eb8121abSViacheslav Ovsiienko * supported at compile time. The not specified options are optimized out 50eb8121abSViacheslav Ovsiienko * out due to if conditions can be explicitly calculated at compile time. 51eb8121abSViacheslav Ovsiienko * The offloads with bigger runtime check (require more CPU cycles to 52eb8121abSViacheslav Ovsiienko * skip) overhead should have the bigger index - this is needed to 53eb8121abSViacheslav Ovsiienko * select the better matching routine function if no exact match and 54eb8121abSViacheslav Ovsiienko * some offloads are not actually requested. 55eb8121abSViacheslav Ovsiienko */ 56eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/ 57eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/ 58eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/ 59eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */ 60eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */ 61eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/ 62eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */ 63eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/ 64eb8121abSViacheslav Ovsiienko 65eb8121abSViacheslav Ovsiienko /* The most common offloads groups. */ 66eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_NONE 0 67eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \ 68eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_TSO | \ 69eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | \ 70eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_CSUM | \ 71eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | \ 72eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | \ 73eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 74eb8121abSViacheslav Ovsiienko 75eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask) 76eb8121abSViacheslav Ovsiienko 77eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_DECL(func, olx) \ 78eb8121abSViacheslav Ovsiienko static uint16_t mlx5_tx_burst_##func(void *txq, \ 79eb8121abSViacheslav Ovsiienko struct rte_mbuf **pkts, \ 80eb8121abSViacheslav Ovsiienko uint16_t pkts_n) \ 81eb8121abSViacheslav Ovsiienko { \ 82eb8121abSViacheslav Ovsiienko return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \ 83eb8121abSViacheslav Ovsiienko pkts, pkts_n, (olx)); \ 84eb8121abSViacheslav Ovsiienko } 85eb8121abSViacheslav Ovsiienko 86eb8121abSViacheslav Ovsiienko #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx}, 87eb8121abSViacheslav Ovsiienko 88c0583d98SJerin Jacob static __rte_always_inline uint32_t 893cc08bc6SXueming Li rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); 90ff1807a3SNélio Laranjeiro 91c0583d98SJerin Jacob static __rte_always_inline int 9278142aacSNélio Laranjeiro mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, 932e633f1fSYongseok Koh uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); 94ff1807a3SNélio Laranjeiro 95c0583d98SJerin Jacob static __rte_always_inline uint32_t 966ba07449SXueming Li rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); 97ff1807a3SNélio Laranjeiro 983e1f82a1SYongseok Koh static __rte_always_inline void 993e1f82a1SYongseok Koh rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, 1003e1f82a1SYongseok Koh volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res); 1013e1f82a1SYongseok Koh 1027d6bf6b8SYongseok Koh static __rte_always_inline void 1033a22f387SMatan Azrad mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx, 1043a22f387SMatan Azrad const unsigned int strd_n); 1057d6bf6b8SYongseok Koh 10635c090eaSMatan Azrad static int 10735c090eaSMatan Azrad mlx5_queue_state_modify(struct rte_eth_dev *dev, 10835c090eaSMatan Azrad struct mlx5_mp_arg_queue_state_modify *sm); 10935c090eaSMatan Azrad 1102579543fSMatan Azrad static inline void 1112579543fSMatan Azrad mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp, 1122579543fSMatan Azrad volatile struct mlx5_cqe *restrict cqe, 1132579543fSMatan Azrad uint32_t phcsum); 1142579543fSMatan Azrad 1152579543fSMatan Azrad static inline void 1162579543fSMatan Azrad mlx5_lro_update_hdr(uint8_t *restrict padd, 1172579543fSMatan Azrad volatile struct mlx5_cqe *restrict cqe, 1182579543fSMatan Azrad uint32_t len); 1192579543fSMatan Azrad 120ea16068cSYongseok Koh uint32_t mlx5_ptype_table[] __rte_cache_aligned = { 121ea16068cSYongseok Koh [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */ 122ea16068cSYongseok Koh }; 123ea16068cSYongseok Koh 1245f8ba81cSXueming Li uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned; 1255f8ba81cSXueming Li uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned; 1265f8ba81cSXueming Li 127ea16068cSYongseok Koh /** 128ea16068cSYongseok Koh * Build a table to translate Rx completion flags to packet type. 129ea16068cSYongseok Koh * 130ea16068cSYongseok Koh * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 131ea16068cSYongseok Koh */ 132ea16068cSYongseok Koh void 133ea16068cSYongseok Koh mlx5_set_ptype_table(void) 134ea16068cSYongseok Koh { 135ea16068cSYongseok Koh unsigned int i; 136ea16068cSYongseok Koh uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table; 137ea16068cSYongseok Koh 1389807f113SYongseok Koh /* Last entry must not be overwritten, reserved for errored packet. */ 1399807f113SYongseok Koh for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i) 140ea16068cSYongseok Koh (*p)[i] = RTE_PTYPE_UNKNOWN; 1416cb559d6SYongseok Koh /* 1426cb559d6SYongseok Koh * The index to the array should have: 143ea16068cSYongseok Koh * bit[1:0] = l3_hdr_type 144ea16068cSYongseok Koh * bit[4:2] = l4_hdr_type 145ea16068cSYongseok Koh * bit[5] = ip_frag 146ea16068cSYongseok Koh * bit[6] = tunneled 147ea16068cSYongseok Koh * bit[7] = outer_l3_type 14899c12dccSNélio Laranjeiro */ 1493ca63b88SShahaf Shuler /* L2 */ 1503ca63b88SShahaf Shuler (*p)[0x00] = RTE_PTYPE_L2_ETHER; 151ea16068cSYongseok Koh /* L3 */ 152ea16068cSYongseok Koh (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 153ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 154ea16068cSYongseok Koh (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 155ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 156ea16068cSYongseok Koh /* Fragmented */ 157ea16068cSYongseok Koh (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 158ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 159ea16068cSYongseok Koh (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 160ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 161ea16068cSYongseok Koh /* TCP */ 162ea16068cSYongseok Koh (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 163ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 164ea16068cSYongseok Koh (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 165ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 1660915e287SBin Huang (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 1670915e287SBin Huang RTE_PTYPE_L4_TCP; 1680915e287SBin Huang (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 1690915e287SBin Huang RTE_PTYPE_L4_TCP; 1700915e287SBin Huang (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 1710915e287SBin Huang RTE_PTYPE_L4_TCP; 1720915e287SBin Huang (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 1730915e287SBin Huang RTE_PTYPE_L4_TCP; 174ea16068cSYongseok Koh /* UDP */ 175ea16068cSYongseok Koh (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 176ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 177ea16068cSYongseok Koh (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 178ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 179ea16068cSYongseok Koh /* Repeat with outer_l3_type being set. Just in case. */ 180ea16068cSYongseok Koh (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 181ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 182ea16068cSYongseok Koh (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 183ea16068cSYongseok Koh RTE_PTYPE_L4_NONFRAG; 184ea16068cSYongseok Koh (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 185ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 186ea16068cSYongseok Koh (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 187ea16068cSYongseok Koh RTE_PTYPE_L4_FRAG; 188ea16068cSYongseok Koh (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 189ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 190ea16068cSYongseok Koh (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 191ea16068cSYongseok Koh RTE_PTYPE_L4_TCP; 1920915e287SBin Huang (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 1930915e287SBin Huang RTE_PTYPE_L4_TCP; 1940915e287SBin Huang (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 1950915e287SBin Huang RTE_PTYPE_L4_TCP; 1960915e287SBin Huang (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 1970915e287SBin Huang RTE_PTYPE_L4_TCP; 1980915e287SBin Huang (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 1990915e287SBin Huang RTE_PTYPE_L4_TCP; 200ea16068cSYongseok Koh (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 201ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 202ea16068cSYongseok Koh (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 203ea16068cSYongseok Koh RTE_PTYPE_L4_UDP; 204ea16068cSYongseok Koh /* Tunneled - L3 */ 2053cc08bc6SXueming Li (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 206ea16068cSYongseok Koh (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 207ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 208ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 209ea16068cSYongseok Koh (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 210ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 211ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 2123cc08bc6SXueming Li (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; 213ea16068cSYongseok Koh (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 214ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 215ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 216ea16068cSYongseok Koh (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 217ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 218ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_NONFRAG; 219ea16068cSYongseok Koh /* Tunneled - Fragmented */ 220ea16068cSYongseok Koh (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 221ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 222ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 223ea16068cSYongseok Koh (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 224ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 225ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 226ea16068cSYongseok Koh (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 227ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 228ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 229ea16068cSYongseok Koh (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 230ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 231ea16068cSYongseok Koh RTE_PTYPE_INNER_L4_FRAG; 232ea16068cSYongseok Koh /* Tunneled - TCP */ 233ea16068cSYongseok Koh (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 234ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2356c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 236ea16068cSYongseok Koh (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 237ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2386c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 2390915e287SBin Huang (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2400915e287SBin Huang RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2410915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 2420915e287SBin Huang (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2430915e287SBin Huang RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2440915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 2450915e287SBin Huang (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2460915e287SBin Huang RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2470915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 2480915e287SBin Huang (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2490915e287SBin Huang RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2500915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 251ea16068cSYongseok Koh (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 252ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2536c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 254ea16068cSYongseok Koh (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 255ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2566c897093SYongseok Koh RTE_PTYPE_INNER_L4_TCP; 2570915e287SBin Huang (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2580915e287SBin Huang RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2590915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 2600915e287SBin Huang (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2610915e287SBin Huang RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2620915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 2630915e287SBin Huang (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2640915e287SBin Huang RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2650915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 2660915e287SBin Huang (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2670915e287SBin Huang RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2680915e287SBin Huang RTE_PTYPE_INNER_L4_TCP; 269ea16068cSYongseok Koh /* Tunneled - UDP */ 270ea16068cSYongseok Koh (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 271ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2726c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 273ea16068cSYongseok Koh (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 274ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2756c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 276ea16068cSYongseok Koh (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 277ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2786c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 279ea16068cSYongseok Koh (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 280ea16068cSYongseok Koh RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2816c897093SYongseok Koh RTE_PTYPE_INNER_L4_UDP; 282ea16068cSYongseok Koh } 283fdcb0f53SNélio Laranjeiro 2842e22920bSAdrien Mazarguil /** 2855f8ba81cSXueming Li * Build a table to translate packet to checksum type of Verbs. 2865f8ba81cSXueming Li */ 2875f8ba81cSXueming Li void 2885f8ba81cSXueming Li mlx5_set_cksum_table(void) 2895f8ba81cSXueming Li { 2905f8ba81cSXueming Li unsigned int i; 2915f8ba81cSXueming Li uint8_t v; 2925f8ba81cSXueming Li 2935f8ba81cSXueming Li /* 2945f8ba81cSXueming Li * The index should have: 2955f8ba81cSXueming Li * bit[0] = PKT_TX_TCP_SEG 2965f8ba81cSXueming Li * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM 2975f8ba81cSXueming Li * bit[4] = PKT_TX_IP_CKSUM 2985f8ba81cSXueming Li * bit[8] = PKT_TX_OUTER_IP_CKSUM 2995f8ba81cSXueming Li * bit[9] = tunnel 3005f8ba81cSXueming Li */ 3015f8ba81cSXueming Li for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) { 3025f8ba81cSXueming Li v = 0; 3035f8ba81cSXueming Li if (i & (1 << 9)) { 3045f8ba81cSXueming Li /* Tunneled packet. */ 3055f8ba81cSXueming Li if (i & (1 << 8)) /* Outer IP. */ 3065f8ba81cSXueming Li v |= MLX5_ETH_WQE_L3_CSUM; 3075f8ba81cSXueming Li if (i & (1 << 4)) /* Inner IP. */ 3085f8ba81cSXueming Li v |= MLX5_ETH_WQE_L3_INNER_CSUM; 3095f8ba81cSXueming Li if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ 3105f8ba81cSXueming Li v |= MLX5_ETH_WQE_L4_INNER_CSUM; 3115f8ba81cSXueming Li } else { 3125f8ba81cSXueming Li /* No tunnel. */ 3135f8ba81cSXueming Li if (i & (1 << 4)) /* IP. */ 3145f8ba81cSXueming Li v |= MLX5_ETH_WQE_L3_CSUM; 3155f8ba81cSXueming Li if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ 3165f8ba81cSXueming Li v |= MLX5_ETH_WQE_L4_CSUM; 3175f8ba81cSXueming Li } 3185f8ba81cSXueming Li mlx5_cksum_table[i] = v; 3195f8ba81cSXueming Li } 3205f8ba81cSXueming Li } 3215f8ba81cSXueming Li 3225f8ba81cSXueming Li /** 3235f8ba81cSXueming Li * Build a table to translate packet type of mbuf to SWP type of Verbs. 3245f8ba81cSXueming Li */ 3255f8ba81cSXueming Li void 3265f8ba81cSXueming Li mlx5_set_swp_types_table(void) 3275f8ba81cSXueming Li { 3285f8ba81cSXueming Li unsigned int i; 3295f8ba81cSXueming Li uint8_t v; 3305f8ba81cSXueming Li 3315f8ba81cSXueming Li /* 3325f8ba81cSXueming Li * The index should have: 3335f8ba81cSXueming Li * bit[0:1] = PKT_TX_L4_MASK 3345f8ba81cSXueming Li * bit[4] = PKT_TX_IPV6 3355f8ba81cSXueming Li * bit[8] = PKT_TX_OUTER_IPV6 3365f8ba81cSXueming Li * bit[9] = PKT_TX_OUTER_UDP 3375f8ba81cSXueming Li */ 3385f8ba81cSXueming Li for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) { 3395f8ba81cSXueming Li v = 0; 3405f8ba81cSXueming Li if (i & (1 << 8)) 3415f8ba81cSXueming Li v |= MLX5_ETH_WQE_L3_OUTER_IPV6; 3425f8ba81cSXueming Li if (i & (1 << 9)) 3435f8ba81cSXueming Li v |= MLX5_ETH_WQE_L4_OUTER_UDP; 3445f8ba81cSXueming Li if (i & (1 << 4)) 3455f8ba81cSXueming Li v |= MLX5_ETH_WQE_L3_INNER_IPV6; 3465f8ba81cSXueming Li if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52)) 3475f8ba81cSXueming Li v |= MLX5_ETH_WQE_L4_INNER_UDP; 3485f8ba81cSXueming Li mlx5_swp_types_table[i] = v; 3495f8ba81cSXueming Li } 3505f8ba81cSXueming Li } 3515f8ba81cSXueming Li 3525f8ba81cSXueming Li /** 35318a1c200SViacheslav Ovsiienko * Set Software Parser flags and offsets in Ethernet Segment of WQE. 35418a1c200SViacheslav Ovsiienko * Flags must be preliminary initialized to zero. 35518a1c200SViacheslav Ovsiienko * 35618a1c200SViacheslav Ovsiienko * @param loc 35718a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 35818a1c200SViacheslav Ovsiienko * @param swp_flags 35918a1c200SViacheslav Ovsiienko * Pointer to store Software Parser flags 36018a1c200SViacheslav Ovsiienko * @param olx 36118a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 36218a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 36318a1c200SViacheslav Ovsiienko * 36418a1c200SViacheslav Ovsiienko * @return 36518a1c200SViacheslav Ovsiienko * Software Parser offsets packed in dword. 36618a1c200SViacheslav Ovsiienko * Software Parser flags are set by pointer. 36718a1c200SViacheslav Ovsiienko */ 36818a1c200SViacheslav Ovsiienko static __rte_always_inline uint32_t 36918a1c200SViacheslav Ovsiienko txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc, 37018a1c200SViacheslav Ovsiienko uint8_t *swp_flags, 37118a1c200SViacheslav Ovsiienko unsigned int olx) 37218a1c200SViacheslav Ovsiienko { 37318a1c200SViacheslav Ovsiienko uint64_t ol, tunnel; 37418a1c200SViacheslav Ovsiienko unsigned int idx, off; 37518a1c200SViacheslav Ovsiienko uint32_t set; 37618a1c200SViacheslav Ovsiienko 37718a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(SWP)) 37818a1c200SViacheslav Ovsiienko return 0; 37918a1c200SViacheslav Ovsiienko ol = loc->mbuf->ol_flags; 38018a1c200SViacheslav Ovsiienko tunnel = ol & PKT_TX_TUNNEL_MASK; 38118a1c200SViacheslav Ovsiienko /* 38218a1c200SViacheslav Ovsiienko * Check whether Software Parser is required. 38318a1c200SViacheslav Ovsiienko * Only customized tunnels may ask for. 38418a1c200SViacheslav Ovsiienko */ 38518a1c200SViacheslav Ovsiienko if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)) 38618a1c200SViacheslav Ovsiienko return 0; 38718a1c200SViacheslav Ovsiienko /* 38818a1c200SViacheslav Ovsiienko * The index should have: 38918a1c200SViacheslav Ovsiienko * bit[0:1] = PKT_TX_L4_MASK 39018a1c200SViacheslav Ovsiienko * bit[4] = PKT_TX_IPV6 39118a1c200SViacheslav Ovsiienko * bit[8] = PKT_TX_OUTER_IPV6 39218a1c200SViacheslav Ovsiienko * bit[9] = PKT_TX_OUTER_UDP 39318a1c200SViacheslav Ovsiienko */ 39418a1c200SViacheslav Ovsiienko idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52; 39518a1c200SViacheslav Ovsiienko idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0; 39618a1c200SViacheslav Ovsiienko *swp_flags = mlx5_swp_types_table[idx]; 39718a1c200SViacheslav Ovsiienko /* 39818a1c200SViacheslav Ovsiienko * Set offsets for SW parser. Since ConnectX-5, SW parser just 39918a1c200SViacheslav Ovsiienko * complements HW parser. SW parser starts to engage only if HW parser 40018a1c200SViacheslav Ovsiienko * can't reach a header. For the older devices, HW parser will not kick 40118a1c200SViacheslav Ovsiienko * in if any of SWP offsets is set. Therefore, all of the L3 offsets 40218a1c200SViacheslav Ovsiienko * should be set regardless of HW offload. 40318a1c200SViacheslav Ovsiienko */ 40418a1c200SViacheslav Ovsiienko off = loc->mbuf->outer_l2_len; 40518a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT) 40618a1c200SViacheslav Ovsiienko off += sizeof(struct rte_vlan_hdr); 40718a1c200SViacheslav Ovsiienko set = (off >> 1) << 8; /* Outer L3 offset. */ 40818a1c200SViacheslav Ovsiienko off += loc->mbuf->outer_l3_len; 40918a1c200SViacheslav Ovsiienko if (tunnel == PKT_TX_TUNNEL_UDP) 41018a1c200SViacheslav Ovsiienko set |= off >> 1; /* Outer L4 offset. */ 41118a1c200SViacheslav Ovsiienko if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */ 41218a1c200SViacheslav Ovsiienko const uint64_t csum = ol & PKT_TX_L4_MASK; 41318a1c200SViacheslav Ovsiienko off += loc->mbuf->l2_len; 41418a1c200SViacheslav Ovsiienko set |= (off >> 1) << 24; /* Inner L3 offset. */ 41518a1c200SViacheslav Ovsiienko if (csum == PKT_TX_TCP_CKSUM || 41618a1c200SViacheslav Ovsiienko csum == PKT_TX_UDP_CKSUM || 41718a1c200SViacheslav Ovsiienko (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) { 41818a1c200SViacheslav Ovsiienko off += loc->mbuf->l3_len; 41918a1c200SViacheslav Ovsiienko set |= (off >> 1) << 16; /* Inner L4 offset. */ 42018a1c200SViacheslav Ovsiienko } 42118a1c200SViacheslav Ovsiienko } 42218a1c200SViacheslav Ovsiienko set = rte_cpu_to_le_32(set); 42318a1c200SViacheslav Ovsiienko return set; 42418a1c200SViacheslav Ovsiienko } 42518a1c200SViacheslav Ovsiienko 42618a1c200SViacheslav Ovsiienko /** 42718a1c200SViacheslav Ovsiienko * Convert the Checksum offloads to Verbs. 42818a1c200SViacheslav Ovsiienko * 42918a1c200SViacheslav Ovsiienko * @param buf 43018a1c200SViacheslav Ovsiienko * Pointer to the mbuf. 43118a1c200SViacheslav Ovsiienko * 43218a1c200SViacheslav Ovsiienko * @return 43318a1c200SViacheslav Ovsiienko * Converted checksum flags. 43418a1c200SViacheslav Ovsiienko */ 43518a1c200SViacheslav Ovsiienko static __rte_always_inline uint8_t 43618a1c200SViacheslav Ovsiienko txq_ol_cksum_to_cs(struct rte_mbuf *buf) 43718a1c200SViacheslav Ovsiienko { 43818a1c200SViacheslav Ovsiienko uint32_t idx; 43918a1c200SViacheslav Ovsiienko uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK); 44018a1c200SViacheslav Ovsiienko const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK | 44118a1c200SViacheslav Ovsiienko PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM; 44218a1c200SViacheslav Ovsiienko 44318a1c200SViacheslav Ovsiienko /* 44418a1c200SViacheslav Ovsiienko * The index should have: 44518a1c200SViacheslav Ovsiienko * bit[0] = PKT_TX_TCP_SEG 44618a1c200SViacheslav Ovsiienko * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM 44718a1c200SViacheslav Ovsiienko * bit[4] = PKT_TX_IP_CKSUM 44818a1c200SViacheslav Ovsiienko * bit[8] = PKT_TX_OUTER_IP_CKSUM 44918a1c200SViacheslav Ovsiienko * bit[9] = tunnel 45018a1c200SViacheslav Ovsiienko */ 45118a1c200SViacheslav Ovsiienko idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9); 45218a1c200SViacheslav Ovsiienko return mlx5_cksum_table[idx]; 45318a1c200SViacheslav Ovsiienko } 45418a1c200SViacheslav Ovsiienko 45518a1c200SViacheslav Ovsiienko /** 45626f04883STom Barbette * Internal function to compute the number of used descriptors in an RX queue 4578788fec1SOlivier Matz * 45826f04883STom Barbette * @param rxq 45926f04883STom Barbette * The Rx queue. 4608788fec1SOlivier Matz * 4618788fec1SOlivier Matz * @return 46226f04883STom Barbette * The number of used rx descriptor. 4638788fec1SOlivier Matz */ 46426f04883STom Barbette static uint32_t 46526f04883STom Barbette rx_queue_count(struct mlx5_rxq_data *rxq) 4668788fec1SOlivier Matz { 4678788fec1SOlivier Matz struct rxq_zip *zip = &rxq->zip; 4688788fec1SOlivier Matz volatile struct mlx5_cqe *cqe; 4698788fec1SOlivier Matz const unsigned int cqe_n = (1 << rxq->cqe_n); 4708788fec1SOlivier Matz const unsigned int cqe_cnt = cqe_n - 1; 4718788fec1SOlivier Matz unsigned int cq_ci; 4728788fec1SOlivier Matz unsigned int used; 4738788fec1SOlivier Matz 4748788fec1SOlivier Matz /* if we are processing a compressed cqe */ 4758788fec1SOlivier Matz if (zip->ai) { 4768788fec1SOlivier Matz used = zip->cqe_cnt - zip->ca; 4778788fec1SOlivier Matz cq_ci = zip->cq_ci; 4788788fec1SOlivier Matz } else { 4798788fec1SOlivier Matz used = 0; 4808788fec1SOlivier Matz cq_ci = rxq->cq_ci; 4818788fec1SOlivier Matz } 4828788fec1SOlivier Matz cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; 48388c07335SMatan Azrad while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) { 4848788fec1SOlivier Matz int8_t op_own; 4858788fec1SOlivier Matz unsigned int n; 4868788fec1SOlivier Matz 4878788fec1SOlivier Matz op_own = cqe->op_own; 4888788fec1SOlivier Matz if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) 4896b30a6a8SShachar Beiser n = rte_be_to_cpu_32(cqe->byte_cnt); 4908788fec1SOlivier Matz else 4918788fec1SOlivier Matz n = 1; 4928788fec1SOlivier Matz cq_ci += n; 4938788fec1SOlivier Matz used += n; 4948788fec1SOlivier Matz cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; 4958788fec1SOlivier Matz } 4968788fec1SOlivier Matz used = RTE_MIN(used, (1U << rxq->elts_n) - 1); 49726f04883STom Barbette return used; 49826f04883STom Barbette } 49926f04883STom Barbette 50026f04883STom Barbette /** 50126f04883STom Barbette * DPDK callback to check the status of a rx descriptor. 50226f04883STom Barbette * 50326f04883STom Barbette * @param rx_queue 50426f04883STom Barbette * The Rx queue. 50526f04883STom Barbette * @param[in] offset 50626f04883STom Barbette * The index of the descriptor in the ring. 50726f04883STom Barbette * 50826f04883STom Barbette * @return 50926f04883STom Barbette * The status of the tx descriptor. 51026f04883STom Barbette */ 51126f04883STom Barbette int 51226f04883STom Barbette mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) 51326f04883STom Barbette { 51426f04883STom Barbette struct mlx5_rxq_data *rxq = rx_queue; 51526f04883STom Barbette struct mlx5_rxq_ctrl *rxq_ctrl = 51626f04883STom Barbette container_of(rxq, struct mlx5_rxq_ctrl, rxq); 51726f04883STom Barbette struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv); 51826f04883STom Barbette 51926f04883STom Barbette if (dev->rx_pkt_burst != mlx5_rx_burst) { 52026f04883STom Barbette rte_errno = ENOTSUP; 52126f04883STom Barbette return -rte_errno; 52226f04883STom Barbette } 52326f04883STom Barbette if (offset >= (1 << rxq->elts_n)) { 52426f04883STom Barbette rte_errno = EINVAL; 52526f04883STom Barbette return -rte_errno; 52626f04883STom Barbette } 52726f04883STom Barbette if (offset < rx_queue_count(rxq)) 5288788fec1SOlivier Matz return RTE_ETH_RX_DESC_DONE; 5298788fec1SOlivier Matz return RTE_ETH_RX_DESC_AVAIL; 5308788fec1SOlivier Matz } 5318788fec1SOlivier Matz 5328788fec1SOlivier Matz /** 53326f04883STom Barbette * DPDK callback to get the number of used descriptors in a RX queue 53426f04883STom Barbette * 53526f04883STom Barbette * @param dev 53626f04883STom Barbette * Pointer to the device structure. 53726f04883STom Barbette * 53826f04883STom Barbette * @param rx_queue_id 53926f04883STom Barbette * The Rx queue. 54026f04883STom Barbette * 54126f04883STom Barbette * @return 54226f04883STom Barbette * The number of used rx descriptor. 54326f04883STom Barbette * -EINVAL if the queue is invalid 54426f04883STom Barbette */ 54526f04883STom Barbette uint32_t 54626f04883STom Barbette mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 54726f04883STom Barbette { 548dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 54926f04883STom Barbette struct mlx5_rxq_data *rxq; 55026f04883STom Barbette 55126f04883STom Barbette if (dev->rx_pkt_burst != mlx5_rx_burst) { 55226f04883STom Barbette rte_errno = ENOTSUP; 55326f04883STom Barbette return -rte_errno; 55426f04883STom Barbette } 55526f04883STom Barbette rxq = (*priv->rxqs)[rx_queue_id]; 55626f04883STom Barbette if (!rxq) { 55726f04883STom Barbette rte_errno = EINVAL; 55826f04883STom Barbette return -rte_errno; 55926f04883STom Barbette } 56026f04883STom Barbette return rx_queue_count(rxq); 56126f04883STom Barbette } 56226f04883STom Barbette 563066cfecdSMatan Azrad #define MLX5_SYSTEM_LOG_DIR "/var/log" 564066cfecdSMatan Azrad /** 565066cfecdSMatan Azrad * Dump debug information to log file. 566066cfecdSMatan Azrad * 567066cfecdSMatan Azrad * @param fname 568066cfecdSMatan Azrad * The file name. 569066cfecdSMatan Azrad * @param hex_title 570066cfecdSMatan Azrad * If not NULL this string is printed as a header to the output 571066cfecdSMatan Azrad * and the output will be in hexadecimal view. 572066cfecdSMatan Azrad * @param buf 573066cfecdSMatan Azrad * This is the buffer address to print out. 574066cfecdSMatan Azrad * @param len 575066cfecdSMatan Azrad * The number of bytes to dump out. 576066cfecdSMatan Azrad */ 577066cfecdSMatan Azrad void 578066cfecdSMatan Azrad mlx5_dump_debug_information(const char *fname, const char *hex_title, 579066cfecdSMatan Azrad const void *buf, unsigned int hex_len) 580066cfecdSMatan Azrad { 581066cfecdSMatan Azrad FILE *fd; 582066cfecdSMatan Azrad 583066cfecdSMatan Azrad MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname); 584066cfecdSMatan Azrad fd = fopen(path, "a+"); 585066cfecdSMatan Azrad if (!fd) { 586*06fa6988SDekel Peled DRV_LOG(WARNING, "cannot open %s for debug dump", path); 587066cfecdSMatan Azrad MKSTR(path2, "./%s", fname); 588066cfecdSMatan Azrad fd = fopen(path2, "a+"); 589066cfecdSMatan Azrad if (!fd) { 590*06fa6988SDekel Peled DRV_LOG(ERR, "cannot open %s for debug dump", path2); 591066cfecdSMatan Azrad return; 592066cfecdSMatan Azrad } 593*06fa6988SDekel Peled DRV_LOG(INFO, "New debug dump in file %s", path2); 594066cfecdSMatan Azrad } else { 595*06fa6988SDekel Peled DRV_LOG(INFO, "New debug dump in file %s", path); 596066cfecdSMatan Azrad } 597066cfecdSMatan Azrad if (hex_title) 598066cfecdSMatan Azrad rte_hexdump(fd, hex_title, buf, hex_len); 599066cfecdSMatan Azrad else 600066cfecdSMatan Azrad fprintf(fd, "%s", (const char *)buf); 601066cfecdSMatan Azrad fprintf(fd, "\n\n\n"); 602066cfecdSMatan Azrad fclose(fd); 603066cfecdSMatan Azrad } 604066cfecdSMatan Azrad 60526f04883STom Barbette /** 60635c090eaSMatan Azrad * Move QP from error state to running state and initialize indexes. 607957e45fbSMatan Azrad * 60835c090eaSMatan Azrad * @param txq_ctrl 60935c090eaSMatan Azrad * Pointer to TX queue control structure. 610957e45fbSMatan Azrad * 611957e45fbSMatan Azrad * @return 61235c090eaSMatan Azrad * 0 on success, else -1. 613957e45fbSMatan Azrad */ 614957e45fbSMatan Azrad static int 61535c090eaSMatan Azrad tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl) 616957e45fbSMatan Azrad { 61735c090eaSMatan Azrad struct mlx5_mp_arg_queue_state_modify sm = { 61835c090eaSMatan Azrad .is_wq = 0, 61935c090eaSMatan Azrad .queue_id = txq_ctrl->txq.idx, 620957e45fbSMatan Azrad }; 62135c090eaSMatan Azrad 62235c090eaSMatan Azrad if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm)) 62335c090eaSMatan Azrad return -1; 62435c090eaSMatan Azrad txq_ctrl->txq.wqe_ci = 0; 62535c090eaSMatan Azrad txq_ctrl->txq.wqe_pi = 0; 62635c090eaSMatan Azrad txq_ctrl->txq.elts_comp = 0; 627957e45fbSMatan Azrad return 0; 628957e45fbSMatan Azrad } 629957e45fbSMatan Azrad 630957e45fbSMatan Azrad /* Return 1 if the error CQE is signed otherwise, sign it and return 0. */ 631957e45fbSMatan Azrad static int 632957e45fbSMatan Azrad check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) 633957e45fbSMatan Azrad { 634957e45fbSMatan Azrad static const uint8_t magic[] = "seen"; 635957e45fbSMatan Azrad int ret = 1; 636957e45fbSMatan Azrad unsigned int i; 637957e45fbSMatan Azrad 638957e45fbSMatan Azrad for (i = 0; i < sizeof(magic); ++i) 639957e45fbSMatan Azrad if (!ret || err_cqe->rsvd1[i] != magic[i]) { 640957e45fbSMatan Azrad ret = 0; 641957e45fbSMatan Azrad err_cqe->rsvd1[i] = magic[i]; 642957e45fbSMatan Azrad } 643957e45fbSMatan Azrad return ret; 644957e45fbSMatan Azrad } 645957e45fbSMatan Azrad 646957e45fbSMatan Azrad /** 647957e45fbSMatan Azrad * Handle error CQE. 648957e45fbSMatan Azrad * 649957e45fbSMatan Azrad * @param txq 650957e45fbSMatan Azrad * Pointer to TX queue structure. 651957e45fbSMatan Azrad * @param error_cqe 652957e45fbSMatan Azrad * Pointer to the error CQE. 653957e45fbSMatan Azrad * 654957e45fbSMatan Azrad * @return 655da1df1ccSViacheslav Ovsiienko * Negative value if queue recovery failed, 656da1df1ccSViacheslav Ovsiienko * the last Tx buffer element to free otherwise. 657957e45fbSMatan Azrad */ 658da1df1ccSViacheslav Ovsiienko int 65918a1c200SViacheslav Ovsiienko mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq, 660957e45fbSMatan Azrad volatile struct mlx5_err_cqe *err_cqe) 661957e45fbSMatan Azrad { 662957e45fbSMatan Azrad if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) { 663957e45fbSMatan Azrad const uint16_t wqe_m = ((1 << txq->wqe_n) - 1); 664957e45fbSMatan Azrad struct mlx5_txq_ctrl *txq_ctrl = 665957e45fbSMatan Azrad container_of(txq, struct mlx5_txq_ctrl, txq); 666957e45fbSMatan Azrad uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter); 667957e45fbSMatan Azrad int seen = check_err_cqe_seen(err_cqe); 668957e45fbSMatan Azrad 669957e45fbSMatan Azrad if (!seen && txq_ctrl->dump_file_n < 670957e45fbSMatan Azrad txq_ctrl->priv->config.max_dump_files_num) { 671957e45fbSMatan Azrad MKSTR(err_str, "Unexpected CQE error syndrome " 672957e45fbSMatan Azrad "0x%02x CQN = %u SQN = %u wqe_counter = %u " 673957e45fbSMatan Azrad "wq_ci = %u cq_ci = %u", err_cqe->syndrome, 67438b4b397SViacheslav Ovsiienko txq->cqe_s, txq->qp_num_8s >> 8, 675957e45fbSMatan Azrad rte_be_to_cpu_16(err_cqe->wqe_counter), 676957e45fbSMatan Azrad txq->wqe_ci, txq->cq_ci); 677957e45fbSMatan Azrad MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u", 678957e45fbSMatan Azrad PORT_ID(txq_ctrl->priv), txq->idx, 679957e45fbSMatan Azrad txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc()); 680957e45fbSMatan Azrad mlx5_dump_debug_information(name, NULL, err_str, 0); 681957e45fbSMatan Azrad mlx5_dump_debug_information(name, "MLX5 Error CQ:", 682957e45fbSMatan Azrad (const void *)((uintptr_t) 68338b4b397SViacheslav Ovsiienko txq->cqes), 684957e45fbSMatan Azrad sizeof(*err_cqe) * 685957e45fbSMatan Azrad (1 << txq->cqe_n)); 686957e45fbSMatan Azrad mlx5_dump_debug_information(name, "MLX5 Error SQ:", 687957e45fbSMatan Azrad (const void *)((uintptr_t) 688a6bd4911SViacheslav Ovsiienko txq->wqes), 689957e45fbSMatan Azrad MLX5_WQE_SIZE * 690957e45fbSMatan Azrad (1 << txq->wqe_n)); 691957e45fbSMatan Azrad txq_ctrl->dump_file_n++; 692957e45fbSMatan Azrad } 693957e45fbSMatan Azrad if (!seen) 694957e45fbSMatan Azrad /* 695957e45fbSMatan Azrad * Count errors in WQEs units. 696957e45fbSMatan Azrad * Later it can be improved to count error packets, 697957e45fbSMatan Azrad * for example, by SQ parsing to find how much packets 698957e45fbSMatan Azrad * should be counted for each WQE. 699957e45fbSMatan Azrad */ 700957e45fbSMatan Azrad txq->stats.oerrors += ((txq->wqe_ci & wqe_m) - 701957e45fbSMatan Azrad new_wqe_pi) & wqe_m; 70235c090eaSMatan Azrad if (tx_recover_qp(txq_ctrl) == 0) { 703957e45fbSMatan Azrad txq->cq_ci++; 704957e45fbSMatan Azrad /* Release all the remaining buffers. */ 705957e45fbSMatan Azrad return txq->elts_head; 706957e45fbSMatan Azrad } 707957e45fbSMatan Azrad /* Recovering failed - try again later on the same WQE. */ 708da1df1ccSViacheslav Ovsiienko return -1; 709957e45fbSMatan Azrad } else { 710957e45fbSMatan Azrad txq->cq_ci++; 711957e45fbSMatan Azrad } 712957e45fbSMatan Azrad /* Do not release buffers. */ 713957e45fbSMatan Azrad return txq->elts_tail; 714957e45fbSMatan Azrad } 715957e45fbSMatan Azrad 716957e45fbSMatan Azrad /** 71767fa62bcSAdrien Mazarguil * Translate RX completion flags to packet type. 71867fa62bcSAdrien Mazarguil * 7193cc08bc6SXueming Li * @param[in] rxq 7203cc08bc6SXueming Li * Pointer to RX queue structure. 7216218063bSNélio Laranjeiro * @param[in] cqe 7226218063bSNélio Laranjeiro * Pointer to CQE. 72367fa62bcSAdrien Mazarguil * 72478a38edfSJianfeng Tan * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 72578a38edfSJianfeng Tan * 72667fa62bcSAdrien Mazarguil * @return 72767fa62bcSAdrien Mazarguil * Packet type for struct rte_mbuf. 72867fa62bcSAdrien Mazarguil */ 72967fa62bcSAdrien Mazarguil static inline uint32_t 7303cc08bc6SXueming Li rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) 73167fa62bcSAdrien Mazarguil { 732ea16068cSYongseok Koh uint8_t idx; 733ea16068cSYongseok Koh uint8_t pinfo = cqe->pkt_info; 734ea16068cSYongseok Koh uint16_t ptype = cqe->hdr_type_etc; 73567fa62bcSAdrien Mazarguil 736ea16068cSYongseok Koh /* 737ea16068cSYongseok Koh * The index to the array should have: 738ea16068cSYongseok Koh * bit[1:0] = l3_hdr_type 739ea16068cSYongseok Koh * bit[4:2] = l4_hdr_type 740ea16068cSYongseok Koh * bit[5] = ip_frag 741ea16068cSYongseok Koh * bit[6] = tunneled 742ea16068cSYongseok Koh * bit[7] = outer_l3_type 743ea16068cSYongseok Koh */ 744ea16068cSYongseok Koh idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10); 7453cc08bc6SXueming Li return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6)); 74667fa62bcSAdrien Mazarguil } 74767fa62bcSAdrien Mazarguil 74867fa62bcSAdrien Mazarguil /** 7496bb506ccSMatan Azrad * Initialize Rx WQ and indexes. 7506bb506ccSMatan Azrad * 7516bb506ccSMatan Azrad * @param[in] rxq 7526bb506ccSMatan Azrad * Pointer to RX queue structure. 7536bb506ccSMatan Azrad */ 7546bb506ccSMatan Azrad void 7556bb506ccSMatan Azrad mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) 7566bb506ccSMatan Azrad { 7576bb506ccSMatan Azrad const unsigned int wqe_n = 1 << rxq->elts_n; 7586bb506ccSMatan Azrad unsigned int i; 7596bb506ccSMatan Azrad 7606bb506ccSMatan Azrad for (i = 0; (i != wqe_n); ++i) { 7616bb506ccSMatan Azrad volatile struct mlx5_wqe_data_seg *scat; 7626bb506ccSMatan Azrad uintptr_t addr; 7636bb506ccSMatan Azrad uint32_t byte_count; 7646bb506ccSMatan Azrad 7656bb506ccSMatan Azrad if (mlx5_rxq_mprq_enabled(rxq)) { 7666bb506ccSMatan Azrad struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i]; 7676bb506ccSMatan Azrad 7686bb506ccSMatan Azrad scat = &((volatile struct mlx5_wqe_mprq *) 7696bb506ccSMatan Azrad rxq->wqes)[i].dseg; 7703a22f387SMatan Azrad addr = (uintptr_t)mlx5_mprq_buf_addr(buf, 7713a22f387SMatan Azrad 1 << rxq->strd_num_n); 7726bb506ccSMatan Azrad byte_count = (1 << rxq->strd_sz_n) * 7736bb506ccSMatan Azrad (1 << rxq->strd_num_n); 7746bb506ccSMatan Azrad } else { 7756bb506ccSMatan Azrad struct rte_mbuf *buf = (*rxq->elts)[i]; 7766bb506ccSMatan Azrad 7776bb506ccSMatan Azrad scat = &((volatile struct mlx5_wqe_data_seg *) 7786bb506ccSMatan Azrad rxq->wqes)[i]; 7796bb506ccSMatan Azrad addr = rte_pktmbuf_mtod(buf, uintptr_t); 7806bb506ccSMatan Azrad byte_count = DATA_LEN(buf); 7816bb506ccSMatan Azrad } 7826bb506ccSMatan Azrad /* scat->addr must be able to store a pointer. */ 7836bb506ccSMatan Azrad assert(sizeof(scat->addr) >= sizeof(uintptr_t)); 7846bb506ccSMatan Azrad *scat = (struct mlx5_wqe_data_seg){ 7856bb506ccSMatan Azrad .addr = rte_cpu_to_be_64(addr), 7866bb506ccSMatan Azrad .byte_count = rte_cpu_to_be_32(byte_count), 7876bb506ccSMatan Azrad .lkey = mlx5_rx_addr2mr(rxq, addr), 7886bb506ccSMatan Azrad }; 7896bb506ccSMatan Azrad } 7906bb506ccSMatan Azrad rxq->consumed_strd = 0; 7916bb506ccSMatan Azrad rxq->decompressed = 0; 7926bb506ccSMatan Azrad rxq->rq_pi = 0; 7936bb506ccSMatan Azrad rxq->zip = (struct rxq_zip){ 7946bb506ccSMatan Azrad .ai = 0, 7956bb506ccSMatan Azrad }; 7966bb506ccSMatan Azrad /* Update doorbell counter. */ 7976bb506ccSMatan Azrad rxq->rq_ci = wqe_n >> rxq->sges_n; 7986bb506ccSMatan Azrad rte_cio_wmb(); 7996bb506ccSMatan Azrad *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 8006bb506ccSMatan Azrad } 8016bb506ccSMatan Azrad 8026bb506ccSMatan Azrad /** 80384537d3cSDekel Peled * Modify a Verbs/DevX queue state. 8042d77cb61SMatan Azrad * This must be called from the primary process. 8052d77cb61SMatan Azrad * 8062d77cb61SMatan Azrad * @param dev 8072d77cb61SMatan Azrad * Pointer to Ethernet device. 8082d77cb61SMatan Azrad * @param sm 8092d77cb61SMatan Azrad * State modify request parameters. 8102d77cb61SMatan Azrad * 8112d77cb61SMatan Azrad * @return 8122d77cb61SMatan Azrad * 0 in case of success else non-zero value and rte_errno is set. 8132d77cb61SMatan Azrad */ 8142d77cb61SMatan Azrad int 8152d77cb61SMatan Azrad mlx5_queue_state_modify_primary(struct rte_eth_dev *dev, 8162d77cb61SMatan Azrad const struct mlx5_mp_arg_queue_state_modify *sm) 8172d77cb61SMatan Azrad { 8182d77cb61SMatan Azrad int ret; 8192d77cb61SMatan Azrad struct mlx5_priv *priv = dev->data->dev_private; 8202d77cb61SMatan Azrad 8212d77cb61SMatan Azrad if (sm->is_wq) { 8222d77cb61SMatan Azrad struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id]; 8232d77cb61SMatan Azrad struct mlx5_rxq_ctrl *rxq_ctrl = 8242d77cb61SMatan Azrad container_of(rxq, struct mlx5_rxq_ctrl, rxq); 8252d77cb61SMatan Azrad 82684537d3cSDekel Peled if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) { 82784537d3cSDekel Peled struct ibv_wq_attr mod = { 82884537d3cSDekel Peled .attr_mask = IBV_WQ_ATTR_STATE, 82984537d3cSDekel Peled .wq_state = sm->state, 83084537d3cSDekel Peled }; 83184537d3cSDekel Peled 83293403560SDekel Peled ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod); 83384537d3cSDekel Peled } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */ 83484537d3cSDekel Peled struct mlx5_devx_modify_rq_attr rq_attr; 83584537d3cSDekel Peled 83684537d3cSDekel Peled memset(&rq_attr, 0, sizeof(rq_attr)); 83784537d3cSDekel Peled if (sm->state == IBV_WQS_RESET) { 83884537d3cSDekel Peled rq_attr.rq_state = MLX5_RQC_STATE_ERR; 83984537d3cSDekel Peled rq_attr.state = MLX5_RQC_STATE_RST; 84084537d3cSDekel Peled } else if (sm->state == IBV_WQS_RDY) { 84184537d3cSDekel Peled rq_attr.rq_state = MLX5_RQC_STATE_RST; 84284537d3cSDekel Peled rq_attr.state = MLX5_RQC_STATE_RDY; 84384537d3cSDekel Peled } else if (sm->state == IBV_WQS_ERR) { 84484537d3cSDekel Peled rq_attr.rq_state = MLX5_RQC_STATE_RDY; 84584537d3cSDekel Peled rq_attr.state = MLX5_RQC_STATE_ERR; 84684537d3cSDekel Peled } 84784537d3cSDekel Peled ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, 84884537d3cSDekel Peled &rq_attr); 84984537d3cSDekel Peled } 8502d77cb61SMatan Azrad if (ret) { 851*06fa6988SDekel Peled DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s", 8522d77cb61SMatan Azrad sm->state, strerror(errno)); 8532d77cb61SMatan Azrad rte_errno = errno; 8542d77cb61SMatan Azrad return ret; 8552d77cb61SMatan Azrad } 85635c090eaSMatan Azrad } else { 85735c090eaSMatan Azrad struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id]; 85835c090eaSMatan Azrad struct mlx5_txq_ctrl *txq_ctrl = 85935c090eaSMatan Azrad container_of(txq, struct mlx5_txq_ctrl, txq); 86035c090eaSMatan Azrad struct ibv_qp_attr mod = { 86135c090eaSMatan Azrad .qp_state = IBV_QPS_RESET, 86235c090eaSMatan Azrad .port_num = (uint8_t)priv->ibv_port, 86335c090eaSMatan Azrad }; 864894c4a8eSOri Kam struct ibv_qp *qp = txq_ctrl->obj->qp; 86535c090eaSMatan Azrad 86635c090eaSMatan Azrad ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE); 86735c090eaSMatan Azrad if (ret) { 86835c090eaSMatan Azrad DRV_LOG(ERR, "Cannot change the Tx QP state to RESET " 869*06fa6988SDekel Peled "%s", strerror(errno)); 87035c090eaSMatan Azrad rte_errno = errno; 87135c090eaSMatan Azrad return ret; 87235c090eaSMatan Azrad } 87335c090eaSMatan Azrad mod.qp_state = IBV_QPS_INIT; 87435c090eaSMatan Azrad ret = mlx5_glue->modify_qp(qp, &mod, 87535c090eaSMatan Azrad (IBV_QP_STATE | IBV_QP_PORT)); 87635c090eaSMatan Azrad if (ret) { 877*06fa6988SDekel Peled DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s", 87835c090eaSMatan Azrad strerror(errno)); 87935c090eaSMatan Azrad rte_errno = errno; 88035c090eaSMatan Azrad return ret; 88135c090eaSMatan Azrad } 88235c090eaSMatan Azrad mod.qp_state = IBV_QPS_RTR; 88335c090eaSMatan Azrad ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE); 88435c090eaSMatan Azrad if (ret) { 885*06fa6988SDekel Peled DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s", 88635c090eaSMatan Azrad strerror(errno)); 88735c090eaSMatan Azrad rte_errno = errno; 88835c090eaSMatan Azrad return ret; 88935c090eaSMatan Azrad } 89035c090eaSMatan Azrad mod.qp_state = IBV_QPS_RTS; 89135c090eaSMatan Azrad ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE); 89235c090eaSMatan Azrad if (ret) { 893*06fa6988SDekel Peled DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s", 89435c090eaSMatan Azrad strerror(errno)); 89535c090eaSMatan Azrad rte_errno = errno; 89635c090eaSMatan Azrad return ret; 89735c090eaSMatan Azrad } 8982d77cb61SMatan Azrad } 8992d77cb61SMatan Azrad return 0; 9002d77cb61SMatan Azrad } 9012d77cb61SMatan Azrad 9022d77cb61SMatan Azrad /** 9032d77cb61SMatan Azrad * Modify a Verbs queue state. 9042d77cb61SMatan Azrad * 9052d77cb61SMatan Azrad * @param dev 9062d77cb61SMatan Azrad * Pointer to Ethernet device. 9072d77cb61SMatan Azrad * @param sm 9082d77cb61SMatan Azrad * State modify request parameters. 9092d77cb61SMatan Azrad * 9102d77cb61SMatan Azrad * @return 9112d77cb61SMatan Azrad * 0 in case of success else non-zero value. 9122d77cb61SMatan Azrad */ 9132d77cb61SMatan Azrad static int 9142d77cb61SMatan Azrad mlx5_queue_state_modify(struct rte_eth_dev *dev, 9152d77cb61SMatan Azrad struct mlx5_mp_arg_queue_state_modify *sm) 9162d77cb61SMatan Azrad { 9172d77cb61SMatan Azrad int ret = 0; 9182d77cb61SMatan Azrad 9192d77cb61SMatan Azrad switch (rte_eal_process_type()) { 9202d77cb61SMatan Azrad case RTE_PROC_PRIMARY: 9212d77cb61SMatan Azrad ret = mlx5_queue_state_modify_primary(dev, sm); 9222d77cb61SMatan Azrad break; 9232d77cb61SMatan Azrad case RTE_PROC_SECONDARY: 9242d77cb61SMatan Azrad ret = mlx5_mp_req_queue_state_modify(dev, sm); 9252d77cb61SMatan Azrad break; 9262d77cb61SMatan Azrad default: 9272d77cb61SMatan Azrad break; 9282d77cb61SMatan Azrad } 9292d77cb61SMatan Azrad return ret; 9302d77cb61SMatan Azrad } 9312d77cb61SMatan Azrad 9322d77cb61SMatan Azrad /** 93388c07335SMatan Azrad * Handle a Rx error. 93488c07335SMatan Azrad * The function inserts the RQ state to reset when the first error CQE is 93588c07335SMatan Azrad * shown, then drains the CQ by the caller function loop. When the CQ is empty, 93688c07335SMatan Azrad * it moves the RQ state to ready and initializes the RQ. 93788c07335SMatan Azrad * Next CQE identification and error counting are in the caller responsibility. 93888c07335SMatan Azrad * 93988c07335SMatan Azrad * @param[in] rxq 94088c07335SMatan Azrad * Pointer to RX queue structure. 941a06ce954SDekel Peled * @param[in] vec 942a06ce954SDekel Peled * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ. 943a06ce954SDekel Peled * 0 when called from non-vectorized Rx burst. 94488c07335SMatan Azrad * 94588c07335SMatan Azrad * @return 94688c07335SMatan Azrad * -1 in case of recovery error, otherwise the CQE status. 94788c07335SMatan Azrad */ 94888c07335SMatan Azrad int 949a06ce954SDekel Peled mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) 95088c07335SMatan Azrad { 95188c07335SMatan Azrad const uint16_t cqe_n = 1 << rxq->cqe_n; 95288c07335SMatan Azrad const uint16_t cqe_mask = cqe_n - 1; 95388c07335SMatan Azrad const unsigned int wqe_n = 1 << rxq->elts_n; 95488c07335SMatan Azrad struct mlx5_rxq_ctrl *rxq_ctrl = 95588c07335SMatan Azrad container_of(rxq, struct mlx5_rxq_ctrl, rxq); 95688c07335SMatan Azrad union { 95788c07335SMatan Azrad volatile struct mlx5_cqe *cqe; 95888c07335SMatan Azrad volatile struct mlx5_err_cqe *err_cqe; 95988c07335SMatan Azrad } u = { 96088c07335SMatan Azrad .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask], 96188c07335SMatan Azrad }; 9622d77cb61SMatan Azrad struct mlx5_mp_arg_queue_state_modify sm; 96388c07335SMatan Azrad int ret; 96488c07335SMatan Azrad 96588c07335SMatan Azrad switch (rxq->err_state) { 96688c07335SMatan Azrad case MLX5_RXQ_ERR_STATE_NO_ERROR: 96788c07335SMatan Azrad rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET; 96888c07335SMatan Azrad /* Fall-through */ 96988c07335SMatan Azrad case MLX5_RXQ_ERR_STATE_NEED_RESET: 9702d77cb61SMatan Azrad sm.is_wq = 1; 9712d77cb61SMatan Azrad sm.queue_id = rxq->idx; 9722d77cb61SMatan Azrad sm.state = IBV_WQS_RESET; 9732d77cb61SMatan Azrad if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm)) 97488c07335SMatan Azrad return -1; 97588c07335SMatan Azrad if (rxq_ctrl->dump_file_n < 97688c07335SMatan Azrad rxq_ctrl->priv->config.max_dump_files_num) { 97788c07335SMatan Azrad MKSTR(err_str, "Unexpected CQE error syndrome " 97888c07335SMatan Azrad "0x%02x CQN = %u RQN = %u wqe_counter = %u" 97988c07335SMatan Azrad " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome, 9802d77cb61SMatan Azrad rxq->cqn, rxq_ctrl->wqn, 98188c07335SMatan Azrad rte_be_to_cpu_16(u.err_cqe->wqe_counter), 98288c07335SMatan Azrad rxq->rq_ci << rxq->sges_n, rxq->cq_ci); 98388c07335SMatan Azrad MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u", 98488c07335SMatan Azrad rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc()); 98588c07335SMatan Azrad mlx5_dump_debug_information(name, NULL, err_str, 0); 98688c07335SMatan Azrad mlx5_dump_debug_information(name, "MLX5 Error CQ:", 98788c07335SMatan Azrad (const void *)((uintptr_t) 98888c07335SMatan Azrad rxq->cqes), 98988c07335SMatan Azrad sizeof(*u.cqe) * cqe_n); 99088c07335SMatan Azrad mlx5_dump_debug_information(name, "MLX5 Error RQ:", 99188c07335SMatan Azrad (const void *)((uintptr_t) 99288c07335SMatan Azrad rxq->wqes), 99388c07335SMatan Azrad 16 * wqe_n); 99488c07335SMatan Azrad rxq_ctrl->dump_file_n++; 99588c07335SMatan Azrad } 99688c07335SMatan Azrad rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY; 99788c07335SMatan Azrad /* Fall-through */ 99888c07335SMatan Azrad case MLX5_RXQ_ERR_STATE_NEED_READY: 99988c07335SMatan Azrad ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci); 100088c07335SMatan Azrad if (ret == MLX5_CQE_STATUS_HW_OWN) { 100188c07335SMatan Azrad rte_cio_wmb(); 100288c07335SMatan Azrad *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 100388c07335SMatan Azrad rte_cio_wmb(); 100488c07335SMatan Azrad /* 100588c07335SMatan Azrad * The RQ consumer index must be zeroed while moving 100688c07335SMatan Azrad * from RESET state to RDY state. 100788c07335SMatan Azrad */ 100888c07335SMatan Azrad *rxq->rq_db = rte_cpu_to_be_32(0); 100988c07335SMatan Azrad rte_cio_wmb(); 10102d77cb61SMatan Azrad sm.is_wq = 1; 10112d77cb61SMatan Azrad sm.queue_id = rxq->idx; 10122d77cb61SMatan Azrad sm.state = IBV_WQS_RDY; 10132d77cb61SMatan Azrad if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), 10142d77cb61SMatan Azrad &sm)) 101588c07335SMatan Azrad return -1; 1016a06ce954SDekel Peled if (vec) { 101788c07335SMatan Azrad const uint16_t q_mask = wqe_n - 1; 101888c07335SMatan Azrad uint16_t elt_idx; 101988c07335SMatan Azrad struct rte_mbuf **elt; 102088c07335SMatan Azrad int i; 102188c07335SMatan Azrad unsigned int n = wqe_n - (rxq->rq_ci - 102288c07335SMatan Azrad rxq->rq_pi); 102388c07335SMatan Azrad 102488c07335SMatan Azrad for (i = 0; i < (int)n; ++i) { 102588c07335SMatan Azrad elt_idx = (rxq->rq_ci + i) & q_mask; 102688c07335SMatan Azrad elt = &(*rxq->elts)[elt_idx]; 102788c07335SMatan Azrad *elt = rte_mbuf_raw_alloc(rxq->mp); 102888c07335SMatan Azrad if (!*elt) { 102988c07335SMatan Azrad for (i--; i >= 0; --i) { 103088c07335SMatan Azrad elt_idx = (rxq->rq_ci + 103188c07335SMatan Azrad i) & q_mask; 103288c07335SMatan Azrad elt = &(*rxq->elts) 103388c07335SMatan Azrad [elt_idx]; 103488c07335SMatan Azrad rte_pktmbuf_free_seg 103588c07335SMatan Azrad (*elt); 103688c07335SMatan Azrad } 103788c07335SMatan Azrad return -1; 103888c07335SMatan Azrad } 103988c07335SMatan Azrad } 1040a06ce954SDekel Peled for (i = 0; i < (int)wqe_n; ++i) { 1041a06ce954SDekel Peled elt = &(*rxq->elts)[i]; 1042a06ce954SDekel Peled DATA_LEN(*elt) = 1043a06ce954SDekel Peled (uint16_t)((*elt)->buf_len - 1044a06ce954SDekel Peled rte_pktmbuf_headroom(*elt)); 1045a06ce954SDekel Peled } 1046a06ce954SDekel Peled /* Padding with a fake mbuf for vec Rx. */ 1047a06ce954SDekel Peled for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) 1048a06ce954SDekel Peled (*rxq->elts)[wqe_n + i] = 1049a06ce954SDekel Peled &rxq->fake_mbuf; 105088c07335SMatan Azrad } 105188c07335SMatan Azrad mlx5_rxq_initialize(rxq); 105288c07335SMatan Azrad rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; 105388c07335SMatan Azrad } 105488c07335SMatan Azrad return ret; 105588c07335SMatan Azrad default: 105688c07335SMatan Azrad return -1; 105788c07335SMatan Azrad } 105888c07335SMatan Azrad } 105988c07335SMatan Azrad 106088c07335SMatan Azrad /** 106199c12dccSNélio Laranjeiro * Get size of the next packet for a given CQE. For compressed CQEs, the 106299c12dccSNélio Laranjeiro * consumer index is updated only once all packets of the current one have 106399c12dccSNélio Laranjeiro * been processed. 106499c12dccSNélio Laranjeiro * 106599c12dccSNélio Laranjeiro * @param rxq 106699c12dccSNélio Laranjeiro * Pointer to RX queue. 106799c12dccSNélio Laranjeiro * @param cqe 106899c12dccSNélio Laranjeiro * CQE to process. 10692e633f1fSYongseok Koh * @param[out] mcqe 10702e633f1fSYongseok Koh * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not 10712e633f1fSYongseok Koh * written. 107299c12dccSNélio Laranjeiro * 107399c12dccSNélio Laranjeiro * @return 107488c07335SMatan Azrad * 0 in case of empty CQE, otherwise the packet size in bytes. 107599c12dccSNélio Laranjeiro */ 107699c12dccSNélio Laranjeiro static inline int 107778142aacSNélio Laranjeiro mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, 10782e633f1fSYongseok Koh uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) 107999c12dccSNélio Laranjeiro { 108099c12dccSNélio Laranjeiro struct rxq_zip *zip = &rxq->zip; 108199c12dccSNélio Laranjeiro uint16_t cqe_n = cqe_cnt + 1; 108288c07335SMatan Azrad int len; 1083d2e842d0SYongseok Koh uint16_t idx, end; 108499c12dccSNélio Laranjeiro 108588c07335SMatan Azrad do { 108688c07335SMatan Azrad len = 0; 108799c12dccSNélio Laranjeiro /* Process compressed data in the CQE and mini arrays. */ 108899c12dccSNélio Laranjeiro if (zip->ai) { 108999c12dccSNélio Laranjeiro volatile struct mlx5_mini_cqe8 (*mc)[8] = 109099c12dccSNélio Laranjeiro (volatile struct mlx5_mini_cqe8 (*)[8]) 109188c07335SMatan Azrad (uintptr_t)(&(*rxq->cqes)[zip->ca & 109288c07335SMatan Azrad cqe_cnt].pkt_info); 109399c12dccSNélio Laranjeiro 10946b30a6a8SShachar Beiser len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); 10952e633f1fSYongseok Koh *mcqe = &(*mc)[zip->ai & 7]; 109699c12dccSNélio Laranjeiro if ((++zip->ai & 7) == 0) { 1097d2e842d0SYongseok Koh /* Invalidate consumed CQEs */ 1098d2e842d0SYongseok Koh idx = zip->ca; 1099d2e842d0SYongseok Koh end = zip->na; 1100d2e842d0SYongseok Koh while (idx != end) { 1101d2e842d0SYongseok Koh (*rxq->cqes)[idx & cqe_cnt].op_own = 1102d2e842d0SYongseok Koh MLX5_CQE_INVALIDATE; 1103d2e842d0SYongseok Koh ++idx; 1104d2e842d0SYongseok Koh } 110599c12dccSNélio Laranjeiro /* 110688c07335SMatan Azrad * Increment consumer index to skip the number 110788c07335SMatan Azrad * of CQEs consumed. Hardware leaves holes in 110888c07335SMatan Azrad * the CQ ring for software use. 110999c12dccSNélio Laranjeiro */ 111099c12dccSNélio Laranjeiro zip->ca = zip->na; 111199c12dccSNélio Laranjeiro zip->na += 8; 111299c12dccSNélio Laranjeiro } 111399c12dccSNélio Laranjeiro if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { 1114d2e842d0SYongseok Koh /* Invalidate the rest */ 1115d2e842d0SYongseok Koh idx = zip->ca; 1116d2e842d0SYongseok Koh end = zip->cq_ci; 111799c12dccSNélio Laranjeiro 111899c12dccSNélio Laranjeiro while (idx != end) { 111997267b8eSNelio Laranjeiro (*rxq->cqes)[idx & cqe_cnt].op_own = 112099c12dccSNélio Laranjeiro MLX5_CQE_INVALIDATE; 112199c12dccSNélio Laranjeiro ++idx; 112299c12dccSNélio Laranjeiro } 112399c12dccSNélio Laranjeiro rxq->cq_ci = zip->cq_ci; 112499c12dccSNélio Laranjeiro zip->ai = 0; 112599c12dccSNélio Laranjeiro } 112688c07335SMatan Azrad /* 112788c07335SMatan Azrad * No compressed data, get next CQE and verify if it is 112888c07335SMatan Azrad * compressed. 112988c07335SMatan Azrad */ 113099c12dccSNélio Laranjeiro } else { 113199c12dccSNélio Laranjeiro int ret; 113299c12dccSNélio Laranjeiro int8_t op_own; 113399c12dccSNélio Laranjeiro 113497267b8eSNelio Laranjeiro ret = check_cqe(cqe, cqe_n, rxq->cq_ci); 113588c07335SMatan Azrad if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { 113688c07335SMatan Azrad if (unlikely(ret == MLX5_CQE_STATUS_ERR || 113788c07335SMatan Azrad rxq->err_state)) { 113888c07335SMatan Azrad ret = mlx5_rx_err_handle(rxq, 0); 113988c07335SMatan Azrad if (ret == MLX5_CQE_STATUS_HW_OWN || 114088c07335SMatan Azrad ret == -1) 114199c12dccSNélio Laranjeiro return 0; 114288c07335SMatan Azrad } else { 114388c07335SMatan Azrad return 0; 114488c07335SMatan Azrad } 114588c07335SMatan Azrad } 114699c12dccSNélio Laranjeiro ++rxq->cq_ci; 114799c12dccSNélio Laranjeiro op_own = cqe->op_own; 114899c12dccSNélio Laranjeiro if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { 114999c12dccSNélio Laranjeiro volatile struct mlx5_mini_cqe8 (*mc)[8] = 115099c12dccSNélio Laranjeiro (volatile struct mlx5_mini_cqe8 (*)[8]) 115188c07335SMatan Azrad (uintptr_t)(&(*rxq->cqes) 115288c07335SMatan Azrad [rxq->cq_ci & 11534aff4bcbSYongseok Koh cqe_cnt].pkt_info); 115499c12dccSNélio Laranjeiro 115599c12dccSNélio Laranjeiro /* Fix endianness. */ 11566b30a6a8SShachar Beiser zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); 115799c12dccSNélio Laranjeiro /* 115888c07335SMatan Azrad * Current mini array position is the one 115988c07335SMatan Azrad * returned by check_cqe64(). 116099c12dccSNélio Laranjeiro * 116188c07335SMatan Azrad * If completion comprises several mini arrays, 116288c07335SMatan Azrad * as a special case the second one is located 116388c07335SMatan Azrad * 7 CQEs after the initial CQE instead of 8 116488c07335SMatan Azrad * for subsequent ones. 116599c12dccSNélio Laranjeiro */ 1166d2e842d0SYongseok Koh zip->ca = rxq->cq_ci; 116799c12dccSNélio Laranjeiro zip->na = zip->ca + 7; 116899c12dccSNélio Laranjeiro /* Compute the next non compressed CQE. */ 116999c12dccSNélio Laranjeiro --rxq->cq_ci; 117099c12dccSNélio Laranjeiro zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; 117199c12dccSNélio Laranjeiro /* Get packet size to return. */ 11726b30a6a8SShachar Beiser len = rte_be_to_cpu_32((*mc)[0].byte_cnt); 11732e633f1fSYongseok Koh *mcqe = &(*mc)[0]; 117499c12dccSNélio Laranjeiro zip->ai = 1; 117588c07335SMatan Azrad /* Prefetch all to be invalidated */ 1176d2e842d0SYongseok Koh idx = zip->ca; 1177d2e842d0SYongseok Koh end = zip->cq_ci; 1178d2e842d0SYongseok Koh while (idx != end) { 117988c07335SMatan Azrad rte_prefetch0(&(*rxq->cqes)[(idx) & 118088c07335SMatan Azrad cqe_cnt]); 1181d2e842d0SYongseok Koh ++idx; 1182d2e842d0SYongseok Koh } 118399c12dccSNélio Laranjeiro } else { 11846b30a6a8SShachar Beiser len = rte_be_to_cpu_32(cqe->byte_cnt); 118599c12dccSNélio Laranjeiro } 118699c12dccSNélio Laranjeiro } 118788c07335SMatan Azrad if (unlikely(rxq->err_state)) { 118888c07335SMatan Azrad cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 118988c07335SMatan Azrad ++rxq->stats.idropped; 119088c07335SMatan Azrad } else { 119199c12dccSNélio Laranjeiro return len; 119299c12dccSNélio Laranjeiro } 119388c07335SMatan Azrad } while (1); 119488c07335SMatan Azrad } 119599c12dccSNélio Laranjeiro 119699c12dccSNélio Laranjeiro /** 119767fa62bcSAdrien Mazarguil * Translate RX completion flags to offload flags. 119867fa62bcSAdrien Mazarguil * 11996218063bSNélio Laranjeiro * @param[in] cqe 12006218063bSNélio Laranjeiro * Pointer to CQE. 120167fa62bcSAdrien Mazarguil * 120267fa62bcSAdrien Mazarguil * @return 120367fa62bcSAdrien Mazarguil * Offload flags (ol_flags) for struct rte_mbuf. 120467fa62bcSAdrien Mazarguil */ 120567fa62bcSAdrien Mazarguil static inline uint32_t 12066ba07449SXueming Li rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe) 120767fa62bcSAdrien Mazarguil { 120867fa62bcSAdrien Mazarguil uint32_t ol_flags = 0; 12096b30a6a8SShachar Beiser uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); 121067fa62bcSAdrien Mazarguil 12110603df73SNélio Laranjeiro ol_flags = 12120603df73SNélio Laranjeiro TRANSPOSE(flags, 12130603df73SNélio Laranjeiro MLX5_CQE_RX_L3_HDR_VALID, 12140603df73SNélio Laranjeiro PKT_RX_IP_CKSUM_GOOD) | 12150603df73SNélio Laranjeiro TRANSPOSE(flags, 12160603df73SNélio Laranjeiro MLX5_CQE_RX_L4_HDR_VALID, 121783e9d9a3SNelio Laranjeiro PKT_RX_L4_CKSUM_GOOD); 121867fa62bcSAdrien Mazarguil return ol_flags; 121967fa62bcSAdrien Mazarguil } 122067fa62bcSAdrien Mazarguil 122167fa62bcSAdrien Mazarguil /** 12223e1f82a1SYongseok Koh * Fill in mbuf fields from RX completion flags. 12233e1f82a1SYongseok Koh * Note that pkt->ol_flags should be initialized outside of this function. 12243e1f82a1SYongseok Koh * 12253e1f82a1SYongseok Koh * @param rxq 12263e1f82a1SYongseok Koh * Pointer to RX queue. 12273e1f82a1SYongseok Koh * @param pkt 12283e1f82a1SYongseok Koh * mbuf to fill. 12293e1f82a1SYongseok Koh * @param cqe 12303e1f82a1SYongseok Koh * CQE to process. 12313e1f82a1SYongseok Koh * @param rss_hash_res 12323e1f82a1SYongseok Koh * Packet RSS Hash result. 12333e1f82a1SYongseok Koh */ 12343e1f82a1SYongseok Koh static inline void 12353e1f82a1SYongseok Koh rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, 12363e1f82a1SYongseok Koh volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res) 12373e1f82a1SYongseok Koh { 12383e1f82a1SYongseok Koh /* Update packet information. */ 12393e1f82a1SYongseok Koh pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe); 12403e1f82a1SYongseok Koh if (rss_hash_res && rxq->rss_hash) { 12413e1f82a1SYongseok Koh pkt->hash.rss = rss_hash_res; 12423e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_RSS_HASH; 12433e1f82a1SYongseok Koh } 12443e1f82a1SYongseok Koh if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { 12453e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_FDIR; 12463e1f82a1SYongseok Koh if (cqe->sop_drop_qpn != 12473e1f82a1SYongseok Koh rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { 12483e1f82a1SYongseok Koh uint32_t mark = cqe->sop_drop_qpn; 12493e1f82a1SYongseok Koh 12503e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_FDIR_ID; 12513e1f82a1SYongseok Koh pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); 12523e1f82a1SYongseok Koh } 12533e1f82a1SYongseok Koh } 12543e1f82a1SYongseok Koh if (rxq->csum) 12553e1f82a1SYongseok Koh pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); 12563e1f82a1SYongseok Koh if (rxq->vlan_strip && 12573e1f82a1SYongseok Koh (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { 12583e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 12593e1f82a1SYongseok Koh pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info); 12603e1f82a1SYongseok Koh } 12613e1f82a1SYongseok Koh if (rxq->hw_timestamp) { 12623e1f82a1SYongseok Koh pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp); 12633e1f82a1SYongseok Koh pkt->ol_flags |= PKT_RX_TIMESTAMP; 12643e1f82a1SYongseok Koh } 12653e1f82a1SYongseok Koh } 12663e1f82a1SYongseok Koh 12673e1f82a1SYongseok Koh /** 12682e22920bSAdrien Mazarguil * DPDK callback for RX. 12692e22920bSAdrien Mazarguil * 12702e22920bSAdrien Mazarguil * @param dpdk_rxq 12712e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 12722e22920bSAdrien Mazarguil * @param[out] pkts 12732e22920bSAdrien Mazarguil * Array to store received packets. 12742e22920bSAdrien Mazarguil * @param pkts_n 12752e22920bSAdrien Mazarguil * Maximum number of packets in array. 12762e22920bSAdrien Mazarguil * 12772e22920bSAdrien Mazarguil * @return 12782e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 12792e22920bSAdrien Mazarguil */ 12802e22920bSAdrien Mazarguil uint16_t 12812e22920bSAdrien Mazarguil mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 12822e22920bSAdrien Mazarguil { 128378142aacSNélio Laranjeiro struct mlx5_rxq_data *rxq = dpdk_rxq; 1284b4b12e55SNélio Laranjeiro const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1; 1285e2f116eeSNélio Laranjeiro const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1; 12869964b965SNélio Laranjeiro const unsigned int sges_n = rxq->sges_n; 12879964b965SNélio Laranjeiro struct rte_mbuf *pkt = NULL; 12889964b965SNélio Laranjeiro struct rte_mbuf *seg = NULL; 128997267b8eSNelio Laranjeiro volatile struct mlx5_cqe *cqe = 129097267b8eSNelio Laranjeiro &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 12919964b965SNélio Laranjeiro unsigned int i = 0; 12929964b965SNélio Laranjeiro unsigned int rq_ci = rxq->rq_ci << sges_n; 12934e66a6feSNelio Laranjeiro int len = 0; /* keep its value across iterations. */ 12942e22920bSAdrien Mazarguil 12959964b965SNélio Laranjeiro while (pkts_n) { 12969964b965SNélio Laranjeiro unsigned int idx = rq_ci & wqe_cnt; 12977d6bf6b8SYongseok Koh volatile struct mlx5_wqe_data_seg *wqe = 12987d6bf6b8SYongseok Koh &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; 12999964b965SNélio Laranjeiro struct rte_mbuf *rep = (*rxq->elts)[idx]; 13002e633f1fSYongseok Koh volatile struct mlx5_mini_cqe8 *mcqe = NULL; 13012e633f1fSYongseok Koh uint32_t rss_hash_res; 13029964b965SNélio Laranjeiro 13039964b965SNélio Laranjeiro if (pkt) 13049964b965SNélio Laranjeiro NEXT(seg) = rep; 13059964b965SNélio Laranjeiro seg = rep; 13069964b965SNélio Laranjeiro rte_prefetch0(seg); 13076218063bSNélio Laranjeiro rte_prefetch0(cqe); 13089964b965SNélio Laranjeiro rte_prefetch0(wqe); 1309fbfd9955SOlivier Matz rep = rte_mbuf_raw_alloc(rxq->mp); 13102e22920bSAdrien Mazarguil if (unlikely(rep == NULL)) { 131115a756b6SSagi Grimberg ++rxq->stats.rx_nombuf; 131215a756b6SSagi Grimberg if (!pkt) { 131315a756b6SSagi Grimberg /* 131415a756b6SSagi Grimberg * no buffers before we even started, 131515a756b6SSagi Grimberg * bail out silently. 131615a756b6SSagi Grimberg */ 131715a756b6SSagi Grimberg break; 131815a756b6SSagi Grimberg } 1319a1bdb71aSNélio Laranjeiro while (pkt != seg) { 1320a1bdb71aSNélio Laranjeiro assert(pkt != (*rxq->elts)[idx]); 1321fe5fe382SNélio Laranjeiro rep = NEXT(pkt); 13228f094a9aSOlivier Matz NEXT(pkt) = NULL; 13238f094a9aSOlivier Matz NB_SEGS(pkt) = 1; 13241f88c0a2SOlivier Matz rte_mbuf_raw_free(pkt); 1325fe5fe382SNélio Laranjeiro pkt = rep; 13269964b965SNélio Laranjeiro } 13276218063bSNélio Laranjeiro break; 13282e22920bSAdrien Mazarguil } 13299964b965SNélio Laranjeiro if (!pkt) { 133097267b8eSNelio Laranjeiro cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 13312e633f1fSYongseok Koh len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); 1332ecf60761SNélio Laranjeiro if (!len) { 13331f88c0a2SOlivier Matz rte_mbuf_raw_free(rep); 13346218063bSNélio Laranjeiro break; 13356218063bSNélio Laranjeiro } 13369964b965SNélio Laranjeiro pkt = seg; 13379964b965SNélio Laranjeiro assert(len >= (rxq->crc_present << 2)); 13380ac64846SMaxime Leroy pkt->ol_flags = 0; 13392e633f1fSYongseok Koh /* If compressed, take hash result from mini-CQE. */ 13402e633f1fSYongseok Koh rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? 13412e633f1fSYongseok Koh cqe->rx_hash_res : 13422e633f1fSYongseok Koh mcqe->rx_hash_result); 13433e1f82a1SYongseok Koh rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); 13446218063bSNélio Laranjeiro if (rxq->crc_present) 134535b2d13fSOlivier Matz len -= RTE_ETHER_CRC_LEN; 13466218063bSNélio Laranjeiro PKT_LEN(pkt) = len; 13472579543fSMatan Azrad if (cqe->lro_num_seg > 1) { 13482579543fSMatan Azrad mlx5_lro_update_hdr 13492579543fSMatan Azrad (rte_pktmbuf_mtod(pkt, uint8_t *), cqe, 13502579543fSMatan Azrad len); 13512579543fSMatan Azrad pkt->ol_flags |= PKT_RX_LRO; 13522579543fSMatan Azrad pkt->tso_segsz = len / cqe->lro_num_seg; 13532579543fSMatan Azrad } 13549964b965SNélio Laranjeiro } 13559964b965SNélio Laranjeiro DATA_LEN(rep) = DATA_LEN(seg); 13569964b965SNélio Laranjeiro PKT_LEN(rep) = PKT_LEN(seg); 13579964b965SNélio Laranjeiro SET_DATA_OFF(rep, DATA_OFF(seg)); 13589964b965SNélio Laranjeiro PORT(rep) = PORT(seg); 13599964b965SNélio Laranjeiro (*rxq->elts)[idx] = rep; 13609964b965SNélio Laranjeiro /* 13619964b965SNélio Laranjeiro * Fill NIC descriptor with the new buffer. The lkey and size 13629964b965SNélio Laranjeiro * of the buffers are already known, only the buffer address 13639964b965SNélio Laranjeiro * changes. 13649964b965SNélio Laranjeiro */ 13656b30a6a8SShachar Beiser wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); 1366974f1e7eSYongseok Koh /* If there's only one MR, no need to replace LKey in WQE. */ 1367974f1e7eSYongseok Koh if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) 1368974f1e7eSYongseok Koh wqe->lkey = mlx5_rx_mb2mr(rxq, rep); 13699964b965SNélio Laranjeiro if (len > DATA_LEN(seg)) { 13709964b965SNélio Laranjeiro len -= DATA_LEN(seg); 13719964b965SNélio Laranjeiro ++NB_SEGS(pkt); 13729964b965SNélio Laranjeiro ++rq_ci; 13739964b965SNélio Laranjeiro continue; 13749964b965SNélio Laranjeiro } 13759964b965SNélio Laranjeiro DATA_LEN(seg) = len; 137687011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 137787011737SAdrien Mazarguil /* Increment bytes counter. */ 13789964b965SNélio Laranjeiro rxq->stats.ibytes += PKT_LEN(pkt); 137987011737SAdrien Mazarguil #endif 13806218063bSNélio Laranjeiro /* Return packet. */ 13816218063bSNélio Laranjeiro *(pkts++) = pkt; 13829964b965SNélio Laranjeiro pkt = NULL; 13839964b965SNélio Laranjeiro --pkts_n; 13849964b965SNélio Laranjeiro ++i; 13859964b965SNélio Laranjeiro /* Align consumer index to the next stride. */ 13869964b965SNélio Laranjeiro rq_ci >>= sges_n; 13876218063bSNélio Laranjeiro ++rq_ci; 13889964b965SNélio Laranjeiro rq_ci <<= sges_n; 13892e22920bSAdrien Mazarguil } 13909964b965SNélio Laranjeiro if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci))) 13912e22920bSAdrien Mazarguil return 0; 13926218063bSNélio Laranjeiro /* Update the consumer index. */ 13939964b965SNélio Laranjeiro rxq->rq_ci = rq_ci >> sges_n; 13944fe7f662SYongseok Koh rte_cio_wmb(); 13956b30a6a8SShachar Beiser *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 13964fe7f662SYongseok Koh rte_cio_wmb(); 13976b30a6a8SShachar Beiser *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 139887011737SAdrien Mazarguil #ifdef MLX5_PMD_SOFT_COUNTERS 139987011737SAdrien Mazarguil /* Increment packets counter. */ 14009964b965SNélio Laranjeiro rxq->stats.ipackets += i; 140187011737SAdrien Mazarguil #endif 14029964b965SNélio Laranjeiro return i; 14032e22920bSAdrien Mazarguil } 14042e22920bSAdrien Mazarguil 1405e4c2a16eSMatan Azrad /** 1406e4c2a16eSMatan Azrad * Update LRO packet TCP header. 1407e4c2a16eSMatan Azrad * The HW LRO feature doesn't update the TCP header after coalescing the 1408e4c2a16eSMatan Azrad * TCP segments but supplies information in CQE to fill it by SW. 1409e4c2a16eSMatan Azrad * 1410e4c2a16eSMatan Azrad * @param tcp 1411e4c2a16eSMatan Azrad * Pointer to the TCP header. 1412e4c2a16eSMatan Azrad * @param cqe 1413e4c2a16eSMatan Azrad * Pointer to the completion entry.. 1414e4c2a16eSMatan Azrad * @param phcsum 1415e4c2a16eSMatan Azrad * The L3 pseudo-header checksum. 1416e4c2a16eSMatan Azrad */ 1417e4c2a16eSMatan Azrad static inline void 1418e4c2a16eSMatan Azrad mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp, 1419e4c2a16eSMatan Azrad volatile struct mlx5_cqe *restrict cqe, 1420e4c2a16eSMatan Azrad uint32_t phcsum) 1421e4c2a16eSMatan Azrad { 1422e4c2a16eSMatan Azrad uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) & 1423e4c2a16eSMatan Azrad MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT; 1424e4c2a16eSMatan Azrad /* 1425e4c2a16eSMatan Azrad * The HW calculates only the TCP payload checksum, need to complete 1426e4c2a16eSMatan Azrad * the TCP header checksum and the L3 pseudo-header checksum. 1427e4c2a16eSMatan Azrad */ 1428e4c2a16eSMatan Azrad uint32_t csum = phcsum + cqe->csum; 1429e4c2a16eSMatan Azrad 1430e4c2a16eSMatan Azrad if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK || 1431e4c2a16eSMatan Azrad l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) { 1432e4c2a16eSMatan Azrad tcp->tcp_flags |= RTE_TCP_ACK_FLAG; 1433e4c2a16eSMatan Azrad tcp->recv_ack = cqe->lro_ack_seq_num; 1434e4c2a16eSMatan Azrad tcp->rx_win = cqe->lro_tcp_win; 1435e4c2a16eSMatan Azrad } 1436e4c2a16eSMatan Azrad if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK) 1437e4c2a16eSMatan Azrad tcp->tcp_flags |= RTE_TCP_PSH_FLAG; 1438e4c2a16eSMatan Azrad tcp->cksum = 0; 1439e4c2a16eSMatan Azrad csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4); 1440e4c2a16eSMatan Azrad csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); 1441e4c2a16eSMatan Azrad csum = (~csum) & 0xffff; 1442e4c2a16eSMatan Azrad if (csum == 0) 1443e4c2a16eSMatan Azrad csum = 0xffff; 1444e4c2a16eSMatan Azrad tcp->cksum = csum; 1445e4c2a16eSMatan Azrad } 1446e4c2a16eSMatan Azrad 1447e4c2a16eSMatan Azrad /** 1448e4c2a16eSMatan Azrad * Update LRO packet headers. 1449e4c2a16eSMatan Azrad * The HW LRO feature doesn't update the L3/TCP headers after coalescing the 1450e4c2a16eSMatan Azrad * TCP segments but supply information in CQE to fill it by SW. 1451e4c2a16eSMatan Azrad * 1452e4c2a16eSMatan Azrad * @param padd 1453e4c2a16eSMatan Azrad * The packet address. 1454e4c2a16eSMatan Azrad * @param cqe 1455e4c2a16eSMatan Azrad * Pointer to the completion entry.. 1456e4c2a16eSMatan Azrad * @param len 1457e4c2a16eSMatan Azrad * The packet length. 1458e4c2a16eSMatan Azrad */ 1459e4c2a16eSMatan Azrad static inline void 1460e4c2a16eSMatan Azrad mlx5_lro_update_hdr(uint8_t *restrict padd, 1461e4c2a16eSMatan Azrad volatile struct mlx5_cqe *restrict cqe, 1462e4c2a16eSMatan Azrad uint32_t len) 1463e4c2a16eSMatan Azrad { 1464e4c2a16eSMatan Azrad union { 1465e4c2a16eSMatan Azrad struct rte_ether_hdr *eth; 1466e4c2a16eSMatan Azrad struct rte_vlan_hdr *vlan; 1467e4c2a16eSMatan Azrad struct rte_ipv4_hdr *ipv4; 1468e4c2a16eSMatan Azrad struct rte_ipv6_hdr *ipv6; 1469e4c2a16eSMatan Azrad struct rte_tcp_hdr *tcp; 1470e4c2a16eSMatan Azrad uint8_t *hdr; 1471e4c2a16eSMatan Azrad } h = { 1472e4c2a16eSMatan Azrad .hdr = padd, 1473e4c2a16eSMatan Azrad }; 1474e4c2a16eSMatan Azrad uint16_t proto = h.eth->ether_type; 1475e4c2a16eSMatan Azrad uint32_t phcsum; 1476e4c2a16eSMatan Azrad 1477e4c2a16eSMatan Azrad h.eth++; 1478e4c2a16eSMatan Azrad while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) || 1479e4c2a16eSMatan Azrad proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { 1480e4c2a16eSMatan Azrad proto = h.vlan->eth_proto; 1481e4c2a16eSMatan Azrad h.vlan++; 1482e4c2a16eSMatan Azrad } 1483e4c2a16eSMatan Azrad if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) { 1484e4c2a16eSMatan Azrad h.ipv4->time_to_live = cqe->lro_min_ttl; 1485e4c2a16eSMatan Azrad h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd)); 1486e4c2a16eSMatan Azrad h.ipv4->hdr_checksum = 0; 1487e4c2a16eSMatan Azrad h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4); 1488e4c2a16eSMatan Azrad phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0); 1489e4c2a16eSMatan Azrad h.ipv4++; 1490e4c2a16eSMatan Azrad } else { 1491e4c2a16eSMatan Azrad h.ipv6->hop_limits = cqe->lro_min_ttl; 1492e4c2a16eSMatan Azrad h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) - 1493e4c2a16eSMatan Azrad sizeof(*h.ipv6)); 1494e4c2a16eSMatan Azrad phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0); 1495e4c2a16eSMatan Azrad h.ipv6++; 1496e4c2a16eSMatan Azrad } 1497e4c2a16eSMatan Azrad mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum); 1498e4c2a16eSMatan Azrad } 1499e4c2a16eSMatan Azrad 15007d6bf6b8SYongseok Koh void 15017d6bf6b8SYongseok Koh mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) 15027d6bf6b8SYongseok Koh { 15037d6bf6b8SYongseok Koh struct mlx5_mprq_buf *buf = opaque; 15047d6bf6b8SYongseok Koh 15057d6bf6b8SYongseok Koh if (rte_atomic16_read(&buf->refcnt) == 1) { 15067d6bf6b8SYongseok Koh rte_mempool_put(buf->mp, buf); 15077d6bf6b8SYongseok Koh } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) { 15087d6bf6b8SYongseok Koh rte_atomic16_set(&buf->refcnt, 1); 15097d6bf6b8SYongseok Koh rte_mempool_put(buf->mp, buf); 15107d6bf6b8SYongseok Koh } 15117d6bf6b8SYongseok Koh } 15127d6bf6b8SYongseok Koh 15137d6bf6b8SYongseok Koh void 15147d6bf6b8SYongseok Koh mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf) 15157d6bf6b8SYongseok Koh { 15167d6bf6b8SYongseok Koh mlx5_mprq_buf_free_cb(NULL, buf); 15177d6bf6b8SYongseok Koh } 15187d6bf6b8SYongseok Koh 15197d6bf6b8SYongseok Koh static inline void 15203a22f387SMatan Azrad mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx, 15213a22f387SMatan Azrad const unsigned int strd_n) 15227d6bf6b8SYongseok Koh { 15237d6bf6b8SYongseok Koh struct mlx5_mprq_buf *rep = rxq->mprq_repl; 15247d6bf6b8SYongseok Koh volatile struct mlx5_wqe_data_seg *wqe = 15257d6bf6b8SYongseok Koh &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; 15267d6bf6b8SYongseok Koh void *addr; 15277d6bf6b8SYongseok Koh 15287d6bf6b8SYongseok Koh assert(rep != NULL); 15297d6bf6b8SYongseok Koh /* Replace MPRQ buf. */ 15307d6bf6b8SYongseok Koh (*rxq->mprq_bufs)[rq_idx] = rep; 15317d6bf6b8SYongseok Koh /* Replace WQE. */ 15323a22f387SMatan Azrad addr = mlx5_mprq_buf_addr(rep, strd_n); 15337d6bf6b8SYongseok Koh wqe->addr = rte_cpu_to_be_64((uintptr_t)addr); 15347d6bf6b8SYongseok Koh /* If there's only one MR, no need to replace LKey in WQE. */ 15357d6bf6b8SYongseok Koh if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) 15367d6bf6b8SYongseok Koh wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr); 15377d6bf6b8SYongseok Koh /* Stash a mbuf for next replacement. */ 15387d6bf6b8SYongseok Koh if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep))) 15397d6bf6b8SYongseok Koh rxq->mprq_repl = rep; 15407d6bf6b8SYongseok Koh else 15417d6bf6b8SYongseok Koh rxq->mprq_repl = NULL; 15427d6bf6b8SYongseok Koh } 15437d6bf6b8SYongseok Koh 15447d6bf6b8SYongseok Koh /** 15457d6bf6b8SYongseok Koh * DPDK callback for RX with Multi-Packet RQ support. 15467d6bf6b8SYongseok Koh * 15477d6bf6b8SYongseok Koh * @param dpdk_rxq 15487d6bf6b8SYongseok Koh * Generic pointer to RX queue structure. 15497d6bf6b8SYongseok Koh * @param[out] pkts 15507d6bf6b8SYongseok Koh * Array to store received packets. 15517d6bf6b8SYongseok Koh * @param pkts_n 15527d6bf6b8SYongseok Koh * Maximum number of packets in array. 15537d6bf6b8SYongseok Koh * 15547d6bf6b8SYongseok Koh * @return 15557d6bf6b8SYongseok Koh * Number of packets successfully received (<= pkts_n). 15567d6bf6b8SYongseok Koh */ 15577d6bf6b8SYongseok Koh uint16_t 15587d6bf6b8SYongseok Koh mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 15597d6bf6b8SYongseok Koh { 15607d6bf6b8SYongseok Koh struct mlx5_rxq_data *rxq = dpdk_rxq; 15617d6bf6b8SYongseok Koh const unsigned int strd_n = 1 << rxq->strd_num_n; 15627d6bf6b8SYongseok Koh const unsigned int strd_sz = 1 << rxq->strd_sz_n; 15637d6bf6b8SYongseok Koh const unsigned int strd_shift = 15647d6bf6b8SYongseok Koh MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en; 15657d6bf6b8SYongseok Koh const unsigned int cq_mask = (1 << rxq->cqe_n) - 1; 15667d6bf6b8SYongseok Koh const unsigned int wq_mask = (1 << rxq->elts_n) - 1; 15677d6bf6b8SYongseok Koh volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; 15687d6bf6b8SYongseok Koh unsigned int i = 0; 15693afdf157SXueming Li uint32_t rq_ci = rxq->rq_ci; 15701787eb7bSYongseok Koh uint16_t consumed_strd = rxq->consumed_strd; 1571a496e093SMatan Azrad uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM; 15727d6bf6b8SYongseok Koh struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; 15737d6bf6b8SYongseok Koh 15747d6bf6b8SYongseok Koh while (i < pkts_n) { 15757d6bf6b8SYongseok Koh struct rte_mbuf *pkt; 15767d6bf6b8SYongseok Koh void *addr; 15777d6bf6b8SYongseok Koh int ret; 15787d6bf6b8SYongseok Koh unsigned int len; 15791787eb7bSYongseok Koh uint16_t strd_cnt; 15801787eb7bSYongseok Koh uint16_t strd_idx; 15817d6bf6b8SYongseok Koh uint32_t offset; 15827d6bf6b8SYongseok Koh uint32_t byte_cnt; 15832e633f1fSYongseok Koh volatile struct mlx5_mini_cqe8 *mcqe = NULL; 15841787eb7bSYongseok Koh uint32_t rss_hash_res = 0; 1585e4c2a16eSMatan Azrad uint8_t lro_num_seg; 15867d6bf6b8SYongseok Koh 15871787eb7bSYongseok Koh if (consumed_strd == strd_n) { 15887d6bf6b8SYongseok Koh /* Replace WQE only if the buffer is still in use. */ 15897d6bf6b8SYongseok Koh if (rte_atomic16_read(&buf->refcnt) > 1) { 15903a22f387SMatan Azrad mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n); 15917d6bf6b8SYongseok Koh /* Release the old buffer. */ 15927d6bf6b8SYongseok Koh mlx5_mprq_buf_free(buf); 15937d6bf6b8SYongseok Koh } else if (unlikely(rxq->mprq_repl == NULL)) { 15947d6bf6b8SYongseok Koh struct mlx5_mprq_buf *rep; 15957d6bf6b8SYongseok Koh 15967d6bf6b8SYongseok Koh /* 15977d6bf6b8SYongseok Koh * Currently, the MPRQ mempool is out of buffer 15987d6bf6b8SYongseok Koh * and doing memcpy regardless of the size of Rx 15997d6bf6b8SYongseok Koh * packet. Retry allocation to get back to 16007d6bf6b8SYongseok Koh * normal. 16017d6bf6b8SYongseok Koh */ 16027d6bf6b8SYongseok Koh if (!rte_mempool_get(rxq->mprq_mp, 16037d6bf6b8SYongseok Koh (void **)&rep)) 16047d6bf6b8SYongseok Koh rxq->mprq_repl = rep; 16057d6bf6b8SYongseok Koh } 16067d6bf6b8SYongseok Koh /* Advance to the next WQE. */ 16071787eb7bSYongseok Koh consumed_strd = 0; 16087d6bf6b8SYongseok Koh ++rq_ci; 16097d6bf6b8SYongseok Koh buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; 16107d6bf6b8SYongseok Koh } 16117d6bf6b8SYongseok Koh cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; 16122e633f1fSYongseok Koh ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); 16137d6bf6b8SYongseok Koh if (!ret) 16147d6bf6b8SYongseok Koh break; 16157d6bf6b8SYongseok Koh byte_cnt = ret; 16161787eb7bSYongseok Koh strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> 16177d6bf6b8SYongseok Koh MLX5_MPRQ_STRIDE_NUM_SHIFT; 16181787eb7bSYongseok Koh assert(strd_cnt); 16191787eb7bSYongseok Koh consumed_strd += strd_cnt; 16207d6bf6b8SYongseok Koh if (byte_cnt & MLX5_MPRQ_FILLER_MASK) 16217d6bf6b8SYongseok Koh continue; 16221787eb7bSYongseok Koh if (mcqe == NULL) { 16231787eb7bSYongseok Koh rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res); 16241787eb7bSYongseok Koh strd_idx = rte_be_to_cpu_16(cqe->wqe_counter); 16251787eb7bSYongseok Koh } else { 16261787eb7bSYongseok Koh /* mini-CQE for MPRQ doesn't have hash result. */ 16271787eb7bSYongseok Koh strd_idx = rte_be_to_cpu_16(mcqe->stride_idx); 16281787eb7bSYongseok Koh } 16291787eb7bSYongseok Koh assert(strd_idx < strd_n); 16301787eb7bSYongseok Koh assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask)); 1631e4c2a16eSMatan Azrad lro_num_seg = cqe->lro_num_seg; 16327d6bf6b8SYongseok Koh /* 16337d6bf6b8SYongseok Koh * Currently configured to receive a packet per a stride. But if 16347d6bf6b8SYongseok Koh * MTU is adjusted through kernel interface, device could 16357d6bf6b8SYongseok Koh * consume multiple strides without raising an error. In this 16367d6bf6b8SYongseok Koh * case, the packet should be dropped because it is bigger than 16377d6bf6b8SYongseok Koh * the max_rx_pkt_len. 16387d6bf6b8SYongseok Koh */ 1639e4c2a16eSMatan Azrad if (unlikely(!lro_num_seg && strd_cnt > 1)) { 16407d6bf6b8SYongseok Koh ++rxq->stats.idropped; 16417d6bf6b8SYongseok Koh continue; 16427d6bf6b8SYongseok Koh } 16437d6bf6b8SYongseok Koh pkt = rte_pktmbuf_alloc(rxq->mp); 16447d6bf6b8SYongseok Koh if (unlikely(pkt == NULL)) { 16457d6bf6b8SYongseok Koh ++rxq->stats.rx_nombuf; 16467d6bf6b8SYongseok Koh break; 16477d6bf6b8SYongseok Koh } 16487d6bf6b8SYongseok Koh len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; 16497d6bf6b8SYongseok Koh assert((int)len >= (rxq->crc_present << 2)); 16507d6bf6b8SYongseok Koh if (rxq->crc_present) 165135b2d13fSOlivier Matz len -= RTE_ETHER_CRC_LEN; 16521787eb7bSYongseok Koh offset = strd_idx * strd_sz + strd_shift; 16533a22f387SMatan Azrad addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset); 16547d6bf6b8SYongseok Koh /* 16557d6bf6b8SYongseok Koh * Memcpy packets to the target mbuf if: 16567d6bf6b8SYongseok Koh * - The size of packet is smaller than mprq_max_memcpy_len. 16577d6bf6b8SYongseok Koh * - Out of buffer in the Mempool for Multi-Packet RQ. 16587d6bf6b8SYongseok Koh */ 16597d6bf6b8SYongseok Koh if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) { 16607d6bf6b8SYongseok Koh /* 16617d6bf6b8SYongseok Koh * When memcpy'ing packet due to out-of-buffer, the 16627d6bf6b8SYongseok Koh * packet must be smaller than the target mbuf. 16637d6bf6b8SYongseok Koh */ 16647d6bf6b8SYongseok Koh if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { 16657d6bf6b8SYongseok Koh rte_pktmbuf_free_seg(pkt); 16667d6bf6b8SYongseok Koh ++rxq->stats.idropped; 16677d6bf6b8SYongseok Koh continue; 16687d6bf6b8SYongseok Koh } 16697d6bf6b8SYongseok Koh rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len); 1670be39124eSMatan Azrad DATA_LEN(pkt) = len; 16717d6bf6b8SYongseok Koh } else { 16727d6bf6b8SYongseok Koh rte_iova_t buf_iova; 16737d6bf6b8SYongseok Koh struct rte_mbuf_ext_shared_info *shinfo; 16741787eb7bSYongseok Koh uint16_t buf_len = strd_cnt * strd_sz; 1675e4c2a16eSMatan Azrad void *buf_addr; 16767d6bf6b8SYongseok Koh 16777d6bf6b8SYongseok Koh /* Increment the refcnt of the whole chunk. */ 16787d6bf6b8SYongseok Koh rte_atomic16_add_return(&buf->refcnt, 1); 16797d6bf6b8SYongseok Koh assert((uint16_t)rte_atomic16_read(&buf->refcnt) <= 16807d6bf6b8SYongseok Koh strd_n + 1); 1681a496e093SMatan Azrad buf_addr = RTE_PTR_SUB(addr, headroom_sz); 16827d6bf6b8SYongseok Koh /* 16837d6bf6b8SYongseok Koh * MLX5 device doesn't use iova but it is necessary in a 16847d6bf6b8SYongseok Koh * case where the Rx packet is transmitted via a 16857d6bf6b8SYongseok Koh * different PMD. 16867d6bf6b8SYongseok Koh */ 16877d6bf6b8SYongseok Koh buf_iova = rte_mempool_virt2iova(buf) + 1688e4c2a16eSMatan Azrad RTE_PTR_DIFF(buf_addr, buf); 16893a22f387SMatan Azrad shinfo = &buf->shinfos[strd_idx]; 16903a22f387SMatan Azrad rte_mbuf_ext_refcnt_set(shinfo, 1); 16917d6bf6b8SYongseok Koh /* 16927d6bf6b8SYongseok Koh * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when 16937d6bf6b8SYongseok Koh * attaching the stride to mbuf and more offload flags 16947d6bf6b8SYongseok Koh * will be added below by calling rxq_cq_to_mbuf(). 16957d6bf6b8SYongseok Koh * Other fields will be overwritten. 16967d6bf6b8SYongseok Koh */ 1697e4c2a16eSMatan Azrad rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova, 1698e4c2a16eSMatan Azrad buf_len, shinfo); 1699a496e093SMatan Azrad /* Set mbuf head-room. */ 1700a496e093SMatan Azrad pkt->data_off = headroom_sz; 17017d6bf6b8SYongseok Koh assert(pkt->ol_flags == EXT_ATTACHED_MBUF); 17027d6bf6b8SYongseok Koh /* 17037d6bf6b8SYongseok Koh * Prevent potential overflow due to MTU change through 17047d6bf6b8SYongseok Koh * kernel interface. 17057d6bf6b8SYongseok Koh */ 17067d6bf6b8SYongseok Koh if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { 17077d6bf6b8SYongseok Koh rte_pktmbuf_free_seg(pkt); 17087d6bf6b8SYongseok Koh ++rxq->stats.idropped; 17097d6bf6b8SYongseok Koh continue; 17107d6bf6b8SYongseok Koh } 1711be39124eSMatan Azrad DATA_LEN(pkt) = len; 1712be39124eSMatan Azrad /* 1713be39124eSMatan Azrad * LRO packet may consume all the stride memory, in this 1714be39124eSMatan Azrad * case packet head-room space is not guaranteed so must 1715be39124eSMatan Azrad * to add an empty mbuf for the head-room. 1716be39124eSMatan Azrad */ 1717be39124eSMatan Azrad if (!rxq->strd_headroom_en) { 1718be39124eSMatan Azrad struct rte_mbuf *headroom_mbuf = 1719be39124eSMatan Azrad rte_pktmbuf_alloc(rxq->mp); 1720be39124eSMatan Azrad 1721be39124eSMatan Azrad if (unlikely(headroom_mbuf == NULL)) { 1722be39124eSMatan Azrad rte_pktmbuf_free_seg(pkt); 1723be39124eSMatan Azrad ++rxq->stats.rx_nombuf; 1724be39124eSMatan Azrad break; 1725be39124eSMatan Azrad } 1726be39124eSMatan Azrad PORT(pkt) = rxq->port_id; 1727be39124eSMatan Azrad NEXT(headroom_mbuf) = pkt; 1728be39124eSMatan Azrad pkt = headroom_mbuf; 1729be39124eSMatan Azrad NB_SEGS(pkt) = 2; 1730be39124eSMatan Azrad } 17317d6bf6b8SYongseok Koh } 17327d6bf6b8SYongseok Koh rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); 1733e4c2a16eSMatan Azrad if (lro_num_seg > 1) { 1734e4c2a16eSMatan Azrad mlx5_lro_update_hdr(addr, cqe, len); 1735e4c2a16eSMatan Azrad pkt->ol_flags |= PKT_RX_LRO; 1736e4c2a16eSMatan Azrad pkt->tso_segsz = strd_sz; 1737e4c2a16eSMatan Azrad } 17387d6bf6b8SYongseok Koh PKT_LEN(pkt) = len; 17397d6bf6b8SYongseok Koh PORT(pkt) = rxq->port_id; 17407d6bf6b8SYongseok Koh #ifdef MLX5_PMD_SOFT_COUNTERS 17417d6bf6b8SYongseok Koh /* Increment bytes counter. */ 17427d6bf6b8SYongseok Koh rxq->stats.ibytes += PKT_LEN(pkt); 17437d6bf6b8SYongseok Koh #endif 17447d6bf6b8SYongseok Koh /* Return packet. */ 17457d6bf6b8SYongseok Koh *(pkts++) = pkt; 17467d6bf6b8SYongseok Koh ++i; 17477d6bf6b8SYongseok Koh } 17487d6bf6b8SYongseok Koh /* Update the consumer indexes. */ 17491787eb7bSYongseok Koh rxq->consumed_strd = consumed_strd; 17500cfdc180SYongseok Koh rte_cio_wmb(); 17517d6bf6b8SYongseok Koh *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 17527d6bf6b8SYongseok Koh if (rq_ci != rxq->rq_ci) { 17537d6bf6b8SYongseok Koh rxq->rq_ci = rq_ci; 17540cfdc180SYongseok Koh rte_cio_wmb(); 17557d6bf6b8SYongseok Koh *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 17567d6bf6b8SYongseok Koh } 17577d6bf6b8SYongseok Koh #ifdef MLX5_PMD_SOFT_COUNTERS 17587d6bf6b8SYongseok Koh /* Increment packets counter. */ 17597d6bf6b8SYongseok Koh rxq->stats.ipackets += i; 17607d6bf6b8SYongseok Koh #endif 17617d6bf6b8SYongseok Koh return i; 17627d6bf6b8SYongseok Koh } 17637d6bf6b8SYongseok Koh 17642e22920bSAdrien Mazarguil /** 17652e22920bSAdrien Mazarguil * Dummy DPDK callback for TX. 17662e22920bSAdrien Mazarguil * 17672e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 17682e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 17692e22920bSAdrien Mazarguil * 17702e22920bSAdrien Mazarguil * @param dpdk_txq 17712e22920bSAdrien Mazarguil * Generic pointer to TX queue structure. 17722e22920bSAdrien Mazarguil * @param[in] pkts 17732e22920bSAdrien Mazarguil * Packets to transmit. 17742e22920bSAdrien Mazarguil * @param pkts_n 17752e22920bSAdrien Mazarguil * Number of packets in array. 17762e22920bSAdrien Mazarguil * 17772e22920bSAdrien Mazarguil * @return 17782e22920bSAdrien Mazarguil * Number of packets successfully transmitted (<= pkts_n). 17792e22920bSAdrien Mazarguil */ 17802e22920bSAdrien Mazarguil uint16_t 178156f08e16SNélio Laranjeiro removed_tx_burst(void *dpdk_txq __rte_unused, 178256f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 178356f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 17842e22920bSAdrien Mazarguil { 17852aac5b5dSYongseok Koh rte_mb(); 17862e22920bSAdrien Mazarguil return 0; 17872e22920bSAdrien Mazarguil } 17882e22920bSAdrien Mazarguil 17892e22920bSAdrien Mazarguil /** 17902e22920bSAdrien Mazarguil * Dummy DPDK callback for RX. 17912e22920bSAdrien Mazarguil * 17922e22920bSAdrien Mazarguil * This function is used to temporarily replace the real callback during 17932e22920bSAdrien Mazarguil * unsafe control operations on the queue, or in case of error. 17942e22920bSAdrien Mazarguil * 17952e22920bSAdrien Mazarguil * @param dpdk_rxq 17962e22920bSAdrien Mazarguil * Generic pointer to RX queue structure. 17972e22920bSAdrien Mazarguil * @param[out] pkts 17982e22920bSAdrien Mazarguil * Array to store received packets. 17992e22920bSAdrien Mazarguil * @param pkts_n 18002e22920bSAdrien Mazarguil * Maximum number of packets in array. 18012e22920bSAdrien Mazarguil * 18022e22920bSAdrien Mazarguil * @return 18032e22920bSAdrien Mazarguil * Number of packets successfully received (<= pkts_n). 18042e22920bSAdrien Mazarguil */ 18052e22920bSAdrien Mazarguil uint16_t 180656f08e16SNélio Laranjeiro removed_rx_burst(void *dpdk_txq __rte_unused, 180756f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 180856f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 18092e22920bSAdrien Mazarguil { 18102aac5b5dSYongseok Koh rte_mb(); 18112e22920bSAdrien Mazarguil return 0; 18122e22920bSAdrien Mazarguil } 18136cb559d6SYongseok Koh 18146cb559d6SYongseok Koh /* 18156cb559d6SYongseok Koh * Vectorized Rx/Tx routines are not compiled in when required vector 18166cb559d6SYongseok Koh * instructions are not supported on a target architecture. The following null 18176cb559d6SYongseok Koh * stubs are needed for linkage when those are not included outside of this file 18186cb559d6SYongseok Koh * (e.g. mlx5_rxtx_vec_sse.c for x86). 18196cb559d6SYongseok Koh */ 18206cb559d6SYongseok Koh 182181bede55SKeith Wiles __rte_weak uint16_t 182256f08e16SNélio Laranjeiro mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, 182356f08e16SNélio Laranjeiro struct rte_mbuf **pkts __rte_unused, 182456f08e16SNélio Laranjeiro uint16_t pkts_n __rte_unused) 18256cb559d6SYongseok Koh { 18266cb559d6SYongseok Koh return 0; 18276cb559d6SYongseok Koh } 18286cb559d6SYongseok Koh 182981bede55SKeith Wiles __rte_weak int 1830af4f09f2SNélio Laranjeiro mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) 18316cb559d6SYongseok Koh { 18326cb559d6SYongseok Koh return -ENOTSUP; 18336cb559d6SYongseok Koh } 18346cb559d6SYongseok Koh 183581bede55SKeith Wiles __rte_weak int 1836af4f09f2SNélio Laranjeiro mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) 18376cb559d6SYongseok Koh { 18386cb559d6SYongseok Koh return -ENOTSUP; 18396cb559d6SYongseok Koh } 1840a6bd4911SViacheslav Ovsiienko 1841a6bd4911SViacheslav Ovsiienko /** 184218a1c200SViacheslav Ovsiienko * Free the mbufs from the linear array of pointers. 184318a1c200SViacheslav Ovsiienko * 184418a1c200SViacheslav Ovsiienko * @param pkts 184518a1c200SViacheslav Ovsiienko * Pointer to array of packets to be free. 184618a1c200SViacheslav Ovsiienko * @param pkts_n 184718a1c200SViacheslav Ovsiienko * Number of packets to be freed. 184818a1c200SViacheslav Ovsiienko * @param olx 184918a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 185018a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 185118a1c200SViacheslav Ovsiienko */ 185218a1c200SViacheslav Ovsiienko static __rte_always_inline void 185318a1c200SViacheslav Ovsiienko mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts, 185418a1c200SViacheslav Ovsiienko unsigned int pkts_n, 185518a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 185618a1c200SViacheslav Ovsiienko { 185718a1c200SViacheslav Ovsiienko struct rte_mempool *pool = NULL; 185818a1c200SViacheslav Ovsiienko struct rte_mbuf **p_free = NULL; 185918a1c200SViacheslav Ovsiienko struct rte_mbuf *mbuf; 186018a1c200SViacheslav Ovsiienko unsigned int n_free = 0; 186118a1c200SViacheslav Ovsiienko 186218a1c200SViacheslav Ovsiienko /* 186318a1c200SViacheslav Ovsiienko * The implemented algorithm eliminates 186418a1c200SViacheslav Ovsiienko * copying pointers to temporary array 186518a1c200SViacheslav Ovsiienko * for rte_mempool_put_bulk() calls. 186618a1c200SViacheslav Ovsiienko */ 186718a1c200SViacheslav Ovsiienko assert(pkts); 186818a1c200SViacheslav Ovsiienko assert(pkts_n); 186918a1c200SViacheslav Ovsiienko for (;;) { 187018a1c200SViacheslav Ovsiienko for (;;) { 187118a1c200SViacheslav Ovsiienko /* 187218a1c200SViacheslav Ovsiienko * Decrement mbuf reference counter, detach 187318a1c200SViacheslav Ovsiienko * indirect and external buffers if needed. 187418a1c200SViacheslav Ovsiienko */ 187518a1c200SViacheslav Ovsiienko mbuf = rte_pktmbuf_prefree_seg(*pkts); 187618a1c200SViacheslav Ovsiienko if (likely(mbuf != NULL)) { 187718a1c200SViacheslav Ovsiienko assert(mbuf == *pkts); 187818a1c200SViacheslav Ovsiienko if (likely(n_free != 0)) { 187918a1c200SViacheslav Ovsiienko if (unlikely(pool != mbuf->pool)) 188018a1c200SViacheslav Ovsiienko /* From different pool. */ 188118a1c200SViacheslav Ovsiienko break; 188218a1c200SViacheslav Ovsiienko } else { 188318a1c200SViacheslav Ovsiienko /* Start new scan array. */ 188418a1c200SViacheslav Ovsiienko pool = mbuf->pool; 188518a1c200SViacheslav Ovsiienko p_free = pkts; 188618a1c200SViacheslav Ovsiienko } 188718a1c200SViacheslav Ovsiienko ++n_free; 188818a1c200SViacheslav Ovsiienko ++pkts; 188918a1c200SViacheslav Ovsiienko --pkts_n; 189018a1c200SViacheslav Ovsiienko if (unlikely(pkts_n == 0)) { 189118a1c200SViacheslav Ovsiienko mbuf = NULL; 189218a1c200SViacheslav Ovsiienko break; 189318a1c200SViacheslav Ovsiienko } 189418a1c200SViacheslav Ovsiienko } else { 189518a1c200SViacheslav Ovsiienko /* 189618a1c200SViacheslav Ovsiienko * This happens if mbuf is still referenced. 189718a1c200SViacheslav Ovsiienko * We can't put it back to the pool, skip. 189818a1c200SViacheslav Ovsiienko */ 189918a1c200SViacheslav Ovsiienko ++pkts; 190018a1c200SViacheslav Ovsiienko --pkts_n; 190118a1c200SViacheslav Ovsiienko if (unlikely(n_free != 0)) 190218a1c200SViacheslav Ovsiienko /* There is some array to free.*/ 190318a1c200SViacheslav Ovsiienko break; 190418a1c200SViacheslav Ovsiienko if (unlikely(pkts_n == 0)) 190518a1c200SViacheslav Ovsiienko /* Last mbuf, nothing to free. */ 190618a1c200SViacheslav Ovsiienko return; 190718a1c200SViacheslav Ovsiienko } 190818a1c200SViacheslav Ovsiienko } 190918a1c200SViacheslav Ovsiienko for (;;) { 191018a1c200SViacheslav Ovsiienko /* 191118a1c200SViacheslav Ovsiienko * This loop is implemented to avoid multiple 191218a1c200SViacheslav Ovsiienko * inlining of rte_mempool_put_bulk(). 191318a1c200SViacheslav Ovsiienko */ 191418a1c200SViacheslav Ovsiienko assert(pool); 191518a1c200SViacheslav Ovsiienko assert(p_free); 191618a1c200SViacheslav Ovsiienko assert(n_free); 191718a1c200SViacheslav Ovsiienko /* 191818a1c200SViacheslav Ovsiienko * Free the array of pre-freed mbufs 191918a1c200SViacheslav Ovsiienko * belonging to the same memory pool. 192018a1c200SViacheslav Ovsiienko */ 192118a1c200SViacheslav Ovsiienko rte_mempool_put_bulk(pool, (void *)p_free, n_free); 192218a1c200SViacheslav Ovsiienko if (unlikely(mbuf != NULL)) { 192318a1c200SViacheslav Ovsiienko /* There is the request to start new scan. */ 192418a1c200SViacheslav Ovsiienko pool = mbuf->pool; 192518a1c200SViacheslav Ovsiienko p_free = pkts++; 192618a1c200SViacheslav Ovsiienko n_free = 1; 192718a1c200SViacheslav Ovsiienko --pkts_n; 192818a1c200SViacheslav Ovsiienko if (likely(pkts_n != 0)) 192918a1c200SViacheslav Ovsiienko break; 193018a1c200SViacheslav Ovsiienko /* 193118a1c200SViacheslav Ovsiienko * This is the last mbuf to be freed. 193218a1c200SViacheslav Ovsiienko * Do one more loop iteration to complete. 193318a1c200SViacheslav Ovsiienko * This is rare case of the last unique mbuf. 193418a1c200SViacheslav Ovsiienko */ 193518a1c200SViacheslav Ovsiienko mbuf = NULL; 193618a1c200SViacheslav Ovsiienko continue; 193718a1c200SViacheslav Ovsiienko } 193818a1c200SViacheslav Ovsiienko if (likely(pkts_n == 0)) 193918a1c200SViacheslav Ovsiienko return; 194018a1c200SViacheslav Ovsiienko n_free = 0; 194118a1c200SViacheslav Ovsiienko break; 194218a1c200SViacheslav Ovsiienko } 194318a1c200SViacheslav Ovsiienko } 194418a1c200SViacheslav Ovsiienko } 194518a1c200SViacheslav Ovsiienko 194618a1c200SViacheslav Ovsiienko /** 194718a1c200SViacheslav Ovsiienko * Free the mbuf from the elts ring buffer till new tail. 194818a1c200SViacheslav Ovsiienko * 194918a1c200SViacheslav Ovsiienko * @param txq 195018a1c200SViacheslav Ovsiienko * Pointer to Tx queue structure. 195118a1c200SViacheslav Ovsiienko * @param tail 195218a1c200SViacheslav Ovsiienko * Index in elts to free up to, becomes new elts tail. 195318a1c200SViacheslav Ovsiienko * @param olx 195418a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 195518a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 195618a1c200SViacheslav Ovsiienko */ 195718a1c200SViacheslav Ovsiienko static __rte_always_inline void 195818a1c200SViacheslav Ovsiienko mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq, 195918a1c200SViacheslav Ovsiienko uint16_t tail, 196018a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 196118a1c200SViacheslav Ovsiienko { 196218a1c200SViacheslav Ovsiienko uint16_t n_elts = tail - txq->elts_tail; 196318a1c200SViacheslav Ovsiienko 196418a1c200SViacheslav Ovsiienko assert(n_elts); 196518a1c200SViacheslav Ovsiienko assert(n_elts <= txq->elts_s); 196618a1c200SViacheslav Ovsiienko /* 196718a1c200SViacheslav Ovsiienko * Implement a loop to support ring buffer wraparound 196818a1c200SViacheslav Ovsiienko * with single inlining of mlx5_tx_free_mbuf(). 196918a1c200SViacheslav Ovsiienko */ 197018a1c200SViacheslav Ovsiienko do { 197118a1c200SViacheslav Ovsiienko unsigned int part; 197218a1c200SViacheslav Ovsiienko 197318a1c200SViacheslav Ovsiienko part = txq->elts_s - (txq->elts_tail & txq->elts_m); 197418a1c200SViacheslav Ovsiienko part = RTE_MIN(part, n_elts); 197518a1c200SViacheslav Ovsiienko assert(part); 197618a1c200SViacheslav Ovsiienko assert(part <= txq->elts_s); 197718a1c200SViacheslav Ovsiienko mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m], 197818a1c200SViacheslav Ovsiienko part, olx); 197918a1c200SViacheslav Ovsiienko txq->elts_tail += part; 198018a1c200SViacheslav Ovsiienko n_elts -= part; 198118a1c200SViacheslav Ovsiienko } while (n_elts); 198218a1c200SViacheslav Ovsiienko } 198318a1c200SViacheslav Ovsiienko 198418a1c200SViacheslav Ovsiienko /** 198518a1c200SViacheslav Ovsiienko * Store the mbuf being sent into elts ring buffer. 198618a1c200SViacheslav Ovsiienko * On Tx completion these mbufs will be freed. 198718a1c200SViacheslav Ovsiienko * 198818a1c200SViacheslav Ovsiienko * @param txq 198918a1c200SViacheslav Ovsiienko * Pointer to Tx queue structure. 199018a1c200SViacheslav Ovsiienko * @param pkts 199118a1c200SViacheslav Ovsiienko * Pointer to array of packets to be stored. 199218a1c200SViacheslav Ovsiienko * @param pkts_n 199318a1c200SViacheslav Ovsiienko * Number of packets to be stored. 199418a1c200SViacheslav Ovsiienko * @param olx 199518a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 199618a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 199718a1c200SViacheslav Ovsiienko */ 199818a1c200SViacheslav Ovsiienko static __rte_always_inline void 199918a1c200SViacheslav Ovsiienko mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, 200018a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 200118a1c200SViacheslav Ovsiienko unsigned int pkts_n, 200218a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 200318a1c200SViacheslav Ovsiienko { 200418a1c200SViacheslav Ovsiienko unsigned int part; 200518a1c200SViacheslav Ovsiienko struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts; 200618a1c200SViacheslav Ovsiienko 200718a1c200SViacheslav Ovsiienko assert(pkts); 200818a1c200SViacheslav Ovsiienko assert(pkts_n); 200918a1c200SViacheslav Ovsiienko part = txq->elts_s - (txq->elts_head & txq->elts_m); 201018a1c200SViacheslav Ovsiienko assert(part); 201118a1c200SViacheslav Ovsiienko assert(part <= txq->elts_s); 201218a1c200SViacheslav Ovsiienko /* This code is a good candidate for vectorizing with SIMD. */ 201318a1c200SViacheslav Ovsiienko rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)), 201418a1c200SViacheslav Ovsiienko (void *)pkts, 201518a1c200SViacheslav Ovsiienko RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *)); 201618a1c200SViacheslav Ovsiienko txq->elts_head += pkts_n; 201718a1c200SViacheslav Ovsiienko if (unlikely(part < pkts_n)) 201818a1c200SViacheslav Ovsiienko /* The copy is wrapping around the elts array. */ 201918a1c200SViacheslav Ovsiienko rte_memcpy((void *)elts, (void *)(pkts + part), 202018a1c200SViacheslav Ovsiienko (pkts_n - part) * sizeof(struct rte_mbuf *)); 202118a1c200SViacheslav Ovsiienko } 202218a1c200SViacheslav Ovsiienko 202318a1c200SViacheslav Ovsiienko /** 2024da1df1ccSViacheslav Ovsiienko * Update completion queue consuming index via doorbell 2025da1df1ccSViacheslav Ovsiienko * and flush the completed data buffers. 2026da1df1ccSViacheslav Ovsiienko * 2027da1df1ccSViacheslav Ovsiienko * @param txq 2028da1df1ccSViacheslav Ovsiienko * Pointer to TX queue structure. 2029da1df1ccSViacheslav Ovsiienko * @param valid CQE pointer 2030da1df1ccSViacheslav Ovsiienko * if not NULL update txq->wqe_pi and flush the buffers 2031da1df1ccSViacheslav Ovsiienko * @param itail 2032da1df1ccSViacheslav Ovsiienko * if not negative - flush the buffers till this index. 2033da1df1ccSViacheslav Ovsiienko * @param olx 2034da1df1ccSViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 2035da1df1ccSViacheslav Ovsiienko * compile time and may be used for optimization. 2036da1df1ccSViacheslav Ovsiienko */ 2037da1df1ccSViacheslav Ovsiienko static __rte_always_inline void 2038da1df1ccSViacheslav Ovsiienko mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq, 2039da1df1ccSViacheslav Ovsiienko volatile struct mlx5_cqe *last_cqe, 2040da1df1ccSViacheslav Ovsiienko int itail, 2041da1df1ccSViacheslav Ovsiienko unsigned int olx __rte_unused) 2042da1df1ccSViacheslav Ovsiienko { 2043da1df1ccSViacheslav Ovsiienko uint16_t tail; 2044da1df1ccSViacheslav Ovsiienko 2045da1df1ccSViacheslav Ovsiienko if (likely(last_cqe != NULL)) { 2046da1df1ccSViacheslav Ovsiienko txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter); 2047da1df1ccSViacheslav Ovsiienko tail = ((volatile struct mlx5_wqe_cseg *) 2048da1df1ccSViacheslav Ovsiienko (txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc; 2049da1df1ccSViacheslav Ovsiienko } else if (itail >= 0) { 2050da1df1ccSViacheslav Ovsiienko tail = (uint16_t)itail; 2051da1df1ccSViacheslav Ovsiienko } else { 2052da1df1ccSViacheslav Ovsiienko return; 2053da1df1ccSViacheslav Ovsiienko } 2054da1df1ccSViacheslav Ovsiienko rte_compiler_barrier(); 2055da1df1ccSViacheslav Ovsiienko *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci); 2056da1df1ccSViacheslav Ovsiienko if (likely(tail != txq->elts_tail)) { 2057da1df1ccSViacheslav Ovsiienko mlx5_tx_free_elts(txq, tail, olx); 2058da1df1ccSViacheslav Ovsiienko assert(tail == txq->elts_tail); 2059da1df1ccSViacheslav Ovsiienko } 2060da1df1ccSViacheslav Ovsiienko } 2061da1df1ccSViacheslav Ovsiienko 2062da1df1ccSViacheslav Ovsiienko /** 206318a1c200SViacheslav Ovsiienko * Manage TX completions. This routine checks the CQ for 206418a1c200SViacheslav Ovsiienko * arrived CQEs, deduces the last accomplished WQE in SQ, 206518a1c200SViacheslav Ovsiienko * updates SQ producing index and frees all completed mbufs. 206618a1c200SViacheslav Ovsiienko * 206718a1c200SViacheslav Ovsiienko * @param txq 206818a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 206918a1c200SViacheslav Ovsiienko * @param olx 207018a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 207118a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 207218a1c200SViacheslav Ovsiienko * 207318a1c200SViacheslav Ovsiienko * NOTE: not inlined intentionally, it makes tx_burst 207418a1c200SViacheslav Ovsiienko * routine smaller, simple and faster - from experiments. 207518a1c200SViacheslav Ovsiienko */ 207618a1c200SViacheslav Ovsiienko static void 207718a1c200SViacheslav Ovsiienko mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, 207818a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 207918a1c200SViacheslav Ovsiienko { 2080318ea4cfSViacheslav Ovsiienko unsigned int count = MLX5_TX_COMP_MAX_CQE; 2081da1df1ccSViacheslav Ovsiienko volatile struct mlx5_cqe *last_cqe = NULL; 208218a1c200SViacheslav Ovsiienko int ret; 208318a1c200SViacheslav Ovsiienko 2084da1df1ccSViacheslav Ovsiienko static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value"); 2085da1df1ccSViacheslav Ovsiienko static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value"); 208618a1c200SViacheslav Ovsiienko do { 208718a1c200SViacheslav Ovsiienko volatile struct mlx5_cqe *cqe; 208818a1c200SViacheslav Ovsiienko 208918a1c200SViacheslav Ovsiienko cqe = &txq->cqes[txq->cq_ci & txq->cqe_m]; 209018a1c200SViacheslav Ovsiienko ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci); 209118a1c200SViacheslav Ovsiienko if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { 209218a1c200SViacheslav Ovsiienko if (likely(ret != MLX5_CQE_STATUS_ERR)) { 209318a1c200SViacheslav Ovsiienko /* No new CQEs in completion queue. */ 209418a1c200SViacheslav Ovsiienko assert(ret == MLX5_CQE_STATUS_HW_OWN); 2095318ea4cfSViacheslav Ovsiienko break; 209618a1c200SViacheslav Ovsiienko } 2097da1df1ccSViacheslav Ovsiienko /* 2098da1df1ccSViacheslav Ovsiienko * Some error occurred, try to restart. 2099da1df1ccSViacheslav Ovsiienko * We have no barrier after WQE related Doorbell 2100da1df1ccSViacheslav Ovsiienko * written, make sure all writes are completed 2101da1df1ccSViacheslav Ovsiienko * here, before we might perform SQ reset. 2102da1df1ccSViacheslav Ovsiienko */ 210318a1c200SViacheslav Ovsiienko rte_wmb(); 2104da1df1ccSViacheslav Ovsiienko ret = mlx5_tx_error_cqe_handle 210518a1c200SViacheslav Ovsiienko (txq, (volatile struct mlx5_err_cqe *)cqe); 2106da1df1ccSViacheslav Ovsiienko /* 2107da1df1ccSViacheslav Ovsiienko * Flush buffers, update consuming index 2108da1df1ccSViacheslav Ovsiienko * if recovery succeeded. Otherwise 2109da1df1ccSViacheslav Ovsiienko * just try to recover later. 2110da1df1ccSViacheslav Ovsiienko */ 2111da1df1ccSViacheslav Ovsiienko last_cqe = NULL; 2112da1df1ccSViacheslav Ovsiienko break; 2113318ea4cfSViacheslav Ovsiienko } 211418a1c200SViacheslav Ovsiienko /* Normal transmit completion. */ 211518a1c200SViacheslav Ovsiienko ++txq->cq_ci; 2116da1df1ccSViacheslav Ovsiienko last_cqe = cqe; 211718a1c200SViacheslav Ovsiienko #ifndef NDEBUG 211818a1c200SViacheslav Ovsiienko if (txq->cq_pi) 211918a1c200SViacheslav Ovsiienko --txq->cq_pi; 212018a1c200SViacheslav Ovsiienko #endif 2121318ea4cfSViacheslav Ovsiienko /* 2122318ea4cfSViacheslav Ovsiienko * We have to restrict the amount of processed CQEs 2123318ea4cfSViacheslav Ovsiienko * in one tx_burst routine call. The CQ may be large 2124318ea4cfSViacheslav Ovsiienko * and many CQEs may be updated by the NIC in one 2125318ea4cfSViacheslav Ovsiienko * transaction. Buffers freeing is time consuming, 2126318ea4cfSViacheslav Ovsiienko * multiple iterations may introduce significant 2127318ea4cfSViacheslav Ovsiienko * latency. 2128318ea4cfSViacheslav Ovsiienko */ 2129318ea4cfSViacheslav Ovsiienko } while (--count); 2130da1df1ccSViacheslav Ovsiienko mlx5_tx_comp_flush(txq, last_cqe, ret, olx); 213118a1c200SViacheslav Ovsiienko } 213218a1c200SViacheslav Ovsiienko 213318a1c200SViacheslav Ovsiienko /** 213418a1c200SViacheslav Ovsiienko * Check if the completion request flag should be set in the last WQE. 213518a1c200SViacheslav Ovsiienko * Both pushed mbufs and WQEs are monitored and the completion request 213618a1c200SViacheslav Ovsiienko * flag is set if any of thresholds is reached. 213718a1c200SViacheslav Ovsiienko * 213818a1c200SViacheslav Ovsiienko * @param txq 213918a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 214018a1c200SViacheslav Ovsiienko * @param loc 214118a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 21424dec9c79SViacheslav Ovsiienko * @param multi, 21434dec9c79SViacheslav Ovsiienko * Routine is called from multi-segment sending loop, 21444dec9c79SViacheslav Ovsiienko * do not correct the elts_head according to the pkts_copy. 214518a1c200SViacheslav Ovsiienko * @param olx 214618a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 214718a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 214818a1c200SViacheslav Ovsiienko */ 214918a1c200SViacheslav Ovsiienko static __rte_always_inline void 215018a1c200SViacheslav Ovsiienko mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, 215118a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 21524dec9c79SViacheslav Ovsiienko bool multi, 21535a93e173SViacheslav Ovsiienko unsigned int olx) 215418a1c200SViacheslav Ovsiienko { 21555a93e173SViacheslav Ovsiienko uint16_t head = txq->elts_head; 21565a93e173SViacheslav Ovsiienko unsigned int part; 215718a1c200SViacheslav Ovsiienko 21584dec9c79SViacheslav Ovsiienko part = (MLX5_TXOFF_CONFIG(INLINE) || multi) ? 21594dec9c79SViacheslav Ovsiienko 0 : loc->pkts_sent - loc->pkts_copy; 21605a93e173SViacheslav Ovsiienko head += part; 216118a1c200SViacheslav Ovsiienko if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH || 21625a93e173SViacheslav Ovsiienko (MLX5_TXOFF_CONFIG(INLINE) && 21635a93e173SViacheslav Ovsiienko (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) { 216418a1c200SViacheslav Ovsiienko volatile struct mlx5_wqe *last = loc->wqe_last; 216518a1c200SViacheslav Ovsiienko 216618a1c200SViacheslav Ovsiienko txq->elts_comp = head; 21675a93e173SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(INLINE)) 216818a1c200SViacheslav Ovsiienko txq->wqe_comp = txq->wqe_ci; 216918a1c200SViacheslav Ovsiienko /* Request unconditional completion on last WQE. */ 217018a1c200SViacheslav Ovsiienko last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS << 217118a1c200SViacheslav Ovsiienko MLX5_COMP_MODE_OFFSET); 217218a1c200SViacheslav Ovsiienko /* Save elts_head in unused "immediate" field of WQE. */ 217318a1c200SViacheslav Ovsiienko last->cseg.misc = head; 217418a1c200SViacheslav Ovsiienko /* 217518a1c200SViacheslav Ovsiienko * A CQE slot must always be available. Count the 217618a1c200SViacheslav Ovsiienko * issued CEQ "always" request instead of production 217718a1c200SViacheslav Ovsiienko * index due to here can be CQE with errors and 217818a1c200SViacheslav Ovsiienko * difference with ci may become inconsistent. 217918a1c200SViacheslav Ovsiienko */ 218018a1c200SViacheslav Ovsiienko assert(txq->cqe_s > ++txq->cq_pi); 218118a1c200SViacheslav Ovsiienko } 218218a1c200SViacheslav Ovsiienko } 218318a1c200SViacheslav Ovsiienko 218418a1c200SViacheslav Ovsiienko /** 2185a6bd4911SViacheslav Ovsiienko * DPDK callback to check the status of a tx descriptor. 2186a6bd4911SViacheslav Ovsiienko * 2187a6bd4911SViacheslav Ovsiienko * @param tx_queue 2188a6bd4911SViacheslav Ovsiienko * The tx queue. 2189a6bd4911SViacheslav Ovsiienko * @param[in] offset 2190a6bd4911SViacheslav Ovsiienko * The index of the descriptor in the ring. 2191a6bd4911SViacheslav Ovsiienko * 2192a6bd4911SViacheslav Ovsiienko * @return 2193a6bd4911SViacheslav Ovsiienko * The status of the tx descriptor. 2194a6bd4911SViacheslav Ovsiienko */ 2195a6bd4911SViacheslav Ovsiienko int 2196a6bd4911SViacheslav Ovsiienko mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset) 2197a6bd4911SViacheslav Ovsiienko { 219818a1c200SViacheslav Ovsiienko struct mlx5_txq_data *restrict txq = tx_queue; 219918a1c200SViacheslav Ovsiienko uint16_t used; 220018a1c200SViacheslav Ovsiienko 220118a1c200SViacheslav Ovsiienko mlx5_tx_handle_completion(txq, 0); 220218a1c200SViacheslav Ovsiienko used = txq->elts_head - txq->elts_tail; 220318a1c200SViacheslav Ovsiienko if (offset < used) 2204a6bd4911SViacheslav Ovsiienko return RTE_ETH_TX_DESC_FULL; 220518a1c200SViacheslav Ovsiienko return RTE_ETH_TX_DESC_DONE; 220618a1c200SViacheslav Ovsiienko } 220718a1c200SViacheslav Ovsiienko 220818a1c200SViacheslav Ovsiienko /** 220918a1c200SViacheslav Ovsiienko * Build the Control Segment with specified opcode: 221018a1c200SViacheslav Ovsiienko * - MLX5_OPCODE_SEND 221118a1c200SViacheslav Ovsiienko * - MLX5_OPCODE_ENHANCED_MPSW 221218a1c200SViacheslav Ovsiienko * - MLX5_OPCODE_TSO 221318a1c200SViacheslav Ovsiienko * 221418a1c200SViacheslav Ovsiienko * @param txq 221518a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 221618a1c200SViacheslav Ovsiienko * @param loc 221718a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 221818a1c200SViacheslav Ovsiienko * @param wqe 221918a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Control Segment. 222018a1c200SViacheslav Ovsiienko * @param ds 222118a1c200SViacheslav Ovsiienko * Supposed length of WQE in segments. 222218a1c200SViacheslav Ovsiienko * @param opcode 222318a1c200SViacheslav Ovsiienko * SQ WQE opcode to put into Control Segment. 222418a1c200SViacheslav Ovsiienko * @param olx 222518a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 222618a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 222718a1c200SViacheslav Ovsiienko */ 222818a1c200SViacheslav Ovsiienko static __rte_always_inline void 222918a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq, 223018a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc __rte_unused, 223118a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe, 223218a1c200SViacheslav Ovsiienko unsigned int ds, 223318a1c200SViacheslav Ovsiienko unsigned int opcode, 223418a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 223518a1c200SViacheslav Ovsiienko { 223618a1c200SViacheslav Ovsiienko struct mlx5_wqe_cseg *restrict cs = &wqe->cseg; 223718a1c200SViacheslav Ovsiienko 223818a1c200SViacheslav Ovsiienko cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode); 223918a1c200SViacheslav Ovsiienko cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); 224018a1c200SViacheslav Ovsiienko cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << 224118a1c200SViacheslav Ovsiienko MLX5_COMP_MODE_OFFSET); 224218a1c200SViacheslav Ovsiienko cs->misc = RTE_BE32(0); 224318a1c200SViacheslav Ovsiienko } 224418a1c200SViacheslav Ovsiienko 224518a1c200SViacheslav Ovsiienko /** 224618a1c200SViacheslav Ovsiienko * Build the Ethernet Segment without inlined data. 224718a1c200SViacheslav Ovsiienko * Supports Software Parser, Checksums and VLAN 224818a1c200SViacheslav Ovsiienko * insertion Tx offload features. 224918a1c200SViacheslav Ovsiienko * 225018a1c200SViacheslav Ovsiienko * @param txq 225118a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 225218a1c200SViacheslav Ovsiienko * @param loc 225318a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 225418a1c200SViacheslav Ovsiienko * @param wqe 225518a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Ethernet Segment. 225618a1c200SViacheslav Ovsiienko * @param olx 225718a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 225818a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 225918a1c200SViacheslav Ovsiienko */ 226018a1c200SViacheslav Ovsiienko static __rte_always_inline void 226118a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused, 226218a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 226318a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe, 226418a1c200SViacheslav Ovsiienko unsigned int olx) 226518a1c200SViacheslav Ovsiienko { 226618a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict es = &wqe->eseg; 226718a1c200SViacheslav Ovsiienko uint32_t csum; 226818a1c200SViacheslav Ovsiienko 226918a1c200SViacheslav Ovsiienko /* 227018a1c200SViacheslav Ovsiienko * Calculate and set check sum flags first, dword field 227118a1c200SViacheslav Ovsiienko * in segment may be shared with Software Parser flags. 227218a1c200SViacheslav Ovsiienko */ 227318a1c200SViacheslav Ovsiienko csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; 227418a1c200SViacheslav Ovsiienko es->flags = rte_cpu_to_le_32(csum); 227518a1c200SViacheslav Ovsiienko /* 227618a1c200SViacheslav Ovsiienko * Calculate and set Software Parser offsets and flags. 227718a1c200SViacheslav Ovsiienko * These flags a set for custom UDP and IP tunnel packets. 227818a1c200SViacheslav Ovsiienko */ 227918a1c200SViacheslav Ovsiienko es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); 228018a1c200SViacheslav Ovsiienko /* Fill metadata field if needed. */ 228118a1c200SViacheslav Ovsiienko es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? 228218a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_METADATA ? 228318a1c200SViacheslav Ovsiienko loc->mbuf->tx_metadata : 0 : 0; 228418a1c200SViacheslav Ovsiienko /* Engage VLAN tag insertion feature if requested. */ 228518a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 228618a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { 228718a1c200SViacheslav Ovsiienko /* 228818a1c200SViacheslav Ovsiienko * We should get here only if device support 228918a1c200SViacheslav Ovsiienko * this feature correctly. 229018a1c200SViacheslav Ovsiienko */ 229118a1c200SViacheslav Ovsiienko assert(txq->vlan_en); 229218a1c200SViacheslav Ovsiienko es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT | 229318a1c200SViacheslav Ovsiienko loc->mbuf->vlan_tci); 229418a1c200SViacheslav Ovsiienko } else { 229518a1c200SViacheslav Ovsiienko es->inline_hdr = RTE_BE32(0); 229618a1c200SViacheslav Ovsiienko } 229718a1c200SViacheslav Ovsiienko } 229818a1c200SViacheslav Ovsiienko 229918a1c200SViacheslav Ovsiienko /** 230018a1c200SViacheslav Ovsiienko * Build the Ethernet Segment with minimal inlined data 230118a1c200SViacheslav Ovsiienko * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is 230218a1c200SViacheslav Ovsiienko * used to fill the gap in single WQEBB WQEs. 230318a1c200SViacheslav Ovsiienko * Supports Software Parser, Checksums and VLAN 230418a1c200SViacheslav Ovsiienko * insertion Tx offload features. 230518a1c200SViacheslav Ovsiienko * 230618a1c200SViacheslav Ovsiienko * @param txq 230718a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 230818a1c200SViacheslav Ovsiienko * @param loc 230918a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 231018a1c200SViacheslav Ovsiienko * @param wqe 231118a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Ethernet Segment. 231218a1c200SViacheslav Ovsiienko * @param vlan 231318a1c200SViacheslav Ovsiienko * Length of VLAN tag insertion if any. 231418a1c200SViacheslav Ovsiienko * @param olx 231518a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 231618a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 231718a1c200SViacheslav Ovsiienko */ 231818a1c200SViacheslav Ovsiienko static __rte_always_inline void 231918a1c200SViacheslav Ovsiienko mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused, 232018a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 232118a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe, 232218a1c200SViacheslav Ovsiienko unsigned int vlan, 232318a1c200SViacheslav Ovsiienko unsigned int olx) 232418a1c200SViacheslav Ovsiienko { 232518a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict es = &wqe->eseg; 232618a1c200SViacheslav Ovsiienko uint32_t csum; 232718a1c200SViacheslav Ovsiienko uint8_t *psrc, *pdst; 232818a1c200SViacheslav Ovsiienko 232918a1c200SViacheslav Ovsiienko /* 233018a1c200SViacheslav Ovsiienko * Calculate and set check sum flags first, dword field 233118a1c200SViacheslav Ovsiienko * in segment may be shared with Software Parser flags. 233218a1c200SViacheslav Ovsiienko */ 233318a1c200SViacheslav Ovsiienko csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; 233418a1c200SViacheslav Ovsiienko es->flags = rte_cpu_to_le_32(csum); 233518a1c200SViacheslav Ovsiienko /* 233618a1c200SViacheslav Ovsiienko * Calculate and set Software Parser offsets and flags. 233718a1c200SViacheslav Ovsiienko * These flags a set for custom UDP and IP tunnel packets. 233818a1c200SViacheslav Ovsiienko */ 233918a1c200SViacheslav Ovsiienko es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); 234018a1c200SViacheslav Ovsiienko /* Fill metadata field if needed. */ 234118a1c200SViacheslav Ovsiienko es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? 234218a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_METADATA ? 234318a1c200SViacheslav Ovsiienko loc->mbuf->tx_metadata : 0 : 0; 234418a1c200SViacheslav Ovsiienko static_assert(MLX5_ESEG_MIN_INLINE_SIZE == 234518a1c200SViacheslav Ovsiienko (sizeof(uint16_t) + 234618a1c200SViacheslav Ovsiienko sizeof(rte_v128u32_t)), 234718a1c200SViacheslav Ovsiienko "invalid Ethernet Segment data size"); 234818a1c200SViacheslav Ovsiienko static_assert(MLX5_ESEG_MIN_INLINE_SIZE == 234918a1c200SViacheslav Ovsiienko (sizeof(uint16_t) + 235018a1c200SViacheslav Ovsiienko sizeof(struct rte_vlan_hdr) + 235118a1c200SViacheslav Ovsiienko 2 * RTE_ETHER_ADDR_LEN), 235218a1c200SViacheslav Ovsiienko "invalid Ethernet Segment data size"); 235318a1c200SViacheslav Ovsiienko psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); 235418a1c200SViacheslav Ovsiienko es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE); 235518a1c200SViacheslav Ovsiienko es->inline_data = *(unaligned_uint16_t *)psrc; 235618a1c200SViacheslav Ovsiienko psrc += sizeof(uint16_t); 235718a1c200SViacheslav Ovsiienko pdst = (uint8_t *)(es + 1); 235818a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { 235918a1c200SViacheslav Ovsiienko /* Implement VLAN tag insertion as part inline data. */ 236018a1c200SViacheslav Ovsiienko memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t)); 236118a1c200SViacheslav Ovsiienko pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); 236218a1c200SViacheslav Ovsiienko psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); 236318a1c200SViacheslav Ovsiienko /* Insert VLAN ethertype + VLAN tag. */ 236418a1c200SViacheslav Ovsiienko *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32 236518a1c200SViacheslav Ovsiienko ((RTE_ETHER_TYPE_VLAN << 16) | 236618a1c200SViacheslav Ovsiienko loc->mbuf->vlan_tci); 236718a1c200SViacheslav Ovsiienko pdst += sizeof(struct rte_vlan_hdr); 236818a1c200SViacheslav Ovsiienko /* Copy the rest two bytes from packet data. */ 236918a1c200SViacheslav Ovsiienko assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); 237018a1c200SViacheslav Ovsiienko *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc; 237118a1c200SViacheslav Ovsiienko } else { 237218a1c200SViacheslav Ovsiienko /* Fill the gap in the title WQEBB with inline data. */ 237318a1c200SViacheslav Ovsiienko rte_mov16(pdst, psrc); 237418a1c200SViacheslav Ovsiienko } 237518a1c200SViacheslav Ovsiienko } 237618a1c200SViacheslav Ovsiienko 237718a1c200SViacheslav Ovsiienko /** 237818a1c200SViacheslav Ovsiienko * Build the Ethernet Segment with entire packet 237918a1c200SViacheslav Ovsiienko * data inlining. Checks the boundary of WQEBB and 238018a1c200SViacheslav Ovsiienko * ring buffer wrapping, supports Software Parser, 238118a1c200SViacheslav Ovsiienko * Checksums and VLAN insertion Tx offload features. 238218a1c200SViacheslav Ovsiienko * 238318a1c200SViacheslav Ovsiienko * @param txq 238418a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 238518a1c200SViacheslav Ovsiienko * @param loc 238618a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 238718a1c200SViacheslav Ovsiienko * @param wqe 238818a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Ethernet Segment. 238918a1c200SViacheslav Ovsiienko * @param vlan 239018a1c200SViacheslav Ovsiienko * Length of VLAN tag insertion if any. 239118a1c200SViacheslav Ovsiienko * @param inlen 239218a1c200SViacheslav Ovsiienko * Length of data to inline (VLAN included, if any). 239318a1c200SViacheslav Ovsiienko * @param tso 239418a1c200SViacheslav Ovsiienko * TSO flag, set mss field from the packet. 239518a1c200SViacheslav Ovsiienko * @param olx 239618a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 239718a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 239818a1c200SViacheslav Ovsiienko * 239918a1c200SViacheslav Ovsiienko * @return 240018a1c200SViacheslav Ovsiienko * Pointer to the next Data Segment (aligned and wrapped around). 240118a1c200SViacheslav Ovsiienko */ 240218a1c200SViacheslav Ovsiienko static __rte_always_inline struct mlx5_wqe_dseg * 240318a1c200SViacheslav Ovsiienko mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq, 240418a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 240518a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe, 240618a1c200SViacheslav Ovsiienko unsigned int vlan, 240718a1c200SViacheslav Ovsiienko unsigned int inlen, 240818a1c200SViacheslav Ovsiienko unsigned int tso, 240918a1c200SViacheslav Ovsiienko unsigned int olx) 241018a1c200SViacheslav Ovsiienko { 241118a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict es = &wqe->eseg; 241218a1c200SViacheslav Ovsiienko uint32_t csum; 241318a1c200SViacheslav Ovsiienko uint8_t *psrc, *pdst; 241418a1c200SViacheslav Ovsiienko unsigned int part; 241518a1c200SViacheslav Ovsiienko 241618a1c200SViacheslav Ovsiienko /* 241718a1c200SViacheslav Ovsiienko * Calculate and set check sum flags first, dword field 241818a1c200SViacheslav Ovsiienko * in segment may be shared with Software Parser flags. 241918a1c200SViacheslav Ovsiienko */ 242018a1c200SViacheslav Ovsiienko csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; 242118a1c200SViacheslav Ovsiienko if (tso) { 242218a1c200SViacheslav Ovsiienko csum <<= 24; 242318a1c200SViacheslav Ovsiienko csum |= loc->mbuf->tso_segsz; 242418a1c200SViacheslav Ovsiienko es->flags = rte_cpu_to_be_32(csum); 242518a1c200SViacheslav Ovsiienko } else { 242618a1c200SViacheslav Ovsiienko es->flags = rte_cpu_to_le_32(csum); 242718a1c200SViacheslav Ovsiienko } 242818a1c200SViacheslav Ovsiienko /* 242918a1c200SViacheslav Ovsiienko * Calculate and set Software Parser offsets and flags. 243018a1c200SViacheslav Ovsiienko * These flags a set for custom UDP and IP tunnel packets. 243118a1c200SViacheslav Ovsiienko */ 243218a1c200SViacheslav Ovsiienko es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); 243318a1c200SViacheslav Ovsiienko /* Fill metadata field if needed. */ 243418a1c200SViacheslav Ovsiienko es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? 243518a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_METADATA ? 243618a1c200SViacheslav Ovsiienko loc->mbuf->tx_metadata : 0 : 0; 243718a1c200SViacheslav Ovsiienko static_assert(MLX5_ESEG_MIN_INLINE_SIZE == 243818a1c200SViacheslav Ovsiienko (sizeof(uint16_t) + 243918a1c200SViacheslav Ovsiienko sizeof(rte_v128u32_t)), 244018a1c200SViacheslav Ovsiienko "invalid Ethernet Segment data size"); 244118a1c200SViacheslav Ovsiienko static_assert(MLX5_ESEG_MIN_INLINE_SIZE == 244218a1c200SViacheslav Ovsiienko (sizeof(uint16_t) + 244318a1c200SViacheslav Ovsiienko sizeof(struct rte_vlan_hdr) + 244418a1c200SViacheslav Ovsiienko 2 * RTE_ETHER_ADDR_LEN), 244518a1c200SViacheslav Ovsiienko "invalid Ethernet Segment data size"); 244618a1c200SViacheslav Ovsiienko psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); 244718a1c200SViacheslav Ovsiienko es->inline_hdr_sz = rte_cpu_to_be_16(inlen); 244818a1c200SViacheslav Ovsiienko es->inline_data = *(unaligned_uint16_t *)psrc; 244918a1c200SViacheslav Ovsiienko psrc += sizeof(uint16_t); 245018a1c200SViacheslav Ovsiienko pdst = (uint8_t *)(es + 1); 245118a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { 245218a1c200SViacheslav Ovsiienko /* Implement VLAN tag insertion as part inline data. */ 245318a1c200SViacheslav Ovsiienko memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t)); 245418a1c200SViacheslav Ovsiienko pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); 245518a1c200SViacheslav Ovsiienko psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); 245618a1c200SViacheslav Ovsiienko /* Insert VLAN ethertype + VLAN tag. */ 245718a1c200SViacheslav Ovsiienko *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32 245818a1c200SViacheslav Ovsiienko ((RTE_ETHER_TYPE_VLAN << 16) | 245918a1c200SViacheslav Ovsiienko loc->mbuf->vlan_tci); 246018a1c200SViacheslav Ovsiienko pdst += sizeof(struct rte_vlan_hdr); 246118a1c200SViacheslav Ovsiienko /* Copy the rest two bytes from packet data. */ 246218a1c200SViacheslav Ovsiienko assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); 246318a1c200SViacheslav Ovsiienko *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc; 246418a1c200SViacheslav Ovsiienko psrc += sizeof(uint16_t); 246518a1c200SViacheslav Ovsiienko } else { 246618a1c200SViacheslav Ovsiienko /* Fill the gap in the title WQEBB with inline data. */ 246718a1c200SViacheslav Ovsiienko rte_mov16(pdst, psrc); 246818a1c200SViacheslav Ovsiienko psrc += sizeof(rte_v128u32_t); 246918a1c200SViacheslav Ovsiienko } 247018a1c200SViacheslav Ovsiienko pdst = (uint8_t *)(es + 2); 247118a1c200SViacheslav Ovsiienko assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); 247218a1c200SViacheslav Ovsiienko assert(pdst < (uint8_t *)txq->wqes_end); 247318a1c200SViacheslav Ovsiienko inlen -= MLX5_ESEG_MIN_INLINE_SIZE; 247418a1c200SViacheslav Ovsiienko if (!inlen) { 247518a1c200SViacheslav Ovsiienko assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); 247618a1c200SViacheslav Ovsiienko return (struct mlx5_wqe_dseg *)pdst; 247718a1c200SViacheslav Ovsiienko } 247818a1c200SViacheslav Ovsiienko /* 247918a1c200SViacheslav Ovsiienko * The WQEBB space availability is checked by caller. 248018a1c200SViacheslav Ovsiienko * Here we should be aware of WQE ring buffer wraparound only. 248118a1c200SViacheslav Ovsiienko */ 248218a1c200SViacheslav Ovsiienko part = (uint8_t *)txq->wqes_end - pdst; 248318a1c200SViacheslav Ovsiienko part = RTE_MIN(part, inlen); 248418a1c200SViacheslav Ovsiienko do { 248518a1c200SViacheslav Ovsiienko rte_memcpy(pdst, psrc, part); 248618a1c200SViacheslav Ovsiienko inlen -= part; 248718a1c200SViacheslav Ovsiienko if (likely(!inlen)) { 248818a1c200SViacheslav Ovsiienko /* 248918a1c200SViacheslav Ovsiienko * If return value is not used by the caller 249018a1c200SViacheslav Ovsiienko * the code below will be optimized out. 249118a1c200SViacheslav Ovsiienko */ 249218a1c200SViacheslav Ovsiienko pdst += part; 249318a1c200SViacheslav Ovsiienko pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); 249418a1c200SViacheslav Ovsiienko if (unlikely(pdst >= (uint8_t *)txq->wqes_end)) 249518a1c200SViacheslav Ovsiienko pdst = (uint8_t *)txq->wqes; 249618a1c200SViacheslav Ovsiienko return (struct mlx5_wqe_dseg *)pdst; 249718a1c200SViacheslav Ovsiienko } 249818a1c200SViacheslav Ovsiienko pdst = (uint8_t *)txq->wqes; 249918a1c200SViacheslav Ovsiienko psrc += part; 250018a1c200SViacheslav Ovsiienko part = inlen; 250118a1c200SViacheslav Ovsiienko } while (true); 250218a1c200SViacheslav Ovsiienko } 250318a1c200SViacheslav Ovsiienko 250418a1c200SViacheslav Ovsiienko /** 250518a1c200SViacheslav Ovsiienko * Copy data from chain of mbuf to the specified linear buffer. 250618a1c200SViacheslav Ovsiienko * Checksums and VLAN insertion Tx offload features. If data 250718a1c200SViacheslav Ovsiienko * from some mbuf copied completely this mbuf is freed. Local 250818a1c200SViacheslav Ovsiienko * structure is used to keep the byte stream state. 250918a1c200SViacheslav Ovsiienko * 251018a1c200SViacheslav Ovsiienko * @param pdst 251118a1c200SViacheslav Ovsiienko * Pointer to the destination linear buffer. 251218a1c200SViacheslav Ovsiienko * @param loc 251318a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 251418a1c200SViacheslav Ovsiienko * @param len 251518a1c200SViacheslav Ovsiienko * Length of data to be copied. 251618a1c200SViacheslav Ovsiienko * @param olx 251718a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 251818a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 251918a1c200SViacheslav Ovsiienko */ 252018a1c200SViacheslav Ovsiienko static __rte_always_inline void 252118a1c200SViacheslav Ovsiienko mlx5_tx_mseg_memcpy(uint8_t *pdst, 252218a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 252318a1c200SViacheslav Ovsiienko unsigned int len, 252418a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 252518a1c200SViacheslav Ovsiienko { 252618a1c200SViacheslav Ovsiienko struct rte_mbuf *mbuf; 252718a1c200SViacheslav Ovsiienko unsigned int part, dlen; 252818a1c200SViacheslav Ovsiienko uint8_t *psrc; 252918a1c200SViacheslav Ovsiienko 253018a1c200SViacheslav Ovsiienko assert(len); 253118a1c200SViacheslav Ovsiienko do { 253218a1c200SViacheslav Ovsiienko /* Allow zero length packets, must check first. */ 253318a1c200SViacheslav Ovsiienko dlen = rte_pktmbuf_data_len(loc->mbuf); 253418a1c200SViacheslav Ovsiienko if (dlen <= loc->mbuf_off) { 253518a1c200SViacheslav Ovsiienko /* Exhausted packet, just free. */ 253618a1c200SViacheslav Ovsiienko mbuf = loc->mbuf; 253718a1c200SViacheslav Ovsiienko loc->mbuf = mbuf->next; 253818a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(mbuf); 253918a1c200SViacheslav Ovsiienko loc->mbuf_off = 0; 254018a1c200SViacheslav Ovsiienko assert(loc->mbuf_nseg > 1); 254118a1c200SViacheslav Ovsiienko assert(loc->mbuf); 254218a1c200SViacheslav Ovsiienko --loc->mbuf_nseg; 254318a1c200SViacheslav Ovsiienko continue; 254418a1c200SViacheslav Ovsiienko } 254518a1c200SViacheslav Ovsiienko dlen -= loc->mbuf_off; 254618a1c200SViacheslav Ovsiienko psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *, 254718a1c200SViacheslav Ovsiienko loc->mbuf_off); 254818a1c200SViacheslav Ovsiienko part = RTE_MIN(len, dlen); 254918a1c200SViacheslav Ovsiienko rte_memcpy(pdst, psrc, part); 255018a1c200SViacheslav Ovsiienko loc->mbuf_off += part; 255118a1c200SViacheslav Ovsiienko len -= part; 255218a1c200SViacheslav Ovsiienko if (!len) { 255318a1c200SViacheslav Ovsiienko if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) { 255418a1c200SViacheslav Ovsiienko loc->mbuf_off = 0; 255518a1c200SViacheslav Ovsiienko /* Exhausted packet, just free. */ 255618a1c200SViacheslav Ovsiienko mbuf = loc->mbuf; 255718a1c200SViacheslav Ovsiienko loc->mbuf = mbuf->next; 255818a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(mbuf); 255918a1c200SViacheslav Ovsiienko loc->mbuf_off = 0; 256018a1c200SViacheslav Ovsiienko assert(loc->mbuf_nseg >= 1); 256118a1c200SViacheslav Ovsiienko --loc->mbuf_nseg; 256218a1c200SViacheslav Ovsiienko } 256318a1c200SViacheslav Ovsiienko return; 256418a1c200SViacheslav Ovsiienko } 256518a1c200SViacheslav Ovsiienko pdst += part; 256618a1c200SViacheslav Ovsiienko } while (true); 256718a1c200SViacheslav Ovsiienko } 256818a1c200SViacheslav Ovsiienko 256918a1c200SViacheslav Ovsiienko /** 257018a1c200SViacheslav Ovsiienko * Build the Ethernet Segment with inlined data from 257118a1c200SViacheslav Ovsiienko * multi-segment packet. Checks the boundary of WQEBB 257218a1c200SViacheslav Ovsiienko * and ring buffer wrapping, supports Software Parser, 257318a1c200SViacheslav Ovsiienko * Checksums and VLAN insertion Tx offload features. 257418a1c200SViacheslav Ovsiienko * 257518a1c200SViacheslav Ovsiienko * @param txq 257618a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 257718a1c200SViacheslav Ovsiienko * @param loc 257818a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 257918a1c200SViacheslav Ovsiienko * @param wqe 258018a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Ethernet Segment. 258118a1c200SViacheslav Ovsiienko * @param vlan 258218a1c200SViacheslav Ovsiienko * Length of VLAN tag insertion if any. 258318a1c200SViacheslav Ovsiienko * @param inlen 258418a1c200SViacheslav Ovsiienko * Length of data to inline (VLAN included, if any). 258518a1c200SViacheslav Ovsiienko * @param tso 258618a1c200SViacheslav Ovsiienko * TSO flag, set mss field from the packet. 258718a1c200SViacheslav Ovsiienko * @param olx 258818a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 258918a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 259018a1c200SViacheslav Ovsiienko * 259118a1c200SViacheslav Ovsiienko * @return 259218a1c200SViacheslav Ovsiienko * Pointer to the next Data Segment (aligned and 259318a1c200SViacheslav Ovsiienko * possible NOT wrapped around - caller should do 259418a1c200SViacheslav Ovsiienko * wrapping check on its own). 259518a1c200SViacheslav Ovsiienko */ 259618a1c200SViacheslav Ovsiienko static __rte_always_inline struct mlx5_wqe_dseg * 259718a1c200SViacheslav Ovsiienko mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq, 259818a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 259918a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe, 260018a1c200SViacheslav Ovsiienko unsigned int vlan, 260118a1c200SViacheslav Ovsiienko unsigned int inlen, 260218a1c200SViacheslav Ovsiienko unsigned int tso, 260318a1c200SViacheslav Ovsiienko unsigned int olx) 260418a1c200SViacheslav Ovsiienko { 260518a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict es = &wqe->eseg; 260618a1c200SViacheslav Ovsiienko uint32_t csum; 260718a1c200SViacheslav Ovsiienko uint8_t *pdst; 260818a1c200SViacheslav Ovsiienko unsigned int part; 260918a1c200SViacheslav Ovsiienko 261018a1c200SViacheslav Ovsiienko /* 261118a1c200SViacheslav Ovsiienko * Calculate and set check sum flags first, uint32_t field 261218a1c200SViacheslav Ovsiienko * in segment may be shared with Software Parser flags. 261318a1c200SViacheslav Ovsiienko */ 261418a1c200SViacheslav Ovsiienko csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; 261518a1c200SViacheslav Ovsiienko if (tso) { 261618a1c200SViacheslav Ovsiienko csum <<= 24; 261718a1c200SViacheslav Ovsiienko csum |= loc->mbuf->tso_segsz; 261818a1c200SViacheslav Ovsiienko es->flags = rte_cpu_to_be_32(csum); 261918a1c200SViacheslav Ovsiienko } else { 262018a1c200SViacheslav Ovsiienko es->flags = rte_cpu_to_le_32(csum); 262118a1c200SViacheslav Ovsiienko } 262218a1c200SViacheslav Ovsiienko /* 262318a1c200SViacheslav Ovsiienko * Calculate and set Software Parser offsets and flags. 262418a1c200SViacheslav Ovsiienko * These flags a set for custom UDP and IP tunnel packets. 262518a1c200SViacheslav Ovsiienko */ 262618a1c200SViacheslav Ovsiienko es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); 262718a1c200SViacheslav Ovsiienko /* Fill metadata field if needed. */ 262818a1c200SViacheslav Ovsiienko es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? 262918a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_METADATA ? 263018a1c200SViacheslav Ovsiienko loc->mbuf->tx_metadata : 0 : 0; 263118a1c200SViacheslav Ovsiienko static_assert(MLX5_ESEG_MIN_INLINE_SIZE == 263218a1c200SViacheslav Ovsiienko (sizeof(uint16_t) + 263318a1c200SViacheslav Ovsiienko sizeof(rte_v128u32_t)), 263418a1c200SViacheslav Ovsiienko "invalid Ethernet Segment data size"); 263518a1c200SViacheslav Ovsiienko static_assert(MLX5_ESEG_MIN_INLINE_SIZE == 263618a1c200SViacheslav Ovsiienko (sizeof(uint16_t) + 263718a1c200SViacheslav Ovsiienko sizeof(struct rte_vlan_hdr) + 263818a1c200SViacheslav Ovsiienko 2 * RTE_ETHER_ADDR_LEN), 263918a1c200SViacheslav Ovsiienko "invalid Ethernet Segment data size"); 26407014ef5bSViacheslav Ovsiienko assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); 264118a1c200SViacheslav Ovsiienko es->inline_hdr_sz = rte_cpu_to_be_16(inlen); 264218a1c200SViacheslav Ovsiienko pdst = (uint8_t *)&es->inline_data; 264318a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { 264418a1c200SViacheslav Ovsiienko /* Implement VLAN tag insertion as part inline data. */ 264518a1c200SViacheslav Ovsiienko mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx); 264618a1c200SViacheslav Ovsiienko pdst += 2 * RTE_ETHER_ADDR_LEN; 264718a1c200SViacheslav Ovsiienko *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32 264818a1c200SViacheslav Ovsiienko ((RTE_ETHER_TYPE_VLAN << 16) | 264918a1c200SViacheslav Ovsiienko loc->mbuf->vlan_tci); 265018a1c200SViacheslav Ovsiienko pdst += sizeof(struct rte_vlan_hdr); 265118a1c200SViacheslav Ovsiienko inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr); 265218a1c200SViacheslav Ovsiienko } 265318a1c200SViacheslav Ovsiienko assert(pdst < (uint8_t *)txq->wqes_end); 265418a1c200SViacheslav Ovsiienko /* 265518a1c200SViacheslav Ovsiienko * The WQEBB space availability is checked by caller. 265618a1c200SViacheslav Ovsiienko * Here we should be aware of WQE ring buffer wraparound only. 265718a1c200SViacheslav Ovsiienko */ 265818a1c200SViacheslav Ovsiienko part = (uint8_t *)txq->wqes_end - pdst; 265918a1c200SViacheslav Ovsiienko part = RTE_MIN(part, inlen); 266018a1c200SViacheslav Ovsiienko assert(part); 266118a1c200SViacheslav Ovsiienko do { 266218a1c200SViacheslav Ovsiienko mlx5_tx_mseg_memcpy(pdst, loc, part, olx); 266318a1c200SViacheslav Ovsiienko inlen -= part; 266418a1c200SViacheslav Ovsiienko if (likely(!inlen)) { 266518a1c200SViacheslav Ovsiienko pdst += part; 266618a1c200SViacheslav Ovsiienko pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); 266718a1c200SViacheslav Ovsiienko return (struct mlx5_wqe_dseg *)pdst; 266818a1c200SViacheslav Ovsiienko } 266918a1c200SViacheslav Ovsiienko pdst = (uint8_t *)txq->wqes; 267018a1c200SViacheslav Ovsiienko part = inlen; 267118a1c200SViacheslav Ovsiienko } while (true); 267218a1c200SViacheslav Ovsiienko } 267318a1c200SViacheslav Ovsiienko 267418a1c200SViacheslav Ovsiienko /** 267518a1c200SViacheslav Ovsiienko * Build the Data Segment of pointer type. 267618a1c200SViacheslav Ovsiienko * 267718a1c200SViacheslav Ovsiienko * @param txq 267818a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 267918a1c200SViacheslav Ovsiienko * @param loc 268018a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 268118a1c200SViacheslav Ovsiienko * @param dseg 268218a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Data Segment. 268318a1c200SViacheslav Ovsiienko * @param buf 268418a1c200SViacheslav Ovsiienko * Data buffer to point. 268518a1c200SViacheslav Ovsiienko * @param len 268618a1c200SViacheslav Ovsiienko * Data buffer length. 268718a1c200SViacheslav Ovsiienko * @param olx 268818a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 268918a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 269018a1c200SViacheslav Ovsiienko */ 269118a1c200SViacheslav Ovsiienko static __rte_always_inline void 269218a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq, 269318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 269418a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg, 269518a1c200SViacheslav Ovsiienko uint8_t *buf, 269618a1c200SViacheslav Ovsiienko unsigned int len, 269718a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 269818a1c200SViacheslav Ovsiienko 269918a1c200SViacheslav Ovsiienko { 270018a1c200SViacheslav Ovsiienko assert(len); 270118a1c200SViacheslav Ovsiienko dseg->bcount = rte_cpu_to_be_32(len); 270218a1c200SViacheslav Ovsiienko dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf); 270318a1c200SViacheslav Ovsiienko dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf); 270418a1c200SViacheslav Ovsiienko } 270518a1c200SViacheslav Ovsiienko 270618a1c200SViacheslav Ovsiienko /** 270718a1c200SViacheslav Ovsiienko * Build the Data Segment of pointer type or inline 270818a1c200SViacheslav Ovsiienko * if data length is less than buffer in minimal 270918a1c200SViacheslav Ovsiienko * Data Segment size. 271018a1c200SViacheslav Ovsiienko * 271118a1c200SViacheslav Ovsiienko * @param txq 271218a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 271318a1c200SViacheslav Ovsiienko * @param loc 271418a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 271518a1c200SViacheslav Ovsiienko * @param dseg 271618a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Data Segment. 271718a1c200SViacheslav Ovsiienko * @param buf 271818a1c200SViacheslav Ovsiienko * Data buffer to point. 271918a1c200SViacheslav Ovsiienko * @param len 272018a1c200SViacheslav Ovsiienko * Data buffer length. 272118a1c200SViacheslav Ovsiienko * @param olx 272218a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 272318a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 272418a1c200SViacheslav Ovsiienko */ 272518a1c200SViacheslav Ovsiienko static __rte_always_inline void 272618a1c200SViacheslav Ovsiienko mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq, 272718a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 272818a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg, 272918a1c200SViacheslav Ovsiienko uint8_t *buf, 273018a1c200SViacheslav Ovsiienko unsigned int len, 273118a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 273218a1c200SViacheslav Ovsiienko 273318a1c200SViacheslav Ovsiienko { 273418a1c200SViacheslav Ovsiienko uintptr_t dst, src; 273518a1c200SViacheslav Ovsiienko 273618a1c200SViacheslav Ovsiienko assert(len); 273718a1c200SViacheslav Ovsiienko if (len > MLX5_DSEG_MIN_INLINE_SIZE) { 273818a1c200SViacheslav Ovsiienko dseg->bcount = rte_cpu_to_be_32(len); 273918a1c200SViacheslav Ovsiienko dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf); 274018a1c200SViacheslav Ovsiienko dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf); 274118a1c200SViacheslav Ovsiienko 274218a1c200SViacheslav Ovsiienko return; 274318a1c200SViacheslav Ovsiienko } 274418a1c200SViacheslav Ovsiienko dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE); 274518a1c200SViacheslav Ovsiienko /* Unrolled implementation of generic rte_memcpy. */ 274618a1c200SViacheslav Ovsiienko dst = (uintptr_t)&dseg->inline_data[0]; 274718a1c200SViacheslav Ovsiienko src = (uintptr_t)buf; 274818a1c200SViacheslav Ovsiienko if (len & 0x08) { 2749f3d0c07bSAli Alnubani #ifdef RTE_ARCH_STRICT_ALIGN 2750f3d0c07bSAli Alnubani assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t))); 2751f3d0c07bSAli Alnubani *(uint32_t *)dst = *(unaligned_uint32_t *)src; 2752f3d0c07bSAli Alnubani dst += sizeof(uint32_t); 2753f3d0c07bSAli Alnubani src += sizeof(uint32_t); 2754f3d0c07bSAli Alnubani *(uint32_t *)dst = *(unaligned_uint32_t *)src; 2755f3d0c07bSAli Alnubani dst += sizeof(uint32_t); 2756f3d0c07bSAli Alnubani src += sizeof(uint32_t); 2757f3d0c07bSAli Alnubani #else 2758f3d0c07bSAli Alnubani *(uint64_t *)dst = *(unaligned_uint64_t *)src; 275918a1c200SViacheslav Ovsiienko dst += sizeof(uint64_t); 276018a1c200SViacheslav Ovsiienko src += sizeof(uint64_t); 2761f3d0c07bSAli Alnubani #endif 276218a1c200SViacheslav Ovsiienko } 276318a1c200SViacheslav Ovsiienko if (len & 0x04) { 2764f3d0c07bSAli Alnubani *(uint32_t *)dst = *(unaligned_uint32_t *)src; 276518a1c200SViacheslav Ovsiienko dst += sizeof(uint32_t); 276618a1c200SViacheslav Ovsiienko src += sizeof(uint32_t); 276718a1c200SViacheslav Ovsiienko } 276818a1c200SViacheslav Ovsiienko if (len & 0x02) { 2769f3d0c07bSAli Alnubani *(uint16_t *)dst = *(unaligned_uint16_t *)src; 277018a1c200SViacheslav Ovsiienko dst += sizeof(uint16_t); 277118a1c200SViacheslav Ovsiienko src += sizeof(uint16_t); 277218a1c200SViacheslav Ovsiienko } 277318a1c200SViacheslav Ovsiienko if (len & 0x01) 277418a1c200SViacheslav Ovsiienko *(uint8_t *)dst = *(uint8_t *)src; 277518a1c200SViacheslav Ovsiienko } 277618a1c200SViacheslav Ovsiienko 277718a1c200SViacheslav Ovsiienko /** 277818a1c200SViacheslav Ovsiienko * Build the Data Segment of inlined data from single 277918a1c200SViacheslav Ovsiienko * segment packet, no VLAN insertion. 278018a1c200SViacheslav Ovsiienko * 278118a1c200SViacheslav Ovsiienko * @param txq 278218a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 278318a1c200SViacheslav Ovsiienko * @param loc 278418a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 278518a1c200SViacheslav Ovsiienko * @param dseg 278618a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Data Segment. 278718a1c200SViacheslav Ovsiienko * @param buf 278818a1c200SViacheslav Ovsiienko * Data buffer to point. 278918a1c200SViacheslav Ovsiienko * @param len 279018a1c200SViacheslav Ovsiienko * Data buffer length. 279118a1c200SViacheslav Ovsiienko * @param olx 279218a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 279318a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 279418a1c200SViacheslav Ovsiienko * 279518a1c200SViacheslav Ovsiienko * @return 279618a1c200SViacheslav Ovsiienko * Pointer to the next Data Segment after inlined data. 279718a1c200SViacheslav Ovsiienko * Ring buffer wraparound check is needed. We do not 279818a1c200SViacheslav Ovsiienko * do it here because it may not be needed for the 279918a1c200SViacheslav Ovsiienko * last packet in the eMPW session. 280018a1c200SViacheslav Ovsiienko */ 280118a1c200SViacheslav Ovsiienko static __rte_always_inline struct mlx5_wqe_dseg * 280218a1c200SViacheslav Ovsiienko mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, 280318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc __rte_unused, 280418a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg, 280518a1c200SViacheslav Ovsiienko uint8_t *buf, 280618a1c200SViacheslav Ovsiienko unsigned int len, 280718a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 280818a1c200SViacheslav Ovsiienko { 280918a1c200SViacheslav Ovsiienko unsigned int part; 281018a1c200SViacheslav Ovsiienko uint8_t *pdst; 281118a1c200SViacheslav Ovsiienko 281218a1c200SViacheslav Ovsiienko dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE); 281318a1c200SViacheslav Ovsiienko pdst = &dseg->inline_data[0]; 281418a1c200SViacheslav Ovsiienko /* 281518a1c200SViacheslav Ovsiienko * The WQEBB space availability is checked by caller. 281618a1c200SViacheslav Ovsiienko * Here we should be aware of WQE ring buffer wraparound only. 281718a1c200SViacheslav Ovsiienko */ 281818a1c200SViacheslav Ovsiienko part = (uint8_t *)txq->wqes_end - pdst; 281918a1c200SViacheslav Ovsiienko part = RTE_MIN(part, len); 282018a1c200SViacheslav Ovsiienko do { 282118a1c200SViacheslav Ovsiienko rte_memcpy(pdst, buf, part); 282218a1c200SViacheslav Ovsiienko len -= part; 282318a1c200SViacheslav Ovsiienko if (likely(!len)) { 282418a1c200SViacheslav Ovsiienko pdst += part; 282518a1c200SViacheslav Ovsiienko pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); 282618a1c200SViacheslav Ovsiienko /* Note: no final wraparound check here. */ 282718a1c200SViacheslav Ovsiienko return (struct mlx5_wqe_dseg *)pdst; 282818a1c200SViacheslav Ovsiienko } 282918a1c200SViacheslav Ovsiienko pdst = (uint8_t *)txq->wqes; 283018a1c200SViacheslav Ovsiienko buf += part; 283118a1c200SViacheslav Ovsiienko part = len; 283218a1c200SViacheslav Ovsiienko } while (true); 283318a1c200SViacheslav Ovsiienko } 283418a1c200SViacheslav Ovsiienko 283518a1c200SViacheslav Ovsiienko /** 283618a1c200SViacheslav Ovsiienko * Build the Data Segment of inlined data from single 283718a1c200SViacheslav Ovsiienko * segment packet with VLAN insertion. 283818a1c200SViacheslav Ovsiienko * 283918a1c200SViacheslav Ovsiienko * @param txq 284018a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 284118a1c200SViacheslav Ovsiienko * @param loc 284218a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 284318a1c200SViacheslav Ovsiienko * @param dseg 284418a1c200SViacheslav Ovsiienko * Pointer to the dseg fill with built Data Segment. 284518a1c200SViacheslav Ovsiienko * @param buf 284618a1c200SViacheslav Ovsiienko * Data buffer to point. 284718a1c200SViacheslav Ovsiienko * @param len 284818a1c200SViacheslav Ovsiienko * Data buffer length. 284918a1c200SViacheslav Ovsiienko * @param olx 285018a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 285118a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 285218a1c200SViacheslav Ovsiienko * 285318a1c200SViacheslav Ovsiienko * @return 285418a1c200SViacheslav Ovsiienko * Pointer to the next Data Segment after inlined data. 285518a1c200SViacheslav Ovsiienko * Ring buffer wraparound check is needed. 285618a1c200SViacheslav Ovsiienko */ 285718a1c200SViacheslav Ovsiienko static __rte_always_inline struct mlx5_wqe_dseg * 285818a1c200SViacheslav Ovsiienko mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, 285918a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc __rte_unused, 286018a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg, 286118a1c200SViacheslav Ovsiienko uint8_t *buf, 286218a1c200SViacheslav Ovsiienko unsigned int len, 286318a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 286418a1c200SViacheslav Ovsiienko 286518a1c200SViacheslav Ovsiienko { 286618a1c200SViacheslav Ovsiienko unsigned int part; 286718a1c200SViacheslav Ovsiienko uint8_t *pdst; 286818a1c200SViacheslav Ovsiienko 286918a1c200SViacheslav Ovsiienko assert(len > MLX5_ESEG_MIN_INLINE_SIZE); 287018a1c200SViacheslav Ovsiienko static_assert(MLX5_DSEG_MIN_INLINE_SIZE == 287118a1c200SViacheslav Ovsiienko (2 * RTE_ETHER_ADDR_LEN), 287218a1c200SViacheslav Ovsiienko "invalid Data Segment data size"); 287318a1c200SViacheslav Ovsiienko dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) | 287418a1c200SViacheslav Ovsiienko MLX5_ETH_WQE_DATA_INLINE); 287518a1c200SViacheslav Ovsiienko pdst = &dseg->inline_data[0]; 287618a1c200SViacheslav Ovsiienko memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE); 287718a1c200SViacheslav Ovsiienko buf += MLX5_DSEG_MIN_INLINE_SIZE; 287818a1c200SViacheslav Ovsiienko pdst += MLX5_DSEG_MIN_INLINE_SIZE; 28797fd9ffe9SViacheslav Ovsiienko len -= MLX5_DSEG_MIN_INLINE_SIZE; 288018a1c200SViacheslav Ovsiienko /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */ 288118a1c200SViacheslav Ovsiienko assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); 28827fd9ffe9SViacheslav Ovsiienko if (unlikely(pdst >= (uint8_t *)txq->wqes_end)) 28837fd9ffe9SViacheslav Ovsiienko pdst = (uint8_t *)txq->wqes; 288418a1c200SViacheslav Ovsiienko *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) | 288518a1c200SViacheslav Ovsiienko loc->mbuf->vlan_tci); 288618a1c200SViacheslav Ovsiienko pdst += sizeof(struct rte_vlan_hdr); 288718a1c200SViacheslav Ovsiienko /* 288818a1c200SViacheslav Ovsiienko * The WQEBB space availability is checked by caller. 288918a1c200SViacheslav Ovsiienko * Here we should be aware of WQE ring buffer wraparound only. 289018a1c200SViacheslav Ovsiienko */ 289118a1c200SViacheslav Ovsiienko part = (uint8_t *)txq->wqes_end - pdst; 289218a1c200SViacheslav Ovsiienko part = RTE_MIN(part, len); 289318a1c200SViacheslav Ovsiienko do { 289418a1c200SViacheslav Ovsiienko rte_memcpy(pdst, buf, part); 289518a1c200SViacheslav Ovsiienko len -= part; 289618a1c200SViacheslav Ovsiienko if (likely(!len)) { 289718a1c200SViacheslav Ovsiienko pdst += part; 289818a1c200SViacheslav Ovsiienko pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); 289918a1c200SViacheslav Ovsiienko /* Note: no final wraparound check here. */ 290018a1c200SViacheslav Ovsiienko return (struct mlx5_wqe_dseg *)pdst; 290118a1c200SViacheslav Ovsiienko } 290218a1c200SViacheslav Ovsiienko pdst = (uint8_t *)txq->wqes; 290318a1c200SViacheslav Ovsiienko buf += part; 290418a1c200SViacheslav Ovsiienko part = len; 290518a1c200SViacheslav Ovsiienko } while (true); 290618a1c200SViacheslav Ovsiienko } 290718a1c200SViacheslav Ovsiienko 290818a1c200SViacheslav Ovsiienko /** 290918a1c200SViacheslav Ovsiienko * Build the Ethernet Segment with optionally inlined data with 291018a1c200SViacheslav Ovsiienko * VLAN insertion and following Data Segments (if any) from 291118a1c200SViacheslav Ovsiienko * multi-segment packet. Used by ordinary send and TSO. 291218a1c200SViacheslav Ovsiienko * 291318a1c200SViacheslav Ovsiienko * @param txq 291418a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 291518a1c200SViacheslav Ovsiienko * @param loc 291618a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 291718a1c200SViacheslav Ovsiienko * @param wqe 291818a1c200SViacheslav Ovsiienko * Pointer to WQE to fill with built Ethernet/Data Segments. 291918a1c200SViacheslav Ovsiienko * @param vlan 292018a1c200SViacheslav Ovsiienko * Length of VLAN header to insert, 0 means no VLAN insertion. 292118a1c200SViacheslav Ovsiienko * @param inlen 292218a1c200SViacheslav Ovsiienko * Data length to inline. For TSO this parameter specifies 292318a1c200SViacheslav Ovsiienko * exact value, for ordinary send routine can be aligned by 292418a1c200SViacheslav Ovsiienko * caller to provide better WQE space saving and data buffer 292518a1c200SViacheslav Ovsiienko * start address alignment. This length includes VLAN header 292618a1c200SViacheslav Ovsiienko * being inserted. 292718a1c200SViacheslav Ovsiienko * @param tso 292818a1c200SViacheslav Ovsiienko * Zero means ordinary send, inlined data can be extended, 292918a1c200SViacheslav Ovsiienko * otherwise this is TSO, inlined data length is fixed. 293018a1c200SViacheslav Ovsiienko * @param olx 293118a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 293218a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 293318a1c200SViacheslav Ovsiienko * 293418a1c200SViacheslav Ovsiienko * @return 293518a1c200SViacheslav Ovsiienko * Actual size of built WQE in segments. 293618a1c200SViacheslav Ovsiienko */ 293718a1c200SViacheslav Ovsiienko static __rte_always_inline unsigned int 293818a1c200SViacheslav Ovsiienko mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq, 293918a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 294018a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe, 294118a1c200SViacheslav Ovsiienko unsigned int vlan, 294218a1c200SViacheslav Ovsiienko unsigned int inlen, 294318a1c200SViacheslav Ovsiienko unsigned int tso, 294418a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 294518a1c200SViacheslav Ovsiienko { 294618a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 294718a1c200SViacheslav Ovsiienko unsigned int ds; 294818a1c200SViacheslav Ovsiienko 294918a1c200SViacheslav Ovsiienko assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen); 295018a1c200SViacheslav Ovsiienko loc->mbuf_nseg = NB_SEGS(loc->mbuf); 295118a1c200SViacheslav Ovsiienko loc->mbuf_off = 0; 295218a1c200SViacheslav Ovsiienko 295318a1c200SViacheslav Ovsiienko dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx); 295418a1c200SViacheslav Ovsiienko if (!loc->mbuf_nseg) 295518a1c200SViacheslav Ovsiienko goto dseg_done; 295618a1c200SViacheslav Ovsiienko /* 295718a1c200SViacheslav Ovsiienko * There are still some mbuf remaining, not inlined. 295818a1c200SViacheslav Ovsiienko * The first mbuf may be partially inlined and we 295918a1c200SViacheslav Ovsiienko * must process the possible non-zero data offset. 296018a1c200SViacheslav Ovsiienko */ 296118a1c200SViacheslav Ovsiienko if (loc->mbuf_off) { 296218a1c200SViacheslav Ovsiienko unsigned int dlen; 296318a1c200SViacheslav Ovsiienko uint8_t *dptr; 296418a1c200SViacheslav Ovsiienko 296518a1c200SViacheslav Ovsiienko /* 296618a1c200SViacheslav Ovsiienko * Exhausted packets must be dropped before. 296718a1c200SViacheslav Ovsiienko * Non-zero offset means there are some data 296818a1c200SViacheslav Ovsiienko * remained in the packet. 296918a1c200SViacheslav Ovsiienko */ 297018a1c200SViacheslav Ovsiienko assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf)); 297118a1c200SViacheslav Ovsiienko assert(rte_pktmbuf_data_len(loc->mbuf)); 297218a1c200SViacheslav Ovsiienko dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *, 297318a1c200SViacheslav Ovsiienko loc->mbuf_off); 297418a1c200SViacheslav Ovsiienko dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off; 297518a1c200SViacheslav Ovsiienko /* 297618a1c200SViacheslav Ovsiienko * Build the pointer/minimal data Data Segment. 297718a1c200SViacheslav Ovsiienko * Do ring buffer wrapping check in advance. 297818a1c200SViacheslav Ovsiienko */ 297918a1c200SViacheslav Ovsiienko if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) 298018a1c200SViacheslav Ovsiienko dseg = (struct mlx5_wqe_dseg *)txq->wqes; 298118a1c200SViacheslav Ovsiienko mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx); 298218a1c200SViacheslav Ovsiienko /* Store the mbuf to be freed on completion. */ 298318a1c200SViacheslav Ovsiienko assert(loc->elts_free); 298418a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; 298518a1c200SViacheslav Ovsiienko --loc->elts_free; 298618a1c200SViacheslav Ovsiienko ++dseg; 298718a1c200SViacheslav Ovsiienko if (--loc->mbuf_nseg == 0) 298818a1c200SViacheslav Ovsiienko goto dseg_done; 298918a1c200SViacheslav Ovsiienko loc->mbuf = loc->mbuf->next; 299018a1c200SViacheslav Ovsiienko loc->mbuf_off = 0; 299118a1c200SViacheslav Ovsiienko } 299218a1c200SViacheslav Ovsiienko do { 299318a1c200SViacheslav Ovsiienko if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) { 299418a1c200SViacheslav Ovsiienko struct rte_mbuf *mbuf; 299518a1c200SViacheslav Ovsiienko 299618a1c200SViacheslav Ovsiienko /* Zero length segment found, just skip. */ 299718a1c200SViacheslav Ovsiienko mbuf = loc->mbuf; 299818a1c200SViacheslav Ovsiienko loc->mbuf = loc->mbuf->next; 299918a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(mbuf); 300018a1c200SViacheslav Ovsiienko if (--loc->mbuf_nseg == 0) 300118a1c200SViacheslav Ovsiienko break; 300218a1c200SViacheslav Ovsiienko } else { 300318a1c200SViacheslav Ovsiienko if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) 300418a1c200SViacheslav Ovsiienko dseg = (struct mlx5_wqe_dseg *)txq->wqes; 300518a1c200SViacheslav Ovsiienko mlx5_tx_dseg_iptr 300618a1c200SViacheslav Ovsiienko (txq, loc, dseg, 300718a1c200SViacheslav Ovsiienko rte_pktmbuf_mtod(loc->mbuf, uint8_t *), 300818a1c200SViacheslav Ovsiienko rte_pktmbuf_data_len(loc->mbuf), olx); 300918a1c200SViacheslav Ovsiienko assert(loc->elts_free); 301018a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; 301118a1c200SViacheslav Ovsiienko --loc->elts_free; 301218a1c200SViacheslav Ovsiienko ++dseg; 301318a1c200SViacheslav Ovsiienko if (--loc->mbuf_nseg == 0) 301418a1c200SViacheslav Ovsiienko break; 301518a1c200SViacheslav Ovsiienko loc->mbuf = loc->mbuf->next; 301618a1c200SViacheslav Ovsiienko } 301718a1c200SViacheslav Ovsiienko } while (true); 301818a1c200SViacheslav Ovsiienko 301918a1c200SViacheslav Ovsiienko dseg_done: 302018a1c200SViacheslav Ovsiienko /* Calculate actual segments used from the dseg pointer. */ 302118a1c200SViacheslav Ovsiienko if ((uintptr_t)wqe < (uintptr_t)dseg) 302218a1c200SViacheslav Ovsiienko ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE; 302318a1c200SViacheslav Ovsiienko else 302418a1c200SViacheslav Ovsiienko ds = (((uintptr_t)dseg - (uintptr_t)wqe) + 302518a1c200SViacheslav Ovsiienko txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE; 302618a1c200SViacheslav Ovsiienko return ds; 302718a1c200SViacheslav Ovsiienko } 302818a1c200SViacheslav Ovsiienko 302918a1c200SViacheslav Ovsiienko /** 303018a1c200SViacheslav Ovsiienko * Tx one packet function for multi-segment TSO. Supports all 303118a1c200SViacheslav Ovsiienko * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs, 303218a1c200SViacheslav Ovsiienko * sends one packet per WQE. 303318a1c200SViacheslav Ovsiienko * 303418a1c200SViacheslav Ovsiienko * This routine is responsible for storing processed mbuf 303518a1c200SViacheslav Ovsiienko * into elts ring buffer and update elts_head. 303618a1c200SViacheslav Ovsiienko * 303718a1c200SViacheslav Ovsiienko * @param txq 303818a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 303918a1c200SViacheslav Ovsiienko * @param loc 304018a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 304118a1c200SViacheslav Ovsiienko * @param olx 304218a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 304318a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 304418a1c200SViacheslav Ovsiienko * 304518a1c200SViacheslav Ovsiienko * @return 304618a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. 304718a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. 304818a1c200SViacheslav Ovsiienko * Local context variables partially updated. 304918a1c200SViacheslav Ovsiienko */ 305018a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 305118a1c200SViacheslav Ovsiienko mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq, 305218a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 305318a1c200SViacheslav Ovsiienko unsigned int olx) 305418a1c200SViacheslav Ovsiienko { 305518a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe; 305618a1c200SViacheslav Ovsiienko unsigned int ds, dlen, inlen, ntcp, vlan = 0; 305718a1c200SViacheslav Ovsiienko 305818a1c200SViacheslav Ovsiienko /* 305918a1c200SViacheslav Ovsiienko * Calculate data length to be inlined to estimate 306018a1c200SViacheslav Ovsiienko * the required space in WQE ring buffer. 306118a1c200SViacheslav Ovsiienko */ 306218a1c200SViacheslav Ovsiienko dlen = rte_pktmbuf_pkt_len(loc->mbuf); 306318a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) 306418a1c200SViacheslav Ovsiienko vlan = sizeof(struct rte_vlan_hdr); 306518a1c200SViacheslav Ovsiienko inlen = loc->mbuf->l2_len + vlan + 306618a1c200SViacheslav Ovsiienko loc->mbuf->l3_len + loc->mbuf->l4_len; 306718a1c200SViacheslav Ovsiienko if (unlikely((!inlen || !loc->mbuf->tso_segsz))) 306818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 306918a1c200SViacheslav Ovsiienko if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK) 307018a1c200SViacheslav Ovsiienko inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len; 307118a1c200SViacheslav Ovsiienko /* Packet must contain all TSO headers. */ 307218a1c200SViacheslav Ovsiienko if (unlikely(inlen > MLX5_MAX_TSO_HEADER || 307318a1c200SViacheslav Ovsiienko inlen <= MLX5_ESEG_MIN_INLINE_SIZE || 307418a1c200SViacheslav Ovsiienko inlen > (dlen + vlan))) 307518a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 307618a1c200SViacheslav Ovsiienko assert(inlen >= txq->inlen_mode); 307718a1c200SViacheslav Ovsiienko /* 307818a1c200SViacheslav Ovsiienko * Check whether there are enough free WQEBBs: 307918a1c200SViacheslav Ovsiienko * - Control Segment 308018a1c200SViacheslav Ovsiienko * - Ethernet Segment 308118a1c200SViacheslav Ovsiienko * - First Segment of inlined Ethernet data 308218a1c200SViacheslav Ovsiienko * - ... data continued ... 308318a1c200SViacheslav Ovsiienko * - Data Segments of pointer/min inline type 308418a1c200SViacheslav Ovsiienko */ 308518a1c200SViacheslav Ovsiienko ds = NB_SEGS(loc->mbuf) + 2 + (inlen - 308618a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE + 308718a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE + 308818a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 308918a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < ((ds + 3) / 4))) 309018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 309118a1c200SViacheslav Ovsiienko /* Check for maximal WQE size. */ 309218a1c200SViacheslav Ovsiienko if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) 309318a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 309418a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 309518a1c200SViacheslav Ovsiienko /* Update sent data bytes/packets counters. */ 309618a1c200SViacheslav Ovsiienko ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) / 309718a1c200SViacheslav Ovsiienko loc->mbuf->tso_segsz; 309818a1c200SViacheslav Ovsiienko /* 309918a1c200SViacheslav Ovsiienko * One will be added for mbuf itself 310018a1c200SViacheslav Ovsiienko * at the end of the mlx5_tx_burst from 310118a1c200SViacheslav Ovsiienko * loc->pkts_sent field. 310218a1c200SViacheslav Ovsiienko */ 310318a1c200SViacheslav Ovsiienko --ntcp; 310418a1c200SViacheslav Ovsiienko txq->stats.opackets += ntcp; 310518a1c200SViacheslav Ovsiienko txq->stats.obytes += dlen + vlan + ntcp * inlen; 310618a1c200SViacheslav Ovsiienko #endif 310718a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 310818a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 310918a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx); 311018a1c200SViacheslav Ovsiienko ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx); 311118a1c200SViacheslav Ovsiienko wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); 311218a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 311318a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 31145a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 31154dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, true, olx); 311618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 311718a1c200SViacheslav Ovsiienko } 311818a1c200SViacheslav Ovsiienko 311918a1c200SViacheslav Ovsiienko /** 312018a1c200SViacheslav Ovsiienko * Tx one packet function for multi-segment SEND. Supports all 312118a1c200SViacheslav Ovsiienko * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs, 312218a1c200SViacheslav Ovsiienko * sends one packet per WQE, without any data inlining in 312318a1c200SViacheslav Ovsiienko * Ethernet Segment. 312418a1c200SViacheslav Ovsiienko * 312518a1c200SViacheslav Ovsiienko * This routine is responsible for storing processed mbuf 312618a1c200SViacheslav Ovsiienko * into elts ring buffer and update elts_head. 312718a1c200SViacheslav Ovsiienko * 312818a1c200SViacheslav Ovsiienko * @param txq 312918a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 313018a1c200SViacheslav Ovsiienko * @param loc 313118a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 313218a1c200SViacheslav Ovsiienko * @param olx 313318a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 313418a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 313518a1c200SViacheslav Ovsiienko * 313618a1c200SViacheslav Ovsiienko * @return 313718a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. 313818a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. 313918a1c200SViacheslav Ovsiienko * Local context variables partially updated. 314018a1c200SViacheslav Ovsiienko */ 314118a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 314218a1c200SViacheslav Ovsiienko mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq, 314318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 314418a1c200SViacheslav Ovsiienko unsigned int olx) 314518a1c200SViacheslav Ovsiienko { 314618a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 314718a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe; 314818a1c200SViacheslav Ovsiienko unsigned int ds, nseg; 314918a1c200SViacheslav Ovsiienko 315018a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc->mbuf) > 1); 315118a1c200SViacheslav Ovsiienko /* 315218a1c200SViacheslav Ovsiienko * No inline at all, it means the CPU cycles saving 315318a1c200SViacheslav Ovsiienko * is prioritized at configuration, we should not 315418a1c200SViacheslav Ovsiienko * copy any packet data to WQE. 315518a1c200SViacheslav Ovsiienko */ 315618a1c200SViacheslav Ovsiienko nseg = NB_SEGS(loc->mbuf); 315718a1c200SViacheslav Ovsiienko ds = 2 + nseg; 315818a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < ((ds + 3) / 4))) 315918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 316018a1c200SViacheslav Ovsiienko /* Check for maximal WQE size. */ 316118a1c200SViacheslav Ovsiienko if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) 316218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 316318a1c200SViacheslav Ovsiienko /* 316418a1c200SViacheslav Ovsiienko * Some Tx offloads may cause an error if 316518a1c200SViacheslav Ovsiienko * packet is not long enough, check against 316618a1c200SViacheslav Ovsiienko * assumed minimal length. 316718a1c200SViacheslav Ovsiienko */ 316818a1c200SViacheslav Ovsiienko if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE) 316918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 317018a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 317118a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 317218a1c200SViacheslav Ovsiienko txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf); 317318a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 317418a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) 317518a1c200SViacheslav Ovsiienko txq->stats.obytes += sizeof(struct rte_vlan_hdr); 317618a1c200SViacheslav Ovsiienko #endif 317718a1c200SViacheslav Ovsiienko /* 317818a1c200SViacheslav Ovsiienko * SEND WQE, one WQEBB: 317918a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 318018a1c200SViacheslav Ovsiienko * - Ethernet Segment, optional VLAN, no inline 318118a1c200SViacheslav Ovsiienko * - Data Segments, pointer only type 318218a1c200SViacheslav Ovsiienko */ 318318a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 318418a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 318518a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx); 318618a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(txq, loc, wqe, olx); 318718a1c200SViacheslav Ovsiienko dseg = &wqe->dseg[0]; 318818a1c200SViacheslav Ovsiienko do { 318918a1c200SViacheslav Ovsiienko if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) { 319018a1c200SViacheslav Ovsiienko struct rte_mbuf *mbuf; 319118a1c200SViacheslav Ovsiienko 319218a1c200SViacheslav Ovsiienko /* 319318a1c200SViacheslav Ovsiienko * Zero length segment found, have to 319418a1c200SViacheslav Ovsiienko * correct total size of WQE in segments. 319518a1c200SViacheslav Ovsiienko * It is supposed to be rare occasion, so 319618a1c200SViacheslav Ovsiienko * in normal case (no zero length segments) 319718a1c200SViacheslav Ovsiienko * we avoid extra writing to the Control 319818a1c200SViacheslav Ovsiienko * Segment. 319918a1c200SViacheslav Ovsiienko */ 320018a1c200SViacheslav Ovsiienko --ds; 320118a1c200SViacheslav Ovsiienko wqe->cseg.sq_ds -= RTE_BE32(1); 320218a1c200SViacheslav Ovsiienko mbuf = loc->mbuf; 320318a1c200SViacheslav Ovsiienko loc->mbuf = mbuf->next; 320418a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(mbuf); 320518a1c200SViacheslav Ovsiienko if (--nseg == 0) 320618a1c200SViacheslav Ovsiienko break; 320718a1c200SViacheslav Ovsiienko } else { 320818a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr 320918a1c200SViacheslav Ovsiienko (txq, loc, dseg, 321018a1c200SViacheslav Ovsiienko rte_pktmbuf_mtod(loc->mbuf, uint8_t *), 321118a1c200SViacheslav Ovsiienko rte_pktmbuf_data_len(loc->mbuf), olx); 321218a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; 321318a1c200SViacheslav Ovsiienko --loc->elts_free; 321418a1c200SViacheslav Ovsiienko if (--nseg == 0) 321518a1c200SViacheslav Ovsiienko break; 321618a1c200SViacheslav Ovsiienko ++dseg; 321718a1c200SViacheslav Ovsiienko if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) 321818a1c200SViacheslav Ovsiienko dseg = (struct mlx5_wqe_dseg *)txq->wqes; 321918a1c200SViacheslav Ovsiienko loc->mbuf = loc->mbuf->next; 322018a1c200SViacheslav Ovsiienko } 322118a1c200SViacheslav Ovsiienko } while (true); 322218a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 322318a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 32245a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 32254dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, true, olx); 322618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 322718a1c200SViacheslav Ovsiienko } 322818a1c200SViacheslav Ovsiienko 322918a1c200SViacheslav Ovsiienko /** 323018a1c200SViacheslav Ovsiienko * Tx one packet function for multi-segment SEND. Supports all 323118a1c200SViacheslav Ovsiienko * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs, 323218a1c200SViacheslav Ovsiienko * sends one packet per WQE, with data inlining in 323318a1c200SViacheslav Ovsiienko * Ethernet Segment and minimal Data Segments. 323418a1c200SViacheslav Ovsiienko * 323518a1c200SViacheslav Ovsiienko * This routine is responsible for storing processed mbuf 323618a1c200SViacheslav Ovsiienko * into elts ring buffer and update elts_head. 323718a1c200SViacheslav Ovsiienko * 323818a1c200SViacheslav Ovsiienko * @param txq 323918a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 324018a1c200SViacheslav Ovsiienko * @param loc 324118a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 324218a1c200SViacheslav Ovsiienko * @param olx 324318a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 324418a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 324518a1c200SViacheslav Ovsiienko * 324618a1c200SViacheslav Ovsiienko * @return 324718a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. 324818a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. 324918a1c200SViacheslav Ovsiienko * Local context variables partially updated. 325018a1c200SViacheslav Ovsiienko */ 325118a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 325218a1c200SViacheslav Ovsiienko mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, 325318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 325418a1c200SViacheslav Ovsiienko unsigned int olx) 325518a1c200SViacheslav Ovsiienko { 325618a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe; 325718a1c200SViacheslav Ovsiienko unsigned int ds, inlen, dlen, vlan = 0; 325818a1c200SViacheslav Ovsiienko 325918a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 326018a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc->mbuf) > 1); 326118a1c200SViacheslav Ovsiienko /* 326218a1c200SViacheslav Ovsiienko * First calculate data length to be inlined 326318a1c200SViacheslav Ovsiienko * to estimate the required space for WQE. 326418a1c200SViacheslav Ovsiienko */ 326518a1c200SViacheslav Ovsiienko dlen = rte_pktmbuf_pkt_len(loc->mbuf); 326618a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) 326718a1c200SViacheslav Ovsiienko vlan = sizeof(struct rte_vlan_hdr); 326818a1c200SViacheslav Ovsiienko inlen = dlen + vlan; 326918a1c200SViacheslav Ovsiienko /* Check against minimal length. */ 327018a1c200SViacheslav Ovsiienko if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE) 327118a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 327218a1c200SViacheslav Ovsiienko assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); 327318a1c200SViacheslav Ovsiienko if (inlen > txq->inlen_send) { 327418a1c200SViacheslav Ovsiienko struct rte_mbuf *mbuf; 327518a1c200SViacheslav Ovsiienko unsigned int nxlen; 327618a1c200SViacheslav Ovsiienko uintptr_t start; 327718a1c200SViacheslav Ovsiienko 327818a1c200SViacheslav Ovsiienko /* 327918a1c200SViacheslav Ovsiienko * Packet length exceeds the allowed inline 328018a1c200SViacheslav Ovsiienko * data length, check whether the minimal 328118a1c200SViacheslav Ovsiienko * inlining is required. 328218a1c200SViacheslav Ovsiienko */ 328318a1c200SViacheslav Ovsiienko if (txq->inlen_mode) { 328418a1c200SViacheslav Ovsiienko assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE); 328518a1c200SViacheslav Ovsiienko assert(txq->inlen_mode <= txq->inlen_send); 328618a1c200SViacheslav Ovsiienko inlen = txq->inlen_mode; 328718a1c200SViacheslav Ovsiienko } else { 328818a1c200SViacheslav Ovsiienko if (!vlan || txq->vlan_en) { 328918a1c200SViacheslav Ovsiienko /* 329018a1c200SViacheslav Ovsiienko * VLAN insertion will be done inside by HW. 329118a1c200SViacheslav Ovsiienko * It is not utmost effective - VLAN flag is 329218a1c200SViacheslav Ovsiienko * checked twice, but we should proceed the 329318a1c200SViacheslav Ovsiienko * inlining length correctly and take into 329418a1c200SViacheslav Ovsiienko * account the VLAN header being inserted. 329518a1c200SViacheslav Ovsiienko */ 329618a1c200SViacheslav Ovsiienko return mlx5_tx_packet_multi_send 329718a1c200SViacheslav Ovsiienko (txq, loc, olx); 329818a1c200SViacheslav Ovsiienko } 329918a1c200SViacheslav Ovsiienko inlen = MLX5_ESEG_MIN_INLINE_SIZE; 330018a1c200SViacheslav Ovsiienko } 330118a1c200SViacheslav Ovsiienko /* 330218a1c200SViacheslav Ovsiienko * Now we know the minimal amount of data is requested 330318a1c200SViacheslav Ovsiienko * to inline. Check whether we should inline the buffers 330418a1c200SViacheslav Ovsiienko * from the chain beginning to eliminate some mbufs. 330518a1c200SViacheslav Ovsiienko */ 330618a1c200SViacheslav Ovsiienko mbuf = loc->mbuf; 330718a1c200SViacheslav Ovsiienko nxlen = rte_pktmbuf_data_len(mbuf); 330818a1c200SViacheslav Ovsiienko if (unlikely(nxlen <= txq->inlen_send)) { 330918a1c200SViacheslav Ovsiienko /* We can inline first mbuf at least. */ 331018a1c200SViacheslav Ovsiienko if (nxlen < inlen) { 331118a1c200SViacheslav Ovsiienko unsigned int smlen; 331218a1c200SViacheslav Ovsiienko 331318a1c200SViacheslav Ovsiienko /* Scan mbufs till inlen filled. */ 331418a1c200SViacheslav Ovsiienko do { 331518a1c200SViacheslav Ovsiienko smlen = nxlen; 331618a1c200SViacheslav Ovsiienko mbuf = NEXT(mbuf); 331718a1c200SViacheslav Ovsiienko assert(mbuf); 331818a1c200SViacheslav Ovsiienko nxlen = rte_pktmbuf_data_len(mbuf); 331918a1c200SViacheslav Ovsiienko nxlen += smlen; 332018a1c200SViacheslav Ovsiienko } while (unlikely(nxlen < inlen)); 332118a1c200SViacheslav Ovsiienko if (unlikely(nxlen > txq->inlen_send)) { 332218a1c200SViacheslav Ovsiienko /* We cannot inline entire mbuf. */ 332318a1c200SViacheslav Ovsiienko smlen = inlen - smlen; 332418a1c200SViacheslav Ovsiienko start = rte_pktmbuf_mtod_offset 332518a1c200SViacheslav Ovsiienko (mbuf, uintptr_t, smlen); 332618a1c200SViacheslav Ovsiienko goto do_align; 332718a1c200SViacheslav Ovsiienko } 332818a1c200SViacheslav Ovsiienko } 332918a1c200SViacheslav Ovsiienko do { 333018a1c200SViacheslav Ovsiienko inlen = nxlen; 333118a1c200SViacheslav Ovsiienko mbuf = NEXT(mbuf); 333218a1c200SViacheslav Ovsiienko /* There should be not end of packet. */ 333318a1c200SViacheslav Ovsiienko assert(mbuf); 333418a1c200SViacheslav Ovsiienko nxlen = inlen + rte_pktmbuf_data_len(mbuf); 333518a1c200SViacheslav Ovsiienko } while (unlikely(nxlen < txq->inlen_send)); 333618a1c200SViacheslav Ovsiienko } 333718a1c200SViacheslav Ovsiienko start = rte_pktmbuf_mtod(mbuf, uintptr_t); 333818a1c200SViacheslav Ovsiienko /* 333918a1c200SViacheslav Ovsiienko * Check whether we can do inline to align start 334018a1c200SViacheslav Ovsiienko * address of data buffer to cacheline. 334118a1c200SViacheslav Ovsiienko */ 334218a1c200SViacheslav Ovsiienko do_align: 334318a1c200SViacheslav Ovsiienko start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1); 334418a1c200SViacheslav Ovsiienko if (unlikely(start)) { 334518a1c200SViacheslav Ovsiienko start += inlen; 334618a1c200SViacheslav Ovsiienko if (start <= txq->inlen_send) 334718a1c200SViacheslav Ovsiienko inlen = start; 334818a1c200SViacheslav Ovsiienko } 334918a1c200SViacheslav Ovsiienko } 335018a1c200SViacheslav Ovsiienko /* 335118a1c200SViacheslav Ovsiienko * Check whether there are enough free WQEBBs: 335218a1c200SViacheslav Ovsiienko * - Control Segment 335318a1c200SViacheslav Ovsiienko * - Ethernet Segment 335418a1c200SViacheslav Ovsiienko * - First Segment of inlined Ethernet data 335518a1c200SViacheslav Ovsiienko * - ... data continued ... 335618a1c200SViacheslav Ovsiienko * - Data Segments of pointer/min inline type 335718a1c200SViacheslav Ovsiienko * 335818a1c200SViacheslav Ovsiienko * Estimate the number of Data Segments conservatively, 335918a1c200SViacheslav Ovsiienko * supposing no any mbufs is being freed during inlining. 336018a1c200SViacheslav Ovsiienko */ 336118a1c200SViacheslav Ovsiienko assert(inlen <= txq->inlen_send); 336218a1c200SViacheslav Ovsiienko ds = NB_SEGS(loc->mbuf) + 2 + (inlen - 336318a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE + 336418a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE + 336518a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 336618a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < ((ds + 3) / 4))) 336718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 336818a1c200SViacheslav Ovsiienko /* Check for maximal WQE size. */ 336918a1c200SViacheslav Ovsiienko if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) 337018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 337118a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 337218a1c200SViacheslav Ovsiienko /* Update sent data bytes/packets counters. */ 337318a1c200SViacheslav Ovsiienko txq->stats.obytes += dlen + vlan; 337418a1c200SViacheslav Ovsiienko #endif 337518a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 337618a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 337718a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx); 337818a1c200SViacheslav Ovsiienko ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx); 337918a1c200SViacheslav Ovsiienko wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); 338018a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 338118a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 33825a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 33834dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, true, olx); 338418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 338518a1c200SViacheslav Ovsiienko } 338618a1c200SViacheslav Ovsiienko 338718a1c200SViacheslav Ovsiienko /** 338818a1c200SViacheslav Ovsiienko * Tx burst function for multi-segment packets. Supports all 338918a1c200SViacheslav Ovsiienko * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs, 339018a1c200SViacheslav Ovsiienko * sends one packet per WQE. Function stops sending if it 339118a1c200SViacheslav Ovsiienko * encounters the single-segment packet. 339218a1c200SViacheslav Ovsiienko * 339318a1c200SViacheslav Ovsiienko * This routine is responsible for storing processed mbuf 339418a1c200SViacheslav Ovsiienko * into elts ring buffer and update elts_head. 339518a1c200SViacheslav Ovsiienko * 339618a1c200SViacheslav Ovsiienko * @param txq 339718a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 339818a1c200SViacheslav Ovsiienko * @param[in] pkts 339918a1c200SViacheslav Ovsiienko * Packets to transmit. 340018a1c200SViacheslav Ovsiienko * @param pkts_n 340118a1c200SViacheslav Ovsiienko * Number of packets in array. 340218a1c200SViacheslav Ovsiienko * @param loc 340318a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 340418a1c200SViacheslav Ovsiienko * @param olx 340518a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 340618a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 340718a1c200SViacheslav Ovsiienko * 340818a1c200SViacheslav Ovsiienko * @return 340918a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. 341018a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. 341118a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered. 341218a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered. 341318a1c200SViacheslav Ovsiienko * Local context variables updated. 341418a1c200SViacheslav Ovsiienko */ 341518a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 341618a1c200SViacheslav Ovsiienko mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq, 341718a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 341818a1c200SViacheslav Ovsiienko unsigned int pkts_n, 341918a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 342018a1c200SViacheslav Ovsiienko unsigned int olx) 342118a1c200SViacheslav Ovsiienko { 342218a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 342318a1c200SViacheslav Ovsiienko assert(pkts_n > loc->pkts_sent); 342418a1c200SViacheslav Ovsiienko pkts += loc->pkts_sent + 1; 342518a1c200SViacheslav Ovsiienko pkts_n -= loc->pkts_sent; 342618a1c200SViacheslav Ovsiienko for (;;) { 342718a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 342818a1c200SViacheslav Ovsiienko 342918a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc->mbuf) > 1); 343018a1c200SViacheslav Ovsiienko /* 343118a1c200SViacheslav Ovsiienko * Estimate the number of free elts quickly but 343218a1c200SViacheslav Ovsiienko * conservatively. Some segment may be fully inlined 343318a1c200SViacheslav Ovsiienko * and freed, ignore this here - precise estimation 343418a1c200SViacheslav Ovsiienko * is costly. 343518a1c200SViacheslav Ovsiienko */ 343618a1c200SViacheslav Ovsiienko if (loc->elts_free < NB_SEGS(loc->mbuf)) 343718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 343818a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 343918a1c200SViacheslav Ovsiienko unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) { 344018a1c200SViacheslav Ovsiienko /* Proceed with multi-segment TSO. */ 344118a1c200SViacheslav Ovsiienko ret = mlx5_tx_packet_multi_tso(txq, loc, olx); 344218a1c200SViacheslav Ovsiienko } else if (MLX5_TXOFF_CONFIG(INLINE)) { 344318a1c200SViacheslav Ovsiienko /* Proceed with multi-segment SEND with inlining. */ 344418a1c200SViacheslav Ovsiienko ret = mlx5_tx_packet_multi_inline(txq, loc, olx); 344518a1c200SViacheslav Ovsiienko } else { 344618a1c200SViacheslav Ovsiienko /* Proceed with multi-segment SEND w/o inlining. */ 344718a1c200SViacheslav Ovsiienko ret = mlx5_tx_packet_multi_send(txq, loc, olx); 344818a1c200SViacheslav Ovsiienko } 344918a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) 345018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 345118a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) 345218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 345318a1c200SViacheslav Ovsiienko /* WQE is built, go to the next packet. */ 345418a1c200SViacheslav Ovsiienko ++loc->pkts_sent; 345518a1c200SViacheslav Ovsiienko --pkts_n; 345618a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) 345718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 345818a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 345918a1c200SViacheslav Ovsiienko if (pkts_n > 1) 346018a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 346118a1c200SViacheslav Ovsiienko if (likely(NB_SEGS(loc->mbuf) > 1)) 346218a1c200SViacheslav Ovsiienko continue; 346318a1c200SViacheslav Ovsiienko /* Here ends the series of multi-segment packets. */ 346418a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 346530d87a5eSYongseok Koh unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) 346618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_TSO; 346718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 346818a1c200SViacheslav Ovsiienko } 346918a1c200SViacheslav Ovsiienko assert(false); 347018a1c200SViacheslav Ovsiienko } 347118a1c200SViacheslav Ovsiienko 347218a1c200SViacheslav Ovsiienko /** 347318a1c200SViacheslav Ovsiienko * Tx burst function for single-segment packets with TSO. 347418a1c200SViacheslav Ovsiienko * Supports all types of Tx offloads, except multi-packets. 347518a1c200SViacheslav Ovsiienko * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE. 347618a1c200SViacheslav Ovsiienko * Function stops sending if it encounters the multi-segment 347718a1c200SViacheslav Ovsiienko * packet or packet without TSO requested. 347818a1c200SViacheslav Ovsiienko * 347918a1c200SViacheslav Ovsiienko * The routine is responsible for storing processed mbuf 348018a1c200SViacheslav Ovsiienko * into elts ring buffer and update elts_head if inline 348118a1c200SViacheslav Ovsiienko * offloads is requested due to possible early freeing 348218a1c200SViacheslav Ovsiienko * of the inlined mbufs (can not store pkts array in elts 348318a1c200SViacheslav Ovsiienko * as a batch). 348418a1c200SViacheslav Ovsiienko * 348518a1c200SViacheslav Ovsiienko * @param txq 348618a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 348718a1c200SViacheslav Ovsiienko * @param[in] pkts 348818a1c200SViacheslav Ovsiienko * Packets to transmit. 348918a1c200SViacheslav Ovsiienko * @param pkts_n 349018a1c200SViacheslav Ovsiienko * Number of packets in array. 349118a1c200SViacheslav Ovsiienko * @param loc 349218a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 349318a1c200SViacheslav Ovsiienko * @param olx 349418a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 349518a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 349618a1c200SViacheslav Ovsiienko * 349718a1c200SViacheslav Ovsiienko * @return 349818a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. 349918a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. 350018a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered. 350118a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered. 350218a1c200SViacheslav Ovsiienko * Local context variables updated. 350318a1c200SViacheslav Ovsiienko */ 350418a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 350518a1c200SViacheslav Ovsiienko mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, 350618a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 350718a1c200SViacheslav Ovsiienko unsigned int pkts_n, 350818a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 350918a1c200SViacheslav Ovsiienko unsigned int olx) 351018a1c200SViacheslav Ovsiienko { 351118a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 351218a1c200SViacheslav Ovsiienko assert(pkts_n > loc->pkts_sent); 351318a1c200SViacheslav Ovsiienko pkts += loc->pkts_sent + 1; 351418a1c200SViacheslav Ovsiienko pkts_n -= loc->pkts_sent; 351518a1c200SViacheslav Ovsiienko for (;;) { 351618a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 351718a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe; 351818a1c200SViacheslav Ovsiienko unsigned int ds, dlen, hlen, ntcp, vlan = 0; 351918a1c200SViacheslav Ovsiienko uint8_t *dptr; 352018a1c200SViacheslav Ovsiienko 352118a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc->mbuf) == 1); 352218a1c200SViacheslav Ovsiienko dlen = rte_pktmbuf_data_len(loc->mbuf); 352318a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 352418a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { 352518a1c200SViacheslav Ovsiienko vlan = sizeof(struct rte_vlan_hdr); 352618a1c200SViacheslav Ovsiienko } 352718a1c200SViacheslav Ovsiienko /* 352818a1c200SViacheslav Ovsiienko * First calculate the WQE size to check 352918a1c200SViacheslav Ovsiienko * whether we have enough space in ring buffer. 353018a1c200SViacheslav Ovsiienko */ 353118a1c200SViacheslav Ovsiienko hlen = loc->mbuf->l2_len + vlan + 353218a1c200SViacheslav Ovsiienko loc->mbuf->l3_len + loc->mbuf->l4_len; 353318a1c200SViacheslav Ovsiienko if (unlikely((!hlen || !loc->mbuf->tso_segsz))) 353418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 353518a1c200SViacheslav Ovsiienko if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK) 353618a1c200SViacheslav Ovsiienko hlen += loc->mbuf->outer_l2_len + 353718a1c200SViacheslav Ovsiienko loc->mbuf->outer_l3_len; 353818a1c200SViacheslav Ovsiienko /* Segment must contain all TSO headers. */ 353918a1c200SViacheslav Ovsiienko if (unlikely(hlen > MLX5_MAX_TSO_HEADER || 354018a1c200SViacheslav Ovsiienko hlen <= MLX5_ESEG_MIN_INLINE_SIZE || 354118a1c200SViacheslav Ovsiienko hlen > (dlen + vlan))) 354218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 354318a1c200SViacheslav Ovsiienko /* 354418a1c200SViacheslav Ovsiienko * Check whether there are enough free WQEBBs: 354518a1c200SViacheslav Ovsiienko * - Control Segment 354618a1c200SViacheslav Ovsiienko * - Ethernet Segment 354718a1c200SViacheslav Ovsiienko * - First Segment of inlined Ethernet data 354818a1c200SViacheslav Ovsiienko * - ... data continued ... 354918a1c200SViacheslav Ovsiienko * - Finishing Data Segment of pointer type 355018a1c200SViacheslav Ovsiienko */ 355118a1c200SViacheslav Ovsiienko ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE + 355218a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 355318a1c200SViacheslav Ovsiienko if (loc->wqe_free < ((ds + 3) / 4)) 355418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 355518a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 355618a1c200SViacheslav Ovsiienko /* Update sent data bytes/packets counters. */ 355718a1c200SViacheslav Ovsiienko ntcp = (dlen + vlan - hlen + 355818a1c200SViacheslav Ovsiienko loc->mbuf->tso_segsz - 1) / 355918a1c200SViacheslav Ovsiienko loc->mbuf->tso_segsz; 356018a1c200SViacheslav Ovsiienko /* 356118a1c200SViacheslav Ovsiienko * One will be added for mbuf itself at the end 356218a1c200SViacheslav Ovsiienko * of the mlx5_tx_burst from loc->pkts_sent field. 356318a1c200SViacheslav Ovsiienko */ 356418a1c200SViacheslav Ovsiienko --ntcp; 356518a1c200SViacheslav Ovsiienko txq->stats.opackets += ntcp; 356618a1c200SViacheslav Ovsiienko txq->stats.obytes += dlen + vlan + ntcp * hlen; 356718a1c200SViacheslav Ovsiienko #endif 356818a1c200SViacheslav Ovsiienko /* 356918a1c200SViacheslav Ovsiienko * Build the TSO WQE: 357018a1c200SViacheslav Ovsiienko * - Control Segment 357118a1c200SViacheslav Ovsiienko * - Ethernet Segment with hlen bytes inlined 357218a1c200SViacheslav Ovsiienko * - Data Segment of pointer type 357318a1c200SViacheslav Ovsiienko */ 357418a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 357518a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 357618a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, ds, 357718a1c200SViacheslav Ovsiienko MLX5_OPCODE_TSO, olx); 357818a1c200SViacheslav Ovsiienko dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx); 357918a1c200SViacheslav Ovsiienko dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan; 358018a1c200SViacheslav Ovsiienko dlen -= hlen - vlan; 358118a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx); 358218a1c200SViacheslav Ovsiienko /* 358318a1c200SViacheslav Ovsiienko * WQE is built, update the loop parameters 358418a1c200SViacheslav Ovsiienko * and go to the next packet. 358518a1c200SViacheslav Ovsiienko */ 358618a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 358718a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 358818a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(INLINE)) 358918a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; 359018a1c200SViacheslav Ovsiienko --loc->elts_free; 359118a1c200SViacheslav Ovsiienko ++loc->pkts_sent; 359218a1c200SViacheslav Ovsiienko --pkts_n; 35935a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 35944dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, false, olx); 359518a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) 359618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 359718a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 359818a1c200SViacheslav Ovsiienko if (pkts_n > 1) 359918a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 360018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 360118a1c200SViacheslav Ovsiienko unlikely(NB_SEGS(loc->mbuf) > 1)) 360218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 360330d87a5eSYongseok Koh if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))) 360418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 360518a1c200SViacheslav Ovsiienko /* Continue with the next TSO packet. */ 360618a1c200SViacheslav Ovsiienko } 360718a1c200SViacheslav Ovsiienko assert(false); 360818a1c200SViacheslav Ovsiienko } 360918a1c200SViacheslav Ovsiienko 361018a1c200SViacheslav Ovsiienko /** 361118a1c200SViacheslav Ovsiienko * Analyze the packet and select the best method to send. 361218a1c200SViacheslav Ovsiienko * 361318a1c200SViacheslav Ovsiienko * @param txq 361418a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 361518a1c200SViacheslav Ovsiienko * @param loc 361618a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 361718a1c200SViacheslav Ovsiienko * @param olx 361818a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 361918a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 362018a1c200SViacheslav Ovsiienko * @param newp 362118a1c200SViacheslav Ovsiienko * The predefined flag whether do complete check for 362218a1c200SViacheslav Ovsiienko * multi-segment packets and TSO. 362318a1c200SViacheslav Ovsiienko * 362418a1c200SViacheslav Ovsiienko * @return 362518a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered. 362618a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO. 362718a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND. 362818a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW. 362918a1c200SViacheslav Ovsiienko */ 363018a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 363118a1c200SViacheslav Ovsiienko mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq, 363218a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 363318a1c200SViacheslav Ovsiienko unsigned int olx, 363418a1c200SViacheslav Ovsiienko bool newp) 363518a1c200SViacheslav Ovsiienko { 363618a1c200SViacheslav Ovsiienko /* Check for multi-segment packet. */ 363718a1c200SViacheslav Ovsiienko if (newp && 363818a1c200SViacheslav Ovsiienko MLX5_TXOFF_CONFIG(MULTI) && 363918a1c200SViacheslav Ovsiienko unlikely(NB_SEGS(loc->mbuf) > 1)) 364018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 364118a1c200SViacheslav Ovsiienko /* Check for TSO packet. */ 364218a1c200SViacheslav Ovsiienko if (newp && 364318a1c200SViacheslav Ovsiienko MLX5_TXOFF_CONFIG(TSO) && 364418a1c200SViacheslav Ovsiienko unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) 364518a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_TSO; 364618a1c200SViacheslav Ovsiienko /* Check if eMPW is enabled at all. */ 364718a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(EMPW)) 364818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 364918a1c200SViacheslav Ovsiienko /* Check if eMPW can be engaged. */ 365018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 365118a1c200SViacheslav Ovsiienko unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) && 365218a1c200SViacheslav Ovsiienko (!MLX5_TXOFF_CONFIG(INLINE) || 365318a1c200SViacheslav Ovsiienko unlikely((rte_pktmbuf_data_len(loc->mbuf) + 365418a1c200SViacheslav Ovsiienko sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) { 365518a1c200SViacheslav Ovsiienko /* 365618a1c200SViacheslav Ovsiienko * eMPW does not support VLAN insertion offload, 365718a1c200SViacheslav Ovsiienko * we have to inline the entire packet but 365818a1c200SViacheslav Ovsiienko * packet is too long for inlining. 365918a1c200SViacheslav Ovsiienko */ 366018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 366118a1c200SViacheslav Ovsiienko } 366218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EMPW; 366318a1c200SViacheslav Ovsiienko } 366418a1c200SViacheslav Ovsiienko 366518a1c200SViacheslav Ovsiienko /** 366618a1c200SViacheslav Ovsiienko * Check the next packet attributes to match with the eMPW batch ones. 366718a1c200SViacheslav Ovsiienko * 366818a1c200SViacheslav Ovsiienko * @param txq 366918a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 367018a1c200SViacheslav Ovsiienko * @param es 367118a1c200SViacheslav Ovsiienko * Pointer to Ethernet Segment of eMPW batch. 367218a1c200SViacheslav Ovsiienko * @param loc 367318a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 367418a1c200SViacheslav Ovsiienko * @param olx 367518a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 367618a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 367718a1c200SViacheslav Ovsiienko * 367818a1c200SViacheslav Ovsiienko * @return 367918a1c200SViacheslav Ovsiienko * true - packet match with eMPW batch attributes. 368018a1c200SViacheslav Ovsiienko * false - no match, eMPW should be restarted. 368118a1c200SViacheslav Ovsiienko */ 368218a1c200SViacheslav Ovsiienko static __rte_always_inline bool 368318a1c200SViacheslav Ovsiienko mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused, 368418a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict es, 368518a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 368618a1c200SViacheslav Ovsiienko unsigned int olx) 368718a1c200SViacheslav Ovsiienko { 368818a1c200SViacheslav Ovsiienko uint8_t swp_flags = 0; 368918a1c200SViacheslav Ovsiienko 369018a1c200SViacheslav Ovsiienko /* Compare the checksum flags, if any. */ 369118a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(CSUM) && 369218a1c200SViacheslav Ovsiienko txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags) 369318a1c200SViacheslav Ovsiienko return false; 369418a1c200SViacheslav Ovsiienko /* Compare the Software Parser offsets and flags. */ 369518a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(SWP) && 369618a1c200SViacheslav Ovsiienko (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) || 369718a1c200SViacheslav Ovsiienko es->swp_flags != swp_flags)) 369818a1c200SViacheslav Ovsiienko return false; 369918a1c200SViacheslav Ovsiienko /* Fill metadata field if needed. */ 370018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(METADATA) && 370118a1c200SViacheslav Ovsiienko es->metadata != (loc->mbuf->ol_flags & PKT_TX_METADATA ? 370218a1c200SViacheslav Ovsiienko loc->mbuf->tx_metadata : 0)) 370318a1c200SViacheslav Ovsiienko return false; 370418a1c200SViacheslav Ovsiienko /* There must be no VLAN packets in eMPW loop. */ 370518a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN)) 370618a1c200SViacheslav Ovsiienko assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)); 370718a1c200SViacheslav Ovsiienko return true; 370818a1c200SViacheslav Ovsiienko } 370918a1c200SViacheslav Ovsiienko 371018a1c200SViacheslav Ovsiienko /* 371118a1c200SViacheslav Ovsiienko * Update send loop variables and WQE for eMPW loop 371218a1c200SViacheslav Ovsiienko * without data inlining. Number of Data Segments is 371318a1c200SViacheslav Ovsiienko * equal to the number of sent packets. 371418a1c200SViacheslav Ovsiienko * 371518a1c200SViacheslav Ovsiienko * @param txq 371618a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 371718a1c200SViacheslav Ovsiienko * @param loc 371818a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 371918a1c200SViacheslav Ovsiienko * @param ds 372018a1c200SViacheslav Ovsiienko * Number of packets/Data Segments/Packets. 372118a1c200SViacheslav Ovsiienko * @param slen 372218a1c200SViacheslav Ovsiienko * Accumulated statistics, bytes sent 372318a1c200SViacheslav Ovsiienko * @param olx 372418a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 372518a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 372618a1c200SViacheslav Ovsiienko * 372718a1c200SViacheslav Ovsiienko * @return 372818a1c200SViacheslav Ovsiienko * true - packet match with eMPW batch attributes. 372918a1c200SViacheslav Ovsiienko * false - no match, eMPW should be restarted. 373018a1c200SViacheslav Ovsiienko */ 373118a1c200SViacheslav Ovsiienko static __rte_always_inline void 373218a1c200SViacheslav Ovsiienko mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, 373318a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 373418a1c200SViacheslav Ovsiienko unsigned int ds, 373518a1c200SViacheslav Ovsiienko unsigned int slen, 37365a93e173SViacheslav Ovsiienko unsigned int olx) 373718a1c200SViacheslav Ovsiienko { 373818a1c200SViacheslav Ovsiienko assert(!MLX5_TXOFF_CONFIG(INLINE)); 373918a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 374018a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 374118a1c200SViacheslav Ovsiienko txq->stats.obytes += slen; 374218a1c200SViacheslav Ovsiienko #else 374318a1c200SViacheslav Ovsiienko (void)slen; 374418a1c200SViacheslav Ovsiienko #endif 374518a1c200SViacheslav Ovsiienko loc->elts_free -= ds; 374618a1c200SViacheslav Ovsiienko loc->pkts_sent += ds; 374718a1c200SViacheslav Ovsiienko ds += 2; 374818a1c200SViacheslav Ovsiienko loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); 374918a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 375018a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 37515a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 37524dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, false, olx); 375318a1c200SViacheslav Ovsiienko } 375418a1c200SViacheslav Ovsiienko 375518a1c200SViacheslav Ovsiienko /* 375618a1c200SViacheslav Ovsiienko * Update send loop variables and WQE for eMPW loop 375718a1c200SViacheslav Ovsiienko * with data inlining. Gets the size of pushed descriptors 375818a1c200SViacheslav Ovsiienko * and data to the WQE. 375918a1c200SViacheslav Ovsiienko * 376018a1c200SViacheslav Ovsiienko * @param txq 376118a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 376218a1c200SViacheslav Ovsiienko * @param loc 376318a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 376418a1c200SViacheslav Ovsiienko * @param len 376518a1c200SViacheslav Ovsiienko * Total size of descriptor/data in bytes. 376618a1c200SViacheslav Ovsiienko * @param slen 376718a1c200SViacheslav Ovsiienko * Accumulated statistics, data bytes sent. 376818a1c200SViacheslav Ovsiienko * @param olx 376918a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 377018a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 377118a1c200SViacheslav Ovsiienko * 377218a1c200SViacheslav Ovsiienko * @return 377318a1c200SViacheslav Ovsiienko * true - packet match with eMPW batch attributes. 377418a1c200SViacheslav Ovsiienko * false - no match, eMPW should be restarted. 377518a1c200SViacheslav Ovsiienko */ 377618a1c200SViacheslav Ovsiienko static __rte_always_inline void 377718a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq, 377818a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 377918a1c200SViacheslav Ovsiienko unsigned int len, 378018a1c200SViacheslav Ovsiienko unsigned int slen, 378118a1c200SViacheslav Ovsiienko unsigned int olx __rte_unused) 378218a1c200SViacheslav Ovsiienko { 378318a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 378418a1c200SViacheslav Ovsiienko assert((len % MLX5_WSEG_SIZE) == 0); 378518a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 378618a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 378718a1c200SViacheslav Ovsiienko txq->stats.obytes += slen; 378818a1c200SViacheslav Ovsiienko #else 378918a1c200SViacheslav Ovsiienko (void)slen; 379018a1c200SViacheslav Ovsiienko #endif 379118a1c200SViacheslav Ovsiienko len = len / MLX5_WSEG_SIZE + 2; 379218a1c200SViacheslav Ovsiienko loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len); 379318a1c200SViacheslav Ovsiienko txq->wqe_ci += (len + 3) / 4; 379418a1c200SViacheslav Ovsiienko loc->wqe_free -= (len + 3) / 4; 37955a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 37964dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, false, olx); 379718a1c200SViacheslav Ovsiienko } 379818a1c200SViacheslav Ovsiienko 379918a1c200SViacheslav Ovsiienko /** 380018a1c200SViacheslav Ovsiienko * The set of Tx burst functions for single-segment packets 380118a1c200SViacheslav Ovsiienko * without TSO and with Multi-Packet Writing feature support. 380218a1c200SViacheslav Ovsiienko * Supports all types of Tx offloads, except multi-packets 380318a1c200SViacheslav Ovsiienko * and TSO. 380418a1c200SViacheslav Ovsiienko * 380518a1c200SViacheslav Ovsiienko * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends 380618a1c200SViacheslav Ovsiienko * as many packet per WQE as it can. If eMPW is not configured 380718a1c200SViacheslav Ovsiienko * or packet can not be sent with eMPW (VLAN insertion) the 380818a1c200SViacheslav Ovsiienko * ordinary SEND opcode is used and only one packet placed 380918a1c200SViacheslav Ovsiienko * in WQE. 381018a1c200SViacheslav Ovsiienko * 381118a1c200SViacheslav Ovsiienko * Functions stop sending if it encounters the multi-segment 381218a1c200SViacheslav Ovsiienko * packet or packet with TSO requested. 381318a1c200SViacheslav Ovsiienko * 381418a1c200SViacheslav Ovsiienko * The routines are responsible for storing processed mbuf 381518a1c200SViacheslav Ovsiienko * into elts ring buffer and update elts_head if inlining 381618a1c200SViacheslav Ovsiienko * offload is requested. Otherwise the copying mbufs to elts 381718a1c200SViacheslav Ovsiienko * can be postponed and completed at the end of burst routine. 381818a1c200SViacheslav Ovsiienko * 381918a1c200SViacheslav Ovsiienko * @param txq 382018a1c200SViacheslav Ovsiienko * Pointer to TX queue structure. 382118a1c200SViacheslav Ovsiienko * @param[in] pkts 382218a1c200SViacheslav Ovsiienko * Packets to transmit. 382318a1c200SViacheslav Ovsiienko * @param pkts_n 382418a1c200SViacheslav Ovsiienko * Number of packets in array. 382518a1c200SViacheslav Ovsiienko * @param loc 382618a1c200SViacheslav Ovsiienko * Pointer to burst routine local context. 382718a1c200SViacheslav Ovsiienko * @param olx 382818a1c200SViacheslav Ovsiienko * Configured Tx offloads mask. It is fully defined at 382918a1c200SViacheslav Ovsiienko * compile time and may be used for optimization. 383018a1c200SViacheslav Ovsiienko * 383118a1c200SViacheslav Ovsiienko * @return 383218a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. 383318a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. 383418a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered. 383518a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_TSO - TSO packet encountered. 383618a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_SINGLE - used inside functions set. 383718a1c200SViacheslav Ovsiienko * MLX5_TXCMP_CODE_EMPW - used inside functions set. 383818a1c200SViacheslav Ovsiienko * 383918a1c200SViacheslav Ovsiienko * Local context variables updated. 384018a1c200SViacheslav Ovsiienko * 384118a1c200SViacheslav Ovsiienko * 384218a1c200SViacheslav Ovsiienko * The routine sends packets with MLX5_OPCODE_EMPW 384318a1c200SViacheslav Ovsiienko * without inlining, this is dedicated optimized branch. 384418a1c200SViacheslav Ovsiienko * No VLAN insertion is supported. 384518a1c200SViacheslav Ovsiienko */ 384618a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 384718a1c200SViacheslav Ovsiienko mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq, 384818a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 384918a1c200SViacheslav Ovsiienko unsigned int pkts_n, 385018a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 385118a1c200SViacheslav Ovsiienko unsigned int olx) 385218a1c200SViacheslav Ovsiienko { 385318a1c200SViacheslav Ovsiienko /* 385418a1c200SViacheslav Ovsiienko * Subroutine is the part of mlx5_tx_burst_single() 385518a1c200SViacheslav Ovsiienko * and sends single-segment packet with eMPW opcode 385618a1c200SViacheslav Ovsiienko * without data inlining. 385718a1c200SViacheslav Ovsiienko */ 385818a1c200SViacheslav Ovsiienko assert(!MLX5_TXOFF_CONFIG(INLINE)); 385918a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(EMPW)); 386018a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 386118a1c200SViacheslav Ovsiienko assert(pkts_n > loc->pkts_sent); 386218a1c200SViacheslav Ovsiienko static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size"); 386318a1c200SViacheslav Ovsiienko pkts += loc->pkts_sent + 1; 386418a1c200SViacheslav Ovsiienko pkts_n -= loc->pkts_sent; 386518a1c200SViacheslav Ovsiienko for (;;) { 386618a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 386718a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict eseg; 386818a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 386918a1c200SViacheslav Ovsiienko unsigned int part, loop; 387018a1c200SViacheslav Ovsiienko unsigned int slen = 0; 387118a1c200SViacheslav Ovsiienko 387218a1c200SViacheslav Ovsiienko next_empw: 387318a1c200SViacheslav Ovsiienko part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS); 387418a1c200SViacheslav Ovsiienko if (unlikely(loc->elts_free < part)) { 387518a1c200SViacheslav Ovsiienko /* We have no enough elts to save all mbufs. */ 387618a1c200SViacheslav Ovsiienko if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS)) 387718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 387818a1c200SViacheslav Ovsiienko /* But we still able to send at least minimal eMPW. */ 387918a1c200SViacheslav Ovsiienko part = loc->elts_free; 388018a1c200SViacheslav Ovsiienko } 388118a1c200SViacheslav Ovsiienko /* Check whether we have enough WQEs */ 388218a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) { 388318a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < 388418a1c200SViacheslav Ovsiienko ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) 388518a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 388618a1c200SViacheslav Ovsiienko part = (loc->wqe_free * 4) - 2; 388718a1c200SViacheslav Ovsiienko } 388818a1c200SViacheslav Ovsiienko if (likely(part > 1)) 388918a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 389018a1c200SViacheslav Ovsiienko loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m); 389118a1c200SViacheslav Ovsiienko /* 389218a1c200SViacheslav Ovsiienko * Build eMPW title WQEBB: 389318a1c200SViacheslav Ovsiienko * - Control Segment, eMPW opcode 389418a1c200SViacheslav Ovsiienko * - Ethernet Segment, no inline 389518a1c200SViacheslav Ovsiienko */ 389618a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2, 389718a1c200SViacheslav Ovsiienko MLX5_OPCODE_ENHANCED_MPSW, olx); 389818a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(txq, loc, loc->wqe_last, 389918a1c200SViacheslav Ovsiienko olx & ~MLX5_TXOFF_CONFIG_VLAN); 390018a1c200SViacheslav Ovsiienko eseg = &loc->wqe_last->eseg; 390118a1c200SViacheslav Ovsiienko dseg = &loc->wqe_last->dseg[0]; 390218a1c200SViacheslav Ovsiienko loop = part; 390318a1c200SViacheslav Ovsiienko for (;;) { 390418a1c200SViacheslav Ovsiienko uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf); 390518a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 390618a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 390718a1c200SViacheslav Ovsiienko slen += dlen; 390818a1c200SViacheslav Ovsiienko #endif 390918a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr 391018a1c200SViacheslav Ovsiienko (txq, loc, dseg, 391118a1c200SViacheslav Ovsiienko rte_pktmbuf_mtod(loc->mbuf, uint8_t *), 391218a1c200SViacheslav Ovsiienko dlen, olx); 391318a1c200SViacheslav Ovsiienko if (unlikely(--loop == 0)) 391418a1c200SViacheslav Ovsiienko break; 391518a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 391618a1c200SViacheslav Ovsiienko if (likely(loop > 1)) 391718a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 391818a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, true); 391918a1c200SViacheslav Ovsiienko /* 392018a1c200SViacheslav Ovsiienko * Unroll the completion code to avoid 392118a1c200SViacheslav Ovsiienko * returning variable value - it results in 392218a1c200SViacheslav Ovsiienko * unoptimized sequent checking in caller. 392318a1c200SViacheslav Ovsiienko */ 392418a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_MULTI) { 392518a1c200SViacheslav Ovsiienko part -= loop; 392618a1c200SViacheslav Ovsiienko mlx5_tx_sdone_empw(txq, loc, part, slen, olx); 392718a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 392818a1c200SViacheslav Ovsiienko !loc->wqe_free)) 392918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 393018a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 393118a1c200SViacheslav Ovsiienko } 393218a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_TSO) { 393318a1c200SViacheslav Ovsiienko part -= loop; 393418a1c200SViacheslav Ovsiienko mlx5_tx_sdone_empw(txq, loc, part, slen, olx); 393518a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 393618a1c200SViacheslav Ovsiienko !loc->wqe_free)) 393718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 393818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_TSO; 393918a1c200SViacheslav Ovsiienko } 394018a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) { 394118a1c200SViacheslav Ovsiienko part -= loop; 394218a1c200SViacheslav Ovsiienko mlx5_tx_sdone_empw(txq, loc, part, slen, olx); 394318a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 394418a1c200SViacheslav Ovsiienko !loc->wqe_free)) 394518a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 394618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 394718a1c200SViacheslav Ovsiienko } 394818a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_EMPW) { 394918a1c200SViacheslav Ovsiienko assert(false); 395018a1c200SViacheslav Ovsiienko part -= loop; 395118a1c200SViacheslav Ovsiienko mlx5_tx_sdone_empw(txq, loc, part, slen, olx); 395218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 395318a1c200SViacheslav Ovsiienko } 395418a1c200SViacheslav Ovsiienko /* 395518a1c200SViacheslav Ovsiienko * Check whether packet parameters coincide 395618a1c200SViacheslav Ovsiienko * within assumed eMPW batch: 395718a1c200SViacheslav Ovsiienko * - check sum settings 395818a1c200SViacheslav Ovsiienko * - metadata value 395918a1c200SViacheslav Ovsiienko * - software parser settings 396018a1c200SViacheslav Ovsiienko */ 396118a1c200SViacheslav Ovsiienko if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) { 396218a1c200SViacheslav Ovsiienko assert(loop); 396318a1c200SViacheslav Ovsiienko part -= loop; 396418a1c200SViacheslav Ovsiienko mlx5_tx_sdone_empw(txq, loc, part, slen, olx); 396518a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 396618a1c200SViacheslav Ovsiienko !loc->wqe_free)) 396718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 39685a93e173SViacheslav Ovsiienko pkts_n -= part; 396918a1c200SViacheslav Ovsiienko goto next_empw; 397018a1c200SViacheslav Ovsiienko } 397118a1c200SViacheslav Ovsiienko /* Packet attributes match, continue the same eMPW. */ 397218a1c200SViacheslav Ovsiienko ++dseg; 397318a1c200SViacheslav Ovsiienko if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) 397418a1c200SViacheslav Ovsiienko dseg = (struct mlx5_wqe_dseg *)txq->wqes; 397518a1c200SViacheslav Ovsiienko } 397618a1c200SViacheslav Ovsiienko /* eMPW is built successfully, update loop parameters. */ 397718a1c200SViacheslav Ovsiienko assert(!loop); 397818a1c200SViacheslav Ovsiienko assert(pkts_n >= part); 397918a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 398018a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 398118a1c200SViacheslav Ovsiienko txq->stats.obytes += slen; 398218a1c200SViacheslav Ovsiienko #endif 398318a1c200SViacheslav Ovsiienko loc->elts_free -= part; 398418a1c200SViacheslav Ovsiienko loc->pkts_sent += part; 398518a1c200SViacheslav Ovsiienko txq->wqe_ci += (2 + part + 3) / 4; 398618a1c200SViacheslav Ovsiienko loc->wqe_free -= (2 + part + 3) / 4; 398718a1c200SViacheslav Ovsiienko pkts_n -= part; 39885a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 39894dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, false, olx); 399018a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) 399118a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 399218a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 399318a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, true); 399418a1c200SViacheslav Ovsiienko if (unlikely(ret != MLX5_TXCMP_CODE_EMPW)) 399518a1c200SViacheslav Ovsiienko return ret; 399618a1c200SViacheslav Ovsiienko /* Continue sending eMPW batches. */ 399718a1c200SViacheslav Ovsiienko } 399818a1c200SViacheslav Ovsiienko assert(false); 399918a1c200SViacheslav Ovsiienko } 400018a1c200SViacheslav Ovsiienko 400118a1c200SViacheslav Ovsiienko /** 400218a1c200SViacheslav Ovsiienko * The routine sends packets with MLX5_OPCODE_EMPW 400318a1c200SViacheslav Ovsiienko * with inlining, optionally supports VLAN insertion. 400418a1c200SViacheslav Ovsiienko */ 400518a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 400618a1c200SViacheslav Ovsiienko mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, 400718a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 400818a1c200SViacheslav Ovsiienko unsigned int pkts_n, 400918a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 401018a1c200SViacheslav Ovsiienko unsigned int olx) 401118a1c200SViacheslav Ovsiienko { 401218a1c200SViacheslav Ovsiienko /* 401318a1c200SViacheslav Ovsiienko * Subroutine is the part of mlx5_tx_burst_single() 401418a1c200SViacheslav Ovsiienko * and sends single-segment packet with eMPW opcode 401518a1c200SViacheslav Ovsiienko * with data inlining. 401618a1c200SViacheslav Ovsiienko */ 401718a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 401818a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(EMPW)); 401918a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 402018a1c200SViacheslav Ovsiienko assert(pkts_n > loc->pkts_sent); 402118a1c200SViacheslav Ovsiienko static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size"); 402218a1c200SViacheslav Ovsiienko pkts += loc->pkts_sent + 1; 402318a1c200SViacheslav Ovsiienko pkts_n -= loc->pkts_sent; 402418a1c200SViacheslav Ovsiienko for (;;) { 402518a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 402618a1c200SViacheslav Ovsiienko struct mlx5_wqe_eseg *restrict eseg; 402718a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 40285a93e173SViacheslav Ovsiienko unsigned int room, part, nlim; 402918a1c200SViacheslav Ovsiienko unsigned int slen = 0; 403018a1c200SViacheslav Ovsiienko 40315a93e173SViacheslav Ovsiienko /* 40325a93e173SViacheslav Ovsiienko * Limits the amount of packets in one WQE 40335a93e173SViacheslav Ovsiienko * to improve CQE latency generation. 40345a93e173SViacheslav Ovsiienko */ 40355a93e173SViacheslav Ovsiienko nlim = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS); 403618a1c200SViacheslav Ovsiienko /* Check whether we have minimal amount WQEs */ 403718a1c200SViacheslav Ovsiienko if (unlikely(loc->wqe_free < 403818a1c200SViacheslav Ovsiienko ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) 403918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 404018a1c200SViacheslav Ovsiienko if (likely(pkts_n > 1)) 404118a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 404218a1c200SViacheslav Ovsiienko loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m); 404318a1c200SViacheslav Ovsiienko /* 404418a1c200SViacheslav Ovsiienko * Build eMPW title WQEBB: 404518a1c200SViacheslav Ovsiienko * - Control Segment, eMPW opcode, zero DS 404618a1c200SViacheslav Ovsiienko * - Ethernet Segment, no inline 404718a1c200SViacheslav Ovsiienko */ 404818a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0, 404918a1c200SViacheslav Ovsiienko MLX5_OPCODE_ENHANCED_MPSW, olx); 405018a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(txq, loc, loc->wqe_last, 405118a1c200SViacheslav Ovsiienko olx & ~MLX5_TXOFF_CONFIG_VLAN); 405218a1c200SViacheslav Ovsiienko eseg = &loc->wqe_last->eseg; 405318a1c200SViacheslav Ovsiienko dseg = &loc->wqe_last->dseg[0]; 405418a1c200SViacheslav Ovsiienko room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE, 405518a1c200SViacheslav Ovsiienko loc->wqe_free) * MLX5_WQE_SIZE - 405618a1c200SViacheslav Ovsiienko MLX5_WQE_CSEG_SIZE - 405718a1c200SViacheslav Ovsiienko MLX5_WQE_ESEG_SIZE; 405818a1c200SViacheslav Ovsiienko /* Build WQE till we have space, packets and resources. */ 405918a1c200SViacheslav Ovsiienko part = room; 406018a1c200SViacheslav Ovsiienko for (;;) { 406118a1c200SViacheslav Ovsiienko uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf); 406218a1c200SViacheslav Ovsiienko uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); 406318a1c200SViacheslav Ovsiienko unsigned int tlen; 406418a1c200SViacheslav Ovsiienko 406518a1c200SViacheslav Ovsiienko assert(room >= MLX5_WQE_DSEG_SIZE); 406618a1c200SViacheslav Ovsiienko assert((room % MLX5_WQE_DSEG_SIZE) == 0); 406718a1c200SViacheslav Ovsiienko assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end); 406818a1c200SViacheslav Ovsiienko /* 406918a1c200SViacheslav Ovsiienko * Some Tx offloads may cause an error if 407018a1c200SViacheslav Ovsiienko * packet is not long enough, check against 407118a1c200SViacheslav Ovsiienko * assumed minimal length. 407218a1c200SViacheslav Ovsiienko */ 407318a1c200SViacheslav Ovsiienko if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) { 407418a1c200SViacheslav Ovsiienko part -= room; 407518a1c200SViacheslav Ovsiienko if (unlikely(!part)) 407618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 407718a1c200SViacheslav Ovsiienko /* 407818a1c200SViacheslav Ovsiienko * We have some successfully built 407918a1c200SViacheslav Ovsiienko * packet Data Segments to send. 408018a1c200SViacheslav Ovsiienko */ 408118a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 408218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 408318a1c200SViacheslav Ovsiienko } 408418a1c200SViacheslav Ovsiienko /* Inline or not inline - that's the Question. */ 408518a1c200SViacheslav Ovsiienko if (dlen > txq->inlen_empw) 408618a1c200SViacheslav Ovsiienko goto pointer_empw; 408718a1c200SViacheslav Ovsiienko /* Inline entire packet, optional VLAN insertion. */ 408818a1c200SViacheslav Ovsiienko tlen = sizeof(dseg->bcount) + dlen; 408918a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 409018a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { 409118a1c200SViacheslav Ovsiienko /* 409218a1c200SViacheslav Ovsiienko * The packet length must be checked in 409318a1c200SViacheslav Ovsiienko * mlx5_tx_able_to_empw() and packet 409418a1c200SViacheslav Ovsiienko * fits into inline length guaranteed. 409518a1c200SViacheslav Ovsiienko */ 409618a1c200SViacheslav Ovsiienko assert((dlen + sizeof(struct rte_vlan_hdr)) <= 409718a1c200SViacheslav Ovsiienko txq->inlen_empw); 409818a1c200SViacheslav Ovsiienko tlen += sizeof(struct rte_vlan_hdr); 409918a1c200SViacheslav Ovsiienko if (room < tlen) 410018a1c200SViacheslav Ovsiienko break; 410118a1c200SViacheslav Ovsiienko dseg = mlx5_tx_dseg_vlan(txq, loc, dseg, 410218a1c200SViacheslav Ovsiienko dptr, dlen, olx); 410318a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 410418a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 410518a1c200SViacheslav Ovsiienko slen += sizeof(struct rte_vlan_hdr); 410618a1c200SViacheslav Ovsiienko #endif 410718a1c200SViacheslav Ovsiienko } else { 410818a1c200SViacheslav Ovsiienko if (room < tlen) 410918a1c200SViacheslav Ovsiienko break; 411018a1c200SViacheslav Ovsiienko dseg = mlx5_tx_dseg_empw(txq, loc, dseg, 411118a1c200SViacheslav Ovsiienko dptr, dlen, olx); 411218a1c200SViacheslav Ovsiienko } 411318a1c200SViacheslav Ovsiienko tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE); 411418a1c200SViacheslav Ovsiienko assert(room >= tlen); 411518a1c200SViacheslav Ovsiienko room -= tlen; 411618a1c200SViacheslav Ovsiienko /* 411718a1c200SViacheslav Ovsiienko * Packet data are completely inlined, 411818a1c200SViacheslav Ovsiienko * free the packet immediately. 411918a1c200SViacheslav Ovsiienko */ 412018a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(loc->mbuf); 412118a1c200SViacheslav Ovsiienko goto next_mbuf; 412218a1c200SViacheslav Ovsiienko pointer_empw: 412318a1c200SViacheslav Ovsiienko /* 412418a1c200SViacheslav Ovsiienko * Not inlinable VLAN packets are 412518a1c200SViacheslav Ovsiienko * proceeded outside of this routine. 412618a1c200SViacheslav Ovsiienko */ 412718a1c200SViacheslav Ovsiienko assert(room >= MLX5_WQE_DSEG_SIZE); 412818a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN)) 412918a1c200SViacheslav Ovsiienko assert(!(loc->mbuf->ol_flags & 413018a1c200SViacheslav Ovsiienko PKT_TX_VLAN_PKT)); 413118a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx); 413218a1c200SViacheslav Ovsiienko /* We have to store mbuf in elts.*/ 413318a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; 413418a1c200SViacheslav Ovsiienko room -= MLX5_WQE_DSEG_SIZE; 413518a1c200SViacheslav Ovsiienko /* Ring buffer wraparound is checked at the loop end.*/ 413618a1c200SViacheslav Ovsiienko ++dseg; 413718a1c200SViacheslav Ovsiienko next_mbuf: 413818a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 413918a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 414018a1c200SViacheslav Ovsiienko slen += dlen; 414118a1c200SViacheslav Ovsiienko #endif 414218a1c200SViacheslav Ovsiienko loc->pkts_sent++; 414318a1c200SViacheslav Ovsiienko loc->elts_free--; 414418a1c200SViacheslav Ovsiienko pkts_n--; 414518a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free)) { 414618a1c200SViacheslav Ovsiienko /* 414718a1c200SViacheslav Ovsiienko * We have no resources/packets to 414818a1c200SViacheslav Ovsiienko * continue build descriptors. 414918a1c200SViacheslav Ovsiienko */ 415018a1c200SViacheslav Ovsiienko part -= room; 415118a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 415218a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 415318a1c200SViacheslav Ovsiienko } 415418a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 415518a1c200SViacheslav Ovsiienko if (likely(pkts_n > 1)) 415618a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 415718a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, true); 415818a1c200SViacheslav Ovsiienko /* 415918a1c200SViacheslav Ovsiienko * Unroll the completion code to avoid 416018a1c200SViacheslav Ovsiienko * returning variable value - it results in 416118a1c200SViacheslav Ovsiienko * unoptimized sequent checking in caller. 416218a1c200SViacheslav Ovsiienko */ 416318a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_MULTI) { 416418a1c200SViacheslav Ovsiienko part -= room; 416518a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 416618a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 416718a1c200SViacheslav Ovsiienko !loc->wqe_free)) 416818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 416918a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_MULTI; 417018a1c200SViacheslav Ovsiienko } 417118a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_TSO) { 417218a1c200SViacheslav Ovsiienko part -= room; 417318a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 417418a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 417518a1c200SViacheslav Ovsiienko !loc->wqe_free)) 417618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 417718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_TSO; 417818a1c200SViacheslav Ovsiienko } 417918a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) { 418018a1c200SViacheslav Ovsiienko part -= room; 418118a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 418218a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 418318a1c200SViacheslav Ovsiienko !loc->wqe_free)) 418418a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 418518a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_SINGLE; 418618a1c200SViacheslav Ovsiienko } 418718a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_EMPW) { 418818a1c200SViacheslav Ovsiienko assert(false); 418918a1c200SViacheslav Ovsiienko part -= room; 419018a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 419118a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 419218a1c200SViacheslav Ovsiienko } 41935a93e173SViacheslav Ovsiienko /* Check if we have minimal room left. */ 41945a93e173SViacheslav Ovsiienko nlim--; 41955a93e173SViacheslav Ovsiienko if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE)) 41965a93e173SViacheslav Ovsiienko break; 419718a1c200SViacheslav Ovsiienko /* 419818a1c200SViacheslav Ovsiienko * Check whether packet parameters coincide 419918a1c200SViacheslav Ovsiienko * within assumed eMPW batch: 420018a1c200SViacheslav Ovsiienko * - check sum settings 420118a1c200SViacheslav Ovsiienko * - metadata value 420218a1c200SViacheslav Ovsiienko * - software parser settings 420318a1c200SViacheslav Ovsiienko */ 420418a1c200SViacheslav Ovsiienko if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) 420518a1c200SViacheslav Ovsiienko break; 420618a1c200SViacheslav Ovsiienko /* Packet attributes match, continue the same eMPW. */ 420718a1c200SViacheslav Ovsiienko if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) 420818a1c200SViacheslav Ovsiienko dseg = (struct mlx5_wqe_dseg *)txq->wqes; 420918a1c200SViacheslav Ovsiienko } 421018a1c200SViacheslav Ovsiienko /* 421118a1c200SViacheslav Ovsiienko * We get here to close an existing eMPW 421218a1c200SViacheslav Ovsiienko * session and start the new one. 421318a1c200SViacheslav Ovsiienko */ 421418a1c200SViacheslav Ovsiienko assert(pkts_n); 421518a1c200SViacheslav Ovsiienko part -= room; 421618a1c200SViacheslav Ovsiienko if (unlikely(!part)) 421718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 421818a1c200SViacheslav Ovsiienko mlx5_tx_idone_empw(txq, loc, part, slen, olx); 421918a1c200SViacheslav Ovsiienko if (unlikely(!loc->elts_free || 422018a1c200SViacheslav Ovsiienko !loc->wqe_free)) 422118a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 42225a93e173SViacheslav Ovsiienko /* Continue the loop with new eMPW session. */ 422318a1c200SViacheslav Ovsiienko } 422418a1c200SViacheslav Ovsiienko assert(false); 422518a1c200SViacheslav Ovsiienko } 422618a1c200SViacheslav Ovsiienko 422718a1c200SViacheslav Ovsiienko /** 422818a1c200SViacheslav Ovsiienko * The routine sends packets with ordinary MLX5_OPCODE_SEND. 422918a1c200SViacheslav Ovsiienko * Data inlining and VLAN insertion are supported. 423018a1c200SViacheslav Ovsiienko */ 423118a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 423218a1c200SViacheslav Ovsiienko mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, 423318a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 423418a1c200SViacheslav Ovsiienko unsigned int pkts_n, 423518a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 423618a1c200SViacheslav Ovsiienko unsigned int olx) 423718a1c200SViacheslav Ovsiienko { 423818a1c200SViacheslav Ovsiienko /* 423918a1c200SViacheslav Ovsiienko * Subroutine is the part of mlx5_tx_burst_single() 424018a1c200SViacheslav Ovsiienko * and sends single-segment packet with SEND opcode. 424118a1c200SViacheslav Ovsiienko */ 424218a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 424318a1c200SViacheslav Ovsiienko assert(pkts_n > loc->pkts_sent); 424418a1c200SViacheslav Ovsiienko pkts += loc->pkts_sent + 1; 424518a1c200SViacheslav Ovsiienko pkts_n -= loc->pkts_sent; 424618a1c200SViacheslav Ovsiienko for (;;) { 424718a1c200SViacheslav Ovsiienko struct mlx5_wqe *restrict wqe; 424818a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 424918a1c200SViacheslav Ovsiienko 425018a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc->mbuf) == 1); 425118a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(INLINE)) { 425218a1c200SViacheslav Ovsiienko unsigned int inlen, vlan = 0; 425318a1c200SViacheslav Ovsiienko 425418a1c200SViacheslav Ovsiienko inlen = rte_pktmbuf_data_len(loc->mbuf); 425518a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 425618a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { 425718a1c200SViacheslav Ovsiienko vlan = sizeof(struct rte_vlan_hdr); 425818a1c200SViacheslav Ovsiienko inlen += vlan; 425918a1c200SViacheslav Ovsiienko static_assert((sizeof(struct rte_vlan_hdr) + 426018a1c200SViacheslav Ovsiienko sizeof(struct rte_ether_hdr)) == 426118a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE, 426218a1c200SViacheslav Ovsiienko "invalid min inline data size"); 426318a1c200SViacheslav Ovsiienko } 426418a1c200SViacheslav Ovsiienko /* 426518a1c200SViacheslav Ovsiienko * If inlining is enabled at configuration time 426618a1c200SViacheslav Ovsiienko * the limit must be not less than minimal size. 426718a1c200SViacheslav Ovsiienko * Otherwise we would do extra check for data 426818a1c200SViacheslav Ovsiienko * size to avoid crashes due to length overflow. 426918a1c200SViacheslav Ovsiienko */ 427018a1c200SViacheslav Ovsiienko assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); 427118a1c200SViacheslav Ovsiienko if (inlen <= txq->inlen_send) { 427218a1c200SViacheslav Ovsiienko unsigned int seg_n, wqe_n; 427318a1c200SViacheslav Ovsiienko 427418a1c200SViacheslav Ovsiienko rte_prefetch0(rte_pktmbuf_mtod 427518a1c200SViacheslav Ovsiienko (loc->mbuf, uint8_t *)); 427618a1c200SViacheslav Ovsiienko /* Check against minimal length. */ 427718a1c200SViacheslav Ovsiienko if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE) 427818a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_ERROR; 427918a1c200SViacheslav Ovsiienko /* 428018a1c200SViacheslav Ovsiienko * Completely inlined packet data WQE: 428118a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 428218a1c200SViacheslav Ovsiienko * - Ethernet Segment, no VLAN insertion 428318a1c200SViacheslav Ovsiienko * - Data inlined, VLAN optionally inserted 428418a1c200SViacheslav Ovsiienko * - Alignment to MLX5_WSEG_SIZE 428518a1c200SViacheslav Ovsiienko * Have to estimate amount of WQEBBs 428618a1c200SViacheslav Ovsiienko */ 428718a1c200SViacheslav Ovsiienko seg_n = (inlen + 3 * MLX5_WSEG_SIZE - 428818a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE + 428918a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 429018a1c200SViacheslav Ovsiienko /* Check if there are enough WQEBBs. */ 429118a1c200SViacheslav Ovsiienko wqe_n = (seg_n + 3) / 4; 429218a1c200SViacheslav Ovsiienko if (wqe_n > loc->wqe_free) 429318a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 429418a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 429518a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 429618a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, seg_n, 429718a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 429818a1c200SViacheslav Ovsiienko mlx5_tx_eseg_data(txq, loc, wqe, 429918a1c200SViacheslav Ovsiienko vlan, inlen, 0, olx); 430018a1c200SViacheslav Ovsiienko txq->wqe_ci += wqe_n; 430118a1c200SViacheslav Ovsiienko loc->wqe_free -= wqe_n; 430218a1c200SViacheslav Ovsiienko /* 430318a1c200SViacheslav Ovsiienko * Packet data are completely inlined, 430418a1c200SViacheslav Ovsiienko * free the packet immediately. 430518a1c200SViacheslav Ovsiienko */ 430618a1c200SViacheslav Ovsiienko rte_pktmbuf_free_seg(loc->mbuf); 430718a1c200SViacheslav Ovsiienko } else if (!MLX5_TXOFF_CONFIG(EMPW) && 430818a1c200SViacheslav Ovsiienko txq->inlen_mode) { 430918a1c200SViacheslav Ovsiienko /* 431018a1c200SViacheslav Ovsiienko * If minimal inlining is requested the eMPW 431118a1c200SViacheslav Ovsiienko * feature should be disabled due to data is 431218a1c200SViacheslav Ovsiienko * inlined into Ethernet Segment, which can 431318a1c200SViacheslav Ovsiienko * not contain inlined data for eMPW due to 431418a1c200SViacheslav Ovsiienko * segment shared for all packets. 431518a1c200SViacheslav Ovsiienko */ 431618a1c200SViacheslav Ovsiienko struct mlx5_wqe_dseg *restrict dseg; 431718a1c200SViacheslav Ovsiienko unsigned int ds; 431818a1c200SViacheslav Ovsiienko uint8_t *dptr; 431918a1c200SViacheslav Ovsiienko 432018a1c200SViacheslav Ovsiienko /* 432118a1c200SViacheslav Ovsiienko * The inline-mode settings require 432218a1c200SViacheslav Ovsiienko * to inline the specified amount of 432318a1c200SViacheslav Ovsiienko * data bytes to the Ethernet Segment. 432418a1c200SViacheslav Ovsiienko * We should check the free space in 432518a1c200SViacheslav Ovsiienko * WQE ring buffer to inline partially. 432618a1c200SViacheslav Ovsiienko */ 432718a1c200SViacheslav Ovsiienko assert(txq->inlen_send >= txq->inlen_mode); 432818a1c200SViacheslav Ovsiienko assert(inlen > txq->inlen_mode); 432918a1c200SViacheslav Ovsiienko assert(txq->inlen_mode >= 433018a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE); 433118a1c200SViacheslav Ovsiienko /* 433218a1c200SViacheslav Ovsiienko * Check whether there are enough free WQEBBs: 433318a1c200SViacheslav Ovsiienko * - Control Segment 433418a1c200SViacheslav Ovsiienko * - Ethernet Segment 433518a1c200SViacheslav Ovsiienko * - First Segment of inlined Ethernet data 433618a1c200SViacheslav Ovsiienko * - ... data continued ... 433718a1c200SViacheslav Ovsiienko * - Finishing Data Segment of pointer type 433818a1c200SViacheslav Ovsiienko */ 433918a1c200SViacheslav Ovsiienko ds = (MLX5_WQE_CSEG_SIZE + 434018a1c200SViacheslav Ovsiienko MLX5_WQE_ESEG_SIZE + 434118a1c200SViacheslav Ovsiienko MLX5_WQE_DSEG_SIZE + 434218a1c200SViacheslav Ovsiienko txq->inlen_mode - 434318a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE + 434418a1c200SViacheslav Ovsiienko MLX5_WQE_DSEG_SIZE + 434518a1c200SViacheslav Ovsiienko MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; 434618a1c200SViacheslav Ovsiienko if (loc->wqe_free < ((ds + 3) / 4)) 434718a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 434818a1c200SViacheslav Ovsiienko /* 434918a1c200SViacheslav Ovsiienko * Build the ordinary SEND WQE: 435018a1c200SViacheslav Ovsiienko * - Control Segment 435118a1c200SViacheslav Ovsiienko * - Ethernet Segment, inline inlen_mode bytes 435218a1c200SViacheslav Ovsiienko * - Data Segment of pointer type 435318a1c200SViacheslav Ovsiienko */ 435418a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 435518a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 435618a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, ds, 435718a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 435818a1c200SViacheslav Ovsiienko dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, 435918a1c200SViacheslav Ovsiienko txq->inlen_mode, 436018a1c200SViacheslav Ovsiienko 0, olx); 436118a1c200SViacheslav Ovsiienko dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + 436218a1c200SViacheslav Ovsiienko txq->inlen_mode - vlan; 436318a1c200SViacheslav Ovsiienko inlen -= txq->inlen_mode; 436418a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, dseg, 436518a1c200SViacheslav Ovsiienko dptr, inlen, olx); 436618a1c200SViacheslav Ovsiienko /* 436718a1c200SViacheslav Ovsiienko * WQE is built, update the loop parameters 436818a1c200SViacheslav Ovsiienko * and got to the next packet. 436918a1c200SViacheslav Ovsiienko */ 437018a1c200SViacheslav Ovsiienko txq->wqe_ci += (ds + 3) / 4; 437118a1c200SViacheslav Ovsiienko loc->wqe_free -= (ds + 3) / 4; 437218a1c200SViacheslav Ovsiienko /* We have to store mbuf in elts.*/ 437318a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 437418a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = 437518a1c200SViacheslav Ovsiienko loc->mbuf; 437618a1c200SViacheslav Ovsiienko --loc->elts_free; 437718a1c200SViacheslav Ovsiienko } else { 437818a1c200SViacheslav Ovsiienko uint8_t *dptr; 437918a1c200SViacheslav Ovsiienko unsigned int dlen; 438018a1c200SViacheslav Ovsiienko 438118a1c200SViacheslav Ovsiienko /* 438218a1c200SViacheslav Ovsiienko * Partially inlined packet data WQE, we have 438318a1c200SViacheslav Ovsiienko * some space in title WQEBB, we can fill it 438418a1c200SViacheslav Ovsiienko * with some packet data. It takes one WQEBB, 438518a1c200SViacheslav Ovsiienko * it is available, no extra space check: 438618a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 438718a1c200SViacheslav Ovsiienko * - Ethernet Segment, no VLAN insertion 438818a1c200SViacheslav Ovsiienko * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data 438918a1c200SViacheslav Ovsiienko * - Data Segment, pointer type 439018a1c200SViacheslav Ovsiienko * 439118a1c200SViacheslav Ovsiienko * We also get here if VLAN insertion is not 439218a1c200SViacheslav Ovsiienko * supported by HW, the inline is enabled. 439318a1c200SViacheslav Ovsiienko */ 439418a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 439518a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 439618a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, 4, 439718a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 439818a1c200SViacheslav Ovsiienko mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx); 439918a1c200SViacheslav Ovsiienko dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + 440018a1c200SViacheslav Ovsiienko MLX5_ESEG_MIN_INLINE_SIZE - vlan; 440118a1c200SViacheslav Ovsiienko /* 440218a1c200SViacheslav Ovsiienko * The length check is performed above, by 440318a1c200SViacheslav Ovsiienko * comparing with txq->inlen_send. We should 440418a1c200SViacheslav Ovsiienko * not get overflow here. 440518a1c200SViacheslav Ovsiienko */ 440618a1c200SViacheslav Ovsiienko assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE); 440718a1c200SViacheslav Ovsiienko dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE; 440818a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1], 440918a1c200SViacheslav Ovsiienko dptr, dlen, olx); 441018a1c200SViacheslav Ovsiienko ++txq->wqe_ci; 441118a1c200SViacheslav Ovsiienko --loc->wqe_free; 441218a1c200SViacheslav Ovsiienko /* We have to store mbuf in elts.*/ 441318a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE)); 441418a1c200SViacheslav Ovsiienko txq->elts[txq->elts_head++ & txq->elts_m] = 441518a1c200SViacheslav Ovsiienko loc->mbuf; 441618a1c200SViacheslav Ovsiienko --loc->elts_free; 441718a1c200SViacheslav Ovsiienko } 441818a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 441918a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 442018a1c200SViacheslav Ovsiienko txq->stats.obytes += vlan + 442118a1c200SViacheslav Ovsiienko rte_pktmbuf_data_len(loc->mbuf); 442218a1c200SViacheslav Ovsiienko #endif 442318a1c200SViacheslav Ovsiienko } else { 442418a1c200SViacheslav Ovsiienko /* 442518a1c200SViacheslav Ovsiienko * No inline at all, it means the CPU cycles saving 442618a1c200SViacheslav Ovsiienko * is prioritized at configuration, we should not 442718a1c200SViacheslav Ovsiienko * copy any packet data to WQE. 442818a1c200SViacheslav Ovsiienko * 442918a1c200SViacheslav Ovsiienko * SEND WQE, one WQEBB: 443018a1c200SViacheslav Ovsiienko * - Control Segment, SEND opcode 443118a1c200SViacheslav Ovsiienko * - Ethernet Segment, optional VLAN, no inline 443218a1c200SViacheslav Ovsiienko * - Data Segment, pointer type 443318a1c200SViacheslav Ovsiienko */ 443418a1c200SViacheslav Ovsiienko wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); 443518a1c200SViacheslav Ovsiienko loc->wqe_last = wqe; 443618a1c200SViacheslav Ovsiienko mlx5_tx_cseg_init(txq, loc, wqe, 3, 443718a1c200SViacheslav Ovsiienko MLX5_OPCODE_SEND, olx); 443818a1c200SViacheslav Ovsiienko mlx5_tx_eseg_none(txq, loc, wqe, olx); 443918a1c200SViacheslav Ovsiienko mlx5_tx_dseg_ptr 444018a1c200SViacheslav Ovsiienko (txq, loc, &wqe->dseg[0], 444118a1c200SViacheslav Ovsiienko rte_pktmbuf_mtod(loc->mbuf, uint8_t *), 444218a1c200SViacheslav Ovsiienko rte_pktmbuf_data_len(loc->mbuf), olx); 444318a1c200SViacheslav Ovsiienko ++txq->wqe_ci; 444418a1c200SViacheslav Ovsiienko --loc->wqe_free; 444518a1c200SViacheslav Ovsiienko /* 444618a1c200SViacheslav Ovsiienko * We should not store mbuf pointer in elts 444718a1c200SViacheslav Ovsiienko * if no inlining is configured, this is done 444818a1c200SViacheslav Ovsiienko * by calling routine in a batch copy. 444918a1c200SViacheslav Ovsiienko */ 445018a1c200SViacheslav Ovsiienko assert(!MLX5_TXOFF_CONFIG(INLINE)); 445118a1c200SViacheslav Ovsiienko --loc->elts_free; 445218a1c200SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 445318a1c200SViacheslav Ovsiienko /* Update sent data bytes counter. */ 445418a1c200SViacheslav Ovsiienko txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf); 445518a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(VLAN) && 445618a1c200SViacheslav Ovsiienko loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) 445718a1c200SViacheslav Ovsiienko txq->stats.obytes += 445818a1c200SViacheslav Ovsiienko sizeof(struct rte_vlan_hdr); 445918a1c200SViacheslav Ovsiienko #endif 446018a1c200SViacheslav Ovsiienko } 446118a1c200SViacheslav Ovsiienko ++loc->pkts_sent; 446218a1c200SViacheslav Ovsiienko --pkts_n; 44635a93e173SViacheslav Ovsiienko /* Request CQE generation if limits are reached. */ 44644dec9c79SViacheslav Ovsiienko mlx5_tx_request_completion(txq, loc, false, olx); 446518a1c200SViacheslav Ovsiienko if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) 446618a1c200SViacheslav Ovsiienko return MLX5_TXCMP_CODE_EXIT; 446718a1c200SViacheslav Ovsiienko loc->mbuf = *pkts++; 446818a1c200SViacheslav Ovsiienko if (pkts_n > 1) 446918a1c200SViacheslav Ovsiienko rte_prefetch0(*pkts); 447018a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, true); 447118a1c200SViacheslav Ovsiienko if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE)) 447218a1c200SViacheslav Ovsiienko return ret; 447318a1c200SViacheslav Ovsiienko } 447418a1c200SViacheslav Ovsiienko assert(false); 447518a1c200SViacheslav Ovsiienko } 447618a1c200SViacheslav Ovsiienko 447718a1c200SViacheslav Ovsiienko static __rte_always_inline enum mlx5_txcmp_code 447818a1c200SViacheslav Ovsiienko mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq, 447918a1c200SViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 448018a1c200SViacheslav Ovsiienko unsigned int pkts_n, 448118a1c200SViacheslav Ovsiienko struct mlx5_txq_local *restrict loc, 448218a1c200SViacheslav Ovsiienko unsigned int olx) 448318a1c200SViacheslav Ovsiienko { 448418a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 448518a1c200SViacheslav Ovsiienko 448618a1c200SViacheslav Ovsiienko ret = mlx5_tx_able_to_empw(txq, loc, olx, false); 448718a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) 448818a1c200SViacheslav Ovsiienko goto ordinary_send; 448918a1c200SViacheslav Ovsiienko assert(ret == MLX5_TXCMP_CODE_EMPW); 449018a1c200SViacheslav Ovsiienko for (;;) { 449118a1c200SViacheslav Ovsiienko /* Optimize for inline/no inline eMPW send. */ 449218a1c200SViacheslav Ovsiienko ret = (MLX5_TXOFF_CONFIG(INLINE)) ? 449318a1c200SViacheslav Ovsiienko mlx5_tx_burst_empw_inline 449418a1c200SViacheslav Ovsiienko (txq, pkts, pkts_n, loc, olx) : 449518a1c200SViacheslav Ovsiienko mlx5_tx_burst_empw_simple 449618a1c200SViacheslav Ovsiienko (txq, pkts, pkts_n, loc, olx); 449718a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_SINGLE) 449818a1c200SViacheslav Ovsiienko return ret; 449918a1c200SViacheslav Ovsiienko /* The resources to send one packet should remain. */ 450018a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 450118a1c200SViacheslav Ovsiienko ordinary_send: 450218a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx); 450318a1c200SViacheslav Ovsiienko assert(ret != MLX5_TXCMP_CODE_SINGLE); 450418a1c200SViacheslav Ovsiienko if (ret != MLX5_TXCMP_CODE_EMPW) 450518a1c200SViacheslav Ovsiienko return ret; 450618a1c200SViacheslav Ovsiienko /* The resources to send one packet should remain. */ 450718a1c200SViacheslav Ovsiienko assert(loc->elts_free && loc->wqe_free); 450818a1c200SViacheslav Ovsiienko } 4509a6bd4911SViacheslav Ovsiienko } 4510a6bd4911SViacheslav Ovsiienko 4511a6bd4911SViacheslav Ovsiienko /** 4512eb8121abSViacheslav Ovsiienko * DPDK Tx callback template. This is configured template 4513eb8121abSViacheslav Ovsiienko * used to generate routines optimized for specified offload setup. 4514eb8121abSViacheslav Ovsiienko * One of this generated functions is chosen at SQ configuration 4515eb8121abSViacheslav Ovsiienko * time. 4516eb8121abSViacheslav Ovsiienko * 4517eb8121abSViacheslav Ovsiienko * @param txq 4518eb8121abSViacheslav Ovsiienko * Generic pointer to TX queue structure. 4519eb8121abSViacheslav Ovsiienko * @param[in] pkts 4520eb8121abSViacheslav Ovsiienko * Packets to transmit. 4521eb8121abSViacheslav Ovsiienko * @param pkts_n 4522eb8121abSViacheslav Ovsiienko * Number of packets in array. 4523eb8121abSViacheslav Ovsiienko * @param olx 4524eb8121abSViacheslav Ovsiienko * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx 4525eb8121abSViacheslav Ovsiienko * values. Should be static to take compile time static configuration 4526eb8121abSViacheslav Ovsiienko * advantages. 4527eb8121abSViacheslav Ovsiienko * 4528eb8121abSViacheslav Ovsiienko * @return 4529eb8121abSViacheslav Ovsiienko * Number of packets successfully transmitted (<= pkts_n). 4530eb8121abSViacheslav Ovsiienko */ 4531eb8121abSViacheslav Ovsiienko static __rte_always_inline uint16_t 4532eb8121abSViacheslav Ovsiienko mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, 4533eb8121abSViacheslav Ovsiienko struct rte_mbuf **restrict pkts, 4534eb8121abSViacheslav Ovsiienko uint16_t pkts_n, 4535eb8121abSViacheslav Ovsiienko unsigned int olx) 4536eb8121abSViacheslav Ovsiienko { 453718a1c200SViacheslav Ovsiienko struct mlx5_txq_local loc; 453818a1c200SViacheslav Ovsiienko enum mlx5_txcmp_code ret; 453918a1c200SViacheslav Ovsiienko unsigned int part; 454018a1c200SViacheslav Ovsiienko 454118a1c200SViacheslav Ovsiienko assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); 454218a1c200SViacheslav Ovsiienko assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); 4543f32a3f52SViacheslav Ovsiienko if (unlikely(!pkts_n)) 4544f32a3f52SViacheslav Ovsiienko return 0; 4545f32a3f52SViacheslav Ovsiienko loc.pkts_sent = 0; 4546f32a3f52SViacheslav Ovsiienko loc.pkts_copy = 0; 4547f32a3f52SViacheslav Ovsiienko loc.wqe_last = NULL; 4548f32a3f52SViacheslav Ovsiienko 4549f32a3f52SViacheslav Ovsiienko send_loop: 4550f32a3f52SViacheslav Ovsiienko loc.pkts_loop = loc.pkts_sent; 455118a1c200SViacheslav Ovsiienko /* 455218a1c200SViacheslav Ovsiienko * Check if there are some CQEs, if any: 455318a1c200SViacheslav Ovsiienko * - process an encountered errors 455418a1c200SViacheslav Ovsiienko * - process the completed WQEs 455518a1c200SViacheslav Ovsiienko * - free related mbufs 455618a1c200SViacheslav Ovsiienko * - doorbell the NIC about processed CQEs 455718a1c200SViacheslav Ovsiienko */ 4558f32a3f52SViacheslav Ovsiienko rte_prefetch0(*(pkts + loc.pkts_sent)); 455918a1c200SViacheslav Ovsiienko mlx5_tx_handle_completion(txq, olx); 456018a1c200SViacheslav Ovsiienko /* 456118a1c200SViacheslav Ovsiienko * Calculate the number of available resources - elts and WQEs. 456218a1c200SViacheslav Ovsiienko * There are two possible different scenarios: 456318a1c200SViacheslav Ovsiienko * - no data inlining into WQEs, one WQEBB may contains upto 456418a1c200SViacheslav Ovsiienko * four packets, in this case elts become scarce resource 456518a1c200SViacheslav Ovsiienko * - data inlining into WQEs, one packet may require multiple 456618a1c200SViacheslav Ovsiienko * WQEBBs, the WQEs become the limiting factor. 456718a1c200SViacheslav Ovsiienko */ 456818a1c200SViacheslav Ovsiienko assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); 456918a1c200SViacheslav Ovsiienko loc.elts_free = txq->elts_s - 457018a1c200SViacheslav Ovsiienko (uint16_t)(txq->elts_head - txq->elts_tail); 457118a1c200SViacheslav Ovsiienko assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); 457218a1c200SViacheslav Ovsiienko loc.wqe_free = txq->wqe_s - 457318a1c200SViacheslav Ovsiienko (uint16_t)(txq->wqe_ci - txq->wqe_pi); 457418a1c200SViacheslav Ovsiienko if (unlikely(!loc.elts_free || !loc.wqe_free)) 457585125863SViacheslav Ovsiienko goto burst_exit; 457618a1c200SViacheslav Ovsiienko for (;;) { 457718a1c200SViacheslav Ovsiienko /* 457818a1c200SViacheslav Ovsiienko * Fetch the packet from array. Usually this is 457918a1c200SViacheslav Ovsiienko * the first packet in series of multi/single 458018a1c200SViacheslav Ovsiienko * segment packets. 458118a1c200SViacheslav Ovsiienko */ 458218a1c200SViacheslav Ovsiienko loc.mbuf = *(pkts + loc.pkts_sent); 458318a1c200SViacheslav Ovsiienko /* Dedicated branch for multi-segment packets. */ 458418a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 458518a1c200SViacheslav Ovsiienko unlikely(NB_SEGS(loc.mbuf) > 1)) { 458618a1c200SViacheslav Ovsiienko /* 458718a1c200SViacheslav Ovsiienko * Multi-segment packet encountered. 458818a1c200SViacheslav Ovsiienko * Hardware is able to process it only 458918a1c200SViacheslav Ovsiienko * with SEND/TSO opcodes, one packet 459018a1c200SViacheslav Ovsiienko * per WQE, do it in dedicated routine. 459118a1c200SViacheslav Ovsiienko */ 459218a1c200SViacheslav Ovsiienko enter_send_multi: 459318a1c200SViacheslav Ovsiienko assert(loc.pkts_sent >= loc.pkts_copy); 459418a1c200SViacheslav Ovsiienko part = loc.pkts_sent - loc.pkts_copy; 459518a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(INLINE) && part) { 459618a1c200SViacheslav Ovsiienko /* 459718a1c200SViacheslav Ovsiienko * There are some single-segment mbufs not 459818a1c200SViacheslav Ovsiienko * stored in elts. The mbufs must be in the 459918a1c200SViacheslav Ovsiienko * same order as WQEs, so we must copy the 460018a1c200SViacheslav Ovsiienko * mbufs to elts here, before the coming 460118a1c200SViacheslav Ovsiienko * multi-segment packet mbufs is appended. 460218a1c200SViacheslav Ovsiienko */ 460318a1c200SViacheslav Ovsiienko mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, 460418a1c200SViacheslav Ovsiienko part, olx); 460518a1c200SViacheslav Ovsiienko loc.pkts_copy = loc.pkts_sent; 460618a1c200SViacheslav Ovsiienko } 460718a1c200SViacheslav Ovsiienko assert(pkts_n > loc.pkts_sent); 460818a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx); 460918a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(INLINE)) 461018a1c200SViacheslav Ovsiienko loc.pkts_copy = loc.pkts_sent; 461118a1c200SViacheslav Ovsiienko /* 461218a1c200SViacheslav Ovsiienko * These returned code checks are supposed 461318a1c200SViacheslav Ovsiienko * to be optimized out due to routine inlining. 461418a1c200SViacheslav Ovsiienko */ 461518a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) { 461618a1c200SViacheslav Ovsiienko /* 461718a1c200SViacheslav Ovsiienko * The routine returns this code when 461818a1c200SViacheslav Ovsiienko * all packets are sent or there is no 461918a1c200SViacheslav Ovsiienko * enough resources to complete request. 462018a1c200SViacheslav Ovsiienko */ 462118a1c200SViacheslav Ovsiienko break; 462218a1c200SViacheslav Ovsiienko } 462318a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) { 462418a1c200SViacheslav Ovsiienko /* 462518a1c200SViacheslav Ovsiienko * The routine returns this code when 462618a1c200SViacheslav Ovsiienko * some error in the incoming packets 462718a1c200SViacheslav Ovsiienko * format occurred. 462818a1c200SViacheslav Ovsiienko */ 462918a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 463018a1c200SViacheslav Ovsiienko break; 463118a1c200SViacheslav Ovsiienko } 463218a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) { 463318a1c200SViacheslav Ovsiienko /* 463418a1c200SViacheslav Ovsiienko * The single-segment packet was encountered 463518a1c200SViacheslav Ovsiienko * in the array, try to send it with the 463618a1c200SViacheslav Ovsiienko * best optimized way, possible engaging eMPW. 463718a1c200SViacheslav Ovsiienko */ 463818a1c200SViacheslav Ovsiienko goto enter_send_single; 463918a1c200SViacheslav Ovsiienko } 464018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 464118a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_TSO) { 464218a1c200SViacheslav Ovsiienko /* 464318a1c200SViacheslav Ovsiienko * The single-segment TSO packet was 464418a1c200SViacheslav Ovsiienko * encountered in the array. 464518a1c200SViacheslav Ovsiienko */ 464618a1c200SViacheslav Ovsiienko goto enter_send_tso; 464718a1c200SViacheslav Ovsiienko } 464818a1c200SViacheslav Ovsiienko /* We must not get here. Something is going wrong. */ 464918a1c200SViacheslav Ovsiienko assert(false); 465018a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 465118a1c200SViacheslav Ovsiienko break; 465218a1c200SViacheslav Ovsiienko } 465318a1c200SViacheslav Ovsiienko /* Dedicated branch for single-segment TSO packets. */ 465418a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 465518a1c200SViacheslav Ovsiienko unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) { 465618a1c200SViacheslav Ovsiienko /* 465718a1c200SViacheslav Ovsiienko * TSO might require special way for inlining 465818a1c200SViacheslav Ovsiienko * (dedicated parameters) and is sent with 465918a1c200SViacheslav Ovsiienko * MLX5_OPCODE_TSO opcode only, provide this 466018a1c200SViacheslav Ovsiienko * in dedicated branch. 466118a1c200SViacheslav Ovsiienko */ 466218a1c200SViacheslav Ovsiienko enter_send_tso: 466318a1c200SViacheslav Ovsiienko assert(NB_SEGS(loc.mbuf) == 1); 466418a1c200SViacheslav Ovsiienko assert(pkts_n > loc.pkts_sent); 466518a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx); 466618a1c200SViacheslav Ovsiienko /* 466718a1c200SViacheslav Ovsiienko * These returned code checks are supposed 466818a1c200SViacheslav Ovsiienko * to be optimized out due to routine inlining. 466918a1c200SViacheslav Ovsiienko */ 467018a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) 467118a1c200SViacheslav Ovsiienko break; 467218a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) { 467318a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 467418a1c200SViacheslav Ovsiienko break; 467518a1c200SViacheslav Ovsiienko } 467618a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_SINGLE) 467718a1c200SViacheslav Ovsiienko goto enter_send_single; 467818a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 467918a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_MULTI) { 468018a1c200SViacheslav Ovsiienko /* 468118a1c200SViacheslav Ovsiienko * The multi-segment packet was 468218a1c200SViacheslav Ovsiienko * encountered in the array. 468318a1c200SViacheslav Ovsiienko */ 468418a1c200SViacheslav Ovsiienko goto enter_send_multi; 468518a1c200SViacheslav Ovsiienko } 468618a1c200SViacheslav Ovsiienko /* We must not get here. Something is going wrong. */ 468718a1c200SViacheslav Ovsiienko assert(false); 468818a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 468918a1c200SViacheslav Ovsiienko break; 469018a1c200SViacheslav Ovsiienko } 469118a1c200SViacheslav Ovsiienko /* 469218a1c200SViacheslav Ovsiienko * The dedicated branch for the single-segment packets 469318a1c200SViacheslav Ovsiienko * without TSO. Often these ones can be sent using 469418a1c200SViacheslav Ovsiienko * MLX5_OPCODE_EMPW with multiple packets in one WQE. 469518a1c200SViacheslav Ovsiienko * The routine builds the WQEs till it encounters 469618a1c200SViacheslav Ovsiienko * the TSO or multi-segment packet (in case if these 469718a1c200SViacheslav Ovsiienko * offloads are requested at SQ configuration time). 469818a1c200SViacheslav Ovsiienko */ 469918a1c200SViacheslav Ovsiienko enter_send_single: 470018a1c200SViacheslav Ovsiienko assert(pkts_n > loc.pkts_sent); 470118a1c200SViacheslav Ovsiienko ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx); 470218a1c200SViacheslav Ovsiienko /* 470318a1c200SViacheslav Ovsiienko * These returned code checks are supposed 470418a1c200SViacheslav Ovsiienko * to be optimized out due to routine inlining. 470518a1c200SViacheslav Ovsiienko */ 470618a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_EXIT) 470718a1c200SViacheslav Ovsiienko break; 470818a1c200SViacheslav Ovsiienko if (ret == MLX5_TXCMP_CODE_ERROR) { 470918a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 471018a1c200SViacheslav Ovsiienko break; 471118a1c200SViacheslav Ovsiienko } 471218a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(MULTI) && 471318a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_MULTI) { 471418a1c200SViacheslav Ovsiienko /* 471518a1c200SViacheslav Ovsiienko * The multi-segment packet was 471618a1c200SViacheslav Ovsiienko * encountered in the array. 471718a1c200SViacheslav Ovsiienko */ 471818a1c200SViacheslav Ovsiienko goto enter_send_multi; 471918a1c200SViacheslav Ovsiienko } 472018a1c200SViacheslav Ovsiienko if (MLX5_TXOFF_CONFIG(TSO) && 472118a1c200SViacheslav Ovsiienko ret == MLX5_TXCMP_CODE_TSO) { 472218a1c200SViacheslav Ovsiienko /* 472318a1c200SViacheslav Ovsiienko * The single-segment TSO packet was 472418a1c200SViacheslav Ovsiienko * encountered in the array. 472518a1c200SViacheslav Ovsiienko */ 472618a1c200SViacheslav Ovsiienko goto enter_send_tso; 472718a1c200SViacheslav Ovsiienko } 472818a1c200SViacheslav Ovsiienko /* We must not get here. Something is going wrong. */ 472918a1c200SViacheslav Ovsiienko assert(false); 473018a1c200SViacheslav Ovsiienko txq->stats.oerrors++; 473118a1c200SViacheslav Ovsiienko break; 473218a1c200SViacheslav Ovsiienko } 473318a1c200SViacheslav Ovsiienko /* 473418a1c200SViacheslav Ovsiienko * Main Tx loop is completed, do the rest: 473518a1c200SViacheslav Ovsiienko * - set completion request if thresholds are reached 473618a1c200SViacheslav Ovsiienko * - doorbell the hardware 473718a1c200SViacheslav Ovsiienko * - copy the rest of mbufs to elts (if any) 473818a1c200SViacheslav Ovsiienko */ 473918a1c200SViacheslav Ovsiienko assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy); 474018a1c200SViacheslav Ovsiienko /* Take a shortcut if nothing is sent. */ 4741f32a3f52SViacheslav Ovsiienko if (unlikely(loc.pkts_sent == loc.pkts_loop)) 474285125863SViacheslav Ovsiienko goto burst_exit; 474318a1c200SViacheslav Ovsiienko /* 474418a1c200SViacheslav Ovsiienko * Ring QP doorbell immediately after WQE building completion 474518a1c200SViacheslav Ovsiienko * to improve latencies. The pure software related data treatment 474618a1c200SViacheslav Ovsiienko * can be completed after doorbell. Tx CQEs for this SQ are 474718a1c200SViacheslav Ovsiienko * processed in this thread only by the polling. 474818a1c200SViacheslav Ovsiienko */ 474918a1c200SViacheslav Ovsiienko mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0); 47505a93e173SViacheslav Ovsiienko /* Not all of the mbufs may be stored into elts yet. */ 4751f32a3f52SViacheslav Ovsiienko part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy; 475218a1c200SViacheslav Ovsiienko if (!MLX5_TXOFF_CONFIG(INLINE) && part) { 475318a1c200SViacheslav Ovsiienko /* 475418a1c200SViacheslav Ovsiienko * There are some single-segment mbufs not stored in elts. 47555a93e173SViacheslav Ovsiienko * It can be only if the last packet was single-segment. 475618a1c200SViacheslav Ovsiienko * The copying is gathered into one place due to it is 475718a1c200SViacheslav Ovsiienko * a good opportunity to optimize that with SIMD. 475818a1c200SViacheslav Ovsiienko * Unfortunately if inlining is enabled the gaps in 475918a1c200SViacheslav Ovsiienko * pointer array may happen due to early freeing of the 476018a1c200SViacheslav Ovsiienko * inlined mbufs. 476118a1c200SViacheslav Ovsiienko */ 476218a1c200SViacheslav Ovsiienko mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx); 4763f32a3f52SViacheslav Ovsiienko loc.pkts_copy = loc.pkts_sent; 476418a1c200SViacheslav Ovsiienko } 476518a1c200SViacheslav Ovsiienko assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); 476618a1c200SViacheslav Ovsiienko assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); 4767f32a3f52SViacheslav Ovsiienko if (pkts_n > loc.pkts_sent) { 4768f32a3f52SViacheslav Ovsiienko /* 4769f32a3f52SViacheslav Ovsiienko * If burst size is large there might be no enough CQE 4770f32a3f52SViacheslav Ovsiienko * fetched from completion queue and no enough resources 4771f32a3f52SViacheslav Ovsiienko * freed to send all the packets. 4772f32a3f52SViacheslav Ovsiienko */ 4773f32a3f52SViacheslav Ovsiienko goto send_loop; 4774f32a3f52SViacheslav Ovsiienko } 477585125863SViacheslav Ovsiienko burst_exit: 477685125863SViacheslav Ovsiienko #ifdef MLX5_PMD_SOFT_COUNTERS 477785125863SViacheslav Ovsiienko /* Increment sent packets counter. */ 477885125863SViacheslav Ovsiienko txq->stats.opackets += loc.pkts_sent; 477985125863SViacheslav Ovsiienko #endif 478018a1c200SViacheslav Ovsiienko return loc.pkts_sent; 4781eb8121abSViacheslav Ovsiienko } 4782eb8121abSViacheslav Ovsiienko 4783eb8121abSViacheslav Ovsiienko /* Generate routines with Enhanced Multi-Packet Write support. */ 4784eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(full_empw, 4785eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW) 4786eb8121abSViacheslav Ovsiienko 4787eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(none_empw, 4788eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW) 4789eb8121abSViacheslav Ovsiienko 4790eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(md_empw, 4791eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4792eb8121abSViacheslav Ovsiienko 4793eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mt_empw, 4794eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4795eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4796eb8121abSViacheslav Ovsiienko 4797eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtsc_empw, 4798eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4799eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4800eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4801eb8121abSViacheslav Ovsiienko 4802eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mti_empw, 4803eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4804eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4805eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4806eb8121abSViacheslav Ovsiienko 4807eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtv_empw, 4808eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4809eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4810eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4811eb8121abSViacheslav Ovsiienko 4812eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtiv_empw, 4813eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4814eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4815eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4816eb8121abSViacheslav Ovsiienko 4817eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sc_empw, 4818eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4819eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4820eb8121abSViacheslav Ovsiienko 4821eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sci_empw, 4822eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4823eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4824eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4825eb8121abSViacheslav Ovsiienko 4826eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(scv_empw, 4827eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4828eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4829eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4830eb8121abSViacheslav Ovsiienko 4831eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sciv_empw, 4832eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4833eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4834eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4835eb8121abSViacheslav Ovsiienko 4836eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(i_empw, 4837eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4838eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4839eb8121abSViacheslav Ovsiienko 4840eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(v_empw, 4841eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4842eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4843eb8121abSViacheslav Ovsiienko 4844eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(iv_empw, 4845eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4846eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4847eb8121abSViacheslav Ovsiienko 4848eb8121abSViacheslav Ovsiienko /* Generate routines without Enhanced Multi-Packet Write support. */ 4849eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(full, 4850eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_FULL) 4851eb8121abSViacheslav Ovsiienko 4852eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(none, 4853eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE) 4854eb8121abSViacheslav Ovsiienko 4855eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(md, 4856eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4857eb8121abSViacheslav Ovsiienko 4858eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mt, 4859eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4860eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4861eb8121abSViacheslav Ovsiienko 4862eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtsc, 4863eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4864eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4865eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4866eb8121abSViacheslav Ovsiienko 4867eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mti, 4868eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4869eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4870eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4871eb8121abSViacheslav Ovsiienko 4872eb8121abSViacheslav Ovsiienko 4873eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtv, 4874eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4875eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4876eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4877eb8121abSViacheslav Ovsiienko 4878eb8121abSViacheslav Ovsiienko 4879eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(mtiv, 4880eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4881eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4882eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4883eb8121abSViacheslav Ovsiienko 4884eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sc, 4885eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4886eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4887eb8121abSViacheslav Ovsiienko 4888eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sci, 4889eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4890eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4891eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4892eb8121abSViacheslav Ovsiienko 4893eb8121abSViacheslav Ovsiienko 4894eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(scv, 4895eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4896eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4897eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4898eb8121abSViacheslav Ovsiienko 4899eb8121abSViacheslav Ovsiienko 4900eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(sciv, 4901eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4902eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4903eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4904eb8121abSViacheslav Ovsiienko 4905eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(i, 4906eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4907eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4908eb8121abSViacheslav Ovsiienko 4909eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(v, 4910eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4911eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4912eb8121abSViacheslav Ovsiienko 4913eb8121abSViacheslav Ovsiienko MLX5_TXOFF_DECL(iv, 4914eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4915eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4916eb8121abSViacheslav Ovsiienko 4917eb8121abSViacheslav Ovsiienko /* 4918eb8121abSViacheslav Ovsiienko * Array of declared and compiled Tx burst function and corresponding 4919eb8121abSViacheslav Ovsiienko * supported offloads set. The array is used to select the Tx burst 4920eb8121abSViacheslav Ovsiienko * function for specified offloads set at Tx queue configuration time. 4921eb8121abSViacheslav Ovsiienko */ 4922eb8121abSViacheslav Ovsiienko const struct { 4923eb8121abSViacheslav Ovsiienko eth_tx_burst_t func; 4924eb8121abSViacheslav Ovsiienko unsigned int olx; 4925eb8121abSViacheslav Ovsiienko } txoff_func[] = { 4926eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(full_empw, 4927eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4928eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4929eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4930eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4931eb8121abSViacheslav Ovsiienko 4932eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(none_empw, 4933eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW) 4934eb8121abSViacheslav Ovsiienko 4935eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(md_empw, 4936eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4937eb8121abSViacheslav Ovsiienko 4938eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mt_empw, 4939eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4940eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4941eb8121abSViacheslav Ovsiienko 4942eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtsc_empw, 4943eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4944eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4945eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4946eb8121abSViacheslav Ovsiienko 4947eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mti_empw, 4948eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4949eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4950eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4951eb8121abSViacheslav Ovsiienko 4952eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtv_empw, 4953eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4954eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4955eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4956eb8121abSViacheslav Ovsiienko 4957eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtiv_empw, 4958eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4959eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4960eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4961eb8121abSViacheslav Ovsiienko 4962eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sc_empw, 4963eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4964eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4965eb8121abSViacheslav Ovsiienko 4966eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sci_empw, 4967eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4968eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4969eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4970eb8121abSViacheslav Ovsiienko 4971eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(scv_empw, 4972eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4973eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4974eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4975eb8121abSViacheslav Ovsiienko 4976eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sciv_empw, 4977eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4978eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4979eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4980eb8121abSViacheslav Ovsiienko 4981eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(i_empw, 4982eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 4983eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4984eb8121abSViacheslav Ovsiienko 4985eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(v_empw, 4986eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 4987eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4988eb8121abSViacheslav Ovsiienko 4989eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(iv_empw, 4990eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4991eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) 4992eb8121abSViacheslav Ovsiienko 4993eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(full, 4994eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 4995eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 4996eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 4997eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 4998eb8121abSViacheslav Ovsiienko 4999eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(none, 5000eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_NONE) 5001eb8121abSViacheslav Ovsiienko 5002eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(md, 5003eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5004eb8121abSViacheslav Ovsiienko 5005eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mt, 5006eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 5007eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5008eb8121abSViacheslav Ovsiienko 5009eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtsc, 5010eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 5011eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 5012eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5013eb8121abSViacheslav Ovsiienko 5014eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mti, 5015eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 5016eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 5017eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5018eb8121abSViacheslav Ovsiienko 5019eb8121abSViacheslav Ovsiienko 5020eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtv, 5021eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 5022eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 5023eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5024eb8121abSViacheslav Ovsiienko 5025eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(mtiv, 5026eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | 5027eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 5028eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5029eb8121abSViacheslav Ovsiienko 5030eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sc, 5031eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 5032eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5033eb8121abSViacheslav Ovsiienko 5034eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sci, 5035eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 5036eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 5037eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5038eb8121abSViacheslav Ovsiienko 5039eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(scv, 5040eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 5041eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 5042eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5043eb8121abSViacheslav Ovsiienko 5044eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(sciv, 5045eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | 5046eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 5047eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5048eb8121abSViacheslav Ovsiienko 5049eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(i, 5050eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | 5051eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5052eb8121abSViacheslav Ovsiienko 5053eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(v, 5054eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_VLAN | 5055eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5056eb8121abSViacheslav Ovsiienko 5057eb8121abSViacheslav Ovsiienko MLX5_TXOFF_INFO(iv, 5058eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | 5059eb8121abSViacheslav Ovsiienko MLX5_TXOFF_CONFIG_METADATA) 5060eb8121abSViacheslav Ovsiienko }; 5061eb8121abSViacheslav Ovsiienko 5062eb8121abSViacheslav Ovsiienko /** 5063eb8121abSViacheslav Ovsiienko * Configure the Tx function to use. The routine checks configured 5064eb8121abSViacheslav Ovsiienko * Tx offloads for the device and selects appropriate Tx burst 5065eb8121abSViacheslav Ovsiienko * routine. There are multiple Tx burst routines compiled from 5066eb8121abSViacheslav Ovsiienko * the same template in the most optimal way for the dedicated 5067eb8121abSViacheslav Ovsiienko * Tx offloads set. 5068a6bd4911SViacheslav Ovsiienko * 5069a6bd4911SViacheslav Ovsiienko * @param dev 5070a6bd4911SViacheslav Ovsiienko * Pointer to private data structure. 5071a6bd4911SViacheslav Ovsiienko * 5072a6bd4911SViacheslav Ovsiienko * @return 5073a6bd4911SViacheslav Ovsiienko * Pointer to selected Tx burst function. 5074a6bd4911SViacheslav Ovsiienko */ 5075a6bd4911SViacheslav Ovsiienko eth_tx_burst_t 5076a6bd4911SViacheslav Ovsiienko mlx5_select_tx_function(struct rte_eth_dev *dev) 5077a6bd4911SViacheslav Ovsiienko { 5078eb8121abSViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private; 5079eb8121abSViacheslav Ovsiienko struct mlx5_dev_config *config = &priv->config; 5080eb8121abSViacheslav Ovsiienko uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; 5081eb8121abSViacheslav Ovsiienko unsigned int diff = 0, olx = 0, i, m; 5082eb8121abSViacheslav Ovsiienko 5083eb8121abSViacheslav Ovsiienko static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <= 5084eb8121abSViacheslav Ovsiienko MLX5_DSEG_MAX, "invalid WQE max size"); 5085eb8121abSViacheslav Ovsiienko static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE, 5086eb8121abSViacheslav Ovsiienko "invalid WQE Control Segment size"); 5087eb8121abSViacheslav Ovsiienko static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE, 5088eb8121abSViacheslav Ovsiienko "invalid WQE Ethernet Segment size"); 5089eb8121abSViacheslav Ovsiienko static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE, 5090eb8121abSViacheslav Ovsiienko "invalid WQE Data Segment size"); 5091eb8121abSViacheslav Ovsiienko static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE, 5092eb8121abSViacheslav Ovsiienko "invalid WQE size"); 5093eb8121abSViacheslav Ovsiienko assert(priv); 5094eb8121abSViacheslav Ovsiienko if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) { 5095eb8121abSViacheslav Ovsiienko /* We should support Multi-Segment Packets. */ 5096eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_MULTI; 5097eb8121abSViacheslav Ovsiienko } 5098eb8121abSViacheslav Ovsiienko if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO | 5099eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 5100eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_GRE_TNL_TSO | 5101eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_IP_TNL_TSO | 5102eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_UDP_TNL_TSO)) { 5103eb8121abSViacheslav Ovsiienko /* We should support TCP Send Offload. */ 5104eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_TSO; 5105eb8121abSViacheslav Ovsiienko } 5106eb8121abSViacheslav Ovsiienko if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO | 5107eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_UDP_TNL_TSO | 5108eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { 5109eb8121abSViacheslav Ovsiienko /* We should support Software Parser for Tunnels. */ 5110eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_SWP; 5111eb8121abSViacheslav Ovsiienko } 5112eb8121abSViacheslav Ovsiienko if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | 5113eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_UDP_CKSUM | 5114eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_TCP_CKSUM | 5115eb8121abSViacheslav Ovsiienko DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { 5116eb8121abSViacheslav Ovsiienko /* We should support IP/TCP/UDP Checksums. */ 5117eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_CSUM; 5118eb8121abSViacheslav Ovsiienko } 5119eb8121abSViacheslav Ovsiienko if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) { 5120eb8121abSViacheslav Ovsiienko /* We should support VLAN insertion. */ 5121eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_VLAN; 5122eb8121abSViacheslav Ovsiienko } 5123eb8121abSViacheslav Ovsiienko if (priv->txqs_n && (*priv->txqs)[0]) { 5124eb8121abSViacheslav Ovsiienko struct mlx5_txq_data *txd = (*priv->txqs)[0]; 5125eb8121abSViacheslav Ovsiienko 5126eb8121abSViacheslav Ovsiienko if (txd->inlen_send) { 5127eb8121abSViacheslav Ovsiienko /* 5128eb8121abSViacheslav Ovsiienko * Check the data inline requirements. Data inline 5129eb8121abSViacheslav Ovsiienko * is enabled on per device basis, we can check 5130eb8121abSViacheslav Ovsiienko * the first Tx queue only. 5131eb8121abSViacheslav Ovsiienko * 5132eb8121abSViacheslav Ovsiienko * If device does not support VLAN insertion in WQE 5133eb8121abSViacheslav Ovsiienko * and some queues are requested to perform VLAN 5134eb8121abSViacheslav Ovsiienko * insertion offload than inline must be enabled. 5135eb8121abSViacheslav Ovsiienko */ 5136eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_INLINE; 5137eb8121abSViacheslav Ovsiienko } 5138eb8121abSViacheslav Ovsiienko } 5139eb8121abSViacheslav Ovsiienko if (config->mps == MLX5_MPW_ENHANCED && 5140eb8121abSViacheslav Ovsiienko config->txq_inline_min <= 0) { 5141eb8121abSViacheslav Ovsiienko /* 5142eb8121abSViacheslav Ovsiienko * The NIC supports Enhanced Multi-Packet Write. 5143eb8121abSViacheslav Ovsiienko * We do not support legacy MPW due to its 5144eb8121abSViacheslav Ovsiienko * hardware related problems, so we just ignore 5145eb8121abSViacheslav Ovsiienko * legacy MLX5_MPW settings. There should be no 5146eb8121abSViacheslav Ovsiienko * minimal required inline data. 5147eb8121abSViacheslav Ovsiienko */ 5148eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_EMPW; 5149eb8121abSViacheslav Ovsiienko } 5150eb8121abSViacheslav Ovsiienko if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) { 5151eb8121abSViacheslav Ovsiienko /* We should support Flow metadata. */ 5152eb8121abSViacheslav Ovsiienko olx |= MLX5_TXOFF_CONFIG_METADATA; 5153eb8121abSViacheslav Ovsiienko } 5154eb8121abSViacheslav Ovsiienko /* 5155eb8121abSViacheslav Ovsiienko * Scan the routines table to find the minimal 5156eb8121abSViacheslav Ovsiienko * satisfying routine with requested offloads. 5157eb8121abSViacheslav Ovsiienko */ 5158eb8121abSViacheslav Ovsiienko m = RTE_DIM(txoff_func); 5159eb8121abSViacheslav Ovsiienko for (i = 0; i < RTE_DIM(txoff_func); i++) { 5160eb8121abSViacheslav Ovsiienko unsigned int tmp; 5161eb8121abSViacheslav Ovsiienko 5162eb8121abSViacheslav Ovsiienko tmp = txoff_func[i].olx; 5163eb8121abSViacheslav Ovsiienko if (tmp == olx) { 5164eb8121abSViacheslav Ovsiienko /* Meets requested offloads exactly.*/ 5165eb8121abSViacheslav Ovsiienko m = i; 5166eb8121abSViacheslav Ovsiienko break; 5167eb8121abSViacheslav Ovsiienko } 5168eb8121abSViacheslav Ovsiienko if ((tmp & olx) != olx) { 5169eb8121abSViacheslav Ovsiienko /* Does not meet requested offloads at all. */ 5170eb8121abSViacheslav Ovsiienko continue; 5171eb8121abSViacheslav Ovsiienko } 5172eb8121abSViacheslav Ovsiienko if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW) 5173eb8121abSViacheslav Ovsiienko /* Do not enable eMPW if not configured. */ 5174eb8121abSViacheslav Ovsiienko continue; 5175eb8121abSViacheslav Ovsiienko if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE) 5176eb8121abSViacheslav Ovsiienko /* Do not enable inlining if not configured. */ 5177eb8121abSViacheslav Ovsiienko continue; 5178eb8121abSViacheslav Ovsiienko /* 5179eb8121abSViacheslav Ovsiienko * Some routine meets the requirements. 5180eb8121abSViacheslav Ovsiienko * Check whether it has minimal amount 5181eb8121abSViacheslav Ovsiienko * of not requested offloads. 5182eb8121abSViacheslav Ovsiienko */ 5183eb8121abSViacheslav Ovsiienko tmp = __builtin_popcountl(tmp & ~olx); 5184eb8121abSViacheslav Ovsiienko if (m >= RTE_DIM(txoff_func) || tmp < diff) { 5185eb8121abSViacheslav Ovsiienko /* First or better match, save and continue. */ 5186eb8121abSViacheslav Ovsiienko m = i; 5187eb8121abSViacheslav Ovsiienko diff = tmp; 5188eb8121abSViacheslav Ovsiienko continue; 5189eb8121abSViacheslav Ovsiienko } 5190eb8121abSViacheslav Ovsiienko if (tmp == diff) { 5191eb8121abSViacheslav Ovsiienko tmp = txoff_func[i].olx ^ txoff_func[m].olx; 5192eb8121abSViacheslav Ovsiienko if (__builtin_ffsl(txoff_func[i].olx & ~tmp) < 5193eb8121abSViacheslav Ovsiienko __builtin_ffsl(txoff_func[m].olx & ~tmp)) { 5194eb8121abSViacheslav Ovsiienko /* Lighter not requested offload. */ 5195eb8121abSViacheslav Ovsiienko m = i; 5196eb8121abSViacheslav Ovsiienko } 5197eb8121abSViacheslav Ovsiienko } 5198eb8121abSViacheslav Ovsiienko } 5199eb8121abSViacheslav Ovsiienko if (m >= RTE_DIM(txoff_func)) { 5200eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "port %u has no selected Tx function" 5201eb8121abSViacheslav Ovsiienko " for requested offloads %04X", 5202eb8121abSViacheslav Ovsiienko dev->data->port_id, olx); 5203eb8121abSViacheslav Ovsiienko return NULL; 5204eb8121abSViacheslav Ovsiienko } 5205eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "port %u has selected Tx function" 5206eb8121abSViacheslav Ovsiienko " supporting offloads %04X/%04X", 5207eb8121abSViacheslav Ovsiienko dev->data->port_id, olx, txoff_func[m].olx); 5208eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI) 5209eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tMULTI (multi segment)"); 5210eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO) 5211eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tTSO (TCP send offload)"); 5212eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP) 5213eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tSWP (software parser)"); 5214eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM) 5215eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tCSUM (checksum offload)"); 5216eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE) 5217eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tINLIN (inline data)"); 5218eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN) 5219eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)"); 5220eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA) 5221eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)"); 5222eb8121abSViacheslav Ovsiienko if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) 5223eb8121abSViacheslav Ovsiienko DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)"); 5224eb8121abSViacheslav Ovsiienko return txoff_func[m].func; 5225a6bd4911SViacheslav Ovsiienko } 5226