/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"

#define MLX5_ETH_DRIVER_NAME mlx5_eth

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify skew in nanoseconds on the Tx datapath;
 * it represents the time between the start of WQE processing in the SQ
 * and the actual packet data appearing on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the LACP traffic of a bonded device. */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Device parameter to allow or prevent duplicate rule patterns. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"

/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"

/* Device parameter to create the FDB default rule in the PMD. */
#define MLX5_FDB_DEFAULT_RULE_EN "fdb_def_rule_en"

/* HW steering counter configuration. */
#define MLX5_HWS_CNT_SERVICE_CORE "service_core"

/* HW steering counter's query interval. */
#define MLX5_HWS_CNT_CYCLE_TIME "svc_cycle_time"

/* Device parameter to control representor matching in ingress/egress flows with HWS. */
#define MLX5_REPR_MATCHING_EN "repr_matching_en"
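
/*
 * Illustrative note: all of the keys above are consumed as kvargs from the
 * device argument string at probe time. A hypothetical invocation combining
 * a few of them (the PCI address is a placeholder) could look like:
 *
 *   dpdk-testpmd -a 0000:08:00.0,rxq_cqe_comp_en=1,mprq_en=1,tx_pp=500
 *
 * Only the strings defined here are meaningful to the driver; the deprecated
 * Tx keys are still accepted but ignored, as noted above.
 */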

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
						LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;

static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = (1 << 16),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		/**
		 * The ipool index must grow continually from small to big;
		 * grow_trunk is therefore left unset so that meter indices
		 * stay contiguous and never jump.
		 */
		.size = sizeof(struct mlx5_legacy_flow_meter),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It is set at runtime according to the PCI function
		 * configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 1 << 19,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
	[MLX5_IPOOL_MTR_POLICY] = {
		/**
		 * The ipool index must grow continually from small to big;
		 * grow_trunk is therefore left unset so that policy indices
		 * stay contiguous and never jump.
		 */
		.size = sizeof(struct mlx5_flow_meter_sub_policy),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_policy_ipool",
	},
};

#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024
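
/*
 * Sizing illustration for the table above (assuming the indexed pool grows
 * trunk k to trunk_size << (grow_shift * k) entries for the first grow_trunk
 * trunks): the common profile of trunk_size = 64, grow_trunk = 3 and
 * grow_shift = 2 yields trunks of 64, 256 and 1024 entries, after which all
 * further trunks stay at 1024. Pools such as MLX5_IPOOL_MTR deliberately
 * leave grow_trunk unset so that every trunk has the same size and the
 * index space stays contiguous.
 */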

/**
 * Decide whether the representor ID is an HPF (host PF) port on BF2.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if HPF, otherwise 0.
 */
bool
mlx5_is_hpf(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id);
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF &&
	       MLX5_REPRESENTOR_REPR(-1) == repr;
}

/**
 * Decide whether the representor ID is an SF port representor.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if SF port representor, otherwise 0.
 */
bool
mlx5_is_sf_repr(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
}
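
/*
 * Layout sketch assumed by the two helpers above: MLX5_REPRESENTOR_TYPE()
 * extracts the representor type field (PF/VF/SF) from representor_id and
 * MLX5_REPRESENTOR_REPR() the representor index field. mlx5_is_hpf() thus
 * matches a VF-typed entry whose index field is all ones
 * (MLX5_REPRESENTOR_REPR(-1)), which is how the host PF is encoded on a
 * BlueField-2 SmartNIC.
 */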

/**
 * Initialize the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->aso_age_mng)
		return 0;
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->aso_age_mng) {
		DRV_LOG(ERR, "aso_age_mng allocation failed.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT, 1);
	if (err) {
		mlx5_free(sh->aso_age_mng);
		return -1;
	}
	rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
	rte_spinlock_init(&sh->aso_age_mng->free_sl);
	LIST_INIT(&sh->aso_age_mng->free);
	return 0;
}

/**
 * Close and release all the resources of the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	mlx5_aso_flow_hit_queue_poll_stop(sh);
	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
	if (sh->aso_age_mng->pools) {
		struct mlx5_aso_age_pool *pool;

		for (i = 0; i < sh->aso_age_mng->next; ++i) {
			pool = sh->aso_age_mng->pools[i];
			claim_zero(mlx5_devx_cmd_destroy
				   (pool->flow_hit_aso_obj));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
				if (pool->actions[j].dr_action)
					claim_zero
					   (mlx5_flow_os_destroy_flow_action
					    (pool->actions[j].dr_action));
			mlx5_free(pool);
		}
		mlx5_free(sh->aso_age_mng->pools);
	}
	mlx5_free(sh->aso_age_mng);
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	/*
	 * In HW steering, the aging information structure is initialized
	 * later, during the configure function.
	 */
	if (sh->config.dv_flow_en == 2)
		return;
	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Detect and configure the DV flow counter mode.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
	    !hca_attr->flow_counters_dump ||
	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			hca_attr->flow_counters_dump,
			hca_attr->flow_counter_bulk_alloc_bitmap);
	/* Initialize the fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->sws_cmng.counter_fallback = fallback;
	else if (fallback != sh->sws_cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"with others:%d.", PORT_ID(priv), fallback);
#endif
}
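
/*
 * Summary of the decision above: the asynchronous DevX batch counter reads
 * require DevX itself, DV flow steering, the flow_counters_dump capability,
 * bulk allocation of 4 counters (bit 0x4 of the bitmap) and successful
 * discovery of the counter offset; if any of these is missing, the PMD
 * falls back to the slower per-counter management.
 */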

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, otherwise negative errno value and rte_errno is set.
 */
static int
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	if (sh->config.dv_flow_en < 2) {
		void *pools;

		pools = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_flow_counter_pool *) *
				    MLX5_COUNTER_POOLS_MAX_NUM,
				    0, SOCKET_ID_ANY);
		if (!pools) {
			DRV_LOG(ERR,
				"Counter management allocation failed.");
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
		TAILQ_INIT(&sh->sws_cmng.flow_counters);
		sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
		sh->sws_cmng.max_id = -1;
		sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
		sh->sws_cmng.pools = pools;
		rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
		for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
			TAILQ_INIT(&sh->sws_cmng.counters[i]);
			rte_spinlock_init(&sh->sws_cmng.csl[i]);
		}
	} else {
		struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
		uint32_t fw_max_nb_cnts = attr->max_flow_counter;
		uint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;
		uint32_t max_nb_cnts = 0;

		for (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {
			int log_dcs_i = log_dcs - i;

			if (log_dcs_i < 0)
				break;
			if ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >
			    fw_max_nb_cnts)
				continue;
			max_nb_cnts |= RTE_BIT32(log_dcs_i);
			j++;
		}
		sh->hws_max_log_bulk_sz = log_dcs;
		sh->hws_max_nb_counters = max_nb_cnts;
	}
	return 0;
}
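
/*
 * Worked example for the HWS bulk sizing loop above (values illustrative).
 * Assume attr->max_flow_counter = 3 * 2^22 = 12582912:
 *   log_dcs = log2above(12582912) - 1 = 24 - 1 = 23
 *   i = 0: 2^23 = 8388608 <= 12582912, taken
 *   i = 1: 8388608 + 2^22 = 12582912, exactly the FW limit, taken
 *   every smaller power of two would exceed the limit and is skipped,
 * so hws_max_log_bulk_sz = 23 and hws_max_nb_counters = 12582912, spread
 * over at most MLX5_HWS_CNT_DCS_NUM DevX counter bulks.
 */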

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	mlx5_os_wrapped_mkey_destroy(&mng->wm);
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->sws_cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->sws_cmng.n_valid;
		bool fallback = sh->sws_cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->sws_cmng.pools[i];
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
					   (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
						MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				if (fallback && cnt->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->sws_cmng.pools);
	}
	mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	}
	memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
}

/**
 * Initialize the ASO flow meters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
int
mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->mtrmng) {
		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(*sh->mtrmng),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sh->mtrmng) {
			DRV_LOG(ERR,
				"meter management allocation failed.");
			rte_errno = ENOMEM;
			return -ENOMEM;
		}
		if (sh->meter_aso_en) {
			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
			rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
			LIST_INIT(&sh->mtrmng->pools_mng.meters);
		}
		sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
	}
	return 0;
}

/**
 * Close and release all the resources of
 * the ASO flow meter management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_mtr_pool *mtr_pool;
	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
	uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_aso_mtr *aso_mtr;
	int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */

	if (sh->meter_aso_en) {
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
		idx = mtrmng->pools_mng.n_valid;
		while (idx--) {
			mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
				aso_mtr = &mtr_pool->mtrs[i];
				if (aso_mtr->fm.meter_action_g)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action_g));
				if (aso_mtr->fm.meter_action_y)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action_y));
			}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
			claim_zero(mlx5_devx_cmd_destroy
						(mtr_pool->devx_obj));
			mtrmng->pools_mng.n_valid--;
			mlx5_free(mtr_pool);
		}
		mlx5_free(sh->mtrmng->pools_mng.pools);
	}
	mlx5_free(sh->mtrmng);
	sh->mtrmng = NULL;
}

/* Send FLOW_AGED event if needed. */
void
mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_age_info *age_info;
	uint32_t i;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
			continue;
		MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW);
		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) {
			MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER);
			rte_eth_dev_callback_process
				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
				 RTE_ETH_EVENT_FLOW_AGED, NULL);
		}
	}
}

/*
 * Initialize the ASO connection tracking structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->ct_mng)
		return 0;
	sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng) +
				 sizeof(struct mlx5_aso_sq) * MLX5_ASO_CT_SQ_NUM,
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->ct_mng) {
		DRV_LOG(ERR, "ASO CT management allocation failed.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING, MLX5_ASO_CT_SQ_NUM);
	if (err) {
		mlx5_free(sh->ct_mng);
		/* rte_errno should be extracted from the failure. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rte_spinlock_init(&sh->ct_mng->ct_sl);
	rte_rwlock_init(&sh->ct_mng->resize_rwl);
	LIST_INIT(&sh->ct_mng->free_cts);
	return 0;
}
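
/*
 * Layout note, derived from the allocation above: the MLX5_ASO_CT_SQ_NUM
 * send queues are carved out of the same allocation, immediately after the
 * management structure itself, so the single mlx5_free() in the error path
 * (and in mlx5_flow_aso_ct_mng_close() below) releases both.
 */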

/*
 * Close and release all the resources of the
 * ASO connection tracking management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	struct mlx5_aso_ct_pool *ct_pool;
	struct mlx5_aso_ct_action *ct;
	uint32_t idx;
	uint32_t val;
	uint32_t cnt;
	int i;

	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
	idx = mng->next;
	while (idx--) {
		cnt = 0;
		ct_pool = mng->pools[idx];
		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
			ct = &ct_pool->actions[i];
			val = __atomic_fetch_sub(&ct->refcnt, 1,
						 __ATOMIC_RELAXED);
			MLX5_ASSERT(val == 1);
			if (val > 1)
				cnt++;
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			if (ct->dr_action_orig)
				claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_orig));
			if (ct->dr_action_rply)
				claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_rply));
#endif
		}
		claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj));
		if (cnt) {
			DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u",
				cnt, idx);
		}
		mlx5_free(ct_pool);
		/* In case of failure. */
		mng->next--;
	}
	mlx5_free(mng->pools);
	mlx5_free(mng);
	/* Management structure must be cleared to 0s during allocation. */
	sh->ct_mng = NULL;
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;
	struct mlx5_indexed_pool_config cfg;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
		cfg = mlx5_ipool_cfg[i];
		switch (i) {
		default:
			break;
		/*
		 * Set the MLX5_IPOOL_MLX5_FLOW ipool size
		 * according to the PCI function flow configuration.
		 */
		case MLX5_IPOOL_MLX5_FLOW:
			cfg.size = sh->config.dv_flow_en ?
				   sizeof(struct mlx5_flow_handle) :
				   MLX5_FLOW_HANDLE_VERBS_SIZE;
			break;
		}
		if (sh->config.reclaim_mode) {
			cfg.release_mem_en = 1;
			cfg.per_core_cache = 0;
		} else {
			cfg.release_mem_en = 0;
		}
		sh->ipool[i] = mlx5_ipool_create(&cfg);
	}
}

/**
 * Release the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
		mlx5_ipool_destroy(sh->ipool[i]);
	for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)
		if (sh->mdh_ipools[i])
			mlx5_ipool_destroy(sh->mdh_ipools[i]);
}
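
/*
 * Behavior note for mlx5_flow_ipool_create(): with the "reclaim_mem_mode"
 * devarg enabled, trunk memory is expected to be returned to the system as
 * soon as it becomes free (release_mem_en = 1) and the per-core cache is
 * disabled; otherwise the pools keep freed entries cached (per lcore where
 * the table above configures per_core_cache) for faster reallocation.
 */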

/*
 * Check if the dynamic flex parser for eCPRI already exists.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   True if it exists, false otherwise.
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	return !!prf->obj;
}

/*
 * Allocation of a flex parser for eCPRI. Once created, the parser-related
 * resources will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	struct mlx5_ext_sample_id ids[8];
	int ret;

	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* Type of compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now; the offset starts
	 * after the common header, which is one DW (u32) long.
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num, NULL);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	if (attr->ext_sample_id) {
		prf->ids[0] = ids[0].sample_id;
		prf->ids[1] = ids[1].sample_id;
	} else {
		prf->ids[0] = ids[0].id;
		prf->ids[1] = ids[1].id;
	}
	return 0;
}
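
/*
 * Resulting sample layout, summarizing the configuration above: sample #0
 * covers the eCPRI common header at offset 0, of which only the message-type
 * byte is matched on; sample #1 starts one DW (sizeof(uint32_t)) later and
 * exposes the first two bytes of the message payload. These are the offsets
 * stored in prf->offset[] and intended to be consumed when building flow
 * matchers on eCPRI fields.
 */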

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

/*
 * Allocation of a flex parser for SRH. Once the refcnt drops to zero, the
 * resources held by this parser will be freed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev)
{
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	struct mlx5_ext_sample_id ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_dev_config *config = &priv->sh->cdev->config;
	void *ibv_ctx = priv->sh->cdev->ctx;
	int ret;

	memset(ids, 0xff, sizeof(ids));
	if (!config->hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported");
		return -ENOTSUP;
	}
	if (__atomic_add_fetch(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 0;
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
	/* SRv6 first two DWs are not counted in. */
	node.header_length_base_value = 0x8;
	/* The unit is uint64_t. */
	node.header_length_field_shift = 0x3;
	/* Header length is the 2nd byte. */
	node.header_length_field_offset = 0x8;
	node.header_length_field_mask = 0xF;
	/* One byte next header protocol. */
	node.next_header_field_size = 0x8;
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IP;
	node.in[0].compare_condition_value = IPPROTO_ROUTING;
	node.sample[0].flow_match_sample_en = 1;
	/* First come first serve no matter inner or outer. */
	node.sample[0].flow_match_sample_tunnel_mode = MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
	node.out[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_TCP;
	node.out[0].compare_condition_value = IPPROTO_TCP;
	node.out[1].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_UDP;
	node.out[1].compare_condition_value = IPPROTO_UDP;
	node.out[2].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IPV6;
	node.out[2].compare_condition_value = IPPROTO_IPV6;
	priv->sh->srh_flex_parser.fp = mlx5_devx_cmd_create_flex_parser(ibv_ctx, &node);
	if (!priv->sh->srh_flex_parser.fp) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	priv->sh->srh_flex_parser.num = 1;
	ret = mlx5_devx_cmd_query_parse_samples(priv->sh->srh_flex_parser.fp, ids,
						priv->sh->srh_flex_parser.num,
						&priv->sh->srh_flex_parser.anchor_id);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	priv->sh->srh_flex_parser.offset[0] = 0x0;
	priv->sh->srh_flex_parser.ids[0].id = ids[0].id;
	return 0;
}
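
/*
 * Parse-graph sketch for the SRv6 node programmed above (assumed semantics
 * of the DevX graph attributes): the node is entered from the IP arc when
 * the next header equals IPPROTO_ROUTING; its length is taken from the
 * second byte of the header ("Hdr Ext Len", counted in 8-byte units, hence
 * the shift of 3, plus the 8-byte base for the first two DWs that are not
 * counted in); and it exits towards TCP, UDP or IPv6 according to the SRH
 * next-header value.
 */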

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_free_srh_flex_parser(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;

	if (__atomic_sub_fetch(&fp->refcnt, 1, __ATOMIC_RELAXED))
		return;
	if (fp->fp)
		mlx5_devx_cmd_destroy(fp->fp);
	fp->fp = NULL;
	fp->num = 0;
}

uint32_t
mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t sw_parsing_offloads = 0;

	if (attr->swp) {
		sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
		if (attr->swp_csum)
			sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;

		if (attr->swp_lso)
			sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
	}
	return sw_parsing_offloads;
}

uint32_t
mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t tn_offloads = 0;

	if (attr->tunnel_stateless_vxlan)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
	if (attr->tunnel_stateless_gre)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
	if (attr->tunnel_stateless_geneve_rx)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
	return tn_offloads;
}
*/
1163a0bfe9d5SViacheslav Ovsiienko static int
11645dfa003dSMichael Baum mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
1165a0bfe9d5SViacheslav Ovsiienko {
11665dfa003dSMichael Baum int ret;
1167a0bfe9d5SViacheslav Ovsiienko 
11685dfa003dSMichael Baum ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
11695dfa003dSMichael Baum if (ret) {
11705dfa003dSMichael Baum DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
11715dfa003dSMichael Baum return -rte_errno;
1172a0bfe9d5SViacheslav Ovsiienko }
11735dfa003dSMichael Baum MLX5_ASSERT(sh->tx_uar.obj);
11745dfa003dSMichael Baum MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
11755dfa003dSMichael Baum ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
11765dfa003dSMichael Baum if (ret) {
11775dfa003dSMichael Baum DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
11785dfa003dSMichael Baum mlx5_devx_uar_release(&sh->tx_uar);
11795dfa003dSMichael Baum return -rte_errno;
1180a0bfe9d5SViacheslav Ovsiienko }
11815dfa003dSMichael Baum MLX5_ASSERT(sh->rx_uar.obj);
11825dfa003dSMichael Baum MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
11835dfa003dSMichael Baum return 0;
1184a0bfe9d5SViacheslav Ovsiienko }
11855dfa003dSMichael Baum 
11865dfa003dSMichael Baum static void
11875dfa003dSMichael Baum mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
11885dfa003dSMichael Baum {
11895dfa003dSMichael Baum mlx5_devx_uar_release(&sh->rx_uar);
11905dfa003dSMichael Baum mlx5_devx_uar_release(&sh->tx_uar);
1191a0bfe9d5SViacheslav Ovsiienko }
1192a0bfe9d5SViacheslav Ovsiienko 
1193014d1cbeSSuanming Mou /**
1194fc59a1ecSMichael Baum  * rte_mempool_walk() callback to unregister Rx mempools.
1195fc59a1ecSMichael Baum  * It is used when implicit mempool registration is disabled.
1196fec28ca0SDmitry Kozlyuk  *
1197fec28ca0SDmitry Kozlyuk  * @param mp
1198fec28ca0SDmitry Kozlyuk  *   The mempool being walked.
1199fec28ca0SDmitry Kozlyuk  * @param arg
1200fec28ca0SDmitry Kozlyuk  *   Pointer to the device shared context.
1201fec28ca0SDmitry Kozlyuk  */
1202fec28ca0SDmitry Kozlyuk static void
1203fc59a1ecSMichael Baum mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
1204fec28ca0SDmitry Kozlyuk {
1205fec28ca0SDmitry Kozlyuk struct mlx5_dev_ctx_shared *sh = arg;
1206fec28ca0SDmitry Kozlyuk 
1207fc59a1ecSMichael Baum mlx5_dev_mempool_unregister(sh->cdev, mp);
1208fec28ca0SDmitry Kozlyuk }
1209fec28ca0SDmitry Kozlyuk 
1210fec28ca0SDmitry Kozlyuk /**
1211fec28ca0SDmitry Kozlyuk  * Callback used when implicit mempool registration is disabled
1212fec28ca0SDmitry Kozlyuk  * in order to track Rx mempool destruction.
1213fec28ca0SDmitry Kozlyuk  *
1214fec28ca0SDmitry Kozlyuk  * @param event
1215fec28ca0SDmitry Kozlyuk  *   Mempool life cycle event.
1216fec28ca0SDmitry Kozlyuk  * @param mp
1217fec28ca0SDmitry Kozlyuk  *   An Rx mempool registered explicitly when the port is started.
1218fec28ca0SDmitry Kozlyuk  * @param arg
1219fec28ca0SDmitry Kozlyuk  *   Pointer to a device shared context.
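 *
 * Only RTE_MEMPOOL_EVENT_DESTROY is acted upon here; creation events are
 * ignored, since Rx mempools are registered explicitly at port start.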
1220fec28ca0SDmitry Kozlyuk  */
1221fec28ca0SDmitry Kozlyuk static void
1222fec28ca0SDmitry Kozlyuk mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
1223fec28ca0SDmitry Kozlyuk struct rte_mempool *mp, void *arg)
1224fec28ca0SDmitry Kozlyuk {
1225fec28ca0SDmitry Kozlyuk struct mlx5_dev_ctx_shared *sh = arg;
1226fec28ca0SDmitry Kozlyuk 
1227fec28ca0SDmitry Kozlyuk if (event == RTE_MEMPOOL_EVENT_DESTROY)
1228fc59a1ecSMichael Baum mlx5_dev_mempool_unregister(sh->cdev, mp);
1229fec28ca0SDmitry Kozlyuk }
1230fec28ca0SDmitry Kozlyuk 
1231fec28ca0SDmitry Kozlyuk int
1232fec28ca0SDmitry Kozlyuk mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
1233fec28ca0SDmitry Kozlyuk {
1234fec28ca0SDmitry Kozlyuk struct mlx5_priv *priv = dev->data->dev_private;
1235fec28ca0SDmitry Kozlyuk struct mlx5_dev_ctx_shared *sh = priv->sh;
1236fec28ca0SDmitry Kozlyuk int ret;
1237fec28ca0SDmitry Kozlyuk 
1238fec28ca0SDmitry Kozlyuk /* Check if we only need to track Rx mempool destruction. */
123985209924SMichael Baum if (!sh->cdev->config.mr_mempool_reg_en) {
1240fec28ca0SDmitry Kozlyuk ret = rte_mempool_event_callback_register
1241fec28ca0SDmitry Kozlyuk (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
1242fec28ca0SDmitry Kozlyuk return ret == 0 || rte_errno == EEXIST ? 0 : ret;
1243fec28ca0SDmitry Kozlyuk }
1244fc59a1ecSMichael Baum return mlx5_dev_mempool_subscribe(sh->cdev);
1245fec28ca0SDmitry Kozlyuk }
1246fec28ca0SDmitry Kozlyuk 
1247fec28ca0SDmitry Kozlyuk /**
1248a89f6433SRongwei Liu  * Set up multiple TISs with different affinities according to
1249a89f6433SRongwei Liu  * the number of bonding ports.
1250a89f6433SRongwei Liu  *
1251a89f6433SRongwei Liu  * @param sh
1252a89f6433SRongwei Liu  *   Pointer to the shared device context.
1253a89f6433SRongwei Liu  *
1254a89f6433SRongwei Liu  * @return
1255a89f6433SRongwei Liu  *   Zero on success, -1 otherwise.
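 *
 * For example (illustrative): with a two-port bond in TIS affinity mode,
 * two TIS objects are created and TIS i takes its lag_tx_port_affinity
 * from MLX5_IFC_LAG_MAP_TIS_AFFINITY(i, 2); without bonding, a single
 * TIS with affinity 0 is created.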
1256a89f6433SRongwei Liu  */
1257a89f6433SRongwei Liu static int
1258a89f6433SRongwei Liu mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
1259a89f6433SRongwei Liu {
1260a89f6433SRongwei Liu int i;
1261a89f6433SRongwei Liu struct mlx5_devx_lag_context lag_ctx = { 0 };
1262a89f6433SRongwei Liu struct mlx5_devx_tis_attr tis_attr = { 0 };
1263a89f6433SRongwei Liu 
1264a89f6433SRongwei Liu tis_attr.transport_domain = sh->td->id;
1265a89f6433SRongwei Liu if (sh->bond.n_port) {
1266a89f6433SRongwei Liu if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
1267a89f6433SRongwei Liu sh->lag.tx_remap_affinity[0] =
1268a89f6433SRongwei Liu lag_ctx.tx_remap_affinity_1;
1269a89f6433SRongwei Liu sh->lag.tx_remap_affinity[1] =
1270a89f6433SRongwei Liu lag_ctx.tx_remap_affinity_2;
1271a89f6433SRongwei Liu sh->lag.affinity_mode = lag_ctx.port_select_mode;
1272a89f6433SRongwei Liu } else {
1273a89f6433SRongwei Liu DRV_LOG(ERR, "Failed to query lag affinity.");
1274a89f6433SRongwei Liu return -1;
1275a89f6433SRongwei Liu }
1276a89f6433SRongwei Liu if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
1277a89f6433SRongwei Liu for (i = 0; i < sh->bond.n_port; i++) {
1278a89f6433SRongwei Liu tis_attr.lag_tx_port_affinity =
1279a89f6433SRongwei Liu MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
1280a89f6433SRongwei Liu sh->bond.n_port);
1281a89f6433SRongwei Liu sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
1282a89f6433SRongwei Liu &tis_attr);
1283a89f6433SRongwei Liu if (!sh->tis[i]) {
1284a89f6433SRongwei Liu DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
1285a89f6433SRongwei Liu " %s.", i, sh->bond.n_port,
1286a89f6433SRongwei Liu sh->ibdev_name);
1287a89f6433SRongwei Liu return -1;
1288a89f6433SRongwei Liu }
1289a89f6433SRongwei Liu }
1290a89f6433SRongwei Liu DRV_LOG(DEBUG, "LAG number of ports: %d, affinity_1 & 2: pf%d & %d.",
1291a89f6433SRongwei Liu sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
1292a89f6433SRongwei Liu lag_ctx.tx_remap_affinity_2);
1293a89f6433SRongwei Liu return 0;
1294a89f6433SRongwei Liu }
1295a89f6433SRongwei Liu if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
1296a89f6433SRongwei Liu DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
1297a89f6433SRongwei Liu sh->ibdev_name);
1298a89f6433SRongwei Liu }
1299a89f6433SRongwei Liu tis_attr.lag_tx_port_affinity = 0;
1300a89f6433SRongwei Liu sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
1301a89f6433SRongwei Liu if (!sh->tis[0]) {
1302a89f6433SRongwei Liu DRV_LOG(ERR, "Failed to create TIS 0 for bonding device"
1303a89f6433SRongwei Liu " %s.", sh->ibdev_name);
1304a89f6433SRongwei Liu return -1;
1305a89f6433SRongwei Liu }
1306a89f6433SRongwei Liu return 0;
1307a89f6433SRongwei Liu }
1308a89f6433SRongwei Liu 
1309a89f6433SRongwei Liu /**
1310a13ec19cSMichael Baum  * Verify and store value for shared device argument.
1311a13ec19cSMichael Baum  *
1312a13ec19cSMichael Baum  * @param[in] key
1313a13ec19cSMichael Baum  *   Key argument to verify.
1314a13ec19cSMichael Baum  * @param[in] val
1315a13ec19cSMichael Baum  *   Value associated with key.
1316a13ec19cSMichael Baum  * @param opaque
1317a13ec19cSMichael Baum  *   User data.
1318a13ec19cSMichael Baum  *
1319a13ec19cSMichael Baum  * @return
1320a13ec19cSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
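 *
 * Example (illustrative): for a devargs string such as
 *   -a <pci_addr>,dv_flow_en=2,tx_pp=500
 * this handler is invoked once per key and stores the validated value in
 * the shared configuration; negative values are accepted only for the
 * MLX5_TX_PP and MLX5_TX_SKEW keys.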
1321a13ec19cSMichael Baum  */
1322a13ec19cSMichael Baum static int
1323a13ec19cSMichael Baum mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
1324a13ec19cSMichael Baum {
1325a13ec19cSMichael Baum struct mlx5_sh_config *config = opaque;
1326a13ec19cSMichael Baum signed long tmp;
1327a13ec19cSMichael Baum 
1328a13ec19cSMichael Baum errno = 0;
1329a13ec19cSMichael Baum tmp = strtol(val, NULL, 0);
1330a13ec19cSMichael Baum if (errno) {
1331a13ec19cSMichael Baum rte_errno = errno;
1332a13ec19cSMichael Baum DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
1333a13ec19cSMichael Baum return -rte_errno;
1334a13ec19cSMichael Baum }
1335a13ec19cSMichael Baum if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
1336a13ec19cSMichael Baum /* Negative values are acceptable for some keys only. */
1337a13ec19cSMichael Baum rte_errno = EINVAL;
1338a13ec19cSMichael Baum DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
1339a13ec19cSMichael Baum return -rte_errno;
1340a13ec19cSMichael Baum }
1341a13ec19cSMichael Baum if (strcmp(MLX5_TX_PP, key) == 0) {
1342a13ec19cSMichael Baum unsigned long mod = tmp >= 0 ? tmp : -tmp;
1343a13ec19cSMichael Baum 
1344a13ec19cSMichael Baum if (!mod) {
1345a13ec19cSMichael Baum DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
1346a13ec19cSMichael Baum rte_errno = EINVAL;
1347a13ec19cSMichael Baum return -rte_errno;
1348a13ec19cSMichael Baum }
1349a13ec19cSMichael Baum config->tx_pp = tmp;
1350a13ec19cSMichael Baum } else if (strcmp(MLX5_TX_SKEW, key) == 0) {
1351a13ec19cSMichael Baum config->tx_skew = tmp;
1352a13ec19cSMichael Baum } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
1353a13ec19cSMichael Baum config->l3_vxlan_en = !!tmp;
1354a13ec19cSMichael Baum } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
1355a13ec19cSMichael Baum config->vf_nl_en = !!tmp;
1356a13ec19cSMichael Baum } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
1357a13ec19cSMichael Baum config->dv_esw_en = !!tmp;
1358a13ec19cSMichael Baum } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
1359d84c3cf7SSuanming Mou if (tmp > 2) {
1360d84c3cf7SSuanming Mou DRV_LOG(ERR, "Invalid %s parameter.", key);
1361d84c3cf7SSuanming Mou rte_errno = EINVAL;
1362d84c3cf7SSuanming Mou return -rte_errno;
1363d84c3cf7SSuanming Mou }
1364d84c3cf7SSuanming Mou config->dv_flow_en = tmp;
1365a13ec19cSMichael Baum } else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
1366a13ec19cSMichael Baum if (tmp != MLX5_XMETA_MODE_LEGACY &&
1367a13ec19cSMichael Baum tmp != MLX5_XMETA_MODE_META16 &&
1368a13ec19cSMichael Baum tmp != MLX5_XMETA_MODE_META32 &&
1369ddb68e47SBing Zhao tmp != MLX5_XMETA_MODE_MISS_INFO &&
1370ddb68e47SBing Zhao tmp != MLX5_XMETA_MODE_META32_HWS) {
1371a13ec19cSMichael Baum DRV_LOG(ERR, "Invalid extensive metadata parameter.");
1372a13ec19cSMichael Baum rte_errno = EINVAL;
1373a13ec19cSMichael Baum return -rte_errno;
1374a13ec19cSMichael Baum }
1375a13ec19cSMichael Baum if (tmp != MLX5_XMETA_MODE_MISS_INFO)
1376a13ec19cSMichael Baum config->dv_xmeta_en = tmp;
1377a13ec19cSMichael Baum else
1378a13ec19cSMichael Baum config->dv_miss_info = 1;
1379a13ec19cSMichael Baum } else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
1380a13ec19cSMichael Baum config->lacp_by_user = !!tmp;
1381a13ec19cSMichael Baum } else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
1382a13ec19cSMichael Baum if (tmp != MLX5_RCM_NONE &&
1383a13ec19cSMichael Baum tmp != MLX5_RCM_LIGHT &&
1384a13ec19cSMichael Baum tmp != MLX5_RCM_AGGR) {
1385a13ec19cSMichael Baum DRV_LOG(ERR, "Unrecognized %s: \"%s\"",
key, val); 1386a13ec19cSMichael Baum rte_errno = EINVAL; 1387a13ec19cSMichael Baum return -rte_errno; 1388a13ec19cSMichael Baum } 1389a13ec19cSMichael Baum config->reclaim_mode = tmp; 1390a13ec19cSMichael Baum } else if (strcmp(MLX5_DECAP_EN, key) == 0) { 1391a13ec19cSMichael Baum config->decap_en = !!tmp; 1392a13ec19cSMichael Baum } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) { 1393a13ec19cSMichael Baum config->allow_duplicate_pattern = !!tmp; 13941939eb6fSDariusz Sosnowski } else if (strcmp(MLX5_FDB_DEFAULT_RULE_EN, key) == 0) { 13951939eb6fSDariusz Sosnowski config->fdb_def_rule = !!tmp; 13964d368e1dSXiaoyu Min } else if (strcmp(MLX5_HWS_CNT_SERVICE_CORE, key) == 0) { 13974d368e1dSXiaoyu Min config->cnt_svc.service_core = tmp; 13984d368e1dSXiaoyu Min } else if (strcmp(MLX5_HWS_CNT_CYCLE_TIME, key) == 0) { 13994d368e1dSXiaoyu Min config->cnt_svc.cycle_time = tmp; 1400483181f7SDariusz Sosnowski } else if (strcmp(MLX5_REPR_MATCHING_EN, key) == 0) { 1401483181f7SDariusz Sosnowski config->repr_matching = !!tmp; 1402a13ec19cSMichael Baum } 1403a13ec19cSMichael Baum return 0; 1404a13ec19cSMichael Baum } 1405a13ec19cSMichael Baum 1406a13ec19cSMichael Baum /** 1407a13ec19cSMichael Baum * Parse user device parameters and adjust them according to device 1408a13ec19cSMichael Baum * capabilities. 1409a13ec19cSMichael Baum * 1410a13ec19cSMichael Baum * @param sh 1411a13ec19cSMichael Baum * Pointer to shared device context. 1412a729d2f0SMichael Baum * @param mkvlist 1413a729d2f0SMichael Baum * Pointer to mlx5 kvargs control, can be NULL if there is no devargs. 1414a13ec19cSMichael Baum * @param config 1415a13ec19cSMichael Baum * Pointer to shared device configuration structure. 1416a13ec19cSMichael Baum * 1417a13ec19cSMichael Baum * @return 1418a13ec19cSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 1419a13ec19cSMichael Baum */ 1420a13ec19cSMichael Baum static int 1421a13ec19cSMichael Baum mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh, 1422a729d2f0SMichael Baum struct mlx5_kvargs_ctrl *mkvlist, 1423a13ec19cSMichael Baum struct mlx5_sh_config *config) 1424a13ec19cSMichael Baum { 1425a729d2f0SMichael Baum const char **params = (const char *[]){ 1426a729d2f0SMichael Baum MLX5_TX_PP, 1427a729d2f0SMichael Baum MLX5_TX_SKEW, 1428a729d2f0SMichael Baum MLX5_L3_VXLAN_EN, 1429a729d2f0SMichael Baum MLX5_VF_NL_EN, 1430a729d2f0SMichael Baum MLX5_DV_ESW_EN, 1431a729d2f0SMichael Baum MLX5_DV_FLOW_EN, 1432a729d2f0SMichael Baum MLX5_DV_XMETA_EN, 1433a729d2f0SMichael Baum MLX5_LACP_BY_USER, 1434a729d2f0SMichael Baum MLX5_RECLAIM_MEM, 1435a729d2f0SMichael Baum MLX5_DECAP_EN, 1436a729d2f0SMichael Baum MLX5_ALLOW_DUPLICATE_PATTERN, 14371939eb6fSDariusz Sosnowski MLX5_FDB_DEFAULT_RULE_EN, 14384d368e1dSXiaoyu Min MLX5_HWS_CNT_SERVICE_CORE, 14394d368e1dSXiaoyu Min MLX5_HWS_CNT_CYCLE_TIME, 1440483181f7SDariusz Sosnowski MLX5_REPR_MATCHING_EN, 1441a729d2f0SMichael Baum NULL, 1442a729d2f0SMichael Baum }; 1443a13ec19cSMichael Baum int ret = 0; 1444a13ec19cSMichael Baum 1445a13ec19cSMichael Baum /* Default configuration. 
*/
1446a13ec19cSMichael Baum memset(config, 0, sizeof(*config));
1447a13ec19cSMichael Baum config->vf_nl_en = 1;
1448a13ec19cSMichael Baum config->dv_esw_en = 1;
1449a13ec19cSMichael Baum config->dv_flow_en = 1;
1450a13ec19cSMichael Baum config->decap_en = 1;
1451a13ec19cSMichael Baum config->allow_duplicate_pattern = 1;
14521939eb6fSDariusz Sosnowski config->fdb_def_rule = 1;
14534d368e1dSXiaoyu Min config->cnt_svc.cycle_time = MLX5_CNT_SVC_CYCLE_TIME_DEFAULT;
14544d368e1dSXiaoyu Min config->cnt_svc.service_core = rte_get_main_lcore();
1455483181f7SDariusz Sosnowski config->repr_matching = 1;
1456a729d2f0SMichael Baum if (mkvlist != NULL) {
1457a13ec19cSMichael Baum /* Process parameters. */
1458a729d2f0SMichael Baum ret = mlx5_kvargs_process(mkvlist, params,
1459a13ec19cSMichael Baum mlx5_dev_args_check_handler, config);
1460a13ec19cSMichael Baum if (ret) {
1461a13ec19cSMichael Baum DRV_LOG(ERR, "Failed to process device arguments: %s",
1462a13ec19cSMichael Baum strerror(rte_errno));
1463a13ec19cSMichael Baum return -rte_errno;
1464a13ec19cSMichael Baum }
1465a13ec19cSMichael Baum }
1466a13ec19cSMichael Baum /* Adjust parameters according to device capabilities. */
1467a13ec19cSMichael Baum if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
1468a13ec19cSMichael Baum DRV_LOG(WARNING, "DV flow is not supported.");
1469a13ec19cSMichael Baum config->dv_flow_en = 0;
1470a13ec19cSMichael Baum }
1471a13ec19cSMichael Baum if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
1472a13ec19cSMichael Baum DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
1473a13ec19cSMichael Baum config->dv_esw_en = 0;
1474a13ec19cSMichael Baum }
147572d836b3SMichael Baum if (config->dv_esw_en && !config->dv_flow_en) {
147672d836b3SMichael Baum DRV_LOG(DEBUG,
147772d836b3SMichael Baum "E-Switch DV flow is supported only when DV flow is enabled.");
147872d836b3SMichael Baum config->dv_esw_en = 0;
147972d836b3SMichael Baum }
1480a13ec19cSMichael Baum if (config->dv_miss_info && config->dv_esw_en)
1481a13ec19cSMichael Baum config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
1482a13ec19cSMichael Baum if (!config->dv_esw_en &&
1483a13ec19cSMichael Baum config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1484a13ec19cSMichael Baum DRV_LOG(WARNING,
1485a13ec19cSMichael Baum "Metadata mode %u is not supported (no E-Switch).",
1486a13ec19cSMichael Baum config->dv_xmeta_en);
1487a13ec19cSMichael Baum config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1488a13ec19cSMichael Baum }
1489483181f7SDariusz Sosnowski if (config->dv_flow_en != 2 && !config->repr_matching) {
1490483181f7SDariusz Sosnowski DRV_LOG(DEBUG, "Disabling representor matching is valid only "
1491483181f7SDariusz Sosnowski "when HW Steering is enabled.");
1492483181f7SDariusz Sosnowski config->repr_matching = 1;
1493483181f7SDariusz Sosnowski }
1494a13ec19cSMichael Baum if (config->tx_pp && !sh->dev_cap.txpp_en) {
1495a13ec19cSMichael Baum DRV_LOG(ERR, "Packet pacing is not supported.");
1496a13ec19cSMichael Baum rte_errno = ENODEV;
1497a13ec19cSMichael Baum return -rte_errno;
1498a13ec19cSMichael Baum }
149995cbaaa1SViacheslav Ovsiienko if (!config->tx_pp && config->tx_skew &&
150095cbaaa1SViacheslav Ovsiienko !sh->cdev->config.hca_attr.wait_on_time) {
1501a13ec19cSMichael Baum DRV_LOG(WARNING,
1502a13ec19cSMichael Baum "\"tx_skew\" has no effect without \"tx_pp\".");
1503a13ec19cSMichael Baum }
1504593f913aSMichael Baum /* Check for LRO support.
*/ 1505593f913aSMichael Baum if (mlx5_devx_obj_ops_en(sh) && sh->cdev->config.hca_attr.lro_cap) { 1506593f913aSMichael Baum /* TBD check tunnel lro caps. */ 1507593f913aSMichael Baum config->lro_allowed = 1; 1508593f913aSMichael Baum DRV_LOG(DEBUG, "LRO is allowed."); 1509593f913aSMichael Baum DRV_LOG(DEBUG, 1510593f913aSMichael Baum "LRO minimal size of TCP segment required for coalescing is %d bytes.", 1511593f913aSMichael Baum sh->cdev->config.hca_attr.lro_min_mss_size); 1512593f913aSMichael Baum } 1513a13ec19cSMichael Baum /* 1514a13ec19cSMichael Baum * If HW has bug working with tunnel packet decapsulation and scatter 1515a13ec19cSMichael Baum * FCS, and decapsulation is needed, clear the hw_fcs_strip bit. 1516a13ec19cSMichael Baum * Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore. 1517a13ec19cSMichael Baum */ 1518a13ec19cSMichael Baum if (sh->dev_cap.scatter_fcs_w_decap_disable && sh->config.decap_en) 1519a13ec19cSMichael Baum config->hw_fcs_strip = 0; 1520a13ec19cSMichael Baum else 1521a13ec19cSMichael Baum config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip; 1522a13ec19cSMichael Baum DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 1523a13ec19cSMichael Baum (config->hw_fcs_strip ? "" : "not ")); 1524a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp); 1525a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew); 1526a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode); 1527a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en); 1528a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en); 1529a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en); 1530a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info); 1531a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en); 1532a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en); 1533a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user); 1534a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en); 1535a13ec19cSMichael Baum DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.", 1536a13ec19cSMichael Baum config->allow_duplicate_pattern); 15371939eb6fSDariusz Sosnowski DRV_LOG(DEBUG, "\"fdb_def_rule_en\" is %u.", config->fdb_def_rule); 1538483181f7SDariusz Sosnowski DRV_LOG(DEBUG, "\"repr_matching_en\" is %u.", config->repr_matching); 1539a13ec19cSMichael Baum return 0; 1540a13ec19cSMichael Baum } 1541a13ec19cSMichael Baum 1542a13ec19cSMichael Baum /** 1543e3032e9cSMichael Baum * Configure realtime timestamp format. 1544e3032e9cSMichael Baum * 1545e3032e9cSMichael Baum * @param sh 1546e3032e9cSMichael Baum * Pointer to mlx5_dev_ctx_shared object. 1547e3032e9cSMichael Baum * @param hca_attr 1548e3032e9cSMichael Baum * Pointer to DevX HCA capabilities structure. 
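 *
 * Decision sketch: the real-time timestamp format is assumed either when
 * the MTUTC register can be read and reports the REAL_TIME mode, or when
 * the register is not readable but the device frequency equals
 * NS_PER_S / MS_PER_S kHz (i.e. 1 GHz).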
1549e3032e9cSMichael Baum  */
1550e3032e9cSMichael Baum void
1551e3032e9cSMichael Baum mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
1552e3032e9cSMichael Baum struct mlx5_hca_attr *hca_attr)
1553e3032e9cSMichael Baum {
1554e3032e9cSMichael Baum uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
1555e3032e9cSMichael Baum uint32_t reg[dw_cnt];
1556e3032e9cSMichael Baum int ret = ENOTSUP;
1557e3032e9cSMichael Baum 
1558e3032e9cSMichael Baum if (hca_attr->access_register_user)
1559e3032e9cSMichael Baum ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
1560e3032e9cSMichael Baum MLX5_REGISTER_ID_MTUTC, 0,
1561e3032e9cSMichael Baum reg, dw_cnt);
1562e3032e9cSMichael Baum if (!ret) {
1563e3032e9cSMichael Baum uint32_t ts_mode;
1564e3032e9cSMichael Baum 
1565e3032e9cSMichael Baum /* MTUTC register is read successfully. */
1566e3032e9cSMichael Baum ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
1567e3032e9cSMichael Baum if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
156887af0d1eSMichael Baum sh->dev_cap.rt_timestamp = 1;
1569e3032e9cSMichael Baum } else {
1570e3032e9cSMichael Baum /* Kernel does not support register reading. */
1571e3032e9cSMichael Baum if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
157287af0d1eSMichael Baum sh->dev_cap.rt_timestamp = 1;
1573e3032e9cSMichael Baum }
1574e3032e9cSMichael Baum }
1575e3032e9cSMichael Baum 
1576e3032e9cSMichael Baum /**
157791389890SOphir Munk  * Allocate the shared device context. If there is a multiport device, the
157817e19bc4SViacheslav Ovsiienko  * master and representors will share this context; if there is a single-port
157991389890SOphir Munk  * dedicated device, the context will be used by only the given port due to
158017e19bc4SViacheslav Ovsiienko  * unification.
158117e19bc4SViacheslav Ovsiienko  *
158291389890SOphir Munk  * The routine first searches the context list for the specified device name;
158317e19bc4SViacheslav Ovsiienko  * if found, the shared context is assumed and its reference counter is
158417e19bc4SViacheslav Ovsiienko  * incremented. If no context is found, a new one is created and initialized
158591389890SOphir Munk  * with the specified device context and parameters.
158617e19bc4SViacheslav Ovsiienko  *
158717e19bc4SViacheslav Ovsiienko  * @param[in] spawn
158891389890SOphir Munk  *   Pointer to the device attributes (name, port, etc).
1589a729d2f0SMichael Baum  * @param mkvlist
1590a729d2f0SMichael Baum  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
159117e19bc4SViacheslav Ovsiienko  *
159217e19bc4SViacheslav Ovsiienko  * @return
15936e88bc42SOphir Munk  *   Pointer to mlx5_dev_ctx_shared object on success,
159417e19bc4SViacheslav Ovsiienko  *   otherwise NULL and rte_errno is set.
159517e19bc4SViacheslav Ovsiienko  */
15962eb4d010SOphir Munk struct mlx5_dev_ctx_shared *
1597a729d2f0SMichael Baum mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
1598a729d2f0SMichael Baum struct mlx5_kvargs_ctrl *mkvlist)
159917e19bc4SViacheslav Ovsiienko {
16006e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh;
160117e19bc4SViacheslav Ovsiienko int err = 0;
160253e5a82fSViacheslav Ovsiienko uint32_t i;
160317e19bc4SViacheslav Ovsiienko 
16048e46d4e1SAlexander Kozyrev MLX5_ASSERT(spawn);
160517e19bc4SViacheslav Ovsiienko /* Secondary process should not create the shared context. */
16068e46d4e1SAlexander Kozyrev MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
160791389890SOphir Munk pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
160817e19bc4SViacheslav Ovsiienko /* Search for IB context by device name.
*/
160991389890SOphir Munk LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
1610ca1418ceSMichael Baum if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
161117e19bc4SViacheslav Ovsiienko sh->refcnt++;
161217e19bc4SViacheslav Ovsiienko goto exit;
161317e19bc4SViacheslav Ovsiienko }
161417e19bc4SViacheslav Ovsiienko }
1615ae4eb7dcSViacheslav Ovsiienko /* No device found, we have to create new shared context. */
16168e46d4e1SAlexander Kozyrev MLX5_ASSERT(spawn->max_port);
16172175c4dcSSuanming Mou sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
16186e88bc42SOphir Munk sizeof(struct mlx5_dev_ctx_shared) +
16196be4c57aSMichael Baum spawn->max_port * sizeof(struct mlx5_dev_shared_port),
16202175c4dcSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
162117e19bc4SViacheslav Ovsiienko if (!sh) {
16226be4c57aSMichael Baum DRV_LOG(ERR, "Shared context allocation failure.");
162317e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM;
162417e19bc4SViacheslav Ovsiienko goto exit;
162517e19bc4SViacheslav Ovsiienko }
1626887183efSMichael Baum pthread_mutex_init(&sh->txpp.mutex, NULL);
16277af08c8fSMichael Baum sh->numa_node = spawn->cdev->dev->numa_node;
16287af08c8fSMichael Baum sh->cdev = spawn->cdev;
1629cf004fd3SMichael Baum sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
1630f5f4c482SXueming Li if (spawn->bond_info)
1631f5f4c482SXueming Li sh->bond = *spawn->bond_info;
163291d1cfafSMichael Baum err = mlx5_os_capabilities_prepare(sh);
163317e19bc4SViacheslav Ovsiienko if (err) {
163491d1cfafSMichael Baum DRV_LOG(ERR, "Failed to configure device capabilities.");
163517e19bc4SViacheslav Ovsiienko goto error;
163617e19bc4SViacheslav Ovsiienko }
1637a729d2f0SMichael Baum err = mlx5_shared_dev_ctx_args_config(sh, mkvlist, &sh->config);
1638a13ec19cSMichael Baum if (err) {
1639a13ec19cSMichael Baum DRV_LOG(ERR, "Failed to process device configuration: %s",
1640a13ec19cSMichael Baum strerror(rte_errno));
1641a13ec19cSMichael Baum goto error;
1642a13ec19cSMichael Baum }
164317e19bc4SViacheslav Ovsiienko sh->refcnt = 1;
164417e19bc4SViacheslav Ovsiienko sh->max_port = spawn->max_port;
1645ca1418ceSMichael Baum strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
1646f44b09f9SOphir Munk sizeof(sh->ibdev_name) - 1);
1647ca1418ceSMichael Baum strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
1648f44b09f9SOphir Munk sizeof(sh->ibdev_path) - 1);
164953e5a82fSViacheslav Ovsiienko /*
16506be4c57aSMichael Baum  * Setting port_id to the maximum (unallowed) value means there is no
16516be4c57aSMichael Baum  * interrupt subhandler installed for the given port index i.
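 * A valid port_id is assigned later, once the per-port interrupt
 * handler is actually installed.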
165253e5a82fSViacheslav Ovsiienko  */
165323242063SMatan Azrad for (i = 0; i < sh->max_port; i++) {
165453e5a82fSViacheslav Ovsiienko sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
165523242063SMatan Azrad sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
165617f95513SDmitry Kozlyuk sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
165723242063SMatan Azrad }
16586dc0cbc6SMichael Baum if (sh->cdev->config.devx) {
1659ca1418ceSMichael Baum sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
1660ae18a1aeSOri Kam if (!sh->td) {
1661ae18a1aeSOri Kam DRV_LOG(ERR, "TD allocation failure");
16626be4c57aSMichael Baum rte_errno = ENOMEM;
1663ae18a1aeSOri Kam goto error;
1664ae18a1aeSOri Kam }
1665a89f6433SRongwei Liu if (mlx5_setup_tis(sh)) {
1666ae18a1aeSOri Kam DRV_LOG(ERR, "TIS allocation failure");
16676be4c57aSMichael Baum rte_errno = ENOMEM;
1668ae18a1aeSOri Kam goto error;
1669ae18a1aeSOri Kam }
16705dfa003dSMichael Baum err = mlx5_rxtx_uars_prepare(sh);
1671a0bfe9d5SViacheslav Ovsiienko if (err)
1672fc4d4f73SViacheslav Ovsiienko goto error;
167324feb045SViacheslav Ovsiienko #ifndef RTE_ARCH_64
16745dfa003dSMichael Baum } else {
167524feb045SViacheslav Ovsiienko /* Initialize UAR access locks for 32bit implementations. */
167624feb045SViacheslav Ovsiienko rte_spinlock_init(&sh->uar_lock_cq);
167724feb045SViacheslav Ovsiienko for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
167824feb045SViacheslav Ovsiienko rte_spinlock_init(&sh->uar_lock[i]);
167924feb045SViacheslav Ovsiienko #endif
16805dfa003dSMichael Baum }
16812eb4d010SOphir Munk mlx5_os_dev_shared_handler_install(sh);
16825d55a494STal Shnaiderman if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
16835d55a494STal Shnaiderman err = mlx5_flow_os_init_workspace_once();
16845d55a494STal Shnaiderman if (err)
16855d55a494STal Shnaiderman goto error;
16865d55a494STal Shnaiderman }
1687a94e89e4SMichael Baum err = mlx5_flow_counters_mng_init(sh);
1688a94e89e4SMichael Baum if (err) {
1689a94e89e4SMichael Baum DRV_LOG(ERR, "Failed to initialize counters manager.");
1690a94e89e4SMichael Baum goto error;
1691a94e89e4SMichael Baum }
1692fa2d01c8SDong Zhou mlx5_flow_aging_init(sh);
1693a13ec19cSMichael Baum mlx5_flow_ipool_create(sh);
16940e3d0525SViacheslav Ovsiienko /* Add context to the global device list.
*/ 169591389890SOphir Munk LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next); 1696f15f0c38SShiri Kuzin rte_spinlock_init(&sh->geneve_tlv_opt_sl); 169717e19bc4SViacheslav Ovsiienko exit: 169891389890SOphir Munk pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); 169917e19bc4SViacheslav Ovsiienko return sh; 170017e19bc4SViacheslav Ovsiienko error: 17016be4c57aSMichael Baum err = rte_errno; 1702d133f4cdSViacheslav Ovsiienko pthread_mutex_destroy(&sh->txpp.mutex); 170391389890SOphir Munk pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); 17048e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 17056be4c57aSMichael Baum mlx5_rxtx_uars_release(sh); 1706a89f6433SRongwei Liu i = 0; 1707a89f6433SRongwei Liu do { 1708a89f6433SRongwei Liu if (sh->tis[i]) 1709a89f6433SRongwei Liu claim_zero(mlx5_devx_cmd_destroy(sh->tis[i])); 1710a89f6433SRongwei Liu } while (++i < (uint32_t)sh->bond.n_port); 17116be4c57aSMichael Baum if (sh->td) 17126be4c57aSMichael Baum claim_zero(mlx5_devx_cmd_destroy(sh->td)); 17132175c4dcSSuanming Mou mlx5_free(sh); 171417e19bc4SViacheslav Ovsiienko rte_errno = err; 171517e19bc4SViacheslav Ovsiienko return NULL; 171617e19bc4SViacheslav Ovsiienko } 171717e19bc4SViacheslav Ovsiienko 171817e19bc4SViacheslav Ovsiienko /** 171925025da3SSpike Du * Create LWM event_channel and interrupt handle for shared device 172025025da3SSpike Du * context. All rxqs sharing the device context share the event_channel. 172125025da3SSpike Du * A callback is registered in interrupt thread to receive the LWM event. 172225025da3SSpike Du * 172325025da3SSpike Du * @param[in] priv 172425025da3SSpike Du * Pointer to mlx5_priv instance. 172525025da3SSpike Du * 172625025da3SSpike Du * @return 172725025da3SSpike Du * 0 on success, negative with rte_errno set. 172825025da3SSpike Du */ 172925025da3SSpike Du int 173025025da3SSpike Du mlx5_lwm_setup(struct mlx5_priv *priv) 173125025da3SSpike Du { 173225025da3SSpike Du int fd_lwm; 173325025da3SSpike Du 173425025da3SSpike Du pthread_mutex_init(&priv->sh->lwm_config_lock, NULL); 173525025da3SSpike Du priv->sh->devx_channel_lwm = mlx5_os_devx_create_event_channel 173625025da3SSpike Du (priv->sh->cdev->ctx, 173725025da3SSpike Du MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA); 173825025da3SSpike Du if (!priv->sh->devx_channel_lwm) 173925025da3SSpike Du goto err; 174025025da3SSpike Du fd_lwm = mlx5_os_get_devx_channel_fd(priv->sh->devx_channel_lwm); 174125025da3SSpike Du priv->sh->intr_handle_lwm = mlx5_os_interrupt_handler_create 174225025da3SSpike Du (RTE_INTR_INSTANCE_F_SHARED, true, 174325025da3SSpike Du fd_lwm, mlx5_dev_interrupt_handler_lwm, priv); 174425025da3SSpike Du if (!priv->sh->intr_handle_lwm) 174525025da3SSpike Du goto err; 174625025da3SSpike Du return 0; 174725025da3SSpike Du err: 174825025da3SSpike Du if (priv->sh->devx_channel_lwm) { 174925025da3SSpike Du mlx5_os_devx_destroy_event_channel 175025025da3SSpike Du (priv->sh->devx_channel_lwm); 175125025da3SSpike Du priv->sh->devx_channel_lwm = NULL; 175225025da3SSpike Du } 175325025da3SSpike Du pthread_mutex_destroy(&priv->sh->lwm_config_lock); 175425025da3SSpike Du return -rte_errno; 175525025da3SSpike Du } 175625025da3SSpike Du 175725025da3SSpike Du /** 175825025da3SSpike Du * Destroy LWM event_channel and interrupt handle for shared device 175925025da3SSpike Du * context before free this context. The interrupt handler is also 176025025da3SSpike Du * unregistered. 176125025da3SSpike Du * 176225025da3SSpike Du * @param[in] sh 176325025da3SSpike Du * Pointer to shared device context. 
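 *
 * Expected to be called on the shared context teardown path; it is
 * invoked from mlx5_free_shared_dev_ctx() below, just before the
 * context memory itself is freed.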
176425025da3SSpike Du */ 176525025da3SSpike Du void 176625025da3SSpike Du mlx5_lwm_unset(struct mlx5_dev_ctx_shared *sh) 176725025da3SSpike Du { 176825025da3SSpike Du if (sh->intr_handle_lwm) { 176925025da3SSpike Du mlx5_os_interrupt_handler_destroy(sh->intr_handle_lwm, 177025025da3SSpike Du mlx5_dev_interrupt_handler_lwm, (void *)-1); 177125025da3SSpike Du sh->intr_handle_lwm = NULL; 177225025da3SSpike Du } 177325025da3SSpike Du if (sh->devx_channel_lwm) { 177425025da3SSpike Du mlx5_os_devx_destroy_event_channel 177525025da3SSpike Du (sh->devx_channel_lwm); 177625025da3SSpike Du sh->devx_channel_lwm = NULL; 177725025da3SSpike Du } 177825025da3SSpike Du pthread_mutex_destroy(&sh->lwm_config_lock); 177925025da3SSpike Du } 178025025da3SSpike Du 178125025da3SSpike Du /** 178217e19bc4SViacheslav Ovsiienko * Free shared IB device context. Decrement counter and if zero free 178317e19bc4SViacheslav Ovsiienko * all allocated resources and close handles. 178417e19bc4SViacheslav Ovsiienko * 178517e19bc4SViacheslav Ovsiienko * @param[in] sh 17866e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free 178717e19bc4SViacheslav Ovsiienko */ 17882eb4d010SOphir Munk void 178991389890SOphir Munk mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) 179017e19bc4SViacheslav Ovsiienko { 1791fec28ca0SDmitry Kozlyuk int ret; 1792a89f6433SRongwei Liu int i = 0; 1793fec28ca0SDmitry Kozlyuk 179491389890SOphir Munk pthread_mutex_lock(&mlx5_dev_ctx_list_mutex); 17950afacb04SAlexander Kozyrev #ifdef RTE_LIBRTE_MLX5_DEBUG 179617e19bc4SViacheslav Ovsiienko /* Check the object presence in the list. */ 17976e88bc42SOphir Munk struct mlx5_dev_ctx_shared *lctx; 179817e19bc4SViacheslav Ovsiienko 179991389890SOphir Munk LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next) 180017e19bc4SViacheslav Ovsiienko if (lctx == sh) 180117e19bc4SViacheslav Ovsiienko break; 18028e46d4e1SAlexander Kozyrev MLX5_ASSERT(lctx); 180317e19bc4SViacheslav Ovsiienko if (lctx != sh) { 180417e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "Freeing non-existing shared IB context"); 180517e19bc4SViacheslav Ovsiienko goto exit; 180617e19bc4SViacheslav Ovsiienko } 180717e19bc4SViacheslav Ovsiienko #endif 18088e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 18098e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh->refcnt); 181017e19bc4SViacheslav Ovsiienko /* Secondary process should not free the shared context. */ 18118e46d4e1SAlexander Kozyrev MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 181217e19bc4SViacheslav Ovsiienko if (--sh->refcnt) 181317e19bc4SViacheslav Ovsiienko goto exit; 1814fec28ca0SDmitry Kozlyuk /* Stop watching for mempool events and unregister all mempools. */ 1815fc59a1ecSMichael Baum if (!sh->cdev->config.mr_mempool_reg_en) { 1816fec28ca0SDmitry Kozlyuk ret = rte_mempool_event_callback_unregister 1817fec28ca0SDmitry Kozlyuk (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh); 1818fec28ca0SDmitry Kozlyuk if (ret == 0) 1819fc59a1ecSMichael Baum rte_mempool_walk 1820fc59a1ecSMichael Baum (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh); 1821fc59a1ecSMichael Baum } 18220e3d0525SViacheslav Ovsiienko /* Remove context from the global device list. */ 182317e19bc4SViacheslav Ovsiienko LIST_REMOVE(sh, next); 1824ea823b2cSDmitry Kozlyuk /* Release resources on the last device removal. 
*/ 1825ea823b2cSDmitry Kozlyuk if (LIST_EMPTY(&mlx5_dev_ctx_list)) { 1826ea823b2cSDmitry Kozlyuk mlx5_os_net_cleanup(); 18275d55a494STal Shnaiderman mlx5_flow_os_release_workspace(); 1828ea823b2cSDmitry Kozlyuk } 1829f4a08731SMichael Baum pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); 18309086ac09SGregory Etelson if (sh->flex_parsers_dv) { 18319086ac09SGregory Etelson mlx5_list_destroy(sh->flex_parsers_dv); 18329086ac09SGregory Etelson sh->flex_parsers_dv = NULL; 18339086ac09SGregory Etelson } 183453e5a82fSViacheslav Ovsiienko /* 183553e5a82fSViacheslav Ovsiienko * Ensure there is no async event handler installed. 183653e5a82fSViacheslav Ovsiienko * Only primary process handles async device events. 183753e5a82fSViacheslav Ovsiienko **/ 18385382d28cSMatan Azrad mlx5_flow_counters_mng_close(sh); 1839ce12974cSMichael Baum if (sh->ct_mng) 1840ce12974cSMichael Baum mlx5_flow_aso_ct_mng_close(sh); 1841f935ed4bSDekel Peled if (sh->aso_age_mng) { 1842f935ed4bSDekel Peled mlx5_flow_aso_age_mng_close(sh); 1843f935ed4bSDekel Peled sh->aso_age_mng = NULL; 1844f935ed4bSDekel Peled } 184529efa63aSLi Zhang if (sh->mtrmng) 184629efa63aSLi Zhang mlx5_aso_flow_mtrs_mng_close(sh); 1847014d1cbeSSuanming Mou mlx5_flow_ipool_destroy(sh); 18482eb4d010SOphir Munk mlx5_os_dev_shared_handler_uninstall(sh); 18495dfa003dSMichael Baum mlx5_rxtx_uars_release(sh); 1850a89f6433SRongwei Liu do { 1851a89f6433SRongwei Liu if (sh->tis[i]) 1852a89f6433SRongwei Liu claim_zero(mlx5_devx_cmd_destroy(sh->tis[i])); 1853a89f6433SRongwei Liu } while (++i < sh->bond.n_port); 1854ae18a1aeSOri Kam if (sh->td) 1855ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->td)); 185644864503SSuanming Mou #ifdef HAVE_MLX5_HWS_SUPPORT 185744864503SSuanming Mou /* HWS manages geneve_tlv_option resource as global. */ 185844864503SSuanming Mou if (sh->config.dv_flow_en == 2) 185944864503SSuanming Mou flow_dev_geneve_tlv_option_resource_release(sh); 186044864503SSuanming Mou else 186144864503SSuanming Mou #endif 1862f15f0c38SShiri Kuzin MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL); 1863d133f4cdSViacheslav Ovsiienko pthread_mutex_destroy(&sh->txpp.mutex); 186425025da3SSpike Du mlx5_lwm_unset(sh); 18652175c4dcSSuanming Mou mlx5_free(sh); 1866f4a08731SMichael Baum return; 186717e19bc4SViacheslav Ovsiienko exit: 186891389890SOphir Munk pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); 186917e19bc4SViacheslav Ovsiienko } 187017e19bc4SViacheslav Ovsiienko 1871771fa900SAdrien Mazarguil /** 1872afd7a625SXueming Li * Destroy table hash list. 187354534725SMatan Azrad * 187454534725SMatan Azrad * @param[in] priv 187554534725SMatan Azrad * Pointer to the private device data structure. 187654534725SMatan Azrad */ 18772eb4d010SOphir Munk void 187854534725SMatan Azrad mlx5_free_table_hash_list(struct mlx5_priv *priv) 187954534725SMatan Azrad { 18806e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 1881d1559d66SSuanming Mou struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ? 1882d1559d66SSuanming Mou &sh->groups : &sh->flow_tbls; 1883d1559d66SSuanming Mou if (*tbls == NULL) 188454534725SMatan Azrad return; 1885d1559d66SSuanming Mou mlx5_hlist_destroy(*tbls); 1886d1559d66SSuanming Mou *tbls = NULL; 188754534725SMatan Azrad } 188854534725SMatan Azrad 188922681deeSAlex Vesker #ifdef HAVE_MLX5_HWS_SUPPORT 1890d1559d66SSuanming Mou /** 1891d1559d66SSuanming Mou * Allocate HW steering group hash list. 
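 * Used in place of the flow table hash list when HW Steering
 * (dv_flow_en=2) is enabled; entries are managed through the
 * flow_hw_grp_* callbacks.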
1892d1559d66SSuanming Mou  *
1893d1559d66SSuanming Mou  * @param[in] priv
1894d1559d66SSuanming Mou  *   Pointer to the private device data structure.
1895d1559d66SSuanming Mou  */
1896d1559d66SSuanming Mou static int
1897d1559d66SSuanming Mou mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
1898d1559d66SSuanming Mou {
1899d1559d66SSuanming Mou int err = 0;
1900d1559d66SSuanming Mou struct mlx5_dev_ctx_shared *sh = priv->sh;
1901d1559d66SSuanming Mou char s[MLX5_NAME_SIZE];
1902d1559d66SSuanming Mou 
1903d1559d66SSuanming Mou MLX5_ASSERT(sh);
1904d1559d66SSuanming Mou snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
1905d1559d66SSuanming Mou sh->groups = mlx5_hlist_create
1906d1559d66SSuanming Mou (s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
1907d1559d66SSuanming Mou false, true, sh,
1908d1559d66SSuanming Mou flow_hw_grp_create_cb,
1909d1559d66SSuanming Mou flow_hw_grp_match_cb,
1910d1559d66SSuanming Mou flow_hw_grp_remove_cb,
1911d1559d66SSuanming Mou flow_hw_grp_clone_cb,
1912d1559d66SSuanming Mou flow_hw_grp_clone_free_cb);
1913d1559d66SSuanming Mou if (!sh->groups) {
1914d1559d66SSuanming Mou DRV_LOG(ERR, "flow groups hash list creation failed.");
1915d1559d66SSuanming Mou err = ENOMEM;
1916d1559d66SSuanming Mou }
1917d1559d66SSuanming Mou return err;
1918d1559d66SSuanming Mou }
1919d1559d66SSuanming Mou #endif
1920d1559d66SSuanming Mou 
1921d1559d66SSuanming Mou 
192254534725SMatan Azrad /**
192354534725SMatan Azrad  * Initialize flow table hash list and create the root tables entry
192454534725SMatan Azrad  * for each domain.
192554534725SMatan Azrad  *
192654534725SMatan Azrad  * @param[in] priv
192754534725SMatan Azrad  *   Pointer to the private device data structure.
192854534725SMatan Azrad  *
192954534725SMatan Azrad  * @return
193054534725SMatan Azrad  *   Zero on success, positive error code otherwise.
193154534725SMatan Azrad  */
19322eb4d010SOphir Munk int
1933afd7a625SXueming Li mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
193454534725SMatan Azrad {
1935afd7a625SXueming Li int err = 0;
1936d1559d66SSuanming Mou 
1937afd7a625SXueming Li /* Tables are only used in DV and DR modes.
*/
1938d9bad050SSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
19396e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh;
1940961b6774SMatan Azrad char s[MLX5_NAME_SIZE];
194154534725SMatan Azrad 
1942d9bad050SSuanming Mou #ifdef HAVE_MLX5_HWS_SUPPORT
1943d1559d66SSuanming Mou if (priv->sh->config.dv_flow_en == 2)
1944d1559d66SSuanming Mou return mlx5_alloc_hw_group_hash_list(priv);
1945d9bad050SSuanming Mou #endif
19468e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh);
194754534725SMatan Azrad snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
1948e69a5922SXueming Li sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
1949961b6774SMatan Azrad false, true, sh,
1950961b6774SMatan Azrad flow_dv_tbl_create_cb,
1951f5b0aed2SSuanming Mou flow_dv_tbl_match_cb,
1952961b6774SMatan Azrad flow_dv_tbl_remove_cb,
1953961b6774SMatan Azrad flow_dv_tbl_clone_cb,
1954961b6774SMatan Azrad flow_dv_tbl_clone_free_cb);
195554534725SMatan Azrad if (!sh->flow_tbls) {
195663783b01SDavid Marchand DRV_LOG(ERR, "flow tables hash list creation failed.");
195754534725SMatan Azrad err = ENOMEM;
195854534725SMatan Azrad return err;
195954534725SMatan Azrad }
196054534725SMatan Azrad #ifndef HAVE_MLX5DV_DR
1961afd7a625SXueming Li struct rte_flow_error error;
1962afd7a625SXueming Li struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
1963afd7a625SXueming Li 
196454534725SMatan Azrad /*
196554534725SMatan Azrad  * In case we do not have DR support, the zero tables should be created
196654534725SMatan Azrad  * because DV expects to see them even if they cannot be created by
196754534725SMatan Azrad  * RDMA-CORE.
196854534725SMatan Azrad  */
19692d2cef5dSLi Zhang if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
19702d2cef5dSLi Zhang NULL, 0, 1, 0, &error) ||
19712d2cef5dSLi Zhang !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
19722d2cef5dSLi Zhang NULL, 0, 1, 0, &error) ||
19732d2cef5dSLi Zhang !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
19742d2cef5dSLi Zhang NULL, 0, 1, 0, &error)) {
197554534725SMatan Azrad err = ENOMEM;
197654534725SMatan Azrad goto error;
197754534725SMatan Azrad }
197854534725SMatan Azrad return err;
197954534725SMatan Azrad error:
198054534725SMatan Azrad mlx5_free_table_hash_list(priv);
198154534725SMatan Azrad #endif /* HAVE_MLX5DV_DR */
1982afd7a625SXueming Li #endif
198354534725SMatan Azrad return err;
198454534725SMatan Azrad }
198554534725SMatan Azrad 
198654534725SMatan Azrad /**
19874d803a72SOlga Shern  * Retrieve integer value from environment variable.
19884d803a72SOlga Shern  *
19894d803a72SOlga Shern  * @param[in] name
19904d803a72SOlga Shern  *   Environment variable name.
19914d803a72SOlga Shern  *
19924d803a72SOlga Shern  * @return
19934d803a72SOlga Shern  *   Integer value, 0 if the variable is not set.
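 *
 * Example (illustrative): with MLX5_SOME_VAR=16 exported in the
 * environment, mlx5_getenv_int("MLX5_SOME_VAR") returns 16; an unset
 * or non-numeric variable yields 0, following atoi() semantics.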
19944d803a72SOlga Shern */ 19954d803a72SOlga Shern int 19964d803a72SOlga Shern mlx5_getenv_int(const char *name) 19974d803a72SOlga Shern { 19984d803a72SOlga Shern const char *val = getenv(name); 19994d803a72SOlga Shern 20004d803a72SOlga Shern if (val == NULL) 20014d803a72SOlga Shern return 0; 20024d803a72SOlga Shern return atoi(val); 20034d803a72SOlga Shern } 20044d803a72SOlga Shern 20054d803a72SOlga Shern /** 2006c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 2007c9ba7523SRaslan Darawsheh * 2008c9ba7523SRaslan Darawsheh * @param[in] dev 2009c9ba7523SRaslan Darawsheh * A pointer to eth_dev 2010c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 2011c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 2012c9ba7523SRaslan Darawsheh * 2013c9ba7523SRaslan Darawsheh * @return 2014c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 2015c9ba7523SRaslan Darawsheh */ 2016c9ba7523SRaslan Darawsheh int 2017c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 2018c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 2019c9ba7523SRaslan Darawsheh { 20208e46d4e1SAlexander Kozyrev MLX5_ASSERT(udp_tunnel != NULL); 2021295968d1SFerruh Yigit if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN && 2022c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 2023c9ba7523SRaslan Darawsheh return 0; 2024295968d1SFerruh Yigit if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE && 2025c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 2026c9ba7523SRaslan Darawsheh return 0; 2027c9ba7523SRaslan Darawsheh return -ENOTSUP; 2028c9ba7523SRaslan Darawsheh } 2029c9ba7523SRaslan Darawsheh 2030c9ba7523SRaslan Darawsheh /** 2031120dc4a7SYongseok Koh * Initialize process private data structure. 2032120dc4a7SYongseok Koh * 2033120dc4a7SYongseok Koh * @param dev 2034120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 2035120dc4a7SYongseok Koh * 2036120dc4a7SYongseok Koh * @return 2037120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 2038120dc4a7SYongseok Koh */ 2039120dc4a7SYongseok Koh int 2040120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 2041120dc4a7SYongseok Koh { 2042120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 2043120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 2044120dc4a7SYongseok Koh size_t ppriv_size; 2045120dc4a7SYongseok Koh 20466dad8b3aSYunjian Wang mlx5_proc_priv_uninit(dev); 2047120dc4a7SYongseok Koh /* 2048120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 2049120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 
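 * Hence the allocation size below: sizeof(struct mlx5_proc_priv) plus
 * one struct mlx5_uar_data slot per configured Tx queue (txqs_n).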
2050120dc4a7SYongseok Koh */ 20515dfa003dSMichael Baum ppriv_size = sizeof(struct mlx5_proc_priv) + 20525dfa003dSMichael Baum priv->txqs_n * sizeof(struct mlx5_uar_data); 205384a22cbcSSuanming Mou ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size, 205484a22cbcSSuanming Mou RTE_CACHE_LINE_SIZE, dev->device->numa_node); 2055120dc4a7SYongseok Koh if (!ppriv) { 2056120dc4a7SYongseok Koh rte_errno = ENOMEM; 2057120dc4a7SYongseok Koh return -rte_errno; 2058120dc4a7SYongseok Koh } 205984a22cbcSSuanming Mou ppriv->uar_table_sz = priv->txqs_n; 2060120dc4a7SYongseok Koh dev->process_private = ppriv; 2061b6e9c33cSMichael Baum if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2062b6e9c33cSMichael Baum priv->sh->pppriv = ppriv; 2063120dc4a7SYongseok Koh return 0; 2064120dc4a7SYongseok Koh } 2065120dc4a7SYongseok Koh 2066120dc4a7SYongseok Koh /** 2067120dc4a7SYongseok Koh * Un-initialize process private data structure. 2068120dc4a7SYongseok Koh * 2069120dc4a7SYongseok Koh * @param dev 2070120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 2071120dc4a7SYongseok Koh */ 20722b36c30bSSuanming Mou void 2073120dc4a7SYongseok Koh mlx5_proc_priv_uninit(struct rte_eth_dev *dev) 2074120dc4a7SYongseok Koh { 20759b31fc90SViacheslav Ovsiienko struct mlx5_proc_priv *ppriv = dev->process_private; 20769b31fc90SViacheslav Ovsiienko 20779b31fc90SViacheslav Ovsiienko if (!ppriv) 2078120dc4a7SYongseok Koh return; 20799b31fc90SViacheslav Ovsiienko if (ppriv->hca_bar) 20809b31fc90SViacheslav Ovsiienko mlx5_txpp_unmap_hca_bar(dev); 20812175c4dcSSuanming Mou mlx5_free(dev->process_private); 2082120dc4a7SYongseok Koh dev->process_private = NULL; 2083120dc4a7SYongseok Koh } 2084120dc4a7SYongseok Koh 2085120dc4a7SYongseok Koh /** 2086771fa900SAdrien Mazarguil * DPDK callback to close the device. 2087771fa900SAdrien Mazarguil * 2088771fa900SAdrien Mazarguil * Destroy all queues and objects, free memory. 2089771fa900SAdrien Mazarguil * 2090771fa900SAdrien Mazarguil * @param dev 2091771fa900SAdrien Mazarguil * Pointer to Ethernet device structure. 2092771fa900SAdrien Mazarguil */ 2093b142387bSThomas Monjalon int 2094771fa900SAdrien Mazarguil mlx5_dev_close(struct rte_eth_dev *dev) 2095771fa900SAdrien Mazarguil { 2096dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 20972e22920bSAdrien Mazarguil unsigned int i; 20986af6b973SNélio Laranjeiro int ret; 2099771fa900SAdrien Mazarguil 21002786b7bfSSuanming Mou if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 21012786b7bfSSuanming Mou /* Check if process_private released. */ 21022786b7bfSSuanming Mou if (!dev->process_private) 2103b142387bSThomas Monjalon return 0; 21042786b7bfSSuanming Mou mlx5_tx_uar_uninit_secondary(dev); 21052786b7bfSSuanming Mou mlx5_proc_priv_uninit(dev); 21062786b7bfSSuanming Mou rte_eth_dev_release_port(dev); 2107b142387bSThomas Monjalon return 0; 21082786b7bfSSuanming Mou } 21092786b7bfSSuanming Mou if (!priv->sh) 2110b142387bSThomas Monjalon return 0; 2111*30ff1d25SViacheslav Ovsiienko if (priv->shared_refcnt) { 2112*30ff1d25SViacheslav Ovsiienko DRV_LOG(ERR, "port %u is shared host in use (%u)", 2113*30ff1d25SViacheslav Ovsiienko dev->data->port_id, priv->shared_refcnt); 2114*30ff1d25SViacheslav Ovsiienko rte_errno = EBUSY; 2115*30ff1d25SViacheslav Ovsiienko return -EBUSY; 2116*30ff1d25SViacheslav Ovsiienko } 2117a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u closing device \"%s\"", 21180f99970bSNélio Laranjeiro dev->data->port_id, 2119ca1418ceSMichael Baum ((priv->sh->cdev->ctx != NULL) ? 
2120ca1418ceSMichael Baum mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : "")); 21218db7e3b6SBing Zhao /* 21228db7e3b6SBing Zhao * If default mreg copy action is removed at the stop stage, 21238db7e3b6SBing Zhao * the search will return none and nothing will be done anymore. 21248db7e3b6SBing Zhao */ 2125f64a7946SRongwei Liu if (priv->sh->config.dv_flow_en != 2) 21268db7e3b6SBing Zhao mlx5_flow_stop_default(dev); 2127af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 21288db7e3b6SBing Zhao /* 21298db7e3b6SBing Zhao * If all the flows are already flushed in the device stop stage, 21308db7e3b6SBing Zhao * then this will return directly without any action. 21318db7e3b6SBing Zhao */ 2132b4edeaf3SSuanming Mou mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true); 21334b61b877SBing Zhao mlx5_action_handle_flush(dev); 213402e76468SSuanming Mou mlx5_flow_meter_flush(dev, NULL); 21352e22920bSAdrien Mazarguil /* Prevent crashes when queues are still in use. */ 2136a41f593fSFerruh Yigit dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 2137a41f593fSFerruh Yigit dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 21382aac5b5dSYongseok Koh rte_wmb(); 21392aac5b5dSYongseok Koh /* Disable datapath on secondary process. */ 21402e86c4e5SOphir Munk mlx5_mp_os_req_stop_rxtx(dev); 21411c506404SBing Zhao /* Free the eCPRI flex parser resource. */ 21421c506404SBing Zhao mlx5_flex_parser_ecpri_release(dev); 2143db25cadcSViacheslav Ovsiienko mlx5_flex_item_port_cleanup(dev); 214422681deeSAlex Vesker #ifdef HAVE_MLX5_HWS_SUPPORT 21451939eb6fSDariusz Sosnowski flow_hw_destroy_vport_action(dev); 2146b401400dSSuanming Mou flow_hw_resource_release(dev); 21475bd0e3e6SDariusz Sosnowski flow_hw_clear_port_info(dev); 2148f1fecffaSDariusz Sosnowski if (priv->sh->config.dv_flow_en == 2) { 2149f1fecffaSDariusz Sosnowski flow_hw_clear_flow_metadata_config(); 21508a89038fSBing Zhao flow_hw_clear_tags_set(dev); 2151f1fecffaSDariusz Sosnowski } 2152b401400dSSuanming Mou #endif 21535cf0707fSXueming Li if (priv->rxq_privs != NULL) { 21542e22920bSAdrien Mazarguil /* XXX race condition if mlx5_rx_burst() is still running. */ 215520698c9fSOphir Munk rte_delay_us_sleep(1000); 2156a1366b1aSNélio Laranjeiro for (i = 0; (i != priv->rxqs_n); ++i) 2157af4f09f2SNélio Laranjeiro mlx5_rxq_release(dev, i); 21582e22920bSAdrien Mazarguil priv->rxqs_n = 0; 21594cda06c3SXueming Li mlx5_free(priv->rxq_privs); 21604cda06c3SXueming Li priv->rxq_privs = NULL; 21614cda06c3SXueming Li } 21622e22920bSAdrien Mazarguil if (priv->txqs != NULL) { 21632e22920bSAdrien Mazarguil /* XXX race condition if mlx5_tx_burst() is still running. 
*/
216420698c9fSOphir Munk rte_delay_us_sleep(1000);
21656e78005aSNélio Laranjeiro for (i = 0; (i != priv->txqs_n); ++i)
2166af4f09f2SNélio Laranjeiro mlx5_txq_release(dev, i);
21672e22920bSAdrien Mazarguil priv->txqs_n = 0;
21682e22920bSAdrien Mazarguil priv->txqs = NULL;
21692e22920bSAdrien Mazarguil }
2170120dc4a7SYongseok Koh mlx5_proc_priv_uninit(dev);
2171e6988afdSMatan Azrad if (priv->q_counters) {
2172e6988afdSMatan Azrad mlx5_devx_cmd_destroy(priv->q_counters);
2173e6988afdSMatan Azrad priv->q_counters = NULL;
2174e6988afdSMatan Azrad }
217565b3cd0dSSuanming Mou if (priv->drop_queue.hrxq)
217665b3cd0dSSuanming Mou mlx5_drop_action_destroy(dev);
2177dd3c774fSViacheslav Ovsiienko if (priv->mreg_cp_tbl)
2178e69a5922SXueming Li mlx5_hlist_destroy(priv->mreg_cp_tbl);
21797d6bf6b8SYongseok Koh mlx5_mprq_free_mp(dev);
21802eb4d010SOphir Munk mlx5_os_free_shared_dr(priv);
218129c1d8bbSNélio Laranjeiro if (priv->rss_conf.rss_key != NULL)
218283c2047cSSuanming Mou mlx5_free(priv->rss_conf.rss_key);
2183634efbc2SNelio Laranjeiro if (priv->reta_idx != NULL)
218483c2047cSSuanming Mou mlx5_free(priv->reta_idx);
218587af0d1eSMichael Baum if (priv->sh->dev_cap.vf)
2186f00f6562SOphir Munk mlx5_os_mac_addr_flush(dev);
218726c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0)
218826c08b97SAdrien Mazarguil close(priv->nl_socket_route);
218926c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0)
219026c08b97SAdrien Mazarguil close(priv->nl_socket_rdma);
2191dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context)
2192dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context);
219323820a79SDekel Peled ret = mlx5_hrxq_verify(dev);
2194f5479b68SNélio Laranjeiro if (ret)
2195a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
21960f99970bSNélio Laranjeiro dev->data->port_id);
219715c80a12SDekel Peled ret = mlx5_ind_table_obj_verify(dev);
21984c7a0f5fSNélio Laranjeiro if (ret)
2199a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some indirection tables still remain",
22000f99970bSNélio Laranjeiro dev->data->port_id);
220193403560SDekel Peled ret = mlx5_rxq_obj_verify(dev);
220209cb5b58SNélio Laranjeiro if (ret)
220393403560SDekel Peled DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
22040f99970bSNélio Laranjeiro dev->data->port_id);
2205311b17e6SMichael Baum ret = mlx5_ext_rxq_verify(dev);
2206311b17e6SMichael Baum if (ret)
2207311b17e6SMichael Baum DRV_LOG(WARNING, "Port %u some external RxQs still remain.",
2208311b17e6SMichael Baum dev->data->port_id);
2209af4f09f2SNélio Laranjeiro ret = mlx5_rxq_verify(dev);
2210a1366b1aSNélio Laranjeiro if (ret)
2211a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Rx queues still remain",
22120f99970bSNélio Laranjeiro dev->data->port_id);
2213894c4a8eSOri Kam ret = mlx5_txq_obj_verify(dev);
2214faf2667fSNélio Laranjeiro if (ret)
2215a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
22160f99970bSNélio Laranjeiro dev->data->port_id);
2217af4f09f2SNélio Laranjeiro ret = mlx5_txq_verify(dev);
22186e78005aSNélio Laranjeiro if (ret)
2219a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Tx queues still remain",
22200f99970bSNélio Laranjeiro dev->data->port_id);
2221af4f09f2SNélio Laranjeiro ret = mlx5_flow_verify(dev);
22226af6b973SNélio Laranjeiro if (ret)
2223a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some flows still remain",
2224a170a30dSNélio Laranjeiro dev->data->port_id);
2225679f46c7SMatan Azrad if (priv->hrxqs)
2226679f46c7SMatan Azrad 
mlx5_list_destroy(priv->hrxqs); 222780f872eeSMichael Baum mlx5_free(priv->ext_rxqs); 2228ef4ece4dSMichael Baum priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS; 2229ef4ece4dSMichael Baum /* 2230ef4ece4dSMichael Baum * The interrupt handler port id must be reset before priv is reset 2231ef4ece4dSMichael Baum * since 'mlx5_dev_interrupt_nl_cb' uses priv. 2232ef4ece4dSMichael Baum */ 2233ef4ece4dSMichael Baum rte_io_wmb(); 2234772dc0ebSSuanming Mou /* 2235772dc0ebSSuanming Mou * Free the shared context in last turn, because the cleanup 2236772dc0ebSSuanming Mou * routines above may use some shared fields, like 22377be78d02SJosh Soref * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving 2238772dc0ebSSuanming Mou * ifindex if Netlink fails. 2239772dc0ebSSuanming Mou */ 224091389890SOphir Munk mlx5_free_shared_dev_ctx(priv->sh); 22412b730263SAdrien Mazarguil if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 22422b730263SAdrien Mazarguil unsigned int c = 0; 2243d874a4eeSThomas Monjalon uint16_t port_id; 22442b730263SAdrien Mazarguil 224556bb3c84SXueming Li MLX5_ETH_FOREACH_DEV(port_id, dev->device) { 2246dbeba4cfSThomas Monjalon struct mlx5_priv *opriv = 2247d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 22482b730263SAdrien Mazarguil 22492b730263SAdrien Mazarguil if (!opriv || 22502b730263SAdrien Mazarguil opriv->domain_id != priv->domain_id || 2251d874a4eeSThomas Monjalon &rte_eth_devices[port_id] == dev) 22522b730263SAdrien Mazarguil continue; 22532b730263SAdrien Mazarguil ++c; 2254f7e95215SViacheslav Ovsiienko break; 22552b730263SAdrien Mazarguil } 22562b730263SAdrien Mazarguil if (!c) 22572b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 22582b730263SAdrien Mazarguil } 2259771fa900SAdrien Mazarguil memset(priv, 0, sizeof(*priv)); 22602b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 226142603bbdSOphir Munk /* 226242603bbdSOphir Munk * Reset mac_addrs to NULL such that it is not freed as part of 226342603bbdSOphir Munk * rte_eth_dev_release_port(). mac_addrs is part of dev_private so 226442603bbdSOphir Munk * it is freed when dev_private is freed. 
226542603bbdSOphir Munk */ 226642603bbdSOphir Munk dev->data->mac_addrs = NULL; 2267b142387bSThomas Monjalon return 0; 2268771fa900SAdrien Mazarguil } 2269771fa900SAdrien Mazarguil 2270b012b4ceSOphir Munk const struct eth_dev_ops mlx5_dev_ops = { 2271b012b4ceSOphir Munk .dev_configure = mlx5_dev_configure, 2272b012b4ceSOphir Munk .dev_start = mlx5_dev_start, 2273b012b4ceSOphir Munk .dev_stop = mlx5_dev_stop, 2274b012b4ceSOphir Munk .dev_set_link_down = mlx5_set_link_down, 2275b012b4ceSOphir Munk .dev_set_link_up = mlx5_set_link_up, 2276b012b4ceSOphir Munk .dev_close = mlx5_dev_close, 2277b012b4ceSOphir Munk .promiscuous_enable = mlx5_promiscuous_enable, 2278b012b4ceSOphir Munk .promiscuous_disable = mlx5_promiscuous_disable, 2279b012b4ceSOphir Munk .allmulticast_enable = mlx5_allmulticast_enable, 2280b012b4ceSOphir Munk .allmulticast_disable = mlx5_allmulticast_disable, 2281b012b4ceSOphir Munk .link_update = mlx5_link_update, 2282b012b4ceSOphir Munk .stats_get = mlx5_stats_get, 2283b012b4ceSOphir Munk .stats_reset = mlx5_stats_reset, 2284b012b4ceSOphir Munk .xstats_get = mlx5_xstats_get, 2285b012b4ceSOphir Munk .xstats_reset = mlx5_xstats_reset, 2286b012b4ceSOphir Munk .xstats_get_names = mlx5_xstats_get_names, 2287b012b4ceSOphir Munk .fw_version_get = mlx5_fw_version_get, 2288b012b4ceSOphir Munk .dev_infos_get = mlx5_dev_infos_get, 2289cb95feefSXueming Li .representor_info_get = mlx5_representor_info_get, 2290b012b4ceSOphir Munk .read_clock = mlx5_txpp_read_clock, 2291b012b4ceSOphir Munk .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 2292b012b4ceSOphir Munk .vlan_filter_set = mlx5_vlan_filter_set, 2293b012b4ceSOphir Munk .rx_queue_setup = mlx5_rx_queue_setup, 22945c9f3294SSpike Du .rx_queue_avail_thresh_set = mlx5_rx_queue_lwm_set, 22955c9f3294SSpike Du .rx_queue_avail_thresh_query = mlx5_rx_queue_lwm_query, 2296b012b4ceSOphir Munk .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 2297b012b4ceSOphir Munk .tx_queue_setup = mlx5_tx_queue_setup, 2298b012b4ceSOphir Munk .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 2299b012b4ceSOphir Munk .rx_queue_release = mlx5_rx_queue_release, 2300b012b4ceSOphir Munk .tx_queue_release = mlx5_tx_queue_release, 2301b012b4ceSOphir Munk .rx_queue_start = mlx5_rx_queue_start, 2302b012b4ceSOphir Munk .rx_queue_stop = mlx5_rx_queue_stop, 2303b012b4ceSOphir Munk .tx_queue_start = mlx5_tx_queue_start, 2304b012b4ceSOphir Munk .tx_queue_stop = mlx5_tx_queue_stop, 2305b012b4ceSOphir Munk .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 2306b012b4ceSOphir Munk .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 2307b012b4ceSOphir Munk .mac_addr_remove = mlx5_mac_addr_remove, 2308b012b4ceSOphir Munk .mac_addr_add = mlx5_mac_addr_add, 2309b012b4ceSOphir Munk .mac_addr_set = mlx5_mac_addr_set, 2310b012b4ceSOphir Munk .set_mc_addr_list = mlx5_set_mc_addr_list, 2311b012b4ceSOphir Munk .mtu_set = mlx5_dev_set_mtu, 2312b012b4ceSOphir Munk .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 2313b012b4ceSOphir Munk .vlan_offload_set = mlx5_vlan_offload_set, 2314b012b4ceSOphir Munk .reta_update = mlx5_dev_rss_reta_update, 2315b012b4ceSOphir Munk .reta_query = mlx5_dev_rss_reta_query, 2316b012b4ceSOphir Munk .rss_hash_update = mlx5_rss_hash_update, 2317b012b4ceSOphir Munk .rss_hash_conf_get = mlx5_rss_hash_conf_get, 2318fb7ad441SThomas Monjalon .flow_ops_get = mlx5_flow_ops_get, 2319b012b4ceSOphir Munk .rxq_info_get = mlx5_rxq_info_get, 2320b012b4ceSOphir Munk .txq_info_get = mlx5_txq_info_get, 2321b012b4ceSOphir Munk .rx_burst_mode_get = mlx5_rx_burst_mode_get, 
2322b012b4ceSOphir Munk .tx_burst_mode_get = mlx5_tx_burst_mode_get, 2323b012b4ceSOphir Munk .rx_queue_intr_enable = mlx5_rx_intr_enable, 2324b012b4ceSOphir Munk .rx_queue_intr_disable = mlx5_rx_intr_disable, 2325b012b4ceSOphir Munk .is_removed = mlx5_is_removed, 2326b012b4ceSOphir Munk .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, 2327b012b4ceSOphir Munk .get_module_info = mlx5_get_module_info, 2328b012b4ceSOphir Munk .get_module_eeprom = mlx5_get_module_eeprom, 2329b012b4ceSOphir Munk .hairpin_cap_get = mlx5_hairpin_cap_get, 2330b012b4ceSOphir Munk .mtr_ops_get = mlx5_flow_meter_ops_get, 2331b012b4ceSOphir Munk .hairpin_bind = mlx5_hairpin_bind, 2332b012b4ceSOphir Munk .hairpin_unbind = mlx5_hairpin_unbind, 2333b012b4ceSOphir Munk .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, 2334b012b4ceSOphir Munk .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, 2335b012b4ceSOphir Munk .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, 2336b012b4ceSOphir Munk .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, 2337a8f0df6bSAlexander Kozyrev .get_monitor_addr = mlx5_get_monitor_addr, 2338b012b4ceSOphir Munk }; 2339b012b4ceSOphir Munk 2340b012b4ceSOphir Munk /* Available operations from secondary process. */ 2341b012b4ceSOphir Munk const struct eth_dev_ops mlx5_dev_sec_ops = { 2342b012b4ceSOphir Munk .stats_get = mlx5_stats_get, 2343b012b4ceSOphir Munk .stats_reset = mlx5_stats_reset, 2344b012b4ceSOphir Munk .xstats_get = mlx5_xstats_get, 2345b012b4ceSOphir Munk .xstats_reset = mlx5_xstats_reset, 2346b012b4ceSOphir Munk .xstats_get_names = mlx5_xstats_get_names, 2347b012b4ceSOphir Munk .fw_version_get = mlx5_fw_version_get, 2348b012b4ceSOphir Munk .dev_infos_get = mlx5_dev_infos_get, 234992d16c83SXueming Li .representor_info_get = mlx5_representor_info_get, 2350b012b4ceSOphir Munk .read_clock = mlx5_txpp_read_clock, 2351b012b4ceSOphir Munk .rx_queue_start = mlx5_rx_queue_start, 2352b012b4ceSOphir Munk .rx_queue_stop = mlx5_rx_queue_stop, 2353b012b4ceSOphir Munk .tx_queue_start = mlx5_tx_queue_start, 2354b012b4ceSOphir Munk .tx_queue_stop = mlx5_tx_queue_stop, 2355b012b4ceSOphir Munk .rxq_info_get = mlx5_rxq_info_get, 2356b012b4ceSOphir Munk .txq_info_get = mlx5_txq_info_get, 2357b012b4ceSOphir Munk .rx_burst_mode_get = mlx5_rx_burst_mode_get, 2358b012b4ceSOphir Munk .tx_burst_mode_get = mlx5_tx_burst_mode_get, 2359b012b4ceSOphir Munk .get_module_info = mlx5_get_module_info, 2360b012b4ceSOphir Munk .get_module_eeprom = mlx5_get_module_eeprom, 2361b012b4ceSOphir Munk }; 2362b012b4ceSOphir Munk 2363b012b4ceSOphir Munk /* Available operations in flow isolated mode. 
*/ 2364b012b4ceSOphir Munk const struct eth_dev_ops mlx5_dev_ops_isolate = { 2365b012b4ceSOphir Munk .dev_configure = mlx5_dev_configure, 2366b012b4ceSOphir Munk .dev_start = mlx5_dev_start, 2367b012b4ceSOphir Munk .dev_stop = mlx5_dev_stop, 2368b012b4ceSOphir Munk .dev_set_link_down = mlx5_set_link_down, 2369b012b4ceSOphir Munk .dev_set_link_up = mlx5_set_link_up, 2370b012b4ceSOphir Munk .dev_close = mlx5_dev_close, 2371b012b4ceSOphir Munk .promiscuous_enable = mlx5_promiscuous_enable, 2372b012b4ceSOphir Munk .promiscuous_disable = mlx5_promiscuous_disable, 2373b012b4ceSOphir Munk .allmulticast_enable = mlx5_allmulticast_enable, 2374b012b4ceSOphir Munk .allmulticast_disable = mlx5_allmulticast_disable, 2375b012b4ceSOphir Munk .link_update = mlx5_link_update, 2376b012b4ceSOphir Munk .stats_get = mlx5_stats_get, 2377b012b4ceSOphir Munk .stats_reset = mlx5_stats_reset, 2378b012b4ceSOphir Munk .xstats_get = mlx5_xstats_get, 2379b012b4ceSOphir Munk .xstats_reset = mlx5_xstats_reset, 2380b012b4ceSOphir Munk .xstats_get_names = mlx5_xstats_get_names, 2381b012b4ceSOphir Munk .fw_version_get = mlx5_fw_version_get, 2382b012b4ceSOphir Munk .dev_infos_get = mlx5_dev_infos_get, 238392d16c83SXueming Li .representor_info_get = mlx5_representor_info_get, 2384b012b4ceSOphir Munk .read_clock = mlx5_txpp_read_clock, 2385b012b4ceSOphir Munk .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 2386b012b4ceSOphir Munk .vlan_filter_set = mlx5_vlan_filter_set, 2387b012b4ceSOphir Munk .rx_queue_setup = mlx5_rx_queue_setup, 2388b012b4ceSOphir Munk .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 2389b012b4ceSOphir Munk .tx_queue_setup = mlx5_tx_queue_setup, 2390b012b4ceSOphir Munk .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 2391b012b4ceSOphir Munk .rx_queue_release = mlx5_rx_queue_release, 2392b012b4ceSOphir Munk .tx_queue_release = mlx5_tx_queue_release, 2393b012b4ceSOphir Munk .rx_queue_start = mlx5_rx_queue_start, 2394b012b4ceSOphir Munk .rx_queue_stop = mlx5_rx_queue_stop, 2395b012b4ceSOphir Munk .tx_queue_start = mlx5_tx_queue_start, 2396b012b4ceSOphir Munk .tx_queue_stop = mlx5_tx_queue_stop, 2397b012b4ceSOphir Munk .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 2398b012b4ceSOphir Munk .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 2399b012b4ceSOphir Munk .mac_addr_remove = mlx5_mac_addr_remove, 2400b012b4ceSOphir Munk .mac_addr_add = mlx5_mac_addr_add, 2401b012b4ceSOphir Munk .mac_addr_set = mlx5_mac_addr_set, 2402b012b4ceSOphir Munk .set_mc_addr_list = mlx5_set_mc_addr_list, 2403b012b4ceSOphir Munk .mtu_set = mlx5_dev_set_mtu, 2404b012b4ceSOphir Munk .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 2405b012b4ceSOphir Munk .vlan_offload_set = mlx5_vlan_offload_set, 2406fb7ad441SThomas Monjalon .flow_ops_get = mlx5_flow_ops_get, 2407b012b4ceSOphir Munk .rxq_info_get = mlx5_rxq_info_get, 2408b012b4ceSOphir Munk .txq_info_get = mlx5_txq_info_get, 2409b012b4ceSOphir Munk .rx_burst_mode_get = mlx5_rx_burst_mode_get, 2410b012b4ceSOphir Munk .tx_burst_mode_get = mlx5_tx_burst_mode_get, 2411b012b4ceSOphir Munk .rx_queue_intr_enable = mlx5_rx_intr_enable, 2412b012b4ceSOphir Munk .rx_queue_intr_disable = mlx5_rx_intr_disable, 2413b012b4ceSOphir Munk .is_removed = mlx5_is_removed, 2414b012b4ceSOphir Munk .get_module_info = mlx5_get_module_info, 2415b012b4ceSOphir Munk .get_module_eeprom = mlx5_get_module_eeprom, 2416b012b4ceSOphir Munk .hairpin_cap_get = mlx5_hairpin_cap_get, 2417b012b4ceSOphir Munk .mtr_ops_get = mlx5_flow_meter_ops_get, 2418b012b4ceSOphir Munk .hairpin_bind = mlx5_hairpin_bind, 
2419b012b4ceSOphir Munk .hairpin_unbind = mlx5_hairpin_unbind,
2420b012b4ceSOphir Munk .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2421b012b4ceSOphir Munk .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2422b012b4ceSOphir Munk .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2423b012b4ceSOphir Munk .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2424a8f0df6bSAlexander Kozyrev .get_monitor_addr = mlx5_get_monitor_addr,
2425b012b4ceSOphir Munk };
2426b012b4ceSOphir Munk 
2427e72dd09bSNélio Laranjeiro /**
2428e72dd09bSNélio Laranjeiro * Verify and store value for device argument.
2429e72dd09bSNélio Laranjeiro *
2430e72dd09bSNélio Laranjeiro * @param[in] key
2431e72dd09bSNélio Laranjeiro * Key argument to verify.
2432e72dd09bSNélio Laranjeiro * @param[in] val
2433e72dd09bSNélio Laranjeiro * Value associated with key.
2434e72dd09bSNélio Laranjeiro * @param opaque
2435e72dd09bSNélio Laranjeiro * User data.
2436e72dd09bSNélio Laranjeiro *
2437e72dd09bSNélio Laranjeiro * @return
2438a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set.
2439e72dd09bSNélio Laranjeiro */
2440e72dd09bSNélio Laranjeiro static int
244145a6df80SMichael Baum mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
2442e72dd09bSNélio Laranjeiro {
244345a6df80SMichael Baum struct mlx5_port_config *config = opaque;
24448f848f32SViacheslav Ovsiienko signed long tmp;
2445e72dd09bSNélio Laranjeiro 
24466de569f5SAdrien Mazarguil /* No-op, port representors are processed in mlx5_dev_spawn(). */
2447a729d2f0SMichael Baum if (!strcmp(MLX5_REPRESENTOR, key))
24486de569f5SAdrien Mazarguil return 0;
244999c12dccSNélio Laranjeiro errno = 0;
24508f848f32SViacheslav Ovsiienko tmp = strtol(val, NULL, 0);
245199c12dccSNélio Laranjeiro if (errno) {
2452a6d83b6aSNélio Laranjeiro rte_errno = errno;
2453a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
2454a6d83b6aSNélio Laranjeiro return -rte_errno;
245599c12dccSNélio Laranjeiro }
2456a13ec19cSMichael Baum if (tmp < 0) {
24578f848f32SViacheslav Ovsiienko /* Negative values are not acceptable for any port argument.
*/ 24588f848f32SViacheslav Ovsiienko rte_errno = EINVAL; 24598f848f32SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val); 24608f848f32SViacheslav Ovsiienko return -rte_errno; 24618f848f32SViacheslav Ovsiienko } 246299c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 246354c2d46bSAlexander Kozyrev if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) { 246454c2d46bSAlexander Kozyrev DRV_LOG(ERR, "invalid CQE compression " 246554c2d46bSAlexander Kozyrev "format parameter"); 246654c2d46bSAlexander Kozyrev rte_errno = EINVAL; 246754c2d46bSAlexander Kozyrev return -rte_errno; 246854c2d46bSAlexander Kozyrev } 24697fe24446SShahaf Shuler config->cqe_comp = !!tmp; 247054c2d46bSAlexander Kozyrev config->cqe_comp_fmt = tmp; 247178c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 247278c7a16dSYongseok Koh config->hw_padding = !!tmp; 24737d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 24747d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 24757d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 24760947ed38SMichael Baum config->mprq.log_stride_num = tmp; 2477ecb16045SAlexander Kozyrev } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) { 24780947ed38SMichael Baum config->mprq.log_stride_size = tmp; 24797d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 24807d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 24817d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 24827d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 24832a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 2484505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 2485505f1fe4SViacheslav Ovsiienko " converted to txq_inline_max", key); 2486505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 2487505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { 2488505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 2489505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { 2490505f1fe4SViacheslav Ovsiienko config->txq_inline_min = tmp; 2491505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { 2492505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 24932a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 24947fe24446SShahaf Shuler config->txqs_inline = tmp; 249509d8b416SYongseok Koh } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 2496a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 2497230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 2498f9de8718SShahaf Shuler config->mps = !!tmp; 24996ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 2500a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 25016ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 2502505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 2503505f1fe4SViacheslav Ovsiienko " converted to txq_inline_mpw", key); 2504505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 25055644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 2506a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 25075644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { 25087fe24446SShahaf Shuler config->rx_vec_en = 
!!tmp;
2509066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
2510066cfecdSMatan Azrad config->max_dump_files_num = tmp;
251121bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
251287af0d1eSMichael Baum config->lro_timeout = tmp;
25131ad9a3d0SBing Zhao } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
25141ad9a3d0SBing Zhao config->log_hp_size = tmp;
2515febcac7bSBing Zhao } else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
2516ce78c518SBing Zhao config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
2517ce78c518SBing Zhao config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
2518e72dd09bSNélio Laranjeiro }
251999c12dccSNélio Laranjeiro return 0;
252099c12dccSNélio Laranjeiro }
2521e72dd09bSNélio Laranjeiro 
2522e72dd09bSNélio Laranjeiro /**
252345a6df80SMichael Baum * Parse user port parameters and adjust them according to device capabilities.
2524e72dd09bSNélio Laranjeiro *
252545a6df80SMichael Baum * @param priv
252645a6df80SMichael Baum * Pointer to the port private data structure.
2527a729d2f0SMichael Baum * @param mkvlist
2528a729d2f0SMichael Baum * Pointer to mlx5 kvargs control, can be NULL if there are no devargs.
252945a6df80SMichael Baum * @param config
253045a6df80SMichael Baum * Pointer to port configuration structure.
2531e72dd09bSNélio Laranjeiro *
2532e72dd09bSNélio Laranjeiro * @return
2533a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set.
2534e72dd09bSNélio Laranjeiro */
25352eb4d010SOphir Munk int
2536a729d2f0SMichael Baum mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
253745a6df80SMichael Baum struct mlx5_port_config *config)
2538e72dd09bSNélio Laranjeiro {
253945a6df80SMichael Baum struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
254045a6df80SMichael Baum struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
254145a6df80SMichael Baum bool devx = priv->sh->cdev->config.devx;
2542a729d2f0SMichael Baum const char **params = (const char *[]){
2543a729d2f0SMichael Baum MLX5_RXQ_CQE_COMP_EN,
2544a729d2f0SMichael Baum MLX5_RXQ_PKT_PAD_EN,
2545a729d2f0SMichael Baum MLX5_RX_MPRQ_EN,
2546a729d2f0SMichael Baum MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2547a729d2f0SMichael Baum MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2548a729d2f0SMichael Baum MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2549a729d2f0SMichael Baum MLX5_RXQS_MIN_MPRQ,
2550a729d2f0SMichael Baum MLX5_TXQ_INLINE,
2551a729d2f0SMichael Baum MLX5_TXQ_INLINE_MIN,
2552a729d2f0SMichael Baum MLX5_TXQ_INLINE_MAX,
2553a729d2f0SMichael Baum MLX5_TXQ_INLINE_MPW,
2554a729d2f0SMichael Baum MLX5_TXQS_MIN_INLINE,
2555a729d2f0SMichael Baum MLX5_TXQS_MAX_VEC,
2556a729d2f0SMichael Baum MLX5_TXQ_MPW_EN,
2557a729d2f0SMichael Baum MLX5_TXQ_MPW_HDR_DSEG_EN,
2558a729d2f0SMichael Baum MLX5_TXQ_MAX_INLINE_LEN,
2559a729d2f0SMichael Baum MLX5_TX_VEC_EN,
2560a729d2f0SMichael Baum MLX5_RX_VEC_EN,
2561a729d2f0SMichael Baum MLX5_REPRESENTOR,
2562a729d2f0SMichael Baum MLX5_MAX_DUMP_FILES_NUM,
2563a729d2f0SMichael Baum MLX5_LRO_TIMEOUT_USEC,
2564a729d2f0SMichael Baum MLX5_HP_BUF_SIZE,
2565a729d2f0SMichael Baum MLX5_DELAY_DROP,
2566a729d2f0SMichael Baum NULL,
2567a729d2f0SMichael Baum };
2568e72dd09bSNélio Laranjeiro int ret = 0;
2569e72dd09bSNélio Laranjeiro 
257045a6df80SMichael Baum /* Default configuration. 
*/ 257145a6df80SMichael Baum memset(config, 0, sizeof(*config)); 257245a6df80SMichael Baum config->mps = MLX5_ARG_UNSET; 257345a6df80SMichael Baum config->cqe_comp = 1; 257445a6df80SMichael Baum config->rx_vec_en = 1; 257545a6df80SMichael Baum config->txq_inline_max = MLX5_ARG_UNSET; 257645a6df80SMichael Baum config->txq_inline_min = MLX5_ARG_UNSET; 257745a6df80SMichael Baum config->txq_inline_mpw = MLX5_ARG_UNSET; 257845a6df80SMichael Baum config->txqs_inline = MLX5_ARG_UNSET; 257945a6df80SMichael Baum config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; 258045a6df80SMichael Baum config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; 258145a6df80SMichael Baum config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; 258245a6df80SMichael Baum config->log_hp_size = MLX5_ARG_UNSET; 258345a6df80SMichael Baum config->std_delay_drop = 0; 258445a6df80SMichael Baum config->hp_delay_drop = 0; 2585a729d2f0SMichael Baum if (mkvlist != NULL) { 2586e72dd09bSNélio Laranjeiro /* Process parameters. */ 2587a729d2f0SMichael Baum ret = mlx5_kvargs_process(mkvlist, params, 258845a6df80SMichael Baum mlx5_port_args_check_handler, config); 258945a6df80SMichael Baum if (ret) { 259045a6df80SMichael Baum DRV_LOG(ERR, "Failed to process port arguments: %s", 259145a6df80SMichael Baum strerror(rte_errno)); 259245a6df80SMichael Baum return -rte_errno; 259345a6df80SMichael Baum } 259445a6df80SMichael Baum } 259545a6df80SMichael Baum /* Adjust parameters according to device capabilities. */ 259645a6df80SMichael Baum if (config->hw_padding && !dev_cap->hw_padding) { 259745a6df80SMichael Baum DRV_LOG(DEBUG, "Rx end alignment padding isn't supported."); 259845a6df80SMichael Baum config->hw_padding = 0; 259945a6df80SMichael Baum } else if (config->hw_padding) { 260045a6df80SMichael Baum DRV_LOG(DEBUG, "Rx end alignment padding is enabled."); 260145a6df80SMichael Baum } 260245a6df80SMichael Baum /* 260345a6df80SMichael Baum * MPW is disabled by default, while the Enhanced MPW is enabled 260445a6df80SMichael Baum * by default. 260545a6df80SMichael Baum */ 260645a6df80SMichael Baum if (config->mps == MLX5_ARG_UNSET) 260745a6df80SMichael Baum config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ? 260845a6df80SMichael Baum MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED; 260945a6df80SMichael Baum else 261045a6df80SMichael Baum config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED; 261145a6df80SMichael Baum DRV_LOG(INFO, "%sMPS is %s", 261245a6df80SMichael Baum config->mps == MLX5_MPW_ENHANCED ? "enhanced " : 261345a6df80SMichael Baum config->mps == MLX5_MPW ? "legacy " : "", 261445a6df80SMichael Baum config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 2615593f913aSMichael Baum if (priv->sh->config.lro_allowed) { 261645a6df80SMichael Baum /* 261745a6df80SMichael Baum * If LRO timeout is not configured by application, 261845a6df80SMichael Baum * use the minimal supported value. 
261945a6df80SMichael Baum */
262045a6df80SMichael Baum if (!config->lro_timeout)
262145a6df80SMichael Baum config->lro_timeout =
262245a6df80SMichael Baum hca_attr->lro_timer_supported_periods[0];
262345a6df80SMichael Baum DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
262445a6df80SMichael Baum config->lro_timeout);
262545a6df80SMichael Baum }
262645a6df80SMichael Baum if (config->cqe_comp && !dev_cap->cqe_comp) {
262745a6df80SMichael Baum DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
262845a6df80SMichael Baum config->cqe_comp = 0;
262945a6df80SMichael Baum }
263045a6df80SMichael Baum if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
263145a6df80SMichael Baum (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
263245a6df80SMichael Baum DRV_LOG(WARNING,
263345a6df80SMichael Baum "Flow Tag CQE compression format isn't supported.");
263445a6df80SMichael Baum config->cqe_comp = 0;
263545a6df80SMichael Baum }
263645a6df80SMichael Baum if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
263745a6df80SMichael Baum (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
263845a6df80SMichael Baum DRV_LOG(WARNING,
263945a6df80SMichael Baum "L3/L4 Header CQE compression format isn't supported.");
264045a6df80SMichael Baum config->cqe_comp = 0;
264145a6df80SMichael Baum }
264245a6df80SMichael Baum DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
264345a6df80SMichael Baum config->cqe_comp ? "" : "not ");
264445a6df80SMichael Baum if ((config->std_delay_drop || config->hp_delay_drop) &&
264545a6df80SMichael Baum !dev_cap->rq_delay_drop_en) {
264645a6df80SMichael Baum config->std_delay_drop = 0;
264745a6df80SMichael Baum config->hp_delay_drop = 0;
264845a6df80SMichael Baum DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
264945a6df80SMichael Baum priv->dev_port);
265045a6df80SMichael Baum }
265145a6df80SMichael Baum if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
265245a6df80SMichael Baum DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
265345a6df80SMichael Baum config->mprq.enabled = 0;
265445a6df80SMichael Baum }
265545a6df80SMichael Baum if (config->max_dump_files_num == 0)
265645a6df80SMichael Baum config->max_dump_files_num = 128;
265745a6df80SMichael Baum /* Detect minimal data bytes to inline. */
265845a6df80SMichael Baum mlx5_set_min_inline(priv);
265945a6df80SMichael Baum DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
266045a6df80SMichael Baum config->hw_vlan_insert ? "" : "not ");
266145a6df80SMichael Baum DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
266245a6df80SMichael Baum DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
266345a6df80SMichael Baum DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
266445a6df80SMichael Baum DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
266545a6df80SMichael Baum DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
266645a6df80SMichael Baum config->std_delay_drop);
266745a6df80SMichael Baum DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
266845a6df80SMichael Baum DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
266945a6df80SMichael Baum config->max_dump_files_num);
267045a6df80SMichael Baum DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
267145a6df80SMichael Baum DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
267245a6df80SMichael Baum DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
267345a6df80SMichael Baum config->mprq.log_stride_num);
267445a6df80SMichael Baum DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
267545a6df80SMichael Baum config->mprq.log_stride_size);
267645a6df80SMichael Baum DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
267745a6df80SMichael Baum config->mprq.max_memcpy_len);
267845a6df80SMichael Baum DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
267945a6df80SMichael Baum DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
268045a6df80SMichael Baum DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
268145a6df80SMichael Baum DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
268245a6df80SMichael Baum DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
268345a6df80SMichael Baum DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
268445a6df80SMichael Baum DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
268545a6df80SMichael Baum return 0;
2686e72dd09bSNélio Laranjeiro }
2687e72dd09bSNélio Laranjeiro 
26887be600c8SYongseok Koh /**
2689a729d2f0SMichael Baum * Print the key for device argument.
2690a729d2f0SMichael Baum *
2691a729d2f0SMichael Baum * It is a "dummy" handler whose whole purpose is to enable using the
2692a729d2f0SMichael Baum * mlx5_kvargs_process() function, which marks devargs as used.
2693a729d2f0SMichael Baum *
2694a729d2f0SMichael Baum * @param key
2695a729d2f0SMichael Baum * Key argument.
2696a729d2f0SMichael Baum * @param val
2697a729d2f0SMichael Baum * Value associated with key, unused.
2698a729d2f0SMichael Baum * @param opaque
2699a729d2f0SMichael Baum * Unused, can be NULL.
2700a729d2f0SMichael Baum *
2701a729d2f0SMichael Baum * @return
2702a729d2f0SMichael Baum * 0 on success, function cannot fail.
2703a729d2f0SMichael Baum */
2704a729d2f0SMichael Baum static int
2705a729d2f0SMichael Baum mlx5_dummy_handler(const char *key, const char *val, void *opaque)
2706a729d2f0SMichael Baum {
2707a729d2f0SMichael Baum DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
2708a729d2f0SMichael Baum RTE_SET_USED(opaque);
2709a729d2f0SMichael Baum RTE_SET_USED(val);
2710a729d2f0SMichael Baum return 0;
2711a729d2f0SMichael Baum }
2712a729d2f0SMichael Baum 
2713a729d2f0SMichael Baum /**
2714a729d2f0SMichael Baum * Set requested devargs as used when the device is already spawned.
2715a729d2f0SMichael Baum *
2716a729d2f0SMichael Baum * It is necessary because it is valid to probe an existing device again;
2717a729d2f0SMichael Baum * if its devargs are not marked as used, mlx5_kvargs_validate() will fail.
2718a729d2f0SMichael Baum *
2719a729d2f0SMichael Baum * @param name
2720a729d2f0SMichael Baum * Name of the existing device.
2721a729d2f0SMichael Baum * @param port_id
2722a729d2f0SMichael Baum * Port identifier of the device.
2723a729d2f0SMichael Baum * @param mkvlist
2724a729d2f0SMichael Baum * Pointer to mlx5 kvargs control to mark as used.
2725a729d2f0SMichael Baum */
2726a729d2f0SMichael Baum void
2727a729d2f0SMichael Baum mlx5_port_args_set_used(const char *name, uint16_t port_id,
2728a729d2f0SMichael Baum struct mlx5_kvargs_ctrl *mkvlist)
2729a729d2f0SMichael Baum {
2730a729d2f0SMichael Baum const char **params = (const char *[]){
2731a729d2f0SMichael Baum MLX5_RXQ_CQE_COMP_EN,
2732a729d2f0SMichael Baum MLX5_RXQ_PKT_PAD_EN,
2733a729d2f0SMichael Baum MLX5_RX_MPRQ_EN,
2734a729d2f0SMichael Baum MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2735a729d2f0SMichael Baum MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2736a729d2f0SMichael Baum MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2737a729d2f0SMichael Baum MLX5_RXQS_MIN_MPRQ,
2738a729d2f0SMichael Baum MLX5_TXQ_INLINE,
2739a729d2f0SMichael Baum MLX5_TXQ_INLINE_MIN,
2740a729d2f0SMichael Baum MLX5_TXQ_INLINE_MAX,
2741a729d2f0SMichael Baum MLX5_TXQ_INLINE_MPW,
2742a729d2f0SMichael Baum MLX5_TXQS_MIN_INLINE,
2743a729d2f0SMichael Baum MLX5_TXQS_MAX_VEC,
2744a729d2f0SMichael Baum MLX5_TXQ_MPW_EN,
2745a729d2f0SMichael Baum MLX5_TXQ_MPW_HDR_DSEG_EN,
2746a729d2f0SMichael Baum MLX5_TXQ_MAX_INLINE_LEN,
2747a729d2f0SMichael Baum MLX5_TX_VEC_EN,
2748a729d2f0SMichael Baum MLX5_RX_VEC_EN,
2749a729d2f0SMichael Baum MLX5_REPRESENTOR,
2750a729d2f0SMichael Baum MLX5_MAX_DUMP_FILES_NUM,
2751a729d2f0SMichael Baum MLX5_LRO_TIMEOUT_USEC,
2752a729d2f0SMichael Baum MLX5_HP_BUF_SIZE,
2753a729d2f0SMichael Baum MLX5_DELAY_DROP,
2754a729d2f0SMichael Baum NULL,
2755a729d2f0SMichael Baum };
2756a729d2f0SMichael Baum 
2757a729d2f0SMichael Baum /* Secondary process should not handle devargs. */
2758a729d2f0SMichael Baum if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2759a729d2f0SMichael Baum return;
2760a729d2f0SMichael Baum MLX5_ASSERT(mkvlist != NULL);
2761a729d2f0SMichael Baum DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
2762a729d2f0SMichael Baum "already exists, marking its devargs as used:", name, port_id);
2763a729d2f0SMichael Baum /* This function cannot fail with this handler. */
2764a729d2f0SMichael Baum mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
2765a729d2f0SMichael Baum }
2766a729d2f0SMichael Baum 
2767a729d2f0SMichael Baum /**
2768a13ec19cSMichael Baum * Check sibling device configurations when probing again.
2769a13ec19cSMichael Baum *
2770a13ec19cSMichael Baum * Sibling devices sharing an Infiniband device context should have compatible
2771a13ec19cSMichael Baum * configurations. This applies to representors and bonding devices.
2772a13ec19cSMichael Baum *
2773a13ec19cSMichael Baum * @param cdev
2774a13ec19cSMichael Baum * Pointer to mlx5 device structure.
2775a729d2f0SMichael Baum * @param mkvlist
2776a729d2f0SMichael Baum * Pointer to mlx5 kvargs control, can be NULL if there are no devargs.
2777a13ec19cSMichael Baum *
2778a13ec19cSMichael Baum * @return
2779a13ec19cSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set.
2780a13ec19cSMichael Baum */
2781a13ec19cSMichael Baum int
2782a729d2f0SMichael Baum mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
2783a729d2f0SMichael Baum struct mlx5_kvargs_ctrl *mkvlist)
2784a13ec19cSMichael Baum {
2785a13ec19cSMichael Baum struct mlx5_dev_ctx_shared *sh = NULL;
2786a13ec19cSMichael Baum struct mlx5_sh_config *config;
2787a13ec19cSMichael Baum int ret;
2788a13ec19cSMichael Baum 
2789a13ec19cSMichael Baum /* Secondary process should not handle devargs. */
2790a13ec19cSMichael Baum if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2791a13ec19cSMichael Baum return 0;
2792a13ec19cSMichael Baum pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
2793a13ec19cSMichael Baum /* Search for IB context by common device pointer. */
2794a13ec19cSMichael Baum LIST_FOREACH(sh, &mlx5_dev_ctx_list, next)
2795a13ec19cSMichael Baum if (sh->cdev == cdev)
2796a13ec19cSMichael Baum break;
2797a13ec19cSMichael Baum pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
2798a13ec19cSMichael Baum /* There is no sh for this device -> it isn't a probe again. */
2799a13ec19cSMichael Baum if (sh == NULL)
2800a13ec19cSMichael Baum return 0;
2801a13ec19cSMichael Baum config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
2802a13ec19cSMichael Baum sizeof(struct mlx5_sh_config),
2803a13ec19cSMichael Baum RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
2804a13ec19cSMichael Baum if (config == NULL) {
2805a13ec19cSMichael Baum rte_errno = ENOMEM;
2806a13ec19cSMichael Baum return -rte_errno;
2807a13ec19cSMichael Baum }
2808a13ec19cSMichael Baum /*
2809a13ec19cSMichael Baum * Create a temporary IB context configuration structure from the new
2810a13ec19cSMichael Baum * devargs attached when probing again.
2811a13ec19cSMichael Baum */
2812a729d2f0SMichael Baum ret = mlx5_shared_dev_ctx_args_config(sh, mkvlist, config);
2813a13ec19cSMichael Baum if (ret) {
2814a13ec19cSMichael Baum DRV_LOG(ERR, "Failed to process device configuration: %s",
2815a13ec19cSMichael Baum strerror(rte_errno));
2816a13ec19cSMichael Baum mlx5_free(config);
2817a13ec19cSMichael Baum return ret;
2818a13ec19cSMichael Baum }
2819a13ec19cSMichael Baum /*
2820a13ec19cSMichael Baum * Check that the temporary structure matches the existing
2821a13ec19cSMichael Baum * IB context structure.
2822a13ec19cSMichael Baum */ 2823a13ec19cSMichael Baum if (sh->config.dv_flow_en ^ config->dv_flow_en) { 2824a13ec19cSMichael Baum DRV_LOG(ERR, "\"dv_flow_en\" " 2825a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2826a13ec19cSMichael Baum sh->ibdev_name); 2827a13ec19cSMichael Baum goto error; 2828a13ec19cSMichael Baum } 2829a13ec19cSMichael Baum if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) || 2830a13ec19cSMichael Baum (sh->config.dv_miss_info ^ config->dv_miss_info)) { 2831a13ec19cSMichael Baum DRV_LOG(ERR, "\"dv_xmeta_en\" " 2832a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2833a13ec19cSMichael Baum sh->ibdev_name); 2834a13ec19cSMichael Baum goto error; 2835a13ec19cSMichael Baum } 2836a13ec19cSMichael Baum if (sh->config.dv_esw_en ^ config->dv_esw_en) { 2837a13ec19cSMichael Baum DRV_LOG(ERR, "\"dv_esw_en\" " 2838a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2839a13ec19cSMichael Baum sh->ibdev_name); 2840a13ec19cSMichael Baum goto error; 2841a13ec19cSMichael Baum } 2842a13ec19cSMichael Baum if (sh->config.reclaim_mode ^ config->reclaim_mode) { 2843a13ec19cSMichael Baum DRV_LOG(ERR, "\"reclaim_mode\" " 2844a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2845a13ec19cSMichael Baum sh->ibdev_name); 2846a13ec19cSMichael Baum goto error; 2847a13ec19cSMichael Baum } 2848a13ec19cSMichael Baum if (sh->config.allow_duplicate_pattern ^ 2849a13ec19cSMichael Baum config->allow_duplicate_pattern) { 2850a13ec19cSMichael Baum DRV_LOG(ERR, "\"allow_duplicate_pattern\" " 2851a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2852a13ec19cSMichael Baum sh->ibdev_name); 2853a13ec19cSMichael Baum goto error; 2854a13ec19cSMichael Baum } 28551939eb6fSDariusz Sosnowski if (sh->config.fdb_def_rule ^ config->fdb_def_rule) { 28561939eb6fSDariusz Sosnowski DRV_LOG(ERR, "\"fdb_def_rule_en\" configuration mismatch for shared %s context.", 28571939eb6fSDariusz Sosnowski sh->ibdev_name); 28581939eb6fSDariusz Sosnowski goto error; 28591939eb6fSDariusz Sosnowski } 2860a13ec19cSMichael Baum if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) { 2861a13ec19cSMichael Baum DRV_LOG(ERR, "\"l3_vxlan_en\" " 2862a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2863a13ec19cSMichael Baum sh->ibdev_name); 2864a13ec19cSMichael Baum goto error; 2865a13ec19cSMichael Baum } 2866a13ec19cSMichael Baum if (sh->config.decap_en ^ config->decap_en) { 2867a13ec19cSMichael Baum DRV_LOG(ERR, "\"decap_en\" " 2868a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2869a13ec19cSMichael Baum sh->ibdev_name); 2870a13ec19cSMichael Baum goto error; 2871a13ec19cSMichael Baum } 2872a13ec19cSMichael Baum if (sh->config.lacp_by_user ^ config->lacp_by_user) { 2873a13ec19cSMichael Baum DRV_LOG(ERR, "\"lacp_by_user\" " 2874a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2875a13ec19cSMichael Baum sh->ibdev_name); 2876a13ec19cSMichael Baum goto error; 2877a13ec19cSMichael Baum } 2878a13ec19cSMichael Baum if (sh->config.tx_pp ^ config->tx_pp) { 2879a13ec19cSMichael Baum DRV_LOG(ERR, "\"tx_pp\" " 2880a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 2881a13ec19cSMichael Baum sh->ibdev_name); 2882a13ec19cSMichael Baum goto error; 2883a13ec19cSMichael Baum } 2884a13ec19cSMichael Baum if (sh->config.tx_skew ^ config->tx_skew) { 2885a13ec19cSMichael Baum DRV_LOG(ERR, "\"tx_skew\" " 2886a13ec19cSMichael Baum "configuration mismatch for shared %s context.", 
2887a13ec19cSMichael Baum sh->ibdev_name);
2888a13ec19cSMichael Baum goto error;
2889a13ec19cSMichael Baum }
2890a13ec19cSMichael Baum mlx5_free(config);
2891a13ec19cSMichael Baum return 0;
2892a13ec19cSMichael Baum error:
2893a13ec19cSMichael Baum mlx5_free(config);
2894a13ec19cSMichael Baum rte_errno = EINVAL;
2895a13ec19cSMichael Baum return -rte_errno;
2896a13ec19cSMichael Baum }
2897a13ec19cSMichael Baum 
2898a13ec19cSMichael Baum /**
289938b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE
290038b4b397SViacheslav Ovsiienko * while sending packets.
290138b4b397SViacheslav Ovsiienko *
290238b4b397SViacheslav Ovsiienko * - the txq_inline_min key has the highest priority if it is
290338b4b397SViacheslav Ovsiienko * specified in devargs
290438b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the
290538b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed).
2906ee76bddcSThomas Monjalon * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
290738b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs
290838b4b397SViacheslav Ovsiienko *
290945a6df80SMichael Baum * @param priv
291045a6df80SMichael Baum * Pointer to the private device data structure.
291138b4b397SViacheslav Ovsiienko */
29122eb4d010SOphir Munk void
291345a6df80SMichael Baum mlx5_set_min_inline(struct mlx5_priv *priv)
291438b4b397SViacheslav Ovsiienko {
291545a6df80SMichael Baum struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
291645a6df80SMichael Baum struct mlx5_port_config *config = &priv->config;
291753820561SMichael Baum 
291838b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) {
291938b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. */
292045a6df80SMichael Baum if (priv->pci_dev != NULL) {
292145a6df80SMichael Baum switch (priv->pci_dev->id.device_id) {
292238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
292338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
292438b4b397SViacheslav Ovsiienko if (config->txq_inline_min <
292538b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) {
292638b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG,
292756bb3c84SXueming Li "txq_inline_min aligned to minimal ConnectX-4 required value %d",
292838b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2);
292956bb3c84SXueming Li config->txq_inline_min =
293056bb3c84SXueming Li MLX5_INLINE_HSIZE_L2;
293138b4b397SViacheslav Ovsiienko }
293238b4b397SViacheslav Ovsiienko break;
293338b4b397SViacheslav Ovsiienko }
293456bb3c84SXueming Li }
293538b4b397SViacheslav Ovsiienko goto exit;
293638b4b397SViacheslav Ovsiienko }
293753820561SMichael Baum if (hca_attr->eth_net_offloads) {
293838b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */
293953820561SMichael Baum switch (hca_attr->wqe_inline_mode) {
294038b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2:
294138b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */
294238b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
294338b4b397SViacheslav Ovsiienko goto exit;
294438b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
294538b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
*/ 294638b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 294738b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 294853820561SMichael Baum hca_attr->wqe_vlan_insert; 294938b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 295038b4b397SViacheslav Ovsiienko goto exit; 295138b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 295238b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 295353820561SMichael Baum if (!hca_attr->eth_virt) 295438b4b397SViacheslav Ovsiienko break; 295553820561SMichael Baum switch (hca_attr->vport_inline_mode) { 295638b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 295738b4b397SViacheslav Ovsiienko config->txq_inline_min = 295838b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 295938b4b397SViacheslav Ovsiienko goto exit; 296038b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 296138b4b397SViacheslav Ovsiienko config->txq_inline_min = 296238b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 296338b4b397SViacheslav Ovsiienko goto exit; 296438b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 296538b4b397SViacheslav Ovsiienko config->txq_inline_min = 296638b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 296738b4b397SViacheslav Ovsiienko goto exit; 296838b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 296938b4b397SViacheslav Ovsiienko config->txq_inline_min = 297038b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 297138b4b397SViacheslav Ovsiienko goto exit; 297238b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 297338b4b397SViacheslav Ovsiienko config->txq_inline_min = 297438b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 297538b4b397SViacheslav Ovsiienko goto exit; 297638b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 297738b4b397SViacheslav Ovsiienko config->txq_inline_min = 297838b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 297938b4b397SViacheslav Ovsiienko goto exit; 298038b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 298138b4b397SViacheslav Ovsiienko config->txq_inline_min = 298238b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 298338b4b397SViacheslav Ovsiienko goto exit; 298438b4b397SViacheslav Ovsiienko } 298538b4b397SViacheslav Ovsiienko } 298638b4b397SViacheslav Ovsiienko } 298745a6df80SMichael Baum if (priv->pci_dev == NULL) { 298856bb3c84SXueming Li config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 298956bb3c84SXueming Li goto exit; 299056bb3c84SXueming Li } 299138b4b397SViacheslav Ovsiienko /* 299238b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 299338b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 299438b4b397SViacheslav Ovsiienko * to determine old NICs. 
299538b4b397SViacheslav Ovsiienko */
299645a6df80SMichael Baum switch (priv->pci_dev->id.device_id) {
299738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
299838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
299938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
300038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
3001614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
300238b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0;
300338b4b397SViacheslav Ovsiienko break;
300438b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
300538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
300638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
300738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
300838b4b397SViacheslav Ovsiienko /*
300938b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and
301038b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is a bug
301138b4b397SViacheslav Ovsiienko * that may break PFC control, so disable the feature.
301238b4b397SViacheslav Ovsiienko */
301338b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0;
301420215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
301538b4b397SViacheslav Ovsiienko break;
301638b4b397SViacheslav Ovsiienko default:
301738b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
301838b4b397SViacheslav Ovsiienko break;
301938b4b397SViacheslav Ovsiienko }
302038b4b397SViacheslav Ovsiienko exit:
302138b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
302238b4b397SViacheslav Ovsiienko }
302338b4b397SViacheslav Ovsiienko 
302438b4b397SViacheslav Ovsiienko /**
302539139371SViacheslav Ovsiienko * Configures the metadata mask fields in the shared context.
302639139371SViacheslav Ovsiienko *
302739139371SViacheslav Ovsiienko * @param [in] dev
302839139371SViacheslav Ovsiienko * Pointer to Ethernet device.
302939139371SViacheslav Ovsiienko */
30302eb4d010SOphir Munk void
303139139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev)
303239139371SViacheslav Ovsiienko {
303339139371SViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private;
30346e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh;
303539139371SViacheslav Ovsiienko uint32_t meta, mark, reg_c0;
303639139371SViacheslav Ovsiienko 
303739139371SViacheslav Ovsiienko reg_c0 = ~priv->vport_meta_mask;
3038a13ec19cSMichael Baum switch (sh->config.dv_xmeta_en) {
303939139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_LEGACY:
304039139371SViacheslav Ovsiienko meta = UINT32_MAX;
304139139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK;
304239139371SViacheslav Ovsiienko break;
304339139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META16:
304439139371SViacheslav Ovsiienko meta = reg_c0 >> rte_bsf32(reg_c0);
304539139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK;
304639139371SViacheslav Ovsiienko break;
304739139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META32:
304839139371SViacheslav Ovsiienko meta = UINT32_MAX;
304939139371SViacheslav Ovsiienko mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
305039139371SViacheslav Ovsiienko break;
3051ddb68e47SBing Zhao case MLX5_XMETA_MODE_META32_HWS:
3052ddb68e47SBing Zhao meta = UINT32_MAX;
3053ddb68e47SBing Zhao mark = MLX5_FLOW_MARK_MASK;
3054ddb68e47SBing Zhao break;
305539139371SViacheslav Ovsiienko default:
305639139371SViacheslav Ovsiienko meta = 0;
305739139371SViacheslav Ovsiienko mark = 0;
30588e46d4e1SAlexander Kozyrev MLX5_ASSERT(false);
305939139371SViacheslav Ovsiienko break;
306039139371SViacheslav Ovsiienko }
306139139371SViacheslav Ovsiienko if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
30627be78d02SJosh Soref DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
306339139371SViacheslav Ovsiienko sh->dv_mark_mask, mark);
306439139371SViacheslav Ovsiienko else
306539139371SViacheslav Ovsiienko sh->dv_mark_mask = mark;
306639139371SViacheslav Ovsiienko if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
30677be78d02SJosh Soref DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
306839139371SViacheslav Ovsiienko sh->dv_meta_mask, meta);
306939139371SViacheslav Ovsiienko else
307039139371SViacheslav Ovsiienko sh->dv_meta_mask = meta;
307139139371SViacheslav Ovsiienko if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
30727be78d02SJosh Soref DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
307339139371SViacheslav Ovsiienko sh->dv_regc0_mask, reg_c0);
307439139371SViacheslav Ovsiienko else
307539139371SViacheslav Ovsiienko sh->dv_regc0_mask = reg_c0;
3076a13ec19cSMichael Baum DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
307739139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
307839139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
307939139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
308039139371SViacheslav Ovsiienko }
308139139371SViacheslav Ovsiienko 
3082efa79e68SOri Kam int
3083efa79e68SOri Kam rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
3084efa79e68SOri Kam {
3085efa79e68SOri Kam static const char *const dynf_names[] = {
3086efa79e68SOri Kam RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
30878f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_METADATA_NAME,
30888f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
3089efa79e68SOri Kam };
3090efa79e68SOri Kam unsigned int i;
3091efa79e68SOri Kam 
3092efa79e68SOri Kam if (n < RTE_DIM(dynf_names))
3093efa79e68SOri Kam return -ENOMEM;
3094efa79e68SOri Kam for (i = 0; i < RTE_DIM(dynf_names); i++) {
3095efa79e68SOri Kam if (names[i] == NULL)
3096efa79e68SOri Kam return -EINVAL;
3097efa79e68SOri Kam strcpy(names[i], dynf_names[i]);
3098efa79e68SOri Kam }
3099efa79e68SOri Kam return RTE_DIM(dynf_names);
3100efa79e68SOri Kam }
3101efa79e68SOri Kam 
310221cae858SDekel Peled /**
3103fbc83412SViacheslav Ovsiienko * Look for the Ethernet device belonging to the mlx5 driver.
3104fbc83412SViacheslav Ovsiienko *
3105fbc83412SViacheslav Ovsiienko * @param[in] port_id
3106fbc83412SViacheslav Ovsiienko * port_id to start looking for the device from.
310756bb3c84SXueming Li * @param[in] odev
310856bb3c84SXueming Li * Pointer to the hint device. When a device is being probed,
3109fbc83412SViacheslav Ovsiienko * its siblings (master and preceding representors) might not
31102eb4d010SOphir Munk * have a driver assigned yet (because mlx5_os_pci_probe()
311156bb3c84SXueming Li * has not completed yet); in this case a match on the hint
3112fbc83412SViacheslav Ovsiienko * device may be used to detect a sibling device.
3113fbc83412SViacheslav Ovsiienko *
3114fbc83412SViacheslav Ovsiienko * @return
3115fbc83412SViacheslav Ovsiienko * port_id of the found device, RTE_MAX_ETHPORTS if not found.
3116fbc83412SViacheslav Ovsiienko */
3117f7e95215SViacheslav Ovsiienko uint16_t
311856bb3c84SXueming Li mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
3119f7e95215SViacheslav Ovsiienko {
3120f7e95215SViacheslav Ovsiienko while (port_id < RTE_MAX_ETHPORTS) {
3121f7e95215SViacheslav Ovsiienko struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3122f7e95215SViacheslav Ovsiienko 
3123f7e95215SViacheslav Ovsiienko if (dev->state != RTE_ETH_DEV_UNUSED &&
3124f7e95215SViacheslav Ovsiienko dev->device &&
312556bb3c84SXueming Li (dev->device == odev ||
3126fbc83412SViacheslav Ovsiienko (dev->device->driver &&
3127f7e95215SViacheslav Ovsiienko dev->device->driver->name &&
3128919488fbSXueming Li ((strcmp(dev->device->driver->name,
3129919488fbSXueming Li MLX5_PCI_DRIVER_NAME) == 0) ||
3130919488fbSXueming Li (strcmp(dev->device->driver->name,
3131919488fbSXueming Li MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
3132f7e95215SViacheslav Ovsiienko break;
3133f7e95215SViacheslav Ovsiienko port_id++;
3134f7e95215SViacheslav Ovsiienko }
3135f7e95215SViacheslav Ovsiienko if (port_id >= RTE_MAX_ETHPORTS)
3136f7e95215SViacheslav Ovsiienko return RTE_MAX_ETHPORTS;
3137f7e95215SViacheslav Ovsiienko return port_id;
3138f7e95215SViacheslav Ovsiienko }
3139f7e95215SViacheslav Ovsiienko 
31403a820742SOphir Munk /**
3141a7f34989SXueming Li * Callback to remove a device.
31423a820742SOphir Munk *
3143a7f34989SXueming Li * This function removes all Ethernet devices belonging to a given device.
31443a820742SOphir Munk *
31457af08c8fSMichael Baum * @param[in] cdev
3146a7f34989SXueming Li * Pointer to the generic device.
31473a820742SOphir Munk *
31483a820742SOphir Munk * @return
31493a820742SOphir Munk * 0 on success, -EIO if closing any of the ports failed.
31503a820742SOphir Munk */ 31516856efa5SMichael Baum int 31527af08c8fSMichael Baum mlx5_net_remove(struct mlx5_common_device *cdev) 31533a820742SOphir Munk { 31543a820742SOphir Munk uint16_t port_id; 31558a5a0aadSThomas Monjalon int ret = 0; 31563a820742SOphir Munk 31577af08c8fSMichael Baum RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) { 31582786b7bfSSuanming Mou /* 31592786b7bfSSuanming Mou * mlx5_dev_close() is not registered to secondary process, 31602786b7bfSSuanming Mou * call the close function explicitly for secondary process. 31612786b7bfSSuanming Mou */ 31622786b7bfSSuanming Mou if (rte_eal_process_type() == RTE_PROC_SECONDARY) 31638a5a0aadSThomas Monjalon ret |= mlx5_dev_close(&rte_eth_devices[port_id]); 31642786b7bfSSuanming Mou else 31658a5a0aadSThomas Monjalon ret |= rte_eth_dev_close(port_id); 31662786b7bfSSuanming Mou } 31678a5a0aadSThomas Monjalon return ret == 0 ? 0 : -EIO; 31683a820742SOphir Munk } 31693a820742SOphir Munk 3170771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = { 3171771fa900SAdrien Mazarguil { 31721d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 31731d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4) 3174771fa900SAdrien Mazarguil }, 3175771fa900SAdrien Mazarguil { 31761d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 31771d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) 3178771fa900SAdrien Mazarguil }, 3179771fa900SAdrien Mazarguil { 31801d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 31811d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) 3182771fa900SAdrien Mazarguil }, 3183771fa900SAdrien Mazarguil { 31841d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 31851d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) 3186771fa900SAdrien Mazarguil }, 3187771fa900SAdrien Mazarguil { 3188528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3189528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5) 3190528a9fbeSYongseok Koh }, 3191528a9fbeSYongseok Koh { 3192528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3193528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) 3194528a9fbeSYongseok Koh }, 3195528a9fbeSYongseok Koh { 3196528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3197528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) 3198528a9fbeSYongseok Koh }, 3199528a9fbeSYongseok Koh { 3200528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3201528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) 3202528a9fbeSYongseok Koh }, 3203528a9fbeSYongseok Koh { 3204dd3331c6SShahaf Shuler RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3205dd3331c6SShahaf Shuler PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) 3206dd3331c6SShahaf Shuler }, 3207dd3331c6SShahaf Shuler { 3208c322c0e5SOri Kam RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3209c322c0e5SOri Kam PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) 3210c322c0e5SOri Kam }, 3211c322c0e5SOri Kam { 3212f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3213f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6) 3214f0354d84SWisam Jaddo }, 3215f0354d84SWisam Jaddo { 3216f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3217f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) 3218f0354d84SWisam Jaddo }, 3219f0354d84SWisam Jaddo { 32205fc66630SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 32215fc66630SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX6DX) 32225fc66630SRaslan Darawsheh }, 32235fc66630SRaslan Darawsheh { 32245fc66630SRaslan Darawsheh 
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 32253ea12cadSRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTXVF) 32265fc66630SRaslan Darawsheh }, 32275fc66630SRaslan Darawsheh { 322858b4a2b1SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 322958b4a2b1SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF) 323058b4a2b1SRaslan Darawsheh }, 323158b4a2b1SRaslan Darawsheh { 323228c9a7d7SAli Alnubani RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 323328c9a7d7SAli Alnubani PCI_DEVICE_ID_MELLANOX_CONNECTX6LX) 323428c9a7d7SAli Alnubani }, 323528c9a7d7SAli Alnubani { 32366ca37b06SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 32376ca37b06SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX7) 32386ca37b06SRaslan Darawsheh }, 32396ca37b06SRaslan Darawsheh { 32406ca37b06SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 32416ca37b06SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX7BF) 32426ca37b06SRaslan Darawsheh }, 32436ca37b06SRaslan Darawsheh { 3244771fa900SAdrien Mazarguil .vendor_id = 0 3245771fa900SAdrien Mazarguil } 3246771fa900SAdrien Mazarguil }; 3247771fa900SAdrien Mazarguil 3248a7f34989SXueming Li static struct mlx5_class_driver mlx5_net_driver = { 3249a7f34989SXueming Li .drv_class = MLX5_CLASS_ETH, 3250a7f34989SXueming Li .name = RTE_STR(MLX5_ETH_DRIVER_NAME), 3251771fa900SAdrien Mazarguil .id_table = mlx5_pci_id_map, 3252a7f34989SXueming Li .probe = mlx5_os_net_probe, 3253a7f34989SXueming Li .remove = mlx5_net_remove, 3254a7f34989SXueming Li .probe_again = 1, 3255a7f34989SXueming Li .intr_lsc = 1, 3256a7f34989SXueming Li .intr_rmv = 1, 3257771fa900SAdrien Mazarguil }; 3258771fa900SAdrien Mazarguil 32599c99878aSJerin Jacob /* Initialize driver log type. */ 3260eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE) 32619c99878aSJerin Jacob 3262771fa900SAdrien Mazarguil /** 3263771fa900SAdrien Mazarguil * Driver initialization routine. 3264771fa900SAdrien Mazarguil */ 3265f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init) 3266771fa900SAdrien Mazarguil { 3267ef65067cSTal Shnaiderman pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL); 326882088001SParav Pandit mlx5_common_init(); 32695f8ba81cSXueming Li /* Build the static tables for Verbs conversion. */ 3270ea16068cSYongseok Koh mlx5_set_ptype_table(); 32715f8ba81cSXueming Li mlx5_set_cksum_table(); 32725f8ba81cSXueming Li mlx5_set_swp_types_table(); 32737b4f1e6bSMatan Azrad if (mlx5_glue) 3274a7f34989SXueming Li mlx5_class_driver_register(&mlx5_net_driver); 3275771fa900SAdrien Mazarguil } 3276771fa900SAdrien Mazarguil 3277a7f34989SXueming Li RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__); 3278a7f34989SXueming Li RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map); 3279a7f34989SXueming Li RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib"); 3280
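/*
 * Editor's note: the block below is an illustrative usage sketch appended for
 * documentation purposes only; it is not part of the upstream driver source.
 * It shows how an application might call rte_pmd_mlx5_get_dyn_flag_names()
 * (defined above), which expects an array of at least three writable name
 * buffers and returns the number of flag names, -ENOMEM when the array is too
 * small, or -EINVAL when an entry is NULL. The guard macro
 * MLX5_DYNF_USAGE_EXAMPLE is hypothetical, and sizing the buffers with
 * RTE_MBUF_DYN_NAMESIZE from <rte_mbuf_dyn.h> is an assumption of this
 * sketch (it holds for the three names registered above).
 */
#ifdef MLX5_DYNF_USAGE_EXAMPLE
#include <stdio.h>
#include <rte_mbuf_dyn.h>

static void
mlx5_example_dump_dyn_flags(void)
{
	char storage[3][RTE_MBUF_DYN_NAMESIZE];
	char *names[3] = { storage[0], storage[1], storage[2] };
	int i, n;

	/* Query the dynamic mbuf flag names registered by the mlx5 PMD. */
	n = rte_pmd_mlx5_get_dyn_flag_names(names, RTE_DIM(names));
	for (i = 0; i < n; i++)
		printf("mlx5 dynamic flag: %s\n", names[i]);
}
#endif /* MLX5_DYNF_USAGE_EXAMPLE */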