18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <stdint.h> 10771fa900SAdrien Mazarguil #include <stdlib.h> 11e72dd09bSNélio Laranjeiro #include <errno.h> 12771fa900SAdrien Mazarguil 13771fa900SAdrien Mazarguil #include <rte_malloc.h> 14df96fd0dSBruce Richardson #include <ethdev_driver.h> 15771fa900SAdrien Mazarguil #include <rte_pci.h> 16c752998bSGaetan Rivet #include <rte_bus_pci.h> 17771fa900SAdrien Mazarguil #include <rte_common.h> 18e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 19e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 20e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 21f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 225dfa003dSMichael Baum #include <rte_eal_paging.h> 23f15db67dSMatan Azrad #include <rte_alarm.h> 2420698c9fSOphir Munk #include <rte_cycles.h> 25771fa900SAdrien Mazarguil 267b4f1e6bSMatan Azrad #include <mlx5_glue.h> 277b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h> 2893e30982SMatan Azrad #include <mlx5_common.h> 29391b8bccSOphir Munk #include <mlx5_common_os.h> 30a4de9586SVu Pham #include <mlx5_common_mp.h> 3183c2047cSSuanming Mou #include <mlx5_malloc.h> 327b4f1e6bSMatan Azrad 337b4f1e6bSMatan Azrad #include "mlx5_defs.h" 34771fa900SAdrien Mazarguil #include "mlx5.h" 35771fa900SAdrien Mazarguil #include "mlx5_utils.h" 362e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 37151cbe3aSMichael Baum #include "mlx5_rx.h" 38377b69fbSMichael Baum #include "mlx5_tx.h" 39771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 4084c406e7SOri Kam #include "mlx5_flow.h" 41223f2c21SOphir Munk #include "mlx5_flow_os.h" 42efa79e68SOri Kam #include "rte_pmd_mlx5.h" 
#define MLX5_ETH_DRIVER_NAME mlx5_eth

/* Driver type key for new device global syntax. */
#define MLX5_DRIVER_KEY "driver"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to force doorbell register mapping
 * to non-cached region eliminating the extra write memory barrier.
 */
#define MLX5_TX_DB_NC "tx_db_nc"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify skew in nanoseconds on Tx datapath,
 * it represents the time between SQ start WQE processing and
 * appearing actual packet data on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the lacp traffic of bonded device */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* The default memory allocator used in PMD. */
#define MLX5_SYS_MEM_EN "sys_mem_en"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Device parameter to configure allow or prevent duplicate rules pattern. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"

/* Device parameter to configure implicit registration of mempool memory. */
#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"

/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type.
 */
int mlx5_logtype;

/* List of all shared device contexts and the mutex protecting it. */
static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
						LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;

/*
 * Configuration templates for the driver's indexed pools, one entry per
 * MLX5_IPOOL_* index. Entries with .per_core_cache set keep freed indices
 * in per-lcore caches instead of releasing trunk memory.
 */
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		/* Tags are cached per core rather than released to memory. */
		.release_mem_en = 0,
		.per_core_cache = (1 << 16),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		/**
		 * The ipool index should grow continually from small to big,
		 * for meter idx, so not set grow_trunk to avoid meter index
		 * not jump continually.
		 */
		.size = sizeof(struct mlx5_legacy_flow_meter),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It set in run time according to PCI function configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 1 << 19,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
	[MLX5_IPOOL_MTR_POLICY] = {
		/**
		 * The ipool index should grow continually from small to big,
		 * for policy idx, so not set grow_trunk to avoid policy index
		 * not jump continually.
		 */
		.size = sizeof(struct mlx5_flow_meter_sub_policy),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_policy_ipool",
	},
};

#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024

/**
 * Decide whether representor ID is a HPF(host PF) port on BF2.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if HPF, otherwise 0.
 */
bool
mlx5_is_hpf(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id);
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	/* HPF is a VF-type representor carrying the all-ones repr field. */
	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF &&
	       MLX5_REPRESENTOR_REPR(-1) == repr;
}

/**
 * Decide whether representor ID is a SF port representor.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if SF port representor, otherwise 0.
418919488fbSXueming Li */ 419919488fbSXueming Li bool 420919488fbSXueming Li mlx5_is_sf_repr(struct rte_eth_dev *dev) 421919488fbSXueming Li { 422919488fbSXueming Li struct mlx5_priv *priv = dev->data->dev_private; 423919488fbSXueming Li int type = MLX5_REPRESENTOR_TYPE(priv->representor_id); 424919488fbSXueming Li 425919488fbSXueming Li return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF; 426919488fbSXueming Li } 427919488fbSXueming Li 428919488fbSXueming Li /** 429f935ed4bSDekel Peled * Initialize the ASO aging management structure. 430f935ed4bSDekel Peled * 431f935ed4bSDekel Peled * @param[in] sh 432f935ed4bSDekel Peled * Pointer to mlx5_dev_ctx_shared object to free 433f935ed4bSDekel Peled * 434f935ed4bSDekel Peled * @return 435f935ed4bSDekel Peled * 0 on success, a negative errno value otherwise and rte_errno is set. 436f935ed4bSDekel Peled */ 437f935ed4bSDekel Peled int 438f935ed4bSDekel Peled mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh) 439f935ed4bSDekel Peled { 440f935ed4bSDekel Peled int err; 441f935ed4bSDekel Peled 442f935ed4bSDekel Peled if (sh->aso_age_mng) 443f935ed4bSDekel Peled return 0; 444f935ed4bSDekel Peled sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng), 445f935ed4bSDekel Peled RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 446f935ed4bSDekel Peled if (!sh->aso_age_mng) { 447f935ed4bSDekel Peled DRV_LOG(ERR, "aso_age_mng allocation was failed."); 448f935ed4bSDekel Peled rte_errno = ENOMEM; 449f935ed4bSDekel Peled return -ENOMEM; 450f935ed4bSDekel Peled } 45129efa63aSLi Zhang err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT); 452f935ed4bSDekel Peled if (err) { 453f935ed4bSDekel Peled mlx5_free(sh->aso_age_mng); 454f935ed4bSDekel Peled return -1; 455f935ed4bSDekel Peled } 4567cf2d15aSJiawei Wang rte_rwlock_init(&sh->aso_age_mng->resize_rwl); 457f935ed4bSDekel Peled rte_spinlock_init(&sh->aso_age_mng->free_sl); 458f935ed4bSDekel Peled LIST_INIT(&sh->aso_age_mng->free); 459f935ed4bSDekel Peled return 0; 
460f935ed4bSDekel Peled } 461f935ed4bSDekel Peled 462f935ed4bSDekel Peled /** 463f935ed4bSDekel Peled * Close and release all the resources of the ASO aging management structure. 464f935ed4bSDekel Peled * 465f935ed4bSDekel Peled * @param[in] sh 466f935ed4bSDekel Peled * Pointer to mlx5_dev_ctx_shared object to free. 467f935ed4bSDekel Peled */ 468f935ed4bSDekel Peled static void 469f935ed4bSDekel Peled mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh) 470f935ed4bSDekel Peled { 471f935ed4bSDekel Peled int i, j; 472f935ed4bSDekel Peled 47329efa63aSLi Zhang mlx5_aso_flow_hit_queue_poll_stop(sh); 47429efa63aSLi Zhang mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT); 475f935ed4bSDekel Peled if (sh->aso_age_mng->pools) { 476f935ed4bSDekel Peled struct mlx5_aso_age_pool *pool; 477f935ed4bSDekel Peled 478f935ed4bSDekel Peled for (i = 0; i < sh->aso_age_mng->next; ++i) { 479f935ed4bSDekel Peled pool = sh->aso_age_mng->pools[i]; 480f935ed4bSDekel Peled claim_zero(mlx5_devx_cmd_destroy 481f935ed4bSDekel Peled (pool->flow_hit_aso_obj)); 482f935ed4bSDekel Peled for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) 483f935ed4bSDekel Peled if (pool->actions[j].dr_action) 484f935ed4bSDekel Peled claim_zero 485223f2c21SOphir Munk (mlx5_flow_os_destroy_flow_action 486f935ed4bSDekel Peled (pool->actions[j].dr_action)); 487f935ed4bSDekel Peled mlx5_free(pool); 488f935ed4bSDekel Peled } 489f935ed4bSDekel Peled mlx5_free(sh->aso_age_mng->pools); 490f935ed4bSDekel Peled } 4917ad0b6d9SDekel Peled mlx5_free(sh->aso_age_mng); 492f935ed4bSDekel Peled } 493f935ed4bSDekel Peled 494f935ed4bSDekel Peled /** 495fa2d01c8SDong Zhou * Initialize the shared aging list information per port. 496fa2d01c8SDong Zhou * 497fa2d01c8SDong Zhou * @param[in] sh 4986e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		/* Arm the trigger so the first aged-flow event is reported. */
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i;

	memset(&sh->cmng, 0, sizeof(sh->cmng));
	TAILQ_INIT(&sh->cmng.flow_counters);
	sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
	sh->cmng.max_id = -1;
	sh->cmng.last_pool_idx = POOL_IDX_INVALID;
	rte_spinlock_init(&sh->cmng.pool_update_sl);
	/* One free-counter list and lock per counter type. */
	for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
		TAILQ_INIT(&sh->cmng.counters[i]);
		rte_spinlock_init(&sh->cmng.csl[i]);
	}
}

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	/* raws[0].data points into the single allocation backing this mng. */
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	mlx5_os_wrapped_mkey_destroy(&mng->wm);
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	/*
	 * Cancel the periodic counter query alarm; retry while a callback
	 * execution is still in progress (EINPROGRESS), bounded by retries.
	 */
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->cmng.n_valid;
		bool fallback = sh->cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->cmng.pools[i];
			/* Batch (non-fallback) pools own a single min_dcs. */
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
					   (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
						MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				/* Fallback counters hold per-counter DevX objects. */
				if (fallback && MLX5_POOL_GET_CNT
				    (pool, j)->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->cmng.pools);
	}
	/* Drain the list of statistics memory managers. */
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Initialize the aso flow meters management structure.
5845af61440SMatan Azrad claim_zero(mlx5_devx_cmd_destroy 585fa2d01c8SDong Zhou (pool->min_dcs)); 5865382d28cSMatan Azrad for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { 5872b5b1aebSSuanming Mou struct mlx5_flow_counter *cnt = 5882b5b1aebSSuanming Mou MLX5_POOL_GET_CNT(pool, j); 5892b5b1aebSSuanming Mou 5902b5b1aebSSuanming Mou if (cnt->action) 5915382d28cSMatan Azrad claim_zero 592223f2c21SOphir Munk (mlx5_flow_os_destroy_flow_action 5932b5b1aebSSuanming Mou (cnt->action)); 5942b5b1aebSSuanming Mou if (fallback && MLX5_POOL_GET_CNT 5952b5b1aebSSuanming Mou (pool, j)->dcs_when_free) 5965382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy 5972b5b1aebSSuanming Mou (cnt->dcs_when_free)); 5985382d28cSMatan Azrad } 59983c2047cSSuanming Mou mlx5_free(pool); 6005382d28cSMatan Azrad } 601994829e6SSuanming Mou mlx5_free(sh->cmng.pools); 6025382d28cSMatan Azrad } 6035382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 6045382d28cSMatan Azrad while (mng) { 6055382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(mng); 6065382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 6075382d28cSMatan Azrad } 6085382d28cSMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 6095382d28cSMatan Azrad } 6105382d28cSMatan Azrad 61129efa63aSLi Zhang /** 61229efa63aSLi Zhang * Initialize the aso flow meters management structure. 
61329efa63aSLi Zhang * 61429efa63aSLi Zhang * @param[in] sh 61529efa63aSLi Zhang * Pointer to mlx5_dev_ctx_shared object to free 61629efa63aSLi Zhang */ 61729efa63aSLi Zhang int 618afb4aa4fSLi Zhang mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh) 61929efa63aSLi Zhang { 620afb4aa4fSLi Zhang if (!sh->mtrmng) { 621afb4aa4fSLi Zhang sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO, 622afb4aa4fSLi Zhang sizeof(*sh->mtrmng), 62329efa63aSLi Zhang RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 624afb4aa4fSLi Zhang if (!sh->mtrmng) { 625afb4aa4fSLi Zhang DRV_LOG(ERR, 626afb4aa4fSLi Zhang "meter management allocation was failed."); 62729efa63aSLi Zhang rte_errno = ENOMEM; 62829efa63aSLi Zhang return -ENOMEM; 62929efa63aSLi Zhang } 630afb4aa4fSLi Zhang if (sh->meter_aso_en) { 631afb4aa4fSLi Zhang rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl); 6327797b0feSJiawei Wang rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl); 633afb4aa4fSLi Zhang LIST_INIT(&sh->mtrmng->pools_mng.meters); 634afb4aa4fSLi Zhang } 635afb4aa4fSLi Zhang sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID; 63629efa63aSLi Zhang } 63729efa63aSLi Zhang return 0; 63829efa63aSLi Zhang } 63929efa63aSLi Zhang 64029efa63aSLi Zhang /** 64129efa63aSLi Zhang * Close and release all the resources of 64229efa63aSLi Zhang * the ASO flow meter management structure. 64329efa63aSLi Zhang * 64429efa63aSLi Zhang * @param[in] sh 64529efa63aSLi Zhang * Pointer to mlx5_dev_ctx_shared object to free. 
 */
static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_mtr_pool *mtr_pool;
	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
	uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_aso_mtr *aso_mtr;
	int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */

	if (sh->meter_aso_en) {
		/* Stop the ASO meter queue before destroying the pools. */
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
		idx = mtrmng->pools_mng.n_valid;
		/* Release pools in reverse order of creation. */
		while (idx--) {
			mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
			/* Destroy every meter DR action before its pool. */
			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
				aso_mtr = &mtr_pool->mtrs[i];
				if (aso_mtr->fm.meter_action)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action));
			}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
			/* DevX object backing the pool goes last. */
			claim_zero(mlx5_devx_cmd_destroy
					(mtr_pool->devx_obj));
			mtrmng->pools_mng.n_valid--;
			mlx5_free(mtr_pool);
		}
		mlx5_free(sh->mtrmng->pools_mng.pools);
	}
	mlx5_free(sh->mtrmng);
	/* Clear the pointer so a later re-init starts from scratch. */
	sh->mtrmng = NULL;
}

/* Send FLOW_AGED event if needed.
*/ 684f935ed4bSDekel Peled void 685f935ed4bSDekel Peled mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh) 686f935ed4bSDekel Peled { 687f935ed4bSDekel Peled struct mlx5_age_info *age_info; 688f935ed4bSDekel Peled uint32_t i; 689f935ed4bSDekel Peled 690f935ed4bSDekel Peled for (i = 0; i < sh->max_port; i++) { 691f935ed4bSDekel Peled age_info = &sh->port[i].age_info; 692f935ed4bSDekel Peled if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW)) 693f935ed4bSDekel Peled continue; 694447d4d79SMichael Baum MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW); 695447d4d79SMichael Baum if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) { 696447d4d79SMichael Baum MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER); 697f935ed4bSDekel Peled rte_eth_dev_callback_process 698f935ed4bSDekel Peled (&rte_eth_devices[sh->port[i].devx_ih_port_id], 699f935ed4bSDekel Peled RTE_ETH_EVENT_FLOW_AGED, NULL); 700447d4d79SMichael Baum } 701f935ed4bSDekel Peled } 702f935ed4bSDekel Peled } 703f935ed4bSDekel Peled 704ee9e5fadSBing Zhao /* 705ee9e5fadSBing Zhao * Initialize the ASO connection tracking structure. 706ee9e5fadSBing Zhao * 707ee9e5fadSBing Zhao * @param[in] sh 708ee9e5fadSBing Zhao * Pointer to mlx5_dev_ctx_shared object. 709ee9e5fadSBing Zhao * 710ee9e5fadSBing Zhao * @return 711ee9e5fadSBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 
712ee9e5fadSBing Zhao */ 713ee9e5fadSBing Zhao int 714ee9e5fadSBing Zhao mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh) 715ee9e5fadSBing Zhao { 716ee9e5fadSBing Zhao int err; 717ee9e5fadSBing Zhao 718ee9e5fadSBing Zhao if (sh->ct_mng) 719ee9e5fadSBing Zhao return 0; 720ee9e5fadSBing Zhao sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng), 721ee9e5fadSBing Zhao RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 722ee9e5fadSBing Zhao if (!sh->ct_mng) { 723ee9e5fadSBing Zhao DRV_LOG(ERR, "ASO CT management allocation failed."); 724ee9e5fadSBing Zhao rte_errno = ENOMEM; 725ee9e5fadSBing Zhao return -rte_errno; 726ee9e5fadSBing Zhao } 727ee9e5fadSBing Zhao err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING); 728ee9e5fadSBing Zhao if (err) { 729ee9e5fadSBing Zhao mlx5_free(sh->ct_mng); 730ee9e5fadSBing Zhao /* rte_errno should be extracted from the failure. */ 731ee9e5fadSBing Zhao rte_errno = EINVAL; 732ee9e5fadSBing Zhao return -rte_errno; 733ee9e5fadSBing Zhao } 734ee9e5fadSBing Zhao rte_spinlock_init(&sh->ct_mng->ct_sl); 735ee9e5fadSBing Zhao rte_rwlock_init(&sh->ct_mng->resize_rwl); 736ee9e5fadSBing Zhao LIST_INIT(&sh->ct_mng->free_cts); 737ee9e5fadSBing Zhao return 0; 738ee9e5fadSBing Zhao } 739ee9e5fadSBing Zhao 7400af8a229SBing Zhao /* 7410af8a229SBing Zhao * Close and release all the resources of the 7420af8a229SBing Zhao * ASO connection tracking management structure. 7430af8a229SBing Zhao * 7440af8a229SBing Zhao * @param[in] sh 7450af8a229SBing Zhao * Pointer to mlx5_dev_ctx_shared object to free. 
7460af8a229SBing Zhao */ 7470af8a229SBing Zhao static void 7480af8a229SBing Zhao mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh) 7490af8a229SBing Zhao { 7500af8a229SBing Zhao struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng; 7510af8a229SBing Zhao struct mlx5_aso_ct_pool *ct_pool; 7520af8a229SBing Zhao struct mlx5_aso_ct_action *ct; 7530af8a229SBing Zhao uint32_t idx; 7540af8a229SBing Zhao uint32_t val; 7550af8a229SBing Zhao uint32_t cnt; 7560af8a229SBing Zhao int i; 7570af8a229SBing Zhao 7580af8a229SBing Zhao mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING); 7590af8a229SBing Zhao idx = mng->next; 7600af8a229SBing Zhao while (idx--) { 7610af8a229SBing Zhao cnt = 0; 7620af8a229SBing Zhao ct_pool = mng->pools[idx]; 7630af8a229SBing Zhao for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) { 7640af8a229SBing Zhao ct = &ct_pool->actions[i]; 7650af8a229SBing Zhao val = __atomic_fetch_sub(&ct->refcnt, 1, 7660af8a229SBing Zhao __ATOMIC_RELAXED); 7670af8a229SBing Zhao MLX5_ASSERT(val == 1); 7680af8a229SBing Zhao if (val > 1) 7690af8a229SBing Zhao cnt++; 7700af8a229SBing Zhao #ifdef HAVE_MLX5_DR_ACTION_ASO_CT 7710af8a229SBing Zhao if (ct->dr_action_orig) 7720af8a229SBing Zhao claim_zero(mlx5_glue->destroy_flow_action 7730af8a229SBing Zhao (ct->dr_action_orig)); 7740af8a229SBing Zhao if (ct->dr_action_rply) 7750af8a229SBing Zhao claim_zero(mlx5_glue->destroy_flow_action 7760af8a229SBing Zhao (ct->dr_action_rply)); 7770af8a229SBing Zhao #endif 7780af8a229SBing Zhao } 7790af8a229SBing Zhao claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj)); 7800af8a229SBing Zhao if (cnt) { 7810af8a229SBing Zhao DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u", 7820af8a229SBing Zhao cnt, i); 7830af8a229SBing Zhao } 7840af8a229SBing Zhao mlx5_free(ct_pool); 7850af8a229SBing Zhao /* in case of failure. 
*/ 7860af8a229SBing Zhao mng->next--; 7870af8a229SBing Zhao } 7880af8a229SBing Zhao mlx5_free(mng->pools); 7890af8a229SBing Zhao mlx5_free(mng); 7900af8a229SBing Zhao /* Management structure must be cleared to 0s during allocation. */ 7910af8a229SBing Zhao sh->ct_mng = NULL; 7920af8a229SBing Zhao } 7930af8a229SBing Zhao 7945382d28cSMatan Azrad /** 795014d1cbeSSuanming Mou * Initialize the flow resources' indexed mempool. 796014d1cbeSSuanming Mou * 797014d1cbeSSuanming Mou * @param[in] sh 7986e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 799447d4d79SMichael Baum * @param[in] config 800b88341caSSuanming Mou * Pointer to user dev config. 801014d1cbeSSuanming Mou */ 802014d1cbeSSuanming Mou static void 8036e88bc42SOphir Munk mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh, 8045c761238SGregory Etelson const struct mlx5_dev_config *config) 805014d1cbeSSuanming Mou { 806014d1cbeSSuanming Mou uint8_t i; 8075c761238SGregory Etelson struct mlx5_indexed_pool_config cfg; 808014d1cbeSSuanming Mou 809a1da6f62SSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) { 8105c761238SGregory Etelson cfg = mlx5_ipool_cfg[i]; 8115c761238SGregory Etelson switch (i) { 8125c761238SGregory Etelson default: 8135c761238SGregory Etelson break; 8145c761238SGregory Etelson /* 8155c761238SGregory Etelson * Set MLX5_IPOOL_MLX5_FLOW ipool size 8165c761238SGregory Etelson * according to PCI function flow configuration. 8175c761238SGregory Etelson */ 8185c761238SGregory Etelson case MLX5_IPOOL_MLX5_FLOW: 8195c761238SGregory Etelson cfg.size = config->dv_flow_en ? 
8205c761238SGregory Etelson sizeof(struct mlx5_flow_handle) : 8215c761238SGregory Etelson MLX5_FLOW_HANDLE_VERBS_SIZE; 8225c761238SGregory Etelson break; 8235c761238SGregory Etelson } 824b4edeaf3SSuanming Mou if (config->reclaim_mode) { 8255c761238SGregory Etelson cfg.release_mem_en = 1; 826b4edeaf3SSuanming Mou cfg.per_core_cache = 0; 827cde19e86SSuanming Mou } else { 828cde19e86SSuanming Mou cfg.release_mem_en = 0; 829b4edeaf3SSuanming Mou } 8305c761238SGregory Etelson sh->ipool[i] = mlx5_ipool_create(&cfg); 831014d1cbeSSuanming Mou } 832a1da6f62SSuanming Mou } 833014d1cbeSSuanming Mou 8344f3d8d0eSMatan Azrad 835014d1cbeSSuanming Mou /** 836014d1cbeSSuanming Mou * Release the flow resources' indexed mempool. 837014d1cbeSSuanming Mou * 838014d1cbeSSuanming Mou * @param[in] sh 8396e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 840014d1cbeSSuanming Mou */ 841014d1cbeSSuanming Mou static void 8426e88bc42SOphir Munk mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh) 843014d1cbeSSuanming Mou { 844014d1cbeSSuanming Mou uint8_t i; 845014d1cbeSSuanming Mou 846014d1cbeSSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) 847014d1cbeSSuanming Mou mlx5_ipool_destroy(sh->ipool[i]); 8484f3d8d0eSMatan Azrad for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i) 8494f3d8d0eSMatan Azrad if (sh->mdh_ipools[i]) 8504f3d8d0eSMatan Azrad mlx5_ipool_destroy(sh->mdh_ipools[i]); 851014d1cbeSSuanming Mou } 852014d1cbeSSuanming Mou 853daa38a89SBing Zhao /* 854daa38a89SBing Zhao * Check if dynamic flex parser for eCPRI already exists. 855daa38a89SBing Zhao * 856daa38a89SBing Zhao * @param dev 857daa38a89SBing Zhao * Pointer to Ethernet device structure. 858daa38a89SBing Zhao * 859daa38a89SBing Zhao * @return 860daa38a89SBing Zhao * true on exists, false on not. 
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	/* The profile object is only set once the parser was created. */
	return !!prf->obj;
}

/*
 * Allocation of a flex parser for eCPRI. Once created, this parser related
 * resources will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	/* Room for up to 8 sample IDs; only prf->num (2) are queried below. */
	uint32_t ids[8];
	int ret;

	if (!priv->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* Type of compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now, and its offset will
	 * start after the common header that with the length of a DW(u32).
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		/* NOTE(review): prf->obj stays set; it is released at close. */
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	prf->ids[0] = ids[0];
	prf->ids[1] = ids[1];
	return 0;
}

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

/*
 * Translate HCA software-parsing attributes into capability flags.
 * Returns a bitmask of MLX5_SW_PARSING_*_CAP bits.
 */
uint32_t
mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t sw_parsing_offloads = 0;

	if (attr->swp) {
		sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
		if (attr->swp_csum)
			sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;

		if (attr->swp_lso)
			sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
	}
	return sw_parsing_offloads;
}

uint32_t
9726a86ee2eSTal Shnaiderman mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr) 9736a86ee2eSTal Shnaiderman { 9746a86ee2eSTal Shnaiderman uint32_t tn_offloads = 0; 9756a86ee2eSTal Shnaiderman 9766a86ee2eSTal Shnaiderman if (attr->tunnel_stateless_vxlan) 9776a86ee2eSTal Shnaiderman tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP; 9786a86ee2eSTal Shnaiderman if (attr->tunnel_stateless_gre) 9796a86ee2eSTal Shnaiderman tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP; 9806a86ee2eSTal Shnaiderman if (attr->tunnel_stateless_geneve_rx) 9816a86ee2eSTal Shnaiderman tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP; 9826a86ee2eSTal Shnaiderman return tn_offloads; 9836a86ee2eSTal Shnaiderman } 9846a86ee2eSTal Shnaiderman 9855dfa003dSMichael Baum /* Fill all fields of UAR structure. */ 986a0bfe9d5SViacheslav Ovsiienko static int 9875dfa003dSMichael Baum mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh) 988a0bfe9d5SViacheslav Ovsiienko { 9895dfa003dSMichael Baum int ret; 990a0bfe9d5SViacheslav Ovsiienko 9915dfa003dSMichael Baum ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar); 9925dfa003dSMichael Baum if (ret) { 9935dfa003dSMichael Baum DRV_LOG(ERR, "Failed to prepare Tx DevX UAR."); 9945dfa003dSMichael Baum return -rte_errno; 995a0bfe9d5SViacheslav Ovsiienko } 9965dfa003dSMichael Baum MLX5_ASSERT(sh->tx_uar.obj); 9975dfa003dSMichael Baum MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj)); 9985dfa003dSMichael Baum ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar); 9995dfa003dSMichael Baum if (ret) { 10005dfa003dSMichael Baum DRV_LOG(ERR, "Failed to prepare Rx DevX UAR."); 10015dfa003dSMichael Baum mlx5_devx_uar_release(&sh->tx_uar); 10025dfa003dSMichael Baum return -rte_errno; 1003a0bfe9d5SViacheslav Ovsiienko } 10045dfa003dSMichael Baum MLX5_ASSERT(sh->rx_uar.obj); 10055dfa003dSMichael Baum MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj)); 10065dfa003dSMichael Baum return 0; 1007a0bfe9d5SViacheslav Ovsiienko } 10085dfa003dSMichael 
Baum 10095dfa003dSMichael Baum static void 10105dfa003dSMichael Baum mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh) 10115dfa003dSMichael Baum { 10125dfa003dSMichael Baum mlx5_devx_uar_release(&sh->rx_uar); 10135dfa003dSMichael Baum mlx5_devx_uar_release(&sh->tx_uar); 1014a0bfe9d5SViacheslav Ovsiienko } 1015a0bfe9d5SViacheslav Ovsiienko 1016014d1cbeSSuanming Mou /** 1017fc59a1ecSMichael Baum * rte_mempool_walk() callback to unregister Rx mempools. 1018fc59a1ecSMichael Baum * It used when implicit mempool registration is disabled. 1019fec28ca0SDmitry Kozlyuk * 1020fec28ca0SDmitry Kozlyuk * @param mp 1021fec28ca0SDmitry Kozlyuk * The mempool being walked. 1022fec28ca0SDmitry Kozlyuk * @param arg 1023fec28ca0SDmitry Kozlyuk * Pointer to the device shared context. 1024fec28ca0SDmitry Kozlyuk */ 1025fec28ca0SDmitry Kozlyuk static void 1026fc59a1ecSMichael Baum mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg) 1027fec28ca0SDmitry Kozlyuk { 1028fec28ca0SDmitry Kozlyuk struct mlx5_dev_ctx_shared *sh = arg; 1029fec28ca0SDmitry Kozlyuk 1030fc59a1ecSMichael Baum mlx5_dev_mempool_unregister(sh->cdev, mp); 1031fec28ca0SDmitry Kozlyuk } 1032fec28ca0SDmitry Kozlyuk 1033fec28ca0SDmitry Kozlyuk /** 1034fec28ca0SDmitry Kozlyuk * Callback used when implicit mempool registration is disabled 1035fec28ca0SDmitry Kozlyuk * in order to track Rx mempool destruction. 1036fec28ca0SDmitry Kozlyuk * 1037fec28ca0SDmitry Kozlyuk * @param event 1038fec28ca0SDmitry Kozlyuk * Mempool life cycle event. 1039fec28ca0SDmitry Kozlyuk * @param mp 1040fec28ca0SDmitry Kozlyuk * An Rx mempool registered explicitly when the port is started. 1041fec28ca0SDmitry Kozlyuk * @param arg 1042fec28ca0SDmitry Kozlyuk * Pointer to a device shared context. 
1043fec28ca0SDmitry Kozlyuk */ 1044fec28ca0SDmitry Kozlyuk static void 1045fec28ca0SDmitry Kozlyuk mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event, 1046fec28ca0SDmitry Kozlyuk struct rte_mempool *mp, void *arg) 1047fec28ca0SDmitry Kozlyuk { 1048fec28ca0SDmitry Kozlyuk struct mlx5_dev_ctx_shared *sh = arg; 1049fec28ca0SDmitry Kozlyuk 1050fec28ca0SDmitry Kozlyuk if (event == RTE_MEMPOOL_EVENT_DESTROY) 1051fc59a1ecSMichael Baum mlx5_dev_mempool_unregister(sh->cdev, mp); 1052fec28ca0SDmitry Kozlyuk } 1053fec28ca0SDmitry Kozlyuk 1054fec28ca0SDmitry Kozlyuk int 1055fec28ca0SDmitry Kozlyuk mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev) 1056fec28ca0SDmitry Kozlyuk { 1057fec28ca0SDmitry Kozlyuk struct mlx5_priv *priv = dev->data->dev_private; 1058fec28ca0SDmitry Kozlyuk struct mlx5_dev_ctx_shared *sh = priv->sh; 1059fec28ca0SDmitry Kozlyuk int ret; 1060fec28ca0SDmitry Kozlyuk 1061fec28ca0SDmitry Kozlyuk /* Check if we only need to track Rx mempool destruction. */ 106285209924SMichael Baum if (!sh->cdev->config.mr_mempool_reg_en) { 1063fec28ca0SDmitry Kozlyuk ret = rte_mempool_event_callback_register 1064fec28ca0SDmitry Kozlyuk (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh); 1065fec28ca0SDmitry Kozlyuk return ret == 0 || rte_errno == EEXIST ? 0 : ret; 1066fec28ca0SDmitry Kozlyuk } 1067fc59a1ecSMichael Baum return mlx5_dev_mempool_subscribe(sh->cdev); 1068fec28ca0SDmitry Kozlyuk } 1069fec28ca0SDmitry Kozlyuk 1070fec28ca0SDmitry Kozlyuk /** 1071a89f6433SRongwei Liu * Set up multiple TISs with different affinities according to 1072a89f6433SRongwei Liu * number of bonding ports 1073a89f6433SRongwei Liu * 1074a89f6433SRongwei Liu * @param priv 1075a89f6433SRongwei Liu * Pointer of shared context. 1076a89f6433SRongwei Liu * 1077a89f6433SRongwei Liu * @return 1078a89f6433SRongwei Liu * Zero on success, -1 otherwise. 
 */
static int
mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
{
	int i;
	struct mlx5_devx_lag_context lag_ctx = { 0 };
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	tis_attr.transport_domain = sh->td->id;
	if (sh->bond.n_port) {
		/* Bonding device: query LAG affinity from firmware. */
		if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
			sh->lag.tx_remap_affinity[0] =
				lag_ctx.tx_remap_affinity_1;
			sh->lag.tx_remap_affinity[1] =
				lag_ctx.tx_remap_affinity_2;
			sh->lag.affinity_mode = lag_ctx.port_select_mode;
		} else {
			DRV_LOG(ERR, "Failed to query lag affinity.");
			return -1;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
			/* One TIS per bonding port, each pinned to a port. */
			for (i = 0; i < sh->bond.n_port; i++) {
				tis_attr.lag_tx_port_affinity =
					MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
							sh->bond.n_port);
				sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
						&tis_attr);
				if (!sh->tis[i]) {
					DRV_LOG(ERR, "Failed to TIS %d/%d for bonding device"
						" %s.", i, sh->bond.n_port,
						sh->ibdev_name);
					return -1;
				}
			}
			DRV_LOG(DEBUG, "LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\n",
				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
				lag_ctx.tx_remap_affinity_2);
			return 0;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
				sh->ibdev_name);
	}
	/* Non-bonding (or HW-hash LAG) device: a single TIS, no affinity. */
	tis_attr.lag_tx_port_affinity = 0;
	sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
	if (!sh->tis[0]) {
		DRV_LOG(ERR, "Failed to TIS 0 for bonding device"
			" %s.", sh->ibdev_name);
		return -1;
	}
	return 0;
}

/**
 * Allocate shared device context. If there is multiport device the
 * master and representors will share this context, if there is single
 * port dedicated device, the context will be used by only given
 * port due to unification.
 *
 * Routine first searches the context for the specified device name,
 * if found the shared context assumed and reference counter is incremented.
 * If no context found the new one is created and initialized with specified
 * device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the device attributes (name, port, etc).
 * @param[in] config
 *   Pointer to device configuration structure.
 *
 * @return
 *   Pointer to mlx5_dev_ctx_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  const struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
			/* Found: share the existing context. */
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	pthread_mutex_init(&sh->txpp.mutex, NULL);
	sh->numa_node = spawn->cdev->dev->numa_node;
	sh->cdev = spawn->cdev;
	sh->devx = sh->cdev->config.devx;
	if (spawn->bond_info)
		sh->bond = *spawn->bond_info;
	err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	sh->reclaim_mode = config->reclaim_mode;
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to max unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
	}
	if (sh->devx) {
		/* DevX path: transport domain, TIS(es) and UARs. */
		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			err = ENOMEM;
			goto error;
		}
		if (mlx5_setup_tis(sh)) {
			DRV_LOG(ERR, "TIS allocation failure");
			err = ENOMEM;
			goto error;
		}
		err = mlx5_rxtx_uars_prepare(sh);
		if (err)
			goto error;
#ifndef RTE_ARCH_64
	} else {
		/* Initialize UAR access locks for 32bit implementations. */
		rte_spinlock_init(&sh->uar_lock_cq);
		for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
			rte_spinlock_init(&sh->uar_lock[i]);
#endif
	}
	mlx5_os_dev_shared_handler_install(sh);
	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
		/* First context: set up the per-process flow workspace. */
		err = mlx5_flow_os_init_workspace_once();
		if (err)
			goto error;
	}
	mlx5_flow_aging_init(sh);
	mlx5_flow_counters_mng_init(sh);
	mlx5_flow_ipool_create(sh, config);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	return sh;
error:
	/* Roll back everything acquired above; list mutex is still held. */
	pthread_mutex_destroy(&sh->txpp.mutex);
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	MLX5_ASSERT(sh);
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	i = 0;
	/* do-while: a non-bonding device still owns tis[0]. */
	do {
		if (sh->tis[i])
			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
	} while (++i < (uint32_t)sh->bond.n_port);
	mlx5_rxtx_uars_release(sh);
	mlx5_free(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	int ret;
	int i = 0;

	/* The global context list is protected by a single mutex. */
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */
	struct mlx5_dev_ctx_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
		if (lctx == sh)
			break;
	MLX5_ASSERT(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	MLX5_ASSERT(sh);
	MLX5_ASSERT(sh->refcnt);
	/* Secondary process should not free the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	/* Stop watching for mempool events and unregister all mempools. */
	if (!sh->cdev->config.mr_mempool_reg_en) {
		ret = rte_mempool_event_callback_unregister
				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
		if (ret == 0)
			rte_mempool_walk
			     (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
	}
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/* Release resources on the last device removal. */
	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
		mlx5_os_net_cleanup();
		mlx5_flow_os_release_workspace();
	}
	/* List manipulation is done; remaining teardown touches only sh. */
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	if (sh->flex_parsers_dv) {
		mlx5_list_destroy(sh->flex_parsers_dv);
		sh->flex_parsers_dv = NULL;
	}
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 **/
	mlx5_flow_counters_mng_close(sh);
	if (sh->aso_age_mng) {
		mlx5_flow_aso_age_mng_close(sh);
		sh->aso_age_mng = NULL;
	}
	if (sh->mtrmng)
		mlx5_aso_flow_mtrs_mng_close(sh);
	mlx5_flow_ipool_destroy(sh);
	mlx5_os_dev_shared_handler_uninstall(sh);
	mlx5_rxtx_uars_release(sh);
	/* Destroy one TIS per bonding port (at least index 0 is checked). */
	do {
		if (sh->tis[i])
			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
	} while (++i < sh->bond.n_port);
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
	pthread_mutex_destroy(&sh->txpp.mutex);
	mlx5_free(sh);
	return;
exit:
	/* Taken path when context stays referenced or is not in the list. */
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}

/**
 * Destroy table hash list.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	/* Nothing to do when the hash list was never created. */
	if (!sh->flow_tbls)
		return;
	mlx5_hlist_destroy(sh->flow_tbls);
	sh->flow_tbls = NULL;
}

/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
	int err = 0;
	/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_NAME_SIZE];

	MLX5_ASSERT(sh);
	/* Hash list name is derived from the IB device name for uniqueness. */
	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
					  false, true, sh,
					  flow_dv_tbl_create_cb,
					  flow_dv_tbl_match_cb,
					  flow_dv_tbl_remove_cb,
					  flow_dv_tbl_clone_cb,
					  flow_dv_tbl_clone_free_cb);
	if (!sh->flow_tbls) {
		DRV_LOG(ERR, "flow tables with hash creation failed.");
		err = ENOMEM;
		return err;
	}
#ifndef HAVE_MLX5DV_DR
	struct rte_flow_error error;
	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];

	/*
	 * In case we have not DR support, the zero tables should be created
	 * because DV expect to see them even if they cannot be created by
	 * RDMA-CORE.
	 */
	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
				      NULL, 0, 1, 0, &error) ||
	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
				      NULL, 0, 1, 0, &error) ||
	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
				      NULL, 0, 1, 0, &error)) {
		err = ENOMEM;
		goto error;
	}
	return err;
error:
	/* Roll back the hash list created above. */
	mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
#endif
	return err;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
14314d803a72SOlga Shern */ 14324d803a72SOlga Shern int 14334d803a72SOlga Shern mlx5_getenv_int(const char *name) 14344d803a72SOlga Shern { 14354d803a72SOlga Shern const char *val = getenv(name); 14364d803a72SOlga Shern 14374d803a72SOlga Shern if (val == NULL) 14384d803a72SOlga Shern return 0; 14394d803a72SOlga Shern return atoi(val); 14404d803a72SOlga Shern } 14414d803a72SOlga Shern 14424d803a72SOlga Shern /** 1443c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 1444c9ba7523SRaslan Darawsheh * 1445c9ba7523SRaslan Darawsheh * @param[in] dev 1446c9ba7523SRaslan Darawsheh * A pointer to eth_dev 1447c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 1448c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 1449c9ba7523SRaslan Darawsheh * 1450c9ba7523SRaslan Darawsheh * @return 1451c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 1452c9ba7523SRaslan Darawsheh */ 1453c9ba7523SRaslan Darawsheh int 1454c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 1455c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 1456c9ba7523SRaslan Darawsheh { 14578e46d4e1SAlexander Kozyrev MLX5_ASSERT(udp_tunnel != NULL); 1458295968d1SFerruh Yigit if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN && 1459c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 1460c9ba7523SRaslan Darawsheh return 0; 1461295968d1SFerruh Yigit if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE && 1462c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 1463c9ba7523SRaslan Darawsheh return 0; 1464c9ba7523SRaslan Darawsheh return -ENOTSUP; 1465c9ba7523SRaslan Darawsheh } 1466c9ba7523SRaslan Darawsheh 1467c9ba7523SRaslan Darawsheh /** 1468120dc4a7SYongseok Koh * Initialize process private data structure. 1469120dc4a7SYongseok Koh * 1470120dc4a7SYongseok Koh * @param dev 1471120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
1472120dc4a7SYongseok Koh * 1473120dc4a7SYongseok Koh * @return 1474120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 1475120dc4a7SYongseok Koh */ 1476120dc4a7SYongseok Koh int 1477120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 1478120dc4a7SYongseok Koh { 1479120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 1480120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 1481120dc4a7SYongseok Koh size_t ppriv_size; 1482120dc4a7SYongseok Koh 14836dad8b3aSYunjian Wang mlx5_proc_priv_uninit(dev); 1484120dc4a7SYongseok Koh /* 1485120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 1486120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 1487120dc4a7SYongseok Koh */ 14885dfa003dSMichael Baum ppriv_size = sizeof(struct mlx5_proc_priv) + 14895dfa003dSMichael Baum priv->txqs_n * sizeof(struct mlx5_uar_data); 149084a22cbcSSuanming Mou ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size, 149184a22cbcSSuanming Mou RTE_CACHE_LINE_SIZE, dev->device->numa_node); 1492120dc4a7SYongseok Koh if (!ppriv) { 1493120dc4a7SYongseok Koh rte_errno = ENOMEM; 1494120dc4a7SYongseok Koh return -rte_errno; 1495120dc4a7SYongseok Koh } 149684a22cbcSSuanming Mou ppriv->uar_table_sz = priv->txqs_n; 1497120dc4a7SYongseok Koh dev->process_private = ppriv; 1498b6e9c33cSMichael Baum if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1499b6e9c33cSMichael Baum priv->sh->pppriv = ppriv; 1500120dc4a7SYongseok Koh return 0; 1501120dc4a7SYongseok Koh } 1502120dc4a7SYongseok Koh 1503120dc4a7SYongseok Koh /** 1504120dc4a7SYongseok Koh * Un-initialize process private data structure. 1505120dc4a7SYongseok Koh * 1506120dc4a7SYongseok Koh * @param dev 1507120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
 */
void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	/* Idempotent: nothing to do when no private data is attached. */
	if (!dev->process_private)
		return;
	mlx5_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
int
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	/* Secondary processes only detach their per-process resources. */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Check if process_private released. */
		if (!dev->process_private)
			return 0;
		mlx5_tx_uar_uninit_secondary(dev);
		mlx5_proc_priv_uninit(dev);
		rte_eth_dev_release_port(dev);
		return 0;
	}
	if (!priv->sh)
		return 0;
	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->cdev->ctx != NULL) ?
		mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
	/*
	 * If default mreg copy action is removed at the stop stage,
	 * the search will return none and nothing will be done anymore.
	 */
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	/*
	 * If all the flows are already flushed in the device stop stage,
	 * then this will return directly without any action.
	 */
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
	mlx5_action_handle_flush(dev);
	mlx5_flow_meter_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
	/* Free the eCPRI flex parser resource. */
	mlx5_flex_parser_ecpri_release(dev);
	mlx5_flex_item_port_cleanup(dev);
	if (priv->rxq_privs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		rte_delay_us_sleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		mlx5_free(priv->rxq_privs);
		priv->rxq_privs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		rte_delay_us_sleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	if (priv->q_counters) {
		mlx5_devx_cmd_destroy(priv->q_counters);
		priv->q_counters = NULL;
	}
	if (priv->drop_queue.hrxq)
		mlx5_drop_action_destroy(dev);
	if (priv->mreg_cp_tbl)
		mlx5_hlist_destroy(priv->mreg_cp_tbl);
	mlx5_mprq_free_mp(dev);
	if (priv->sh->ct_mng)
		mlx5_flow_aso_ct_mng_close(priv->sh);
	mlx5_os_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		mlx5_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		mlx5_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_os_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->vmwa_context)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
	/* Sanity checks: warn about any objects leaked by the cleanup above. */
	ret = mlx5_hrxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->hrxqs)
		mlx5_list_destroy(priv->hrxqs);
	/*
	 * Free the shared context in last turn, because the cleanup
	 * routines above may use some shared fields, like
	 * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
	 * ifindex if Netlink fails.
	 */
	mlx5_free_shared_dev_ctx(priv->sh);
	/* Free the switch domain when this port is its last member. */
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
			break;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
	return 0;
}

/* Full operation set, used by the primary process. */
const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.representor_info_get = mlx5_representor_info_get,
	.read_clock = mlx5_txpp_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.rx_queue_start = mlx5_rx_queue_start,
	.rx_queue_stop = mlx5_rx_queue_stop,
	.tx_queue_start = mlx5_tx_queue_start,
	.tx_queue_stop = mlx5_tx_queue_stop,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.flow_ops_get = mlx5_flow_ops_get,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
	.hairpin_bind = mlx5_hairpin_bind,
	.hairpin_unbind = mlx5_hairpin_unbind,
	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
	.get_monitor_addr = mlx5_get_monitor_addr,
};

/* Available operations from secondary process. */
const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.representor_info_get = mlx5_representor_info_get,
	.read_clock = mlx5_txpp_read_clock,
	.rx_queue_start = mlx5_rx_queue_start,
	.rx_queue_stop = mlx5_rx_queue_stop,
	.tx_queue_start = mlx5_tx_queue_start,
	.tx_queue_stop = mlx5_tx_queue_stop,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
};
/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.representor_info_get = mlx5_representor_info_get,
	.read_clock = mlx5_txpp_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.rx_queue_start = mlx5_rx_queue_start,
	.rx_queue_stop = mlx5_rx_queue_stop,
	.tx_queue_start = mlx5_tx_queue_start,
	.tx_queue_stop = mlx5_tx_queue_stop,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.flow_ops_get = mlx5_flow_ops_get,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
	.hairpin_bind = mlx5_hairpin_bind,
	.hairpin_unbind = mlx5_hairpin_unbind,
	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
	.get_monitor_addr = mlx5_get_monitor_addr,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long mod;
	signed long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn.
*/ 185485209924SMichael Baum if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) || 185585209924SMichael Baum !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) || 185685209924SMichael Baum !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) || 185785209924SMichael Baum !strcmp(MLX5_MR_EXT_MEMSEG_EN, key)) 18586de569f5SAdrien Mazarguil return 0; 185999c12dccSNélio Laranjeiro errno = 0; 18608f848f32SViacheslav Ovsiienko tmp = strtol(val, NULL, 0); 186199c12dccSNélio Laranjeiro if (errno) { 1862a6d83b6aSNélio Laranjeiro rte_errno = errno; 1863a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); 1864a6d83b6aSNélio Laranjeiro return -rte_errno; 186599c12dccSNélio Laranjeiro } 18668f848f32SViacheslav Ovsiienko if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) { 18678f848f32SViacheslav Ovsiienko /* Negative values are acceptable for some keys only. */ 18688f848f32SViacheslav Ovsiienko rte_errno = EINVAL; 18698f848f32SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val); 18708f848f32SViacheslav Ovsiienko return -rte_errno; 18718f848f32SViacheslav Ovsiienko } 18728f848f32SViacheslav Ovsiienko mod = tmp >= 0 ? 
tmp : -tmp; 187399c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 187454c2d46bSAlexander Kozyrev if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) { 187554c2d46bSAlexander Kozyrev DRV_LOG(ERR, "invalid CQE compression " 187654c2d46bSAlexander Kozyrev "format parameter"); 187754c2d46bSAlexander Kozyrev rte_errno = EINVAL; 187854c2d46bSAlexander Kozyrev return -rte_errno; 187954c2d46bSAlexander Kozyrev } 18807fe24446SShahaf Shuler config->cqe_comp = !!tmp; 188154c2d46bSAlexander Kozyrev config->cqe_comp_fmt = tmp; 188278c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 188378c7a16dSYongseok Koh config->hw_padding = !!tmp; 18847d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 18857d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 18867d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 18870947ed38SMichael Baum config->mprq.log_stride_num = tmp; 1888ecb16045SAlexander Kozyrev } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) { 18890947ed38SMichael Baum config->mprq.log_stride_size = tmp; 18907d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 18917d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 18927d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 18937d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 18942a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 1895505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1896505f1fe4SViacheslav Ovsiienko " converted to txq_inline_max", key); 1897505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1898505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { 1899505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1900505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { 1901505f1fe4SViacheslav Ovsiienko config->txq_inline_min = tmp; 1902505f1fe4SViacheslav 
Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { 1903505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 19042a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 19057fe24446SShahaf Shuler config->txqs_inline = tmp; 190609d8b416SYongseok Koh } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 1907a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 1908230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 1909f9de8718SShahaf Shuler config->mps = !!tmp; 19106ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 1911a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 19126ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 1913505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1914505f1fe4SViacheslav Ovsiienko " converted to txq_inline_mpw", key); 1915505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 19165644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 1917a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 19188f848f32SViacheslav Ovsiienko } else if (strcmp(MLX5_TX_PP, key) == 0) { 19198f848f32SViacheslav Ovsiienko if (!mod) { 19208f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Zero Tx packet pacing parameter"); 19218f848f32SViacheslav Ovsiienko rte_errno = EINVAL; 19228f848f32SViacheslav Ovsiienko return -rte_errno; 19238f848f32SViacheslav Ovsiienko } 19248f848f32SViacheslav Ovsiienko config->tx_pp = tmp; 19258f848f32SViacheslav Ovsiienko } else if (strcmp(MLX5_TX_SKEW, key) == 0) { 19268f848f32SViacheslav Ovsiienko config->tx_skew = tmp; 19275644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { 19287fe24446SShahaf Shuler config->rx_vec_en = !!tmp; 192978a54648SXueming Li } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { 193078a54648SXueming Li config->l3_vxlan_en = !!tmp; 
1931db209cc3SNélio Laranjeiro } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { 1932db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 1933e2b4925eSOri Kam } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { 1934e2b4925eSOri Kam config->dv_esw_en = !!tmp; 193551e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 193651e72d38SOri Kam config->dv_flow_en = !!tmp; 19372d241515SViacheslav Ovsiienko } else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) { 19382d241515SViacheslav Ovsiienko if (tmp != MLX5_XMETA_MODE_LEGACY && 19392d241515SViacheslav Ovsiienko tmp != MLX5_XMETA_MODE_META16 && 19404ec6360dSGregory Etelson tmp != MLX5_XMETA_MODE_META32 && 19414ec6360dSGregory Etelson tmp != MLX5_XMETA_MODE_MISS_INFO) { 1942f078ceb6SViacheslav Ovsiienko DRV_LOG(ERR, "invalid extensive " 19432d241515SViacheslav Ovsiienko "metadata parameter"); 19442d241515SViacheslav Ovsiienko rte_errno = EINVAL; 19452d241515SViacheslav Ovsiienko return -rte_errno; 19462d241515SViacheslav Ovsiienko } 19474ec6360dSGregory Etelson if (tmp != MLX5_XMETA_MODE_MISS_INFO) 19482d241515SViacheslav Ovsiienko config->dv_xmeta_en = tmp; 19494ec6360dSGregory Etelson else 19504ec6360dSGregory Etelson config->dv_miss_info = 1; 19510f0ae73aSShiri Kuzin } else if (strcmp(MLX5_LACP_BY_USER, key) == 0) { 19520f0ae73aSShiri Kuzin config->lacp_by_user = !!tmp; 1953066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { 1954066cfecdSMatan Azrad config->max_dump_files_num = tmp; 195521bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { 195621bb6c7eSDekel Peled config->lro.timeout = tmp; 195735d4f17bSXueming Li } else if (strcmp(RTE_DEVARGS_KEY_CLASS, key) == 0) { 1958d768f324SMatan Azrad DRV_LOG(DEBUG, "class argument is %s.", val); 19591ad9a3d0SBing Zhao } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) { 19601ad9a3d0SBing Zhao config->log_hp_size = tmp; 1961a1da6f62SSuanming Mou } else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) { 1962a1da6f62SSuanming Mou if (tmp != MLX5_RCM_NONE && 
1963a1da6f62SSuanming Mou tmp != MLX5_RCM_LIGHT && 1964a1da6f62SSuanming Mou tmp != MLX5_RCM_AGGR) { 19657be78d02SJosh Soref DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val); 1966a1da6f62SSuanming Mou rte_errno = EINVAL; 1967a1da6f62SSuanming Mou return -rte_errno; 1968a1da6f62SSuanming Mou } 1969a1da6f62SSuanming Mou config->reclaim_mode = tmp; 197050f95b23SSuanming Mou } else if (strcmp(MLX5_DECAP_EN, key) == 0) { 197150f95b23SSuanming Mou config->decap_en = !!tmp; 1972e39226bdSJiawei Wang } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) { 1973e39226bdSJiawei Wang config->allow_duplicate_pattern = !!tmp; 1974febcac7bSBing Zhao } else if (strcmp(MLX5_DELAY_DROP, key) == 0) { 1975ce78c518SBing Zhao config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD); 1976ce78c518SBing Zhao config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN); 197799c12dccSNélio Laranjeiro } else { 19788648fa2fSMichael Baum DRV_LOG(WARNING, 19798648fa2fSMichael Baum "%s: unknown parameter, maybe it's for another class.", 19808648fa2fSMichael Baum key); 1981e72dd09bSNélio Laranjeiro } 198299c12dccSNélio Laranjeiro return 0; 198399c12dccSNélio Laranjeiro } 1984e72dd09bSNélio Laranjeiro 1985e72dd09bSNélio Laranjeiro /** 1986e72dd09bSNélio Laranjeiro * Parse device parameters. 1987e72dd09bSNélio Laranjeiro * 19887fe24446SShahaf Shuler * @param config 19897fe24446SShahaf Shuler * Pointer to device configuration structure. 1990e72dd09bSNélio Laranjeiro * @param devargs 1991e72dd09bSNélio Laranjeiro * Device arguments structure. 1992e72dd09bSNélio Laranjeiro * 1993e72dd09bSNélio Laranjeiro * @return 1994a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 
1995e72dd09bSNélio Laranjeiro */ 19962eb4d010SOphir Munk int 19977fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1998e72dd09bSNélio Laranjeiro { 1999e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 2000e72dd09bSNélio Laranjeiro int ret = 0; 2001e72dd09bSNélio Laranjeiro 2002e72dd09bSNélio Laranjeiro if (devargs == NULL) 2003e72dd09bSNélio Laranjeiro return 0; 2004e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. */ 20058648fa2fSMichael Baum kvlist = rte_kvargs_parse(devargs->args, NULL); 200615b0ea00SMatan Azrad if (kvlist == NULL) { 200715b0ea00SMatan Azrad rte_errno = EINVAL; 200815b0ea00SMatan Azrad return -rte_errno; 200915b0ea00SMatan Azrad } 2010e72dd09bSNélio Laranjeiro /* Process parameters. */ 20118648fa2fSMichael Baum ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config); 2012a6d83b6aSNélio Laranjeiro if (ret) { 2013a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 20148648fa2fSMichael Baum ret = -rte_errno; 2015a67323e4SShahaf Shuler } 2016e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 20178648fa2fSMichael Baum return ret; 2018e72dd09bSNélio Laranjeiro } 2019e72dd09bSNélio Laranjeiro 20207be600c8SYongseok Koh /** 202138b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE 202238b4b397SViacheslav Ovsiienko * while sending packets. 202338b4b397SViacheslav Ovsiienko * 202438b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 202538b4b397SViacheslav Ovsiienko * key is specified in devargs 202638b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 202738b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 
 * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
 *   and none (0 bytes) for other NICs
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters; txq_inline_min (and possibly
 *   hw_vlan_insert) are updated in place.
 */
void
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
		    struct mlx5_dev_config *config)
{
	if (config->txq_inline_min != MLX5_ARG_UNSET) {
		/* Application defines size of inlined data explicitly. */
		if (spawn->pci_dev != NULL) {
			switch (spawn->pci_dev->id.device_id) {
			case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
			case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
				/*
				 * ConnectX-4 requires at least the L2 header
				 * to be inlined; raise a too-small user value.
				 * NOTE(review): message says "txq_inline_mix",
				 * likely a typo for "txq_inline_min".
				 */
				if (config->txq_inline_min <
					    (int)MLX5_INLINE_HSIZE_L2) {
					DRV_LOG(DEBUG,
						"txq_inline_mix aligned to minimal ConnectX-4 required value %d",
						(int)MLX5_INLINE_HSIZE_L2);
					config->txq_inline_min =
							MLX5_INLINE_HSIZE_L2;
				}
				break;
			}
		}
		goto exit;
	}
	if (config->hca_attr.eth_net_offloads) {
		/* We have DevX enabled, inline mode queried successfully. */
		switch (config->hca_attr.wqe_inline_mode) {
		case MLX5_CAP_INLINE_MODE_L2:
			/* outer L2 header must be inlined. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
			goto exit;
		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
			/* No inline data are required by NIC. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
			config->hw_vlan_insert =
				config->hca_attr.wqe_vlan_insert;
			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
			goto exit;
		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
			/* inline mode is defined by NIC vport context. */
			if (!config->hca_attr.eth_virt)
				break;
			/* Map vport inline mode onto the PMD header sizes. */
			switch (config->hca_attr.vport_inline_mode) {
			case MLX5_INLINE_MODE_NONE:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_NONE;
				goto exit;
			case MLX5_INLINE_MODE_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L2;
				goto exit;
			case MLX5_INLINE_MODE_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L3;
				goto exit;
			case MLX5_INLINE_MODE_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L4;
				goto exit;
			case MLX5_INLINE_MODE_INNER_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L2;
				goto exit;
			case MLX5_INLINE_MODE_INNER_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L3;
				goto exit;
			case MLX5_INLINE_MODE_INNER_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L4;
				goto exit;
			}
		}
	}
	/* No PCI identity (presumably a non-PCI device): no inline data. */
	if (spawn->pci_dev == NULL) {
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		goto exit;
	}
	/*
	 * We get here if we are unable to deduce
	 * inline data size with DevX. Try PCI ID
	 * to determine old NICs.
	 */
	switch (spawn->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
		config->hw_vlan_insert = 0;
		break;
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		/*
		 * These NICs support VLAN insertion from WQE and
		 * report the wqe_vlan_insert flag. But there is the bug
		 * and PFC control may be broken, so disable feature.
		 */
		config->hw_vlan_insert = 0;
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	default:
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	}
exit:
	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
}

/**
 * Configures the metadata mask fields in the shared context.
214839139371SViacheslav Ovsiienko * 214939139371SViacheslav Ovsiienko * @param [in] dev 215039139371SViacheslav Ovsiienko * Pointer to Ethernet device. 215139139371SViacheslav Ovsiienko */ 21522eb4d010SOphir Munk void 215339139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev) 215439139371SViacheslav Ovsiienko { 215539139371SViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private; 21566e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 215739139371SViacheslav Ovsiienko uint32_t meta, mark, reg_c0; 215839139371SViacheslav Ovsiienko 215939139371SViacheslav Ovsiienko reg_c0 = ~priv->vport_meta_mask; 216039139371SViacheslav Ovsiienko switch (priv->config.dv_xmeta_en) { 216139139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_LEGACY: 216239139371SViacheslav Ovsiienko meta = UINT32_MAX; 216339139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 216439139371SViacheslav Ovsiienko break; 216539139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META16: 216639139371SViacheslav Ovsiienko meta = reg_c0 >> rte_bsf32(reg_c0); 216739139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 216839139371SViacheslav Ovsiienko break; 216939139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META32: 217039139371SViacheslav Ovsiienko meta = UINT32_MAX; 217139139371SViacheslav Ovsiienko mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK; 217239139371SViacheslav Ovsiienko break; 217339139371SViacheslav Ovsiienko default: 217439139371SViacheslav Ovsiienko meta = 0; 217539139371SViacheslav Ovsiienko mark = 0; 21768e46d4e1SAlexander Kozyrev MLX5_ASSERT(false); 217739139371SViacheslav Ovsiienko break; 217839139371SViacheslav Ovsiienko } 217939139371SViacheslav Ovsiienko if (sh->dv_mark_mask && sh->dv_mark_mask != mark) 21807be78d02SJosh Soref DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X", 218139139371SViacheslav Ovsiienko sh->dv_mark_mask, mark); 218239139371SViacheslav Ovsiienko else 218339139371SViacheslav Ovsiienko sh->dv_mark_mask 
= mark; 218439139371SViacheslav Ovsiienko if (sh->dv_meta_mask && sh->dv_meta_mask != meta) 21857be78d02SJosh Soref DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X", 218639139371SViacheslav Ovsiienko sh->dv_meta_mask, meta); 218739139371SViacheslav Ovsiienko else 218839139371SViacheslav Ovsiienko sh->dv_meta_mask = meta; 218939139371SViacheslav Ovsiienko if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0) 21907be78d02SJosh Soref DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X", 219139139371SViacheslav Ovsiienko sh->dv_meta_mask, reg_c0); 219239139371SViacheslav Ovsiienko else 219339139371SViacheslav Ovsiienko sh->dv_regc0_mask = reg_c0; 219439139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en); 219539139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask); 219639139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask); 219739139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask); 219839139371SViacheslav Ovsiienko } 219939139371SViacheslav Ovsiienko 2200efa79e68SOri Kam int 2201efa79e68SOri Kam rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) 2202efa79e68SOri Kam { 2203efa79e68SOri Kam static const char *const dynf_names[] = { 2204efa79e68SOri Kam RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, 22058f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_METADATA_NAME, 22068f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME 2207efa79e68SOri Kam }; 2208efa79e68SOri Kam unsigned int i; 2209efa79e68SOri Kam 2210efa79e68SOri Kam if (n < RTE_DIM(dynf_names)) 2211efa79e68SOri Kam return -ENOMEM; 2212efa79e68SOri Kam for (i = 0; i < RTE_DIM(dynf_names); i++) { 2213efa79e68SOri Kam if (names[i] == NULL) 2214efa79e68SOri Kam return -EINVAL; 2215efa79e68SOri Kam strcpy(names[i], dynf_names[i]); 2216efa79e68SOri Kam } 2217efa79e68SOri Kam return RTE_DIM(dynf_names); 2218efa79e68SOri Kam } 2219efa79e68SOri Kam 
/**
 * Check sibling device configurations.
 *
 * Sibling devices sharing the Infiniband device context must have
 * compatible configurations (dv_flow_en and dv_xmeta_en must match).
 *
 * NOTE(review): the previous comment described a qsort() comparison
 * callback, which did not match this function.
 *
 * @param priv
 *   Private data of the device being spawned.
 * @param config
 *   Configuration of the device to be created.
 * @param dpdk_dev
 *   Backing DPDK device used to enumerate siblings.
 *
 * @return
 *   0 on success (no sibling found or configurations match),
 *   rte_errno (EINVAL) otherwise.
 */
int
mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
			      struct mlx5_dev_config *config,
			      struct rte_device *dpdk_dev)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_dev_config *sh_conf = NULL;
	uint16_t port_id;

	MLX5_ASSERT(sh);
	/* Nothing to compare for the single/first device. */
	if (sh->refcnt == 1)
		return 0;
	/* Find the device with shared context. */
	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
		struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (opriv && opriv != priv && opriv->sh == sh) {
			sh_conf = &opriv->config;
			break;
		}
	}
	if (!sh_conf)
		return 0;
	if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
		DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
			" for shared %s context", sh->ibdev_name);
		rte_errno = EINVAL;
		return rte_errno;
	}
	if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
		DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
			" for shared %s context", sh->ibdev_name);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}

/**
 * Look for the ethernet device belonging to mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for device.
 * @param[in] odev
 *   Pointer to the hint device. When a device is being probed,
 *   its siblings (master and preceding representors) might not
 *   have been assigned to the driver yet (mlx5_os_pci_probe()
 *   is not completed); for this case a match on the hint device
 *   may be used to detect a sibling device.
 *
 * @return
 *   port_id of found device, RTE_MAX_ETHPORTS if not found.
 */
uint16_t
mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
{
	while (port_id < RTE_MAX_ETHPORTS) {
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		/* Match either the hint device or an mlx5-driven port. */
		if (dev->state != RTE_ETH_DEV_UNUSED &&
		    dev->device &&
		    (dev->device == odev ||
		     (dev->device->driver &&
		      dev->device->driver->name &&
		      ((strcmp(dev->device->driver->name,
			       MLX5_PCI_DRIVER_NAME) == 0) ||
		       (strcmp(dev->device->driver->name,
			       MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
			break;
		port_id++;
	}
	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;
	return port_id;
}

/**
 * Callback to remove a device.
 *
 * This function removes all Ethernet devices belonging to a given device.
 *
 * @param[in] cdev
 *   Pointer to the generic device.
 *
 * @return
 *   0 on success, -EIO if closing any of the ports failed.
 */
int
mlx5_net_remove(struct mlx5_common_device *cdev)
{
	uint16_t port_id;
	int ret = 0;

	RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
		/*
		 * mlx5_dev_close() is not registered to secondary process,
		 * call the close function explicitly for secondary process.
		 */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
		else
			ret |= rte_eth_dev_close(port_id);
	}
	/* Collapse any accumulated per-port failures into a single -EIO. */
	return ret == 0 ? 0 : -EIO;
}

/* PCI device IDs (ConnectX-4 .. ConnectX-7 and BlueField) served by this PMD. */
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

/* Ethernet class driver descriptor registered with the mlx5 common layer. */
static struct mlx5_class_driver mlx5_net_driver = {
	.drv_class = MLX5_CLASS_ETH,
	.name = RTE_STR(MLX5_ETH_DRIVER_NAME),
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_os_net_probe,
	.remove = mlx5_net_remove,
	.probe_again = 1,
	.intr_lsc = 1,
	.intr_rmv = 1,
};

/* Initialize driver log type. */
RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
	mlx5_common_init();
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/* Register only when the glue (rdma-core) library was loaded. */
	if (mlx5_glue)
		mlx5_class_driver_register(&mlx5_net_driver);
}

RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");