18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <stdint.h> 10771fa900SAdrien Mazarguil #include <stdlib.h> 11e72dd09bSNélio Laranjeiro #include <errno.h> 12771fa900SAdrien Mazarguil #include <net/if.h> 134a984153SXueming Li #include <sys/mman.h> 14ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 15771fa900SAdrien Mazarguil 16771fa900SAdrien Mazarguil /* Verbs header. */ 17771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 18771fa900SAdrien Mazarguil #ifdef PEDANTIC 19fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 20771fa900SAdrien Mazarguil #endif 21771fa900SAdrien Mazarguil #include <infiniband/verbs.h> 22771fa900SAdrien Mazarguil #ifdef PEDANTIC 23fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 24771fa900SAdrien Mazarguil #endif 25771fa900SAdrien Mazarguil 26771fa900SAdrien Mazarguil #include <rte_malloc.h> 27ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 28fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 29771fa900SAdrien Mazarguil #include <rte_pci.h> 30c752998bSGaetan Rivet #include <rte_bus_pci.h> 31771fa900SAdrien Mazarguil #include <rte_common.h> 32e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 33e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 34e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 35f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 36f15db67dSMatan Azrad #include <rte_alarm.h> 37771fa900SAdrien Mazarguil 387b4f1e6bSMatan Azrad #include <mlx5_glue.h> 397b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h> 4093e30982SMatan Azrad #include <mlx5_common.h> 41391b8bccSOphir 
Munk #include <mlx5_common_os.h> 42a4de9586SVu Pham #include <mlx5_common_mp.h> 437b4f1e6bSMatan Azrad 447b4f1e6bSMatan Azrad #include "mlx5_defs.h" 45771fa900SAdrien Mazarguil #include "mlx5.h" 46771fa900SAdrien Mazarguil #include "mlx5_utils.h" 472e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 48771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 49974f1e7eSYongseok Koh #include "mlx5_mr.h" 5084c406e7SOri Kam #include "mlx5_flow.h" 51efa79e68SOri Kam #include "rte_pmd_mlx5.h" 52771fa900SAdrien Mazarguil 5399c12dccSNélio Laranjeiro /* Device parameter to enable RX completion queue compression. */ 5499c12dccSNélio Laranjeiro #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" 5599c12dccSNélio Laranjeiro 56bc91e8dbSYongseok Koh /* Device parameter to enable RX completion entry padding to 128B. */ 57bc91e8dbSYongseok Koh #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" 58bc91e8dbSYongseok Koh 5978c7a16dSYongseok Koh /* Device parameter to enable padding Rx packet to cacheline size. */ 6078c7a16dSYongseok Koh #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" 6178c7a16dSYongseok Koh 627d6bf6b8SYongseok Koh /* Device parameter to enable Multi-Packet Rx queue. */ 637d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_EN "mprq_en" 647d6bf6b8SYongseok Koh 657d6bf6b8SYongseok Koh /* Device parameter to configure log 2 of the number of strides for MPRQ. */ 667d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" 677d6bf6b8SYongseok Koh 68ecb16045SAlexander Kozyrev /* Device parameter to configure log 2 of the stride size for MPRQ. */ 69ecb16045SAlexander Kozyrev #define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size" 70ecb16045SAlexander Kozyrev 717d6bf6b8SYongseok Koh /* Device parameter to limit the size of memcpy'd packet for MPRQ. */ 727d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" 737d6bf6b8SYongseok Koh 747d6bf6b8SYongseok Koh /* Device parameter to set the minimum number of Rx queues to enable MPRQ. 
 */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored.*/
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to force doorbell register mapping
 * to non-cached region eliminating the extra write memory barrier.
1078409a285SViacheslav Ovsiienko */ 1088409a285SViacheslav Ovsiienko #define MLX5_TX_DB_NC "tx_db_nc" 1098409a285SViacheslav Ovsiienko 1108409a285SViacheslav Ovsiienko /* 111a6bd4911SViacheslav Ovsiienko * Device parameter to include 2 dsegs in the title WQEBB. 112a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 113a6bd4911SViacheslav Ovsiienko */ 1146ce84bd8SYongseok Koh #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" 1156ce84bd8SYongseok Koh 116a6bd4911SViacheslav Ovsiienko /* 117a6bd4911SViacheslav Ovsiienko * Device parameter to limit the size of inlining packet. 118a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 119a6bd4911SViacheslav Ovsiienko */ 1206ce84bd8SYongseok Koh #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" 1216ce84bd8SYongseok Koh 122a6bd4911SViacheslav Ovsiienko /* 1238f848f32SViacheslav Ovsiienko * Device parameter to enable Tx scheduling on timestamps 1248f848f32SViacheslav Ovsiienko * and specify the packet pacing granularity in nanoseconds. 1258f848f32SViacheslav Ovsiienko */ 1268f848f32SViacheslav Ovsiienko #define MLX5_TX_PP "tx_pp" 1278f848f32SViacheslav Ovsiienko 1288f848f32SViacheslav Ovsiienko /* 1298f848f32SViacheslav Ovsiienko * Device parameter to specify skew in nanoseconds on Tx datapath, 1308f848f32SViacheslav Ovsiienko * it represents the time between SQ start WQE processing and 1318f848f32SViacheslav Ovsiienko * appearing actual packet data on the wire. 1328f848f32SViacheslav Ovsiienko */ 1338f848f32SViacheslav Ovsiienko #define MLX5_TX_SKEW "tx_skew" 1348f848f32SViacheslav Ovsiienko 1358f848f32SViacheslav Ovsiienko /* 136a6bd4911SViacheslav Ovsiienko * Device parameter to enable hardware Tx vector. 137a6bd4911SViacheslav Ovsiienko * Deprecated, ignored (no vectorized Tx routines anymore). 138a6bd4911SViacheslav Ovsiienko */ 1395644d5b9SNelio Laranjeiro #define MLX5_TX_VEC_EN "tx_vec_en" 1405644d5b9SNelio Laranjeiro 1415644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Rx vector. 
*/ 1425644d5b9SNelio Laranjeiro #define MLX5_RX_VEC_EN "rx_vec_en" 1435644d5b9SNelio Laranjeiro 14478a54648SXueming Li /* Allow L3 VXLAN flow creation. */ 14578a54648SXueming Li #define MLX5_L3_VXLAN_EN "l3_vxlan_en" 14678a54648SXueming Li 147e2b4925eSOri Kam /* Activate DV E-Switch flow steering. */ 148e2b4925eSOri Kam #define MLX5_DV_ESW_EN "dv_esw_en" 149e2b4925eSOri Kam 15051e72d38SOri Kam /* Activate DV flow steering. */ 15151e72d38SOri Kam #define MLX5_DV_FLOW_EN "dv_flow_en" 15251e72d38SOri Kam 1532d241515SViacheslav Ovsiienko /* Enable extensive flow metadata support. */ 1542d241515SViacheslav Ovsiienko #define MLX5_DV_XMETA_EN "dv_xmeta_en" 1552d241515SViacheslav Ovsiienko 1560f0ae73aSShiri Kuzin /* Device parameter to let the user manage the lacp traffic of bonded device */ 1570f0ae73aSShiri Kuzin #define MLX5_LACP_BY_USER "lacp_by_user" 1580f0ae73aSShiri Kuzin 159db209cc3SNélio Laranjeiro /* Activate Netlink support in VF mode. */ 160db209cc3SNélio Laranjeiro #define MLX5_VF_NL_EN "vf_nl_en" 161db209cc3SNélio Laranjeiro 162dceb5029SYongseok Koh /* Enable extending memsegs when creating a MR. */ 163dceb5029SYongseok Koh #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" 164dceb5029SYongseok Koh 1656de569f5SAdrien Mazarguil /* Select port representors to instantiate. */ 1666de569f5SAdrien Mazarguil #define MLX5_REPRESENTOR "representor" 1676de569f5SAdrien Mazarguil 168066cfecdSMatan Azrad /* Device parameter to configure the maximum number of dump files per queue. */ 169066cfecdSMatan Azrad #define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num" 170066cfecdSMatan Azrad 17121bb6c7eSDekel Peled /* Configure timeout of LRO session (in microseconds). */ 17221bb6c7eSDekel Peled #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec" 17321bb6c7eSDekel Peled 1741ad9a3d0SBing Zhao /* 1751ad9a3d0SBing Zhao * Device parameter to configure the total data buffer size for a single 1761ad9a3d0SBing Zhao * hairpin queue (logarithm value). 
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* Name of the memzone shared between primary and secondary processes. */
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/* List of shared device contexts and the mutex protecting it. */
static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
				LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Per-type configuration for the indexed memory pools holding flow
 * resources. NOTE(review): entry order presumably matches the
 * MLX5_IPOOL_* index enum (MLX5_IPOOL_MLX5_FLOW is used as an index
 * below) — confirm against the enum declaration in the header.
 */
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* Pool for DV encap/decap action resources. */
	{
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_encap_decap_ipool",
	},
	/* Pool for DV push-VLAN action resources. */
	{
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_push_vlan_ipool",
	},
	/* Pool for DV tag resources. */
	{
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_tag_ipool",
	},
	/* Pool for DV port-id action resources. */
	{
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_port_id_ipool",
	},
	/* Pool for jump (flow table) resources. */
	{
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_jump_ipool",
	},
#endif
	/* Pool for flow meter objects. */
	{
		.size = sizeof(struct mlx5_flow_meter),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_meter_ipool",
	},
	/* Pool for metadata register copy resources. */
	{
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_mcp_ipool",
	},
	/* Pool for hash Rx queue objects (plus inline RSS key storage). */
	{
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_hrxq_ipool",
	},
	{
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It set in run time according to PCI function configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "mlx5_flow_handle_ipool",
	},
	/* Pool for rte_flow objects; larger trunks, lock needed. */
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = rte_malloc_socket,
		.free = rte_free,
		.type = "rte_flow_ipool",
	},
};


/* Initial capacity of a flow ID pool free-list array. */
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
/* Growth factor applied when a flow ID pool free-list array is full. */
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096

/**
 * Allocate ID pool structure.
 *
 * @param[in] max_id
 *   The maximum id can be allocated from the pool.
 *
 * @return
 *   Pointer to pool object, NULL value otherwise.
329830d2091SOri Kam */ 330830d2091SOri Kam struct mlx5_flow_id_pool * 33130a3687dSSuanming Mou mlx5_flow_id_pool_alloc(uint32_t max_id) 332830d2091SOri Kam { 333830d2091SOri Kam struct mlx5_flow_id_pool *pool; 334830d2091SOri Kam void *mem; 335830d2091SOri Kam 336830d2091SOri Kam pool = rte_zmalloc("id pool allocation", sizeof(*pool), 337830d2091SOri Kam RTE_CACHE_LINE_SIZE); 338830d2091SOri Kam if (!pool) { 339830d2091SOri Kam DRV_LOG(ERR, "can't allocate id pool"); 340830d2091SOri Kam rte_errno = ENOMEM; 341830d2091SOri Kam return NULL; 342830d2091SOri Kam } 343830d2091SOri Kam mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), 344830d2091SOri Kam RTE_CACHE_LINE_SIZE); 345830d2091SOri Kam if (!mem) { 346830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 347830d2091SOri Kam rte_errno = ENOMEM; 348830d2091SOri Kam goto error; 349830d2091SOri Kam } 350830d2091SOri Kam pool->free_arr = mem; 351830d2091SOri Kam pool->curr = pool->free_arr; 352830d2091SOri Kam pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; 353830d2091SOri Kam pool->base_index = 0; 35430a3687dSSuanming Mou pool->max_id = max_id; 355830d2091SOri Kam return pool; 356830d2091SOri Kam error: 357830d2091SOri Kam rte_free(pool); 358830d2091SOri Kam return NULL; 359830d2091SOri Kam } 360830d2091SOri Kam 361830d2091SOri Kam /** 362830d2091SOri Kam * Release ID pool structure. 363830d2091SOri Kam * 364830d2091SOri Kam * @param[in] pool 365830d2091SOri Kam * Pointer to flow id pool object to free. 366830d2091SOri Kam */ 367830d2091SOri Kam void 368830d2091SOri Kam mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) 369830d2091SOri Kam { 370830d2091SOri Kam rte_free(pool->free_arr); 371830d2091SOri Kam rte_free(pool); 372830d2091SOri Kam } 373830d2091SOri Kam 374830d2091SOri Kam /** 375830d2091SOri Kam * Generate ID. 376830d2091SOri Kam * 377830d2091SOri Kam * @param[in] pool 378830d2091SOri Kam * Pointer to flow id pool. 
379830d2091SOri Kam * @param[out] id 380830d2091SOri Kam * The generated ID. 381830d2091SOri Kam * 382830d2091SOri Kam * @return 383830d2091SOri Kam * 0 on success, error value otherwise. 384830d2091SOri Kam */ 385830d2091SOri Kam uint32_t 386830d2091SOri Kam mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) 387830d2091SOri Kam { 388830d2091SOri Kam if (pool->curr == pool->free_arr) { 38930a3687dSSuanming Mou if (pool->base_index == pool->max_id) { 390830d2091SOri Kam rte_errno = ENOMEM; 391830d2091SOri Kam DRV_LOG(ERR, "no free id"); 392830d2091SOri Kam return -rte_errno; 393830d2091SOri Kam } 394830d2091SOri Kam *id = ++pool->base_index; 395830d2091SOri Kam return 0; 396830d2091SOri Kam } 397830d2091SOri Kam *id = *(--pool->curr); 398830d2091SOri Kam return 0; 399830d2091SOri Kam } 400830d2091SOri Kam 401830d2091SOri Kam /** 402830d2091SOri Kam * Release ID. 403830d2091SOri Kam * 404830d2091SOri Kam * @param[in] pool 405830d2091SOri Kam * Pointer to flow id pool. 406830d2091SOri Kam * @param[out] id 407830d2091SOri Kam * The generated ID. 408830d2091SOri Kam * 409830d2091SOri Kam * @return 410830d2091SOri Kam * 0 on success, error value otherwise. 
411830d2091SOri Kam */ 412830d2091SOri Kam uint32_t 413830d2091SOri Kam mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) 414830d2091SOri Kam { 415830d2091SOri Kam uint32_t size; 416830d2091SOri Kam uint32_t size2; 417830d2091SOri Kam void *mem; 418830d2091SOri Kam 419830d2091SOri Kam if (pool->curr == pool->last) { 420830d2091SOri Kam size = pool->curr - pool->free_arr; 421830d2091SOri Kam size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR; 4228e46d4e1SAlexander Kozyrev MLX5_ASSERT(size2 > size); 423830d2091SOri Kam mem = rte_malloc("", size2 * sizeof(uint32_t), 0); 424830d2091SOri Kam if (!mem) { 425830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 426830d2091SOri Kam rte_errno = ENOMEM; 427830d2091SOri Kam return -rte_errno; 428830d2091SOri Kam } 429830d2091SOri Kam memcpy(mem, pool->free_arr, size * sizeof(uint32_t)); 430830d2091SOri Kam rte_free(pool->free_arr); 431830d2091SOri Kam pool->free_arr = mem; 432830d2091SOri Kam pool->curr = pool->free_arr + size; 433830d2091SOri Kam pool->last = pool->free_arr + size2; 434830d2091SOri Kam } 435830d2091SOri Kam *pool->curr = id; 436830d2091SOri Kam pool->curr++; 437830d2091SOri Kam return 0; 438830d2091SOri Kam } 439830d2091SOri Kam 44017e19bc4SViacheslav Ovsiienko /** 441fa2d01c8SDong Zhou * Initialize the shared aging list information per port. 442fa2d01c8SDong Zhou * 443fa2d01c8SDong Zhou * @param[in] sh 4446e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
445fa2d01c8SDong Zhou */ 446fa2d01c8SDong Zhou static void 4476e88bc42SOphir Munk mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh) 448fa2d01c8SDong Zhou { 449fa2d01c8SDong Zhou uint32_t i; 450fa2d01c8SDong Zhou struct mlx5_age_info *age_info; 451fa2d01c8SDong Zhou 452fa2d01c8SDong Zhou for (i = 0; i < sh->max_port; i++) { 453fa2d01c8SDong Zhou age_info = &sh->port[i].age_info; 454fa2d01c8SDong Zhou age_info->flags = 0; 455fa2d01c8SDong Zhou TAILQ_INIT(&age_info->aged_counters); 456fa2d01c8SDong Zhou rte_spinlock_init(&age_info->aged_sl); 457fa2d01c8SDong Zhou MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER); 458fa2d01c8SDong Zhou } 459fa2d01c8SDong Zhou } 460fa2d01c8SDong Zhou 461fa2d01c8SDong Zhou /** 4625382d28cSMatan Azrad * Initialize the counters management structure. 4635382d28cSMatan Azrad * 4645382d28cSMatan Azrad * @param[in] sh 4656e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free 4665382d28cSMatan Azrad */ 4675382d28cSMatan Azrad static void 4686e88bc42SOphir Munk mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) 4695382d28cSMatan Azrad { 4705af61440SMatan Azrad int i; 4715382d28cSMatan Azrad 4725af61440SMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 4735382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.flow_counters); 4745af61440SMatan Azrad for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { 475b1cc2266SSuanming Mou sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET; 476b1cc2266SSuanming Mou sh->cmng.ccont[i].max_id = -1; 477b1cc2266SSuanming Mou sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID; 4785af61440SMatan Azrad TAILQ_INIT(&sh->cmng.ccont[i].pool_list); 4795af61440SMatan Azrad rte_spinlock_init(&sh->cmng.ccont[i].resize_sl); 480ac79183dSSuanming Mou TAILQ_INIT(&sh->cmng.ccont[i].counters); 481ac79183dSSuanming Mou rte_spinlock_init(&sh->cmng.ccont[i].csl); 482fa2d01c8SDong Zhou } 4835382d28cSMatan Azrad } 4845382d28cSMatan Azrad 4855382d28cSMatan Azrad /** 4865382d28cSMatan Azrad * Destroy all the resources allocated for a counter 
memory management. 4875382d28cSMatan Azrad * 4885382d28cSMatan Azrad * @param[in] mng 4895382d28cSMatan Azrad * Pointer to the memory management structure. 4905382d28cSMatan Azrad */ 4915382d28cSMatan Azrad static void 4925382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) 4935382d28cSMatan Azrad { 4945382d28cSMatan Azrad uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; 4955382d28cSMatan Azrad 4965382d28cSMatan Azrad LIST_REMOVE(mng, next); 4975382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy(mng->dm)); 4985382d28cSMatan Azrad claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); 4995382d28cSMatan Azrad rte_free(mem); 5005382d28cSMatan Azrad } 5015382d28cSMatan Azrad 5025382d28cSMatan Azrad /** 5035382d28cSMatan Azrad * Close and release all the resources of the counters management. 5045382d28cSMatan Azrad * 5055382d28cSMatan Azrad * @param[in] sh 5066e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free. 5075382d28cSMatan Azrad */ 5085382d28cSMatan Azrad static void 5096e88bc42SOphir Munk mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh) 5105382d28cSMatan Azrad { 5115382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng *mng; 5125af61440SMatan Azrad int i; 5135382d28cSMatan Azrad int j; 514f15db67dSMatan Azrad int retries = 1024; 5155382d28cSMatan Azrad 516f15db67dSMatan Azrad rte_errno = 0; 517f15db67dSMatan Azrad while (--retries) { 518f15db67dSMatan Azrad rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh); 519f15db67dSMatan Azrad if (rte_errno != EINPROGRESS) 520f15db67dSMatan Azrad break; 521f15db67dSMatan Azrad rte_pause(); 522f15db67dSMatan Azrad } 5235af61440SMatan Azrad for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { 5245382d28cSMatan Azrad struct mlx5_flow_counter_pool *pool; 5255af61440SMatan Azrad uint32_t batch = !!(i > 1); 5265382d28cSMatan Azrad 5275af61440SMatan Azrad if (!sh->cmng.ccont[i].pools) 5285382d28cSMatan Azrad continue; 5295af61440SMatan Azrad pool = 
TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 5305382d28cSMatan Azrad while (pool) { 5315af61440SMatan Azrad if (batch && pool->min_dcs) 5325af61440SMatan Azrad claim_zero(mlx5_devx_cmd_destroy 533fa2d01c8SDong Zhou (pool->min_dcs)); 5345382d28cSMatan Azrad for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { 5358d93c830SDong Zhou if (MLX5_POOL_GET_CNT(pool, j)->action) 5365382d28cSMatan Azrad claim_zero 5375382d28cSMatan Azrad (mlx5_glue->destroy_flow_action 538fa2d01c8SDong Zhou (MLX5_POOL_GET_CNT 539fa2d01c8SDong Zhou (pool, j)->action)); 540826b8a87SSuanming Mou if (!batch && MLX5_GET_POOL_CNT_EXT 541826b8a87SSuanming Mou (pool, j)->dcs) 5425382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy 543826b8a87SSuanming Mou (MLX5_GET_POOL_CNT_EXT 544826b8a87SSuanming Mou (pool, j)->dcs)); 5455382d28cSMatan Azrad } 5465af61440SMatan Azrad TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next); 5475382d28cSMatan Azrad rte_free(pool); 5485af61440SMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 5495382d28cSMatan Azrad } 5505af61440SMatan Azrad rte_free(sh->cmng.ccont[i].pools); 5515382d28cSMatan Azrad } 5525382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 5535382d28cSMatan Azrad while (mng) { 5545382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(mng); 5555382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 5565382d28cSMatan Azrad } 5575382d28cSMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 5585382d28cSMatan Azrad } 5595382d28cSMatan Azrad 5605382d28cSMatan Azrad /** 561014d1cbeSSuanming Mou * Initialize the flow resources' indexed mempool. 562014d1cbeSSuanming Mou * 563014d1cbeSSuanming Mou * @param[in] sh 5646e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 565b88341caSSuanming Mou * @param[in] sh 566b88341caSSuanming Mou * Pointer to user dev config. 
567014d1cbeSSuanming Mou */ 568014d1cbeSSuanming Mou static void 5696e88bc42SOphir Munk mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh, 5705c761238SGregory Etelson const struct mlx5_dev_config *config) 571014d1cbeSSuanming Mou { 572014d1cbeSSuanming Mou uint8_t i; 5735c761238SGregory Etelson struct mlx5_indexed_pool_config cfg; 574014d1cbeSSuanming Mou 575a1da6f62SSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) { 5765c761238SGregory Etelson cfg = mlx5_ipool_cfg[i]; 5775c761238SGregory Etelson switch (i) { 5785c761238SGregory Etelson default: 5795c761238SGregory Etelson break; 5805c761238SGregory Etelson /* 5815c761238SGregory Etelson * Set MLX5_IPOOL_MLX5_FLOW ipool size 5825c761238SGregory Etelson * according to PCI function flow configuration. 5835c761238SGregory Etelson */ 5845c761238SGregory Etelson case MLX5_IPOOL_MLX5_FLOW: 5855c761238SGregory Etelson cfg.size = config->dv_flow_en ? 5865c761238SGregory Etelson sizeof(struct mlx5_flow_handle) : 5875c761238SGregory Etelson MLX5_FLOW_HANDLE_VERBS_SIZE; 5885c761238SGregory Etelson break; 5895c761238SGregory Etelson } 590a1da6f62SSuanming Mou if (config->reclaim_mode) 5915c761238SGregory Etelson cfg.release_mem_en = 1; 5925c761238SGregory Etelson sh->ipool[i] = mlx5_ipool_create(&cfg); 593014d1cbeSSuanming Mou } 594a1da6f62SSuanming Mou } 595014d1cbeSSuanming Mou 596014d1cbeSSuanming Mou /** 597014d1cbeSSuanming Mou * Release the flow resources' indexed mempool. 598014d1cbeSSuanming Mou * 599014d1cbeSSuanming Mou * @param[in] sh 6006e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
601014d1cbeSSuanming Mou */ 602014d1cbeSSuanming Mou static void 6036e88bc42SOphir Munk mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh) 604014d1cbeSSuanming Mou { 605014d1cbeSSuanming Mou uint8_t i; 606014d1cbeSSuanming Mou 607014d1cbeSSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) 608014d1cbeSSuanming Mou mlx5_ipool_destroy(sh->ipool[i]); 609014d1cbeSSuanming Mou } 610014d1cbeSSuanming Mou 611*daa38a89SBing Zhao /* 612*daa38a89SBing Zhao * Check if dynamic flex parser for eCPRI already exists. 613*daa38a89SBing Zhao * 614*daa38a89SBing Zhao * @param dev 615*daa38a89SBing Zhao * Pointer to Ethernet device structure. 616*daa38a89SBing Zhao * 617*daa38a89SBing Zhao * @return 618*daa38a89SBing Zhao * true on exists, false on not. 619*daa38a89SBing Zhao */ 620*daa38a89SBing Zhao bool 621*daa38a89SBing Zhao mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev) 622*daa38a89SBing Zhao { 623*daa38a89SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 624*daa38a89SBing Zhao struct mlx5_flex_parser_profiles *prf = 625*daa38a89SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 626*daa38a89SBing Zhao 627*daa38a89SBing Zhao return !!prf->obj; 628*daa38a89SBing Zhao } 629*daa38a89SBing Zhao 630*daa38a89SBing Zhao /* 631*daa38a89SBing Zhao * Allocation of a flex parser for eCPRI. Once created, this parser related 632*daa38a89SBing Zhao * resources will be held until the device is closed. 633*daa38a89SBing Zhao * 634*daa38a89SBing Zhao * @param dev 635*daa38a89SBing Zhao * Pointer to Ethernet device structure. 636*daa38a89SBing Zhao * 637*daa38a89SBing Zhao * @return 638*daa38a89SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 
639*daa38a89SBing Zhao */ 640*daa38a89SBing Zhao int 641*daa38a89SBing Zhao mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev) 642*daa38a89SBing Zhao { 643*daa38a89SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 644*daa38a89SBing Zhao struct mlx5_flex_parser_profiles *prf = 645*daa38a89SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 646*daa38a89SBing Zhao 647*daa38a89SBing Zhao (void)prf; 648*daa38a89SBing Zhao return 0; 649*daa38a89SBing Zhao } 650*daa38a89SBing Zhao 651014d1cbeSSuanming Mou /** 65291389890SOphir Munk * Allocate shared device context. If there is multiport device the 65317e19bc4SViacheslav Ovsiienko * master and representors will share this context, if there is single 65491389890SOphir Munk * port dedicated device, the context will be used by only given 65517e19bc4SViacheslav Ovsiienko * port due to unification. 65617e19bc4SViacheslav Ovsiienko * 65791389890SOphir Munk * Routine first searches the context for the specified device name, 65817e19bc4SViacheslav Ovsiienko * if found the shared context assumed and reference counter is incremented. 65917e19bc4SViacheslav Ovsiienko * If no context found the new one is created and initialized with specified 66091389890SOphir Munk * device context and parameters. 66117e19bc4SViacheslav Ovsiienko * 66217e19bc4SViacheslav Ovsiienko * @param[in] spawn 66391389890SOphir Munk * Pointer to the device attributes (name, port, etc). 6648409a285SViacheslav Ovsiienko * @param[in] config 6658409a285SViacheslav Ovsiienko * Pointer to device configuration structure. 66617e19bc4SViacheslav Ovsiienko * 66717e19bc4SViacheslav Ovsiienko * @return 6686e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object on success, 66917e19bc4SViacheslav Ovsiienko * otherwise NULL and rte_errno is set. 
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  const struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* The global context list is protected for the whole routine. */
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name,
			    mlx5_os_get_dev_device_name(spawn->phys_dev))) {
			/* Existing context found - just take a reference. */
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	/*
	 * The context is followed by one mlx5_dev_shared_port slot per
	 * possible port; rte_zmalloc() zero-fills, so string buffers below
	 * stay NUL-terminated after the bounded strncpy().
	 */
	sh = rte_zmalloc("ethdev shared ib context",
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno  = ENOMEM;
		goto exit;
	}
	err = mlx5_os_open_device(spawn, config, sh);
	if (!sh->ctx)
		goto error;
	err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to max unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
	}
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	/* DevX-only objects: PDN, TD, TIS and the Tx doorbell UAR. */
	if (sh->devx) {
		err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
		if (err) {
			DRV_LOG(ERR, "Fail to extract pdn from PD");
			goto error;
		}
		sh->td = mlx5_devx_cmd_create_td(sh->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			err = ENOMEM;
			goto error;
		}
		tis_attr.transport_domain = sh->td->id;
		sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
		if (!sh->tis) {
			DRV_LOG(ERR, "TIS allocation failure");
			err = ENOMEM;
			goto error;
		}
		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
		if (!sh->tx_uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR.");
			err = ENOMEM;
			goto error;
		}
	}
	sh->flow_id_pool = mlx5_flow_id_pool_alloc
				((1 << HAIRPIN_FLOW_ID_BITS) - 1);
	if (!sh->flow_id_pool) {
		DRV_LOG(ERR, "can't create flow id pool");
		err = ENOMEM;
		goto error;
	}
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&sh->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&sh->uar_lock[i]);
#endif
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->share_cache.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 spawn->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
			      &sh->share_cache.dereg_mr_cb);
	mlx5_os_dev_shared_handler_install(sh);
	sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
	if (!sh->cnt_id_tbl) {
		err = rte_errno;
		goto error;
	}
	mlx5_flow_aging_init(sh);
	mlx5_flow_counters_mng_init(sh);
	mlx5_flow_ipool_create(sh, config);
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	return sh;
error:
	/*
	 * Error path: release whatever was created, in reverse order.
	 * Fields of the zero-filled sh that were never set are NULL and
	 * skipped by the checks below.
	 */
	pthread_mutex_destroy(&sh->txpp.mutex);
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	MLX5_ASSERT(sh);
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	if (sh->flow_id_pool)
		mlx5_flow_id_pool_release(sh->flow_id_pool);
	rte_free(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */
	struct mlx5_dev_ctx_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
		if (lctx == sh)
			break;
	MLX5_ASSERT(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	MLX5_ASSERT(sh);
	MLX5_ASSERT(sh->refcnt);
	/* Secondary process should not free the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Only the last reference actually tears the context down. */
	if (--sh->refcnt)
		goto exit;
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Release created Memory Regions. */
	mlx5_mr_release_cache(&sh->share_cache);
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 **/
	mlx5_flow_counters_mng_close(sh);
	mlx5_flow_ipool_destroy(sh);
	mlx5_os_dev_shared_handler_uninstall(sh);
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	/* DevX/verbs objects are destroyed before closing the device ctx. */
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	if (sh->flow_id_pool)
		mlx5_flow_id_pool_release(sh->flow_id_pool);
	pthread_mutex_destroy(&sh->txpp.mutex);
	rte_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}

/**
 * Destroy table hash list and all the root entries per domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	/* Key of the root (level 0) NIC-Rx table: all fields zero. */
	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = 0,
			.reserved = 0,
			.domain = 0,
			.direction = 0,
		}
	};
	struct mlx5_hlist_entry *pos;

	if (!sh->flow_tbls)
		return;
	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
	if (pos) {
		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
					entry);
		MLX5_ASSERT(tbl_data);
		mlx5_hlist_remove(sh->flow_tbls, pos);
		rte_free(tbl_data);
	}
	/* Root NIC-Tx table (direction = 1). */
	table_key.direction = 1;
	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
	if (pos) {
		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
					entry);
		MLX5_ASSERT(tbl_data);
		mlx5_hlist_remove(sh->flow_tbls, pos);
		rte_free(tbl_data);
	}
	/* Root FDB table (domain = 1). */
	table_key.direction = 0;
	table_key.domain = 1;
	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
	if (pos) {
		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
					entry);
		MLX5_ASSERT(tbl_data);
		mlx5_hlist_remove(sh->flow_tbls, pos);
		rte_free(tbl_data);
	}
	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}

/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_HLIST_NAMESIZE];
	int err = 0;

	MLX5_ASSERT(sh);
	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
	if (!sh->flow_tbls) {
		DRV_LOG(ERR, "flow tables with hash creation failed.");
		err = ENOMEM;
		return err;
	}
#ifndef HAVE_MLX5DV_DR
	/*
	 * In case we have not DR support, the zero tables should be created
	 * because DV expect to see them even if they cannot be created by
	 * RDMA-CORE.
	 */
	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = 0,
			.reserved = 0,
			.domain = 0,
			.direction = 0,
		}
	};
	/* Root NIC-Rx table entry. */
	struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
							  sizeof(*tbl_data), 0);

	if (!tbl_data) {
		err = ENOMEM;
		goto error;
	}
	tbl_data->entry.key = table_key.v64;
	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
	if (err)
		goto error;
	rte_atomic32_init(&tbl_data->tbl.refcnt);
	rte_atomic32_inc(&tbl_data->tbl.refcnt);
	/* Root NIC-Tx table entry (direction = 1). */
	table_key.direction = 1;
	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
	if (!tbl_data) {
		err = ENOMEM;
		goto error;
	}
	tbl_data->entry.key = table_key.v64;
	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
	if (err)
		goto error;
	rte_atomic32_init(&tbl_data->tbl.refcnt);
	rte_atomic32_inc(&tbl_data->tbl.refcnt);
	/* Root FDB table entry (domain = 1). */
	table_key.direction = 0;
	table_key.domain = 1;
	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
	if (!tbl_data) {
		err = ENOMEM;
		goto error;
	}
	tbl_data->entry.key = table_key.v64;
	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
	if (err)
		goto error;
	rte_atomic32_init(&tbl_data->tbl.refcnt);
	rte_atomic32_inc(&tbl_data->tbl.refcnt);
	return err;
error:
	/* Entries already inserted are reclaimed by the free routine. */
	mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
	return err;
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
11004d803a72SOlga Shern */ 11014d803a72SOlga Shern int 11024d803a72SOlga Shern mlx5_getenv_int(const char *name) 11034d803a72SOlga Shern { 11044d803a72SOlga Shern const char *val = getenv(name); 11054d803a72SOlga Shern 11064d803a72SOlga Shern if (val == NULL) 11074d803a72SOlga Shern return 0; 11084d803a72SOlga Shern return atoi(val); 11094d803a72SOlga Shern } 11104d803a72SOlga Shern 11114d803a72SOlga Shern /** 1112c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 1113c9ba7523SRaslan Darawsheh * 1114c9ba7523SRaslan Darawsheh * @param[in] dev 1115c9ba7523SRaslan Darawsheh * A pointer to eth_dev 1116c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 1117c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 1118c9ba7523SRaslan Darawsheh * 1119c9ba7523SRaslan Darawsheh * @return 1120c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 1121c9ba7523SRaslan Darawsheh */ 1122c9ba7523SRaslan Darawsheh int 1123c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 1124c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 1125c9ba7523SRaslan Darawsheh { 11268e46d4e1SAlexander Kozyrev MLX5_ASSERT(udp_tunnel != NULL); 1127c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && 1128c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 1129c9ba7523SRaslan Darawsheh return 0; 1130c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && 1131c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 1132c9ba7523SRaslan Darawsheh return 0; 1133c9ba7523SRaslan Darawsheh return -ENOTSUP; 1134c9ba7523SRaslan Darawsheh } 1135c9ba7523SRaslan Darawsheh 1136c9ba7523SRaslan Darawsheh /** 1137120dc4a7SYongseok Koh * Initialize process private data structure. 1138120dc4a7SYongseok Koh * 1139120dc4a7SYongseok Koh * @param dev 1140120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
1141120dc4a7SYongseok Koh * 1142120dc4a7SYongseok Koh * @return 1143120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 1144120dc4a7SYongseok Koh */ 1145120dc4a7SYongseok Koh int 1146120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 1147120dc4a7SYongseok Koh { 1148120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 1149120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 1150120dc4a7SYongseok Koh size_t ppriv_size; 1151120dc4a7SYongseok Koh 1152120dc4a7SYongseok Koh /* 1153120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 1154120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 1155120dc4a7SYongseok Koh */ 1156120dc4a7SYongseok Koh ppriv_size = 1157120dc4a7SYongseok Koh sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); 1158120dc4a7SYongseok Koh ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size, 1159120dc4a7SYongseok Koh RTE_CACHE_LINE_SIZE, dev->device->numa_node); 1160120dc4a7SYongseok Koh if (!ppriv) { 1161120dc4a7SYongseok Koh rte_errno = ENOMEM; 1162120dc4a7SYongseok Koh return -rte_errno; 1163120dc4a7SYongseok Koh } 1164120dc4a7SYongseok Koh ppriv->uar_table_sz = ppriv_size; 1165120dc4a7SYongseok Koh dev->process_private = ppriv; 1166120dc4a7SYongseok Koh return 0; 1167120dc4a7SYongseok Koh } 1168120dc4a7SYongseok Koh 1169120dc4a7SYongseok Koh /** 1170120dc4a7SYongseok Koh * Un-initialize process private data structure. 1171120dc4a7SYongseok Koh * 1172120dc4a7SYongseok Koh * @param dev 1173120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	rte_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	/* Secondary processes only release their per-process resources. */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Check if process_private released. */
		if (!dev->process_private)
			return;
		mlx5_tx_uar_uninit_secondary(dev);
		mlx5_proc_priv_uninit(dev);
		rte_eth_dev_release_port(dev);
		return;
	}
	if (!priv->sh)
		return;
	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ?
		mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
	/*
	 * If default mreg copy action is removed at the stop stage,
	 * the search will return none and nothing will be done anymore.
	 */
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	/*
	 * If all the flows are already flushed in the device stop stage,
	 * then this will return directly without any action.
	 */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_flow_meter_flush(dev, NULL);
	/* Free the intermediate buffers for flow creation. */
	mlx5_flow_free_intermediate(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	if (priv->mreg_cp_tbl)
		mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
	mlx5_mprq_free_mp(dev);
	mlx5_os_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
				       dev->data->mac_addrs,
				       MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->vmwa_context)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
	/* The verify calls below only warn about leaked objects. */
	ret = mlx5_hrxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	/*
	 * Free the shared context in last turn, because the cleanup
	 * routines above may use some shared fields, like
	 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
	 * ifindex if Netlink fails.
	 */
	mlx5_free_shared_dev_ctx(priv->sh);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		/* Free the switch domain only when no other port uses it. */
		MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
			break;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data (pointer to the struct mlx5_dev_config being filled in).
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long mod;
	signed long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	/*
	 * NOTE(review): no endptr check — a non-numeric value parses as 0
	 * without error; presumably relied upon for the "class" argument
	 * below, which carries a string value — confirm before tightening.
	 */
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	/* Absolute value; only used below to reject a zero tx_pp setting. */
	mod = tmp >= 0 ? tmp : -tmp;
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
		config->mprq.stride_size_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		/* Legacy key, kept for compatibility and remapped. */
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_max", key);
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
		config->txq_inline_min = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
		/* Only the three defined doorbell mapping modes are valid. */
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "invalid Tx doorbell "
				     "mapping parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_mpw", key);
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TX_PP, key) == 0) {
		/* tx_pp may be negative, but its magnitude must be non-zero. */
		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32) {
			DRV_LOG(ERR, "invalid extensive "
				     "metadata parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dv_xmeta_en = tmp;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
		config->max_dump_files_num = tmp;
	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
		config->lro.timeout = tmp;
	} else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
		/* The class key carries a string; nothing is stored here. */
		DRV_LOG(DEBUG, "class argument is %s.", val);
	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
		config->log_hp_size = tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1487e72dd09bSNélio Laranjeiro */ 14882eb4d010SOphir Munk int 14897fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1490e72dd09bSNélio Laranjeiro { 1491e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 149299c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1493bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 149478c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 14957d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 14967d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 1497ecb16045SAlexander Kozyrev MLX5_RX_MPRQ_LOG_STRIDE_SIZE, 14987d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 14997d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 15002a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1501505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1502505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1503505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 15042a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 150509d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1506230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 15076ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 15086ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 15098409a285SViacheslav Ovsiienko MLX5_TX_DB_NC, 15108f848f32SViacheslav Ovsiienko MLX5_TX_PP, 15118f848f32SViacheslav Ovsiienko MLX5_TX_SKEW, 15125644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 15135644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 151478a54648SXueming Li MLX5_L3_VXLAN_EN, 1515db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1516e2b4925eSOri Kam MLX5_DV_ESW_EN, 151751e72d38SOri Kam MLX5_DV_FLOW_EN, 15182d241515SViacheslav Ovsiienko MLX5_DV_XMETA_EN, 15190f0ae73aSShiri Kuzin MLX5_LACP_BY_USER, 1520dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 15216de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1522066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 152321bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1524d768f324SMatan Azrad MLX5_CLASS_ARG_NAME, 15251ad9a3d0SBing Zhao MLX5_HP_BUF_SIZE, 1526a1da6f62SSuanming Mou MLX5_RECLAIM_MEM, 1527e72dd09bSNélio Laranjeiro NULL, 1528e72dd09bSNélio 
Laranjeiro }; 1529e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1530e72dd09bSNélio Laranjeiro int ret = 0; 1531e72dd09bSNélio Laranjeiro int i; 1532e72dd09bSNélio Laranjeiro 1533e72dd09bSNélio Laranjeiro if (devargs == NULL) 1534e72dd09bSNélio Laranjeiro return 0; 1535e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. */ 1536e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 153715b0ea00SMatan Azrad if (kvlist == NULL) { 153815b0ea00SMatan Azrad rte_errno = EINVAL; 153915b0ea00SMatan Azrad return -rte_errno; 154015b0ea00SMatan Azrad } 1541e72dd09bSNélio Laranjeiro /* Process parameters. */ 1542e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1543e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1544e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 15457fe24446SShahaf Shuler mlx5_args_check, config); 1546a6d83b6aSNélio Laranjeiro if (ret) { 1547a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1548a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1549a6d83b6aSNélio Laranjeiro return -rte_errno; 1550e72dd09bSNélio Laranjeiro } 1551e72dd09bSNélio Laranjeiro } 1552a67323e4SShahaf Shuler } 1553e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1554e72dd09bSNélio Laranjeiro return 0; 1555e72dd09bSNélio Laranjeiro } 1556e72dd09bSNélio Laranjeiro 15577be600c8SYongseok Koh /** 15587be600c8SYongseok Koh * PMD global initialization. 15597be600c8SYongseok Koh * 15607be600c8SYongseok Koh * Independent from individual device, this function initializes global 15617be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 15627be600c8SYongseok Koh * Hence, each initialization is called once per a process. 15637be600c8SYongseok Koh * 15647be600c8SYongseok Koh * @return 15657be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	MLX5_ASSERT(sd);
	/* Serialize the init-done checks/updates across threads. */
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		/* Primary: set up shared MR event list and IPC service once. */
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
					   mlx5_mp_primary_handle);
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		/* Secondary: attach to the primary's IPC channel once. */
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
					     mlx5_mp_secondary_handle);
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}
16107be600c8SYongseok Koh 16117be600c8SYongseok Koh /** 161238b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE 161338b4b397SViacheslav Ovsiienko * while sending packets. 161438b4b397SViacheslav Ovsiienko * 161538b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 161638b4b397SViacheslav Ovsiienko * key is specified in devargs 161738b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 161838b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 1619ee76bddcSThomas Monjalon * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx 162038b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs 162138b4b397SViacheslav Ovsiienko * 162238b4b397SViacheslav Ovsiienko * @param spawn 162338b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 162438b4b397SViacheslav Ovsiienko * @param config 162538b4b397SViacheslav Ovsiienko * Device configuration parameters. 162638b4b397SViacheslav Ovsiienko */ 16272eb4d010SOphir Munk void 162838b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, 162938b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config) 163038b4b397SViacheslav Ovsiienko { 163138b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) { 163238b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. 
*/ 163338b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 163438b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 163538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 163638b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 163738b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 163838b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 163938b4b397SViacheslav Ovsiienko "txq_inline_mix aligned to minimal" 164038b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 164138b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 164238b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 164338b4b397SViacheslav Ovsiienko } 164438b4b397SViacheslav Ovsiienko break; 164538b4b397SViacheslav Ovsiienko } 164638b4b397SViacheslav Ovsiienko goto exit; 164738b4b397SViacheslav Ovsiienko } 164838b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 164938b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 165038b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 165138b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 165238b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 165338b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 165438b4b397SViacheslav Ovsiienko goto exit; 165538b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 165638b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
*/ 165738b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 165838b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 165938b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 166038b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 166138b4b397SViacheslav Ovsiienko goto exit; 166238b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 166338b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 166438b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 166538b4b397SViacheslav Ovsiienko break; 166638b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 166738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 166838b4b397SViacheslav Ovsiienko config->txq_inline_min = 166938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 167038b4b397SViacheslav Ovsiienko goto exit; 167138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 167238b4b397SViacheslav Ovsiienko config->txq_inline_min = 167338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 167438b4b397SViacheslav Ovsiienko goto exit; 167538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 167638b4b397SViacheslav Ovsiienko config->txq_inline_min = 167738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 167838b4b397SViacheslav Ovsiienko goto exit; 167938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 168038b4b397SViacheslav Ovsiienko config->txq_inline_min = 168138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 168238b4b397SViacheslav Ovsiienko goto exit; 168338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 168438b4b397SViacheslav Ovsiienko config->txq_inline_min = 168538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 168638b4b397SViacheslav Ovsiienko goto exit; 168738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 168838b4b397SViacheslav Ovsiienko config->txq_inline_min = 168938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 
169038b4b397SViacheslav Ovsiienko goto exit; 169138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 169238b4b397SViacheslav Ovsiienko config->txq_inline_min = 169338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 169438b4b397SViacheslav Ovsiienko goto exit; 169538b4b397SViacheslav Ovsiienko } 169638b4b397SViacheslav Ovsiienko } 169738b4b397SViacheslav Ovsiienko } 169838b4b397SViacheslav Ovsiienko /* 169938b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 170038b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 170138b4b397SViacheslav Ovsiienko * to determine old NICs. 170238b4b397SViacheslav Ovsiienko */ 170338b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 170438b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 170538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 170638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 170738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1708614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 170938b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 171038b4b397SViacheslav Ovsiienko break; 171138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 171238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 171338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 171438b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 171538b4b397SViacheslav Ovsiienko /* 171638b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 171738b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 171838b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 
171938b4b397SViacheslav Ovsiienko */ 172038b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 172120215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 172238b4b397SViacheslav Ovsiienko break; 172338b4b397SViacheslav Ovsiienko default: 172438b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 172538b4b397SViacheslav Ovsiienko break; 172638b4b397SViacheslav Ovsiienko } 172738b4b397SViacheslav Ovsiienko exit: 172838b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 172938b4b397SViacheslav Ovsiienko } 173038b4b397SViacheslav Ovsiienko 173138b4b397SViacheslav Ovsiienko /** 173239139371SViacheslav Ovsiienko * Configures the metadata mask fields in the shared context. 173339139371SViacheslav Ovsiienko * 173439139371SViacheslav Ovsiienko * @param [in] dev 173539139371SViacheslav Ovsiienko * Pointer to Ethernet device. 173639139371SViacheslav Ovsiienko */ 17372eb4d010SOphir Munk void 173839139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev) 173939139371SViacheslav Ovsiienko { 174039139371SViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private; 17416e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 174239139371SViacheslav Ovsiienko uint32_t meta, mark, reg_c0; 174339139371SViacheslav Ovsiienko 174439139371SViacheslav Ovsiienko reg_c0 = ~priv->vport_meta_mask; 174539139371SViacheslav Ovsiienko switch (priv->config.dv_xmeta_en) { 174639139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_LEGACY: 174739139371SViacheslav Ovsiienko meta = UINT32_MAX; 174839139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 174939139371SViacheslav Ovsiienko break; 175039139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META16: 175139139371SViacheslav Ovsiienko meta = reg_c0 >> rte_bsf32(reg_c0); 175239139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 175339139371SViacheslav Ovsiienko break; 175439139371SViacheslav Ovsiienko case 
MLX5_XMETA_MODE_META32: 175539139371SViacheslav Ovsiienko meta = UINT32_MAX; 175639139371SViacheslav Ovsiienko mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK; 175739139371SViacheslav Ovsiienko break; 175839139371SViacheslav Ovsiienko default: 175939139371SViacheslav Ovsiienko meta = 0; 176039139371SViacheslav Ovsiienko mark = 0; 17618e46d4e1SAlexander Kozyrev MLX5_ASSERT(false); 176239139371SViacheslav Ovsiienko break; 176339139371SViacheslav Ovsiienko } 176439139371SViacheslav Ovsiienko if (sh->dv_mark_mask && sh->dv_mark_mask != mark) 176539139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X", 176639139371SViacheslav Ovsiienko sh->dv_mark_mask, mark); 176739139371SViacheslav Ovsiienko else 176839139371SViacheslav Ovsiienko sh->dv_mark_mask = mark; 176939139371SViacheslav Ovsiienko if (sh->dv_meta_mask && sh->dv_meta_mask != meta) 177039139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X", 177139139371SViacheslav Ovsiienko sh->dv_meta_mask, meta); 177239139371SViacheslav Ovsiienko else 177339139371SViacheslav Ovsiienko sh->dv_meta_mask = meta; 177439139371SViacheslav Ovsiienko if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0) 177539139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X", 177639139371SViacheslav Ovsiienko sh->dv_meta_mask, reg_c0); 177739139371SViacheslav Ovsiienko else 177839139371SViacheslav Ovsiienko sh->dv_regc0_mask = reg_c0; 177939139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en); 178039139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask); 178139139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask); 178239139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask); 178339139371SViacheslav Ovsiienko } 178439139371SViacheslav Ovsiienko 1785efa79e68SOri Kam int 1786efa79e68SOri Kam 
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
{
	/* Dynamic mbuf flag names this PMD may register/use. */
	static const char *const dynf_names[] = {
		RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
		RTE_MBUF_DYNFLAG_METADATA_NAME,
		RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
	};
	unsigned int i;

	/* Caller must provide room for all names. */
	if (n < RTE_DIM(dynf_names))
		return -ENOMEM;
	for (i = 0; i < RTE_DIM(dynf_names); i++) {
		if (names[i] == NULL)
			return -EINVAL;
		/* Copy into caller-provided buffers (sizing is the
		 * caller's contract — no length is passed per entry). */
		strcpy(names[i], dynf_names[i]);
	}
	/* Number of names filled in. */
	return RTE_DIM(dynf_names);
}

/**
 * Check sibling device configurations.
 *
 * Sibling devices sharing the same Infiniband device context
 * should have compatible configurations. Only "dv_flow_en" and
 * "dv_xmeta_en" are compared against the first other port found
 * on the same shared context.
 *
 * @param priv
 *   Private device descriptor.
 * @param config
 *   Configuration of the device is going to be created.
 *
 * @return
 *   0 on success, rte_errno (EINVAL) on mismatch.
 */
int
mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
			      struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_dev_config *sh_conf = NULL;
	uint16_t port_id;

	MLX5_ASSERT(sh);
	/* Nothing to compare for the single/first device. */
	if (sh->refcnt == 1)
		return 0;
	/* Find the device with shared context. */
	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
		struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (opriv && opriv != priv && opriv->sh == sh) {
			sh_conf = &opriv->config;
			break;
		}
	}
	/* No other port on this shared context — nothing to check. */
	if (!sh_conf)
		return 0;
	/* XOR detects any boolean disagreement between the two configs. */
	if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
		DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
			     " for shared %s context", sh->ibdev_name);
		rte_errno = EINVAL;
		return rte_errno;
	}
	if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
		DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
			     " for shared %s context", sh->ibdev_name);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}

/**
 * Look for the ethernet device belonging to mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for device.
 * @param[in] pci_dev
 *   Pointer to the hint PCI device. When device is being probed
 *   its siblings (master and preceding representors) might
 *   not have assigned driver yet (because the mlx5_os_pci_probe()
 *   is not completed yet), for this case match on hint PCI
 *   device may be used to detect sibling device.
 *
 * @return
 *   port_id of found device, RTE_MAX_ETHPORTS if not found.
1872fbc83412SViacheslav Ovsiienko */ 1873f7e95215SViacheslav Ovsiienko uint16_t 1874fbc83412SViacheslav Ovsiienko mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev) 1875f7e95215SViacheslav Ovsiienko { 1876f7e95215SViacheslav Ovsiienko while (port_id < RTE_MAX_ETHPORTS) { 1877f7e95215SViacheslav Ovsiienko struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1878f7e95215SViacheslav Ovsiienko 1879f7e95215SViacheslav Ovsiienko if (dev->state != RTE_ETH_DEV_UNUSED && 1880f7e95215SViacheslav Ovsiienko dev->device && 1881fbc83412SViacheslav Ovsiienko (dev->device == &pci_dev->device || 1882fbc83412SViacheslav Ovsiienko (dev->device->driver && 1883f7e95215SViacheslav Ovsiienko dev->device->driver->name && 1884fbc83412SViacheslav Ovsiienko !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME)))) 1885f7e95215SViacheslav Ovsiienko break; 1886f7e95215SViacheslav Ovsiienko port_id++; 1887f7e95215SViacheslav Ovsiienko } 1888f7e95215SViacheslav Ovsiienko if (port_id >= RTE_MAX_ETHPORTS) 1889f7e95215SViacheslav Ovsiienko return RTE_MAX_ETHPORTS; 1890f7e95215SViacheslav Ovsiienko return port_id; 1891f7e95215SViacheslav Ovsiienko } 1892f7e95215SViacheslav Ovsiienko 18933a820742SOphir Munk /** 18943a820742SOphir Munk * DPDK callback to remove a PCI device. 18953a820742SOphir Munk * 18963a820742SOphir Munk * This function removes all Ethernet devices belong to a given PCI device. 18973a820742SOphir Munk * 18983a820742SOphir Munk * @param[in] pci_dev 18993a820742SOphir Munk * Pointer to the PCI device. 19003a820742SOphir Munk * 19013a820742SOphir Munk * @return 19023a820742SOphir Munk * 0 on success, the function cannot fail. 
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;

	/* Walk every ethdev port created on top of this PCI device. */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
		/*
		 * mlx5_dev_close() is not registered to secondary process,
		 * call the close function explicitly for secondary process.
		 */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			mlx5_dev_close(&rte_eth_devices[port_id]);
		else
			rte_eth_dev_close(port_id);
	}
	return 0;
}

/*
 * PCI id table of all Mellanox devices handled by this PMD
 * (ConnectX-4/5/6 families, PF and VF variants, BlueField parts).
 * The zero vendor_id entry terminates the table.
 */
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		.vendor_id = 0
	}
};

/* PCI bus driver descriptor registered with the EAL PCI bus. */
struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_os_pci_probe,
	.remove = mlx5_pci_remove,
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	.drv_flags = PCI_DRV_FLAGS,
};

/* Initialize driver log type. */
RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * Register the PCI driver only when mlx5_glue is non-NULL -
	 * presumably it is left NULL when the rdma-core glue library
	 * failed to load; TODO confirm against the glue init path.
	 */
	if (mlx5_glue)
		rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");