18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <stdint.h> 10771fa900SAdrien Mazarguil #include <stdlib.h> 11e72dd09bSNélio Laranjeiro #include <errno.h> 12771fa900SAdrien Mazarguil #include <net/if.h> 134a984153SXueming Li #include <sys/mman.h> 14ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 15771fa900SAdrien Mazarguil 16771fa900SAdrien Mazarguil /* Verbs header. */ 17771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 18771fa900SAdrien Mazarguil #ifdef PEDANTIC 19fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 20771fa900SAdrien Mazarguil #endif 21771fa900SAdrien Mazarguil #include <infiniband/verbs.h> 22771fa900SAdrien Mazarguil #ifdef PEDANTIC 23fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 24771fa900SAdrien Mazarguil #endif 25771fa900SAdrien Mazarguil 26771fa900SAdrien Mazarguil #include <rte_malloc.h> 27ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 28fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 29771fa900SAdrien Mazarguil #include <rte_pci.h> 30c752998bSGaetan Rivet #include <rte_bus_pci.h> 31771fa900SAdrien Mazarguil #include <rte_common.h> 32e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 33e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 34e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 35f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 36f15db67dSMatan Azrad #include <rte_alarm.h> 37771fa900SAdrien Mazarguil 387b4f1e6bSMatan Azrad #include <mlx5_glue.h> 397b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h> 4093e30982SMatan Azrad #include <mlx5_common.h> 41391b8bccSOphir 
Munk #include <mlx5_common_os.h> 42a4de9586SVu Pham #include <mlx5_common_mp.h> 4383c2047cSSuanming Mou #include <mlx5_malloc.h> 447b4f1e6bSMatan Azrad 457b4f1e6bSMatan Azrad #include "mlx5_defs.h" 46771fa900SAdrien Mazarguil #include "mlx5.h" 47771fa900SAdrien Mazarguil #include "mlx5_utils.h" 482e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 49771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 50974f1e7eSYongseok Koh #include "mlx5_mr.h" 5184c406e7SOri Kam #include "mlx5_flow.h" 52efa79e68SOri Kam #include "rte_pmd_mlx5.h" 53771fa900SAdrien Mazarguil 5499c12dccSNélio Laranjeiro /* Device parameter to enable RX completion queue compression. */ 5599c12dccSNélio Laranjeiro #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" 5699c12dccSNélio Laranjeiro 57bc91e8dbSYongseok Koh /* Device parameter to enable RX completion entry padding to 128B. */ 58bc91e8dbSYongseok Koh #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" 59bc91e8dbSYongseok Koh 6078c7a16dSYongseok Koh /* Device parameter to enable padding Rx packet to cacheline size. */ 6178c7a16dSYongseok Koh #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" 6278c7a16dSYongseok Koh 637d6bf6b8SYongseok Koh /* Device parameter to enable Multi-Packet Rx queue. */ 647d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_EN "mprq_en" 657d6bf6b8SYongseok Koh 667d6bf6b8SYongseok Koh /* Device parameter to configure log 2 of the number of strides for MPRQ. */ 677d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" 687d6bf6b8SYongseok Koh 69ecb16045SAlexander Kozyrev /* Device parameter to configure log 2 of the stride size for MPRQ. */ 70ecb16045SAlexander Kozyrev #define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size" 71ecb16045SAlexander Kozyrev 727d6bf6b8SYongseok Koh /* Device parameter to limit the size of memcpy'd packet for MPRQ. 
*/ 737d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" 747d6bf6b8SYongseok Koh 757d6bf6b8SYongseok Koh /* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ 767d6bf6b8SYongseok Koh #define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" 777d6bf6b8SYongseok Koh 78a6bd4911SViacheslav Ovsiienko /* Device parameter to configure inline send. Deprecated, ignored.*/ 792a66cf37SYaacov Hazan #define MLX5_TXQ_INLINE "txq_inline" 802a66cf37SYaacov Hazan 81505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with ordinary SEND. */ 82505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MAX "txq_inline_max" 83505f1fe4SViacheslav Ovsiienko 84505f1fe4SViacheslav Ovsiienko /* Device parameter to configure minimal data size to inline. */ 85505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MIN "txq_inline_min" 86505f1fe4SViacheslav Ovsiienko 87505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with Enhanced MPW. */ 88505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MPW "txq_inline_mpw" 89505f1fe4SViacheslav Ovsiienko 902a66cf37SYaacov Hazan /* 912a66cf37SYaacov Hazan * Device parameter to configure the number of TX queues threshold for 922a66cf37SYaacov Hazan * enabling inline send. 932a66cf37SYaacov Hazan */ 942a66cf37SYaacov Hazan #define MLX5_TXQS_MIN_INLINE "txqs_min_inline" 952a66cf37SYaacov Hazan 9609d8b416SYongseok Koh /* 9709d8b416SYongseok Koh * Device parameter to configure the number of TX queues threshold for 98a6bd4911SViacheslav Ovsiienko * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines). 9909d8b416SYongseok Koh */ 10009d8b416SYongseok Koh #define MLX5_TXQS_MAX_VEC "txqs_max_vec" 10109d8b416SYongseok Koh 102230189d9SNélio Laranjeiro /* Device parameter to enable multi-packet send WQEs. 
*/ 103230189d9SNélio Laranjeiro #define MLX5_TXQ_MPW_EN "txq_mpw_en" 104230189d9SNélio Laranjeiro 105a6bd4911SViacheslav Ovsiienko /* 1068409a285SViacheslav Ovsiienko * Device parameter to force doorbell register mapping 1078409a285SViacheslav Ovsiienko * to non-cahed region eliminating the extra write memory barrier. 1088409a285SViacheslav Ovsiienko */ 1098409a285SViacheslav Ovsiienko #define MLX5_TX_DB_NC "tx_db_nc" 1108409a285SViacheslav Ovsiienko 1118409a285SViacheslav Ovsiienko /* 112a6bd4911SViacheslav Ovsiienko * Device parameter to include 2 dsegs in the title WQEBB. 113a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 114a6bd4911SViacheslav Ovsiienko */ 1156ce84bd8SYongseok Koh #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" 1166ce84bd8SYongseok Koh 117a6bd4911SViacheslav Ovsiienko /* 118a6bd4911SViacheslav Ovsiienko * Device parameter to limit the size of inlining packet. 119a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 120a6bd4911SViacheslav Ovsiienko */ 1216ce84bd8SYongseok Koh #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" 1226ce84bd8SYongseok Koh 123a6bd4911SViacheslav Ovsiienko /* 1248f848f32SViacheslav Ovsiienko * Device parameter to enable Tx scheduling on timestamps 1258f848f32SViacheslav Ovsiienko * and specify the packet pacing granularity in nanoseconds. 1268f848f32SViacheslav Ovsiienko */ 1278f848f32SViacheslav Ovsiienko #define MLX5_TX_PP "tx_pp" 1288f848f32SViacheslav Ovsiienko 1298f848f32SViacheslav Ovsiienko /* 1308f848f32SViacheslav Ovsiienko * Device parameter to specify skew in nanoseconds on Tx datapath, 1318f848f32SViacheslav Ovsiienko * it represents the time between SQ start WQE processing and 1328f848f32SViacheslav Ovsiienko * appearing actual packet data on the wire. 
1338f848f32SViacheslav Ovsiienko */ 1348f848f32SViacheslav Ovsiienko #define MLX5_TX_SKEW "tx_skew" 1358f848f32SViacheslav Ovsiienko 1368f848f32SViacheslav Ovsiienko /* 137a6bd4911SViacheslav Ovsiienko * Device parameter to enable hardware Tx vector. 138a6bd4911SViacheslav Ovsiienko * Deprecated, ignored (no vectorized Tx routines anymore). 139a6bd4911SViacheslav Ovsiienko */ 1405644d5b9SNelio Laranjeiro #define MLX5_TX_VEC_EN "tx_vec_en" 1415644d5b9SNelio Laranjeiro 1425644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Rx vector. */ 1435644d5b9SNelio Laranjeiro #define MLX5_RX_VEC_EN "rx_vec_en" 1445644d5b9SNelio Laranjeiro 14578a54648SXueming Li /* Allow L3 VXLAN flow creation. */ 14678a54648SXueming Li #define MLX5_L3_VXLAN_EN "l3_vxlan_en" 14778a54648SXueming Li 148e2b4925eSOri Kam /* Activate DV E-Switch flow steering. */ 149e2b4925eSOri Kam #define MLX5_DV_ESW_EN "dv_esw_en" 150e2b4925eSOri Kam 15151e72d38SOri Kam /* Activate DV flow steering. */ 15251e72d38SOri Kam #define MLX5_DV_FLOW_EN "dv_flow_en" 15351e72d38SOri Kam 1542d241515SViacheslav Ovsiienko /* Enable extensive flow metadata support. */ 1552d241515SViacheslav Ovsiienko #define MLX5_DV_XMETA_EN "dv_xmeta_en" 1562d241515SViacheslav Ovsiienko 1570f0ae73aSShiri Kuzin /* Device parameter to let the user manage the lacp traffic of bonded device */ 1580f0ae73aSShiri Kuzin #define MLX5_LACP_BY_USER "lacp_by_user" 1590f0ae73aSShiri Kuzin 160db209cc3SNélio Laranjeiro /* Activate Netlink support in VF mode. */ 161db209cc3SNélio Laranjeiro #define MLX5_VF_NL_EN "vf_nl_en" 162db209cc3SNélio Laranjeiro 163dceb5029SYongseok Koh /* Enable extending memsegs when creating a MR. */ 164dceb5029SYongseok Koh #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" 165dceb5029SYongseok Koh 1666de569f5SAdrien Mazarguil /* Select port representors to instantiate. 
*/ 1676de569f5SAdrien Mazarguil #define MLX5_REPRESENTOR "representor" 1686de569f5SAdrien Mazarguil 169066cfecdSMatan Azrad /* Device parameter to configure the maximum number of dump files per queue. */ 170066cfecdSMatan Azrad #define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num" 171066cfecdSMatan Azrad 17221bb6c7eSDekel Peled /* Configure timeout of LRO session (in microseconds). */ 17321bb6c7eSDekel Peled #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec" 17421bb6c7eSDekel Peled 1751ad9a3d0SBing Zhao /* 1761ad9a3d0SBing Zhao * Device parameter to configure the total data buffer size for a single 1771ad9a3d0SBing Zhao * hairpin queue (logarithm value). 1781ad9a3d0SBing Zhao */ 1791ad9a3d0SBing Zhao #define MLX5_HP_BUF_SIZE "hp_buf_log_sz" 1801ad9a3d0SBing Zhao 181a1da6f62SSuanming Mou /* Flow memory reclaim mode. */ 182a1da6f62SSuanming Mou #define MLX5_RECLAIM_MEM "reclaim_mem_mode" 183a1da6f62SSuanming Mou 1845522da6bSSuanming Mou /* The default memory allocator used in PMD. */ 1855522da6bSSuanming Mou #define MLX5_SYS_MEM_EN "sys_mem_en" 186*50f95b23SSuanming Mou /* Decap will be used or not. */ 187*50f95b23SSuanming Mou #define MLX5_DECAP_EN "decap_en" 1885522da6bSSuanming Mou 189974f1e7eSYongseok Koh static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 190974f1e7eSYongseok Koh 191974f1e7eSYongseok Koh /* Shared memory between primary and secondary processes. */ 192974f1e7eSYongseok Koh struct mlx5_shared_data *mlx5_shared_data; 193974f1e7eSYongseok Koh 194974f1e7eSYongseok Koh /* Spinlock for mlx5_shared_data allocation. */ 195974f1e7eSYongseok Koh static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 196974f1e7eSYongseok Koh 1977be600c8SYongseok Koh /* Process local data for secondary processes. 
*/ 1987be600c8SYongseok Koh static struct mlx5_local_data mlx5_local_data; 199a170a30dSNélio Laranjeiro 20091389890SOphir Munk static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = 20191389890SOphir Munk LIST_HEAD_INITIALIZER(); 20291389890SOphir Munk static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER; 20317e19bc4SViacheslav Ovsiienko 2045c761238SGregory Etelson static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { 205b88341caSSuanming Mou #ifdef HAVE_IBV_FLOW_DV_SUPPORT 206014d1cbeSSuanming Mou { 207014d1cbeSSuanming Mou .size = sizeof(struct mlx5_flow_dv_encap_decap_resource), 208014d1cbeSSuanming Mou .trunk_size = 64, 209014d1cbeSSuanming Mou .grow_trunk = 3, 210014d1cbeSSuanming Mou .grow_shift = 2, 211014d1cbeSSuanming Mou .need_lock = 0, 212014d1cbeSSuanming Mou .release_mem_en = 1, 21383c2047cSSuanming Mou .malloc = mlx5_malloc, 21483c2047cSSuanming Mou .free = mlx5_free, 215014d1cbeSSuanming Mou .type = "mlx5_encap_decap_ipool", 216014d1cbeSSuanming Mou }, 2178acf8ac9SSuanming Mou { 2188acf8ac9SSuanming Mou .size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource), 2198acf8ac9SSuanming Mou .trunk_size = 64, 2208acf8ac9SSuanming Mou .grow_trunk = 3, 2218acf8ac9SSuanming Mou .grow_shift = 2, 2228acf8ac9SSuanming Mou .need_lock = 0, 2238acf8ac9SSuanming Mou .release_mem_en = 1, 22483c2047cSSuanming Mou .malloc = mlx5_malloc, 22583c2047cSSuanming Mou .free = mlx5_free, 2268acf8ac9SSuanming Mou .type = "mlx5_push_vlan_ipool", 2278acf8ac9SSuanming Mou }, 2285f114269SSuanming Mou { 2295f114269SSuanming Mou .size = sizeof(struct mlx5_flow_dv_tag_resource), 2305f114269SSuanming Mou .trunk_size = 64, 2315f114269SSuanming Mou .grow_trunk = 3, 2325f114269SSuanming Mou .grow_shift = 2, 2335f114269SSuanming Mou .need_lock = 0, 2345f114269SSuanming Mou .release_mem_en = 1, 23583c2047cSSuanming Mou .malloc = mlx5_malloc, 23683c2047cSSuanming Mou .free = mlx5_free, 2375f114269SSuanming Mou .type = "mlx5_tag_ipool", 
2385f114269SSuanming Mou }, 239f3faf9eaSSuanming Mou { 240f3faf9eaSSuanming Mou .size = sizeof(struct mlx5_flow_dv_port_id_action_resource), 241f3faf9eaSSuanming Mou .trunk_size = 64, 242f3faf9eaSSuanming Mou .grow_trunk = 3, 243f3faf9eaSSuanming Mou .grow_shift = 2, 244f3faf9eaSSuanming Mou .need_lock = 0, 245f3faf9eaSSuanming Mou .release_mem_en = 1, 24683c2047cSSuanming Mou .malloc = mlx5_malloc, 24783c2047cSSuanming Mou .free = mlx5_free, 248f3faf9eaSSuanming Mou .type = "mlx5_port_id_ipool", 249f3faf9eaSSuanming Mou }, 2507ac99475SSuanming Mou { 2517ac99475SSuanming Mou .size = sizeof(struct mlx5_flow_tbl_data_entry), 2527ac99475SSuanming Mou .trunk_size = 64, 2537ac99475SSuanming Mou .grow_trunk = 3, 2547ac99475SSuanming Mou .grow_shift = 2, 2557ac99475SSuanming Mou .need_lock = 0, 2567ac99475SSuanming Mou .release_mem_en = 1, 25783c2047cSSuanming Mou .malloc = mlx5_malloc, 25883c2047cSSuanming Mou .free = mlx5_free, 2597ac99475SSuanming Mou .type = "mlx5_jump_ipool", 2607ac99475SSuanming Mou }, 261b88341caSSuanming Mou #endif 262772dc0ebSSuanming Mou { 2638638e2b0SSuanming Mou .size = sizeof(struct mlx5_flow_meter), 2648638e2b0SSuanming Mou .trunk_size = 64, 2658638e2b0SSuanming Mou .grow_trunk = 3, 2668638e2b0SSuanming Mou .grow_shift = 2, 2678638e2b0SSuanming Mou .need_lock = 0, 2688638e2b0SSuanming Mou .release_mem_en = 1, 26983c2047cSSuanming Mou .malloc = mlx5_malloc, 27083c2047cSSuanming Mou .free = mlx5_free, 2718638e2b0SSuanming Mou .type = "mlx5_meter_ipool", 2728638e2b0SSuanming Mou }, 2738638e2b0SSuanming Mou { 27490e6053aSSuanming Mou .size = sizeof(struct mlx5_flow_mreg_copy_resource), 27590e6053aSSuanming Mou .trunk_size = 64, 27690e6053aSSuanming Mou .grow_trunk = 3, 27790e6053aSSuanming Mou .grow_shift = 2, 27890e6053aSSuanming Mou .need_lock = 0, 27990e6053aSSuanming Mou .release_mem_en = 1, 28083c2047cSSuanming Mou .malloc = mlx5_malloc, 28183c2047cSSuanming Mou .free = mlx5_free, 28290e6053aSSuanming Mou .type = "mlx5_mcp_ipool", 
28390e6053aSSuanming Mou }, 28490e6053aSSuanming Mou { 285772dc0ebSSuanming Mou .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN), 286772dc0ebSSuanming Mou .trunk_size = 64, 287772dc0ebSSuanming Mou .grow_trunk = 3, 288772dc0ebSSuanming Mou .grow_shift = 2, 289772dc0ebSSuanming Mou .need_lock = 0, 290772dc0ebSSuanming Mou .release_mem_en = 1, 29183c2047cSSuanming Mou .malloc = mlx5_malloc, 29283c2047cSSuanming Mou .free = mlx5_free, 293772dc0ebSSuanming Mou .type = "mlx5_hrxq_ipool", 294772dc0ebSSuanming Mou }, 295b88341caSSuanming Mou { 2965c761238SGregory Etelson /* 2975c761238SGregory Etelson * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows. 2985c761238SGregory Etelson * It set in run time according to PCI function configuration. 2995c761238SGregory Etelson */ 3005c761238SGregory Etelson .size = 0, 301b88341caSSuanming Mou .trunk_size = 64, 302b88341caSSuanming Mou .grow_trunk = 3, 303b88341caSSuanming Mou .grow_shift = 2, 304b88341caSSuanming Mou .need_lock = 0, 305b88341caSSuanming Mou .release_mem_en = 1, 30683c2047cSSuanming Mou .malloc = mlx5_malloc, 30783c2047cSSuanming Mou .free = mlx5_free, 308b88341caSSuanming Mou .type = "mlx5_flow_handle_ipool", 309b88341caSSuanming Mou }, 310ab612adcSSuanming Mou { 311ab612adcSSuanming Mou .size = sizeof(struct rte_flow), 312ab612adcSSuanming Mou .trunk_size = 4096, 313ab612adcSSuanming Mou .need_lock = 1, 314ab612adcSSuanming Mou .release_mem_en = 1, 31583c2047cSSuanming Mou .malloc = mlx5_malloc, 31683c2047cSSuanming Mou .free = mlx5_free, 317ab612adcSSuanming Mou .type = "rte_flow_ipool", 318ab612adcSSuanming Mou }, 319014d1cbeSSuanming Mou }; 320014d1cbeSSuanming Mou 321014d1cbeSSuanming Mou 322830d2091SOri Kam #define MLX5_FLOW_MIN_ID_POOL_SIZE 512 323830d2091SOri Kam #define MLX5_ID_GENERATION_ARRAY_FACTOR 16 324830d2091SOri Kam 325860897d2SBing Zhao #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096 326860897d2SBing Zhao 327830d2091SOri Kam /** 328830d2091SOri Kam * Allocate ID pool structure. 
329830d2091SOri Kam * 33030a3687dSSuanming Mou * @param[in] max_id 33130a3687dSSuanming Mou * The maximum id can be allocated from the pool. 33230a3687dSSuanming Mou * 333830d2091SOri Kam * @return 334830d2091SOri Kam * Pointer to pool object, NULL value otherwise. 335830d2091SOri Kam */ 336830d2091SOri Kam struct mlx5_flow_id_pool * 33730a3687dSSuanming Mou mlx5_flow_id_pool_alloc(uint32_t max_id) 338830d2091SOri Kam { 339830d2091SOri Kam struct mlx5_flow_id_pool *pool; 340830d2091SOri Kam void *mem; 341830d2091SOri Kam 34283c2047cSSuanming Mou pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 34383c2047cSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 344830d2091SOri Kam if (!pool) { 345830d2091SOri Kam DRV_LOG(ERR, "can't allocate id pool"); 346830d2091SOri Kam rte_errno = ENOMEM; 347830d2091SOri Kam return NULL; 348830d2091SOri Kam } 34983c2047cSSuanming Mou mem = mlx5_malloc(MLX5_MEM_ZERO, 35083c2047cSSuanming Mou MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), 35183c2047cSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 352830d2091SOri Kam if (!mem) { 353830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 354830d2091SOri Kam rte_errno = ENOMEM; 355830d2091SOri Kam goto error; 356830d2091SOri Kam } 357830d2091SOri Kam pool->free_arr = mem; 358830d2091SOri Kam pool->curr = pool->free_arr; 359830d2091SOri Kam pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; 360830d2091SOri Kam pool->base_index = 0; 36130a3687dSSuanming Mou pool->max_id = max_id; 362830d2091SOri Kam return pool; 363830d2091SOri Kam error: 36483c2047cSSuanming Mou mlx5_free(pool); 365830d2091SOri Kam return NULL; 366830d2091SOri Kam } 367830d2091SOri Kam 368830d2091SOri Kam /** 369830d2091SOri Kam * Release ID pool structure. 370830d2091SOri Kam * 371830d2091SOri Kam * @param[in] pool 372830d2091SOri Kam * Pointer to flow id pool object to free. 
373830d2091SOri Kam */ 374830d2091SOri Kam void 375830d2091SOri Kam mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) 376830d2091SOri Kam { 37783c2047cSSuanming Mou mlx5_free(pool->free_arr); 37883c2047cSSuanming Mou mlx5_free(pool); 379830d2091SOri Kam } 380830d2091SOri Kam 381830d2091SOri Kam /** 382830d2091SOri Kam * Generate ID. 383830d2091SOri Kam * 384830d2091SOri Kam * @param[in] pool 385830d2091SOri Kam * Pointer to flow id pool. 386830d2091SOri Kam * @param[out] id 387830d2091SOri Kam * The generated ID. 388830d2091SOri Kam * 389830d2091SOri Kam * @return 390830d2091SOri Kam * 0 on success, error value otherwise. 391830d2091SOri Kam */ 392830d2091SOri Kam uint32_t 393830d2091SOri Kam mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) 394830d2091SOri Kam { 395830d2091SOri Kam if (pool->curr == pool->free_arr) { 39630a3687dSSuanming Mou if (pool->base_index == pool->max_id) { 397830d2091SOri Kam rte_errno = ENOMEM; 398830d2091SOri Kam DRV_LOG(ERR, "no free id"); 399830d2091SOri Kam return -rte_errno; 400830d2091SOri Kam } 401830d2091SOri Kam *id = ++pool->base_index; 402830d2091SOri Kam return 0; 403830d2091SOri Kam } 404830d2091SOri Kam *id = *(--pool->curr); 405830d2091SOri Kam return 0; 406830d2091SOri Kam } 407830d2091SOri Kam 408830d2091SOri Kam /** 409830d2091SOri Kam * Release ID. 410830d2091SOri Kam * 411830d2091SOri Kam * @param[in] pool 412830d2091SOri Kam * Pointer to flow id pool. 413830d2091SOri Kam * @param[out] id 414830d2091SOri Kam * The generated ID. 415830d2091SOri Kam * 416830d2091SOri Kam * @return 417830d2091SOri Kam * 0 on success, error value otherwise. 
418830d2091SOri Kam */ 419830d2091SOri Kam uint32_t 420830d2091SOri Kam mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) 421830d2091SOri Kam { 422830d2091SOri Kam uint32_t size; 423830d2091SOri Kam uint32_t size2; 424830d2091SOri Kam void *mem; 425830d2091SOri Kam 426830d2091SOri Kam if (pool->curr == pool->last) { 427830d2091SOri Kam size = pool->curr - pool->free_arr; 428830d2091SOri Kam size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR; 4298e46d4e1SAlexander Kozyrev MLX5_ASSERT(size2 > size); 43083c2047cSSuanming Mou mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0, 43183c2047cSSuanming Mou SOCKET_ID_ANY); 432830d2091SOri Kam if (!mem) { 433830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 434830d2091SOri Kam rte_errno = ENOMEM; 435830d2091SOri Kam return -rte_errno; 436830d2091SOri Kam } 437830d2091SOri Kam memcpy(mem, pool->free_arr, size * sizeof(uint32_t)); 43883c2047cSSuanming Mou mlx5_free(pool->free_arr); 439830d2091SOri Kam pool->free_arr = mem; 440830d2091SOri Kam pool->curr = pool->free_arr + size; 441830d2091SOri Kam pool->last = pool->free_arr + size2; 442830d2091SOri Kam } 443830d2091SOri Kam *pool->curr = id; 444830d2091SOri Kam pool->curr++; 445830d2091SOri Kam return 0; 446830d2091SOri Kam } 447830d2091SOri Kam 44817e19bc4SViacheslav Ovsiienko /** 449fa2d01c8SDong Zhou * Initialize the shared aging list information per port. 450fa2d01c8SDong Zhou * 451fa2d01c8SDong Zhou * @param[in] sh 4526e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
453fa2d01c8SDong Zhou */ 454fa2d01c8SDong Zhou static void 4556e88bc42SOphir Munk mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh) 456fa2d01c8SDong Zhou { 457fa2d01c8SDong Zhou uint32_t i; 458fa2d01c8SDong Zhou struct mlx5_age_info *age_info; 459fa2d01c8SDong Zhou 460fa2d01c8SDong Zhou for (i = 0; i < sh->max_port; i++) { 461fa2d01c8SDong Zhou age_info = &sh->port[i].age_info; 462fa2d01c8SDong Zhou age_info->flags = 0; 463fa2d01c8SDong Zhou TAILQ_INIT(&age_info->aged_counters); 464fa2d01c8SDong Zhou rte_spinlock_init(&age_info->aged_sl); 465fa2d01c8SDong Zhou MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER); 466fa2d01c8SDong Zhou } 467fa2d01c8SDong Zhou } 468fa2d01c8SDong Zhou 469fa2d01c8SDong Zhou /** 4705382d28cSMatan Azrad * Initialize the counters management structure. 4715382d28cSMatan Azrad * 4725382d28cSMatan Azrad * @param[in] sh 4736e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free 4745382d28cSMatan Azrad */ 4755382d28cSMatan Azrad static void 4766e88bc42SOphir Munk mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) 4775382d28cSMatan Azrad { 4785af61440SMatan Azrad int i; 4795382d28cSMatan Azrad 4805af61440SMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 4815382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.flow_counters); 4825af61440SMatan Azrad for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { 483b1cc2266SSuanming Mou sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET; 484b1cc2266SSuanming Mou sh->cmng.ccont[i].max_id = -1; 485b1cc2266SSuanming Mou sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID; 4865af61440SMatan Azrad TAILQ_INIT(&sh->cmng.ccont[i].pool_list); 4875af61440SMatan Azrad rte_spinlock_init(&sh->cmng.ccont[i].resize_sl); 488ac79183dSSuanming Mou TAILQ_INIT(&sh->cmng.ccont[i].counters); 489ac79183dSSuanming Mou rte_spinlock_init(&sh->cmng.ccont[i].csl); 490fa2d01c8SDong Zhou } 4915382d28cSMatan Azrad } 4925382d28cSMatan Azrad 4935382d28cSMatan Azrad /** 4945382d28cSMatan Azrad * Destroy all the resources allocated for a counter 
memory management. 4955382d28cSMatan Azrad * 4965382d28cSMatan Azrad * @param[in] mng 4975382d28cSMatan Azrad * Pointer to the memory management structure. 4985382d28cSMatan Azrad */ 4995382d28cSMatan Azrad static void 5005382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) 5015382d28cSMatan Azrad { 5025382d28cSMatan Azrad uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; 5035382d28cSMatan Azrad 5045382d28cSMatan Azrad LIST_REMOVE(mng, next); 5055382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy(mng->dm)); 5065382d28cSMatan Azrad claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); 50783c2047cSSuanming Mou mlx5_free(mem); 5085382d28cSMatan Azrad } 5095382d28cSMatan Azrad 5105382d28cSMatan Azrad /** 5115382d28cSMatan Azrad * Close and release all the resources of the counters management. 5125382d28cSMatan Azrad * 5135382d28cSMatan Azrad * @param[in] sh 5146e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free. 5155382d28cSMatan Azrad */ 5165382d28cSMatan Azrad static void 5176e88bc42SOphir Munk mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh) 5185382d28cSMatan Azrad { 5195382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng *mng; 5205af61440SMatan Azrad int i; 5215382d28cSMatan Azrad int j; 522f15db67dSMatan Azrad int retries = 1024; 5235382d28cSMatan Azrad 524f15db67dSMatan Azrad rte_errno = 0; 525f15db67dSMatan Azrad while (--retries) { 526f15db67dSMatan Azrad rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh); 527f15db67dSMatan Azrad if (rte_errno != EINPROGRESS) 528f15db67dSMatan Azrad break; 529f15db67dSMatan Azrad rte_pause(); 530f15db67dSMatan Azrad } 5315af61440SMatan Azrad for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { 5325382d28cSMatan Azrad struct mlx5_flow_counter_pool *pool; 5335af61440SMatan Azrad uint32_t batch = !!(i > 1); 5345382d28cSMatan Azrad 5355af61440SMatan Azrad if (!sh->cmng.ccont[i].pools) 5365382d28cSMatan Azrad continue; 5375af61440SMatan Azrad pool = 
TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 5385382d28cSMatan Azrad while (pool) { 5395af61440SMatan Azrad if (batch && pool->min_dcs) 5405af61440SMatan Azrad claim_zero(mlx5_devx_cmd_destroy 541fa2d01c8SDong Zhou (pool->min_dcs)); 5425382d28cSMatan Azrad for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { 5438d93c830SDong Zhou if (MLX5_POOL_GET_CNT(pool, j)->action) 5445382d28cSMatan Azrad claim_zero 5455382d28cSMatan Azrad (mlx5_glue->destroy_flow_action 546fa2d01c8SDong Zhou (MLX5_POOL_GET_CNT 547fa2d01c8SDong Zhou (pool, j)->action)); 548826b8a87SSuanming Mou if (!batch && MLX5_GET_POOL_CNT_EXT 549826b8a87SSuanming Mou (pool, j)->dcs) 5505382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy 551826b8a87SSuanming Mou (MLX5_GET_POOL_CNT_EXT 552826b8a87SSuanming Mou (pool, j)->dcs)); 5535382d28cSMatan Azrad } 5545af61440SMatan Azrad TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next); 55583c2047cSSuanming Mou mlx5_free(pool); 5565af61440SMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 5575382d28cSMatan Azrad } 55883c2047cSSuanming Mou mlx5_free(sh->cmng.ccont[i].pools); 5595382d28cSMatan Azrad } 5605382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 5615382d28cSMatan Azrad while (mng) { 5625382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(mng); 5635382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 5645382d28cSMatan Azrad } 5655382d28cSMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 5665382d28cSMatan Azrad } 5675382d28cSMatan Azrad 5685382d28cSMatan Azrad /** 569014d1cbeSSuanming Mou * Initialize the flow resources' indexed mempool. 570014d1cbeSSuanming Mou * 571014d1cbeSSuanming Mou * @param[in] sh 5726e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 573b88341caSSuanming Mou * @param[in] sh 574b88341caSSuanming Mou * Pointer to user dev config. 
575014d1cbeSSuanming Mou */ 576014d1cbeSSuanming Mou static void 5776e88bc42SOphir Munk mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh, 5785c761238SGregory Etelson const struct mlx5_dev_config *config) 579014d1cbeSSuanming Mou { 580014d1cbeSSuanming Mou uint8_t i; 5815c761238SGregory Etelson struct mlx5_indexed_pool_config cfg; 582014d1cbeSSuanming Mou 583a1da6f62SSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) { 5845c761238SGregory Etelson cfg = mlx5_ipool_cfg[i]; 5855c761238SGregory Etelson switch (i) { 5865c761238SGregory Etelson default: 5875c761238SGregory Etelson break; 5885c761238SGregory Etelson /* 5895c761238SGregory Etelson * Set MLX5_IPOOL_MLX5_FLOW ipool size 5905c761238SGregory Etelson * according to PCI function flow configuration. 5915c761238SGregory Etelson */ 5925c761238SGregory Etelson case MLX5_IPOOL_MLX5_FLOW: 5935c761238SGregory Etelson cfg.size = config->dv_flow_en ? 5945c761238SGregory Etelson sizeof(struct mlx5_flow_handle) : 5955c761238SGregory Etelson MLX5_FLOW_HANDLE_VERBS_SIZE; 5965c761238SGregory Etelson break; 5975c761238SGregory Etelson } 598a1da6f62SSuanming Mou if (config->reclaim_mode) 5995c761238SGregory Etelson cfg.release_mem_en = 1; 6005c761238SGregory Etelson sh->ipool[i] = mlx5_ipool_create(&cfg); 601014d1cbeSSuanming Mou } 602a1da6f62SSuanming Mou } 603014d1cbeSSuanming Mou 604014d1cbeSSuanming Mou /** 605014d1cbeSSuanming Mou * Release the flow resources' indexed mempool. 606014d1cbeSSuanming Mou * 607014d1cbeSSuanming Mou * @param[in] sh 6086e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
609014d1cbeSSuanming Mou */ 610014d1cbeSSuanming Mou static void 6116e88bc42SOphir Munk mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh) 612014d1cbeSSuanming Mou { 613014d1cbeSSuanming Mou uint8_t i; 614014d1cbeSSuanming Mou 615014d1cbeSSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) 616014d1cbeSSuanming Mou mlx5_ipool_destroy(sh->ipool[i]); 617014d1cbeSSuanming Mou } 618014d1cbeSSuanming Mou 619daa38a89SBing Zhao /* 620daa38a89SBing Zhao * Check if dynamic flex parser for eCPRI already exists. 621daa38a89SBing Zhao * 622daa38a89SBing Zhao * @param dev 623daa38a89SBing Zhao * Pointer to Ethernet device structure. 624daa38a89SBing Zhao * 625daa38a89SBing Zhao * @return 626daa38a89SBing Zhao * true on exists, false on not. 627daa38a89SBing Zhao */ 628daa38a89SBing Zhao bool 629daa38a89SBing Zhao mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev) 630daa38a89SBing Zhao { 631daa38a89SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 632daa38a89SBing Zhao struct mlx5_flex_parser_profiles *prf = 633daa38a89SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 634daa38a89SBing Zhao 635daa38a89SBing Zhao return !!prf->obj; 636daa38a89SBing Zhao } 637daa38a89SBing Zhao 638daa38a89SBing Zhao /* 639daa38a89SBing Zhao * Allocation of a flex parser for eCPRI. Once created, this parser related 640daa38a89SBing Zhao * resources will be held until the device is closed. 641daa38a89SBing Zhao * 642daa38a89SBing Zhao * @param dev 643daa38a89SBing Zhao * Pointer to Ethernet device structure. 644daa38a89SBing Zhao * 645daa38a89SBing Zhao * @return 646daa38a89SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 
647daa38a89SBing Zhao */ 648daa38a89SBing Zhao int 649daa38a89SBing Zhao mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev) 650daa38a89SBing Zhao { 651daa38a89SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 652daa38a89SBing Zhao struct mlx5_flex_parser_profiles *prf = 653daa38a89SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 6541c506404SBing Zhao struct mlx5_devx_graph_node_attr node = { 6551c506404SBing Zhao .modify_field_select = 0, 6561c506404SBing Zhao }; 6571c506404SBing Zhao uint32_t ids[8]; 6581c506404SBing Zhao int ret; 659daa38a89SBing Zhao 660d7c49561SBing Zhao if (!priv->config.hca_attr.parse_graph_flex_node) { 661d7c49561SBing Zhao DRV_LOG(ERR, "Dynamic flex parser is not supported " 662d7c49561SBing Zhao "for device %s.", priv->dev_data->name); 663d7c49561SBing Zhao return -ENOTSUP; 664d7c49561SBing Zhao } 6651c506404SBing Zhao node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED; 6661c506404SBing Zhao /* 8 bytes now: 4B common header + 4B message body header. */ 6671c506404SBing Zhao node.header_length_base_value = 0x8; 6681c506404SBing Zhao /* After MAC layer: Ether / VLAN. */ 6691c506404SBing Zhao node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC; 6701c506404SBing Zhao /* Type of compared condition should be 0xAEFE in the L2 layer. */ 6711c506404SBing Zhao node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI; 6721c506404SBing Zhao /* Sample #0: type in common header. */ 6731c506404SBing Zhao node.sample[0].flow_match_sample_en = 1; 6741c506404SBing Zhao /* Fixed offset. */ 6751c506404SBing Zhao node.sample[0].flow_match_sample_offset_mode = 0x0; 6761c506404SBing Zhao /* Only the 2nd byte will be used. */ 6771c506404SBing Zhao node.sample[0].flow_match_sample_field_base_offset = 0x0; 6781c506404SBing Zhao /* Sample #1: message payload. */ 6791c506404SBing Zhao node.sample[1].flow_match_sample_en = 1; 6801c506404SBing Zhao /* Fixed offset. 
*/ 6811c506404SBing Zhao node.sample[1].flow_match_sample_offset_mode = 0x0; 6821c506404SBing Zhao /* 6831c506404SBing Zhao * Only the first two bytes will be used right now, and its offset will 6841c506404SBing Zhao * start after the common header that with the length of a DW(u32). 6851c506404SBing Zhao */ 6861c506404SBing Zhao node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t); 6871c506404SBing Zhao prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node); 6881c506404SBing Zhao if (!prf->obj) { 6891c506404SBing Zhao DRV_LOG(ERR, "Failed to create flex parser node object."); 6901c506404SBing Zhao return (rte_errno == 0) ? -ENODEV : -rte_errno; 6911c506404SBing Zhao } 6921c506404SBing Zhao prf->num = 2; 6931c506404SBing Zhao ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num); 6941c506404SBing Zhao if (ret) { 6951c506404SBing Zhao DRV_LOG(ERR, "Failed to query sample IDs."); 6961c506404SBing Zhao return (rte_errno == 0) ? -ENODEV : -rte_errno; 6971c506404SBing Zhao } 6981c506404SBing Zhao prf->offset[0] = 0x0; 6991c506404SBing Zhao prf->offset[1] = sizeof(uint32_t); 7001c506404SBing Zhao prf->ids[0] = ids[0]; 7011c506404SBing Zhao prf->ids[1] = ids[1]; 702daa38a89SBing Zhao return 0; 703daa38a89SBing Zhao } 704daa38a89SBing Zhao 7051c506404SBing Zhao /* 7061c506404SBing Zhao * Destroy the flex parser node, including the parser itself, input / output 7071c506404SBing Zhao * arcs and DW samples. Resources could be reused then. 7081c506404SBing Zhao * 7091c506404SBing Zhao * @param dev 7101c506404SBing Zhao * Pointer to Ethernet device structure. 
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_parser_profiles *prf =
		&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];

	/* Destroying a NULL object is skipped; prf->obj is reset either way. */
	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

/**
 * Allocate shared device context. If there is multiport device the
 * master and representors will share this context, if there is single
 * port dedicated device, the context will be used by only given
 * port due to unification.
 *
 * Routine first searches the context for the specified device name,
 * if found the shared context assumed and reference counter is incremented.
 * If no context found the new one is created and initialized with specified
 * device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the device attributes (name, port, etc).
 * @param[in] config
 *   Pointer to device configuration structure.
 *
 * @return
 *   Pointer to mlx5_dev_ctx_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  const struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* The list mutex is held until 'exit'; all paths must unlock it. */
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name,
			    mlx5_os_get_dev_device_name(spawn->phys_dev))) {
			/* Existing context found - just take a reference. */
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	/* One mlx5_dev_shared_port slot is appended per possible port. */
	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* On failure sh->ctx stays NULL and 'err' holds the failure code. */
	err = mlx5_os_open_device(spawn, config, sh);
	if (!sh->ctx)
		goto error;
	err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	/* Buffers are zeroed by mlx5_malloc(), so they stay NUL-terminated. */
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to max unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
	}
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	/* DevX-only objects: PDN, TD, TIS and the Tx UAR. */
	if (sh->devx) {
		err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
		if (err) {
			DRV_LOG(ERR, "Fail to extract pdn from PD");
			goto error;
		}
		sh->td = mlx5_devx_cmd_create_td(sh->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			err = ENOMEM;
			goto error;
		}
		tis_attr.transport_domain = sh->td->id;
		sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
		if (!sh->tis) {
			DRV_LOG(ERR, "TIS allocation failure");
			err = ENOMEM;
			goto error;
		}
		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
		if (!sh->tx_uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR.");
			err = ENOMEM;
			goto error;
		}
	}
	sh->flow_id_pool = mlx5_flow_id_pool_alloc
				((1 << HAIRPIN_FLOW_ID_BITS) - 1);
	if (!sh->flow_id_pool) {
		DRV_LOG(ERR, "can't create flow id pool");
		err = ENOMEM;
		goto error;
	}
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&sh->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&sh->uar_lock[i]);
#endif
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->share_cache.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 spawn->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
			      &sh->share_cache.dereg_mr_cb);
	mlx5_os_dev_shared_handler_install(sh);
	sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
	if (!sh->cnt_id_tbl) {
		err = rte_errno;
		goto error;
	}
	mlx5_flow_aging_init(sh);
	mlx5_flow_counters_mng_init(sh);
	mlx5_flow_ipool_create(sh, config);
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	return sh;
error:
	/*
	 * NOTE(review): txpp.mutex is presumably initialized inside
	 * mlx5_os_open_device() - confirm before reordering this teardown.
	 */
	pthread_mutex_destroy(&sh->txpp.mutex);
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	MLX5_ASSERT(sh);
	/* Release partially created resources in reverse creation order. */
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	if (sh->flow_id_pool)
		mlx5_flow_id_pool_release(sh->flow_id_pool);
	mlx5_free(sh);
	/* Every failure path above must have set a positive error code. */
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */
	struct mlx5_dev_ctx_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
		if (lctx == sh)
			break;
	MLX5_ASSERT(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	MLX5_ASSERT(sh);
	MLX5_ASSERT(sh->refcnt);
	/* Secondary process should not free the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Other ports still reference the context - keep it alive. */
	if (--sh->refcnt)
		goto exit;
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Release created Memory Regions. */
	mlx5_mr_release_cache(&sh->share_cache);
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 */
	mlx5_flow_counters_mng_close(sh);
	mlx5_flow_ipool_destroy(sh);
	mlx5_os_dev_shared_handler_uninstall(sh);
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	/* HW object teardown mirrors creation in mlx5_alloc_shared_dev_ctx(). */
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	if (sh->flow_id_pool)
		mlx5_flow_id_pool_release(sh->flow_id_pool);
	pthread_mutex_destroy(&sh->txpp.mutex);
	mlx5_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}

/**
 * Destroy table hash list and all the root entries per domain.
98154534725SMatan Azrad * 98254534725SMatan Azrad * @param[in] priv 98354534725SMatan Azrad * Pointer to the private device data structure. 98454534725SMatan Azrad */ 9852eb4d010SOphir Munk void 98654534725SMatan Azrad mlx5_free_table_hash_list(struct mlx5_priv *priv) 98754534725SMatan Azrad { 9886e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 98954534725SMatan Azrad struct mlx5_flow_tbl_data_entry *tbl_data; 99054534725SMatan Azrad union mlx5_flow_tbl_key table_key = { 99154534725SMatan Azrad { 99254534725SMatan Azrad .table_id = 0, 99354534725SMatan Azrad .reserved = 0, 99454534725SMatan Azrad .domain = 0, 99554534725SMatan Azrad .direction = 0, 99654534725SMatan Azrad } 99754534725SMatan Azrad }; 99854534725SMatan Azrad struct mlx5_hlist_entry *pos; 99954534725SMatan Azrad 100054534725SMatan Azrad if (!sh->flow_tbls) 100154534725SMatan Azrad return; 100254534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 100354534725SMatan Azrad if (pos) { 100454534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 100554534725SMatan Azrad entry); 10068e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 100754534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 100883c2047cSSuanming Mou mlx5_free(tbl_data); 100954534725SMatan Azrad } 101054534725SMatan Azrad table_key.direction = 1; 101154534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 101254534725SMatan Azrad if (pos) { 101354534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 101454534725SMatan Azrad entry); 10158e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 101654534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 101783c2047cSSuanming Mou mlx5_free(tbl_data); 101854534725SMatan Azrad } 101954534725SMatan Azrad table_key.direction = 0; 102054534725SMatan Azrad table_key.domain = 1; 102154534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 102254534725SMatan Azrad if (pos) { 
102354534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 102454534725SMatan Azrad entry); 10258e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 102654534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 102783c2047cSSuanming Mou mlx5_free(tbl_data); 102854534725SMatan Azrad } 102954534725SMatan Azrad mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL); 103054534725SMatan Azrad } 103154534725SMatan Azrad 103254534725SMatan Azrad /** 103354534725SMatan Azrad * Initialize flow table hash list and create the root tables entry 103454534725SMatan Azrad * for each domain. 103554534725SMatan Azrad * 103654534725SMatan Azrad * @param[in] priv 103754534725SMatan Azrad * Pointer to the private device data structure. 103854534725SMatan Azrad * 103954534725SMatan Azrad * @return 104054534725SMatan Azrad * Zero on success, positive error code otherwise. 104154534725SMatan Azrad */ 10422eb4d010SOphir Munk int 104354534725SMatan Azrad mlx5_alloc_table_hash_list(struct mlx5_priv *priv) 104454534725SMatan Azrad { 10456e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 104654534725SMatan Azrad char s[MLX5_HLIST_NAMESIZE]; 104754534725SMatan Azrad int err = 0; 104854534725SMatan Azrad 10498e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 105054534725SMatan Azrad snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); 105154534725SMatan Azrad sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE); 105254534725SMatan Azrad if (!sh->flow_tbls) { 105363783b01SDavid Marchand DRV_LOG(ERR, "flow tables with hash creation failed."); 105454534725SMatan Azrad err = ENOMEM; 105554534725SMatan Azrad return err; 105654534725SMatan Azrad } 105754534725SMatan Azrad #ifndef HAVE_MLX5DV_DR 105854534725SMatan Azrad /* 105954534725SMatan Azrad * In case we have not DR support, the zero tables should be created 106054534725SMatan Azrad * because DV expect to see them even if they cannot be created by 106154534725SMatan Azrad * RDMA-CORE. 
106254534725SMatan Azrad */ 106354534725SMatan Azrad union mlx5_flow_tbl_key table_key = { 106454534725SMatan Azrad { 106554534725SMatan Azrad .table_id = 0, 106654534725SMatan Azrad .reserved = 0, 106754534725SMatan Azrad .domain = 0, 106854534725SMatan Azrad .direction = 0, 106954534725SMatan Azrad } 107054534725SMatan Azrad }; 107183c2047cSSuanming Mou struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO, 107283c2047cSSuanming Mou sizeof(*tbl_data), 0, 107383c2047cSSuanming Mou SOCKET_ID_ANY); 107454534725SMatan Azrad 107554534725SMatan Azrad if (!tbl_data) { 107654534725SMatan Azrad err = ENOMEM; 107754534725SMatan Azrad goto error; 107854534725SMatan Azrad } 107954534725SMatan Azrad tbl_data->entry.key = table_key.v64; 108054534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 108154534725SMatan Azrad if (err) 108254534725SMatan Azrad goto error; 108354534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 108454534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 108554534725SMatan Azrad table_key.direction = 1; 108683c2047cSSuanming Mou tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0, 108783c2047cSSuanming Mou SOCKET_ID_ANY); 108854534725SMatan Azrad if (!tbl_data) { 108954534725SMatan Azrad err = ENOMEM; 109054534725SMatan Azrad goto error; 109154534725SMatan Azrad } 109254534725SMatan Azrad tbl_data->entry.key = table_key.v64; 109354534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 109454534725SMatan Azrad if (err) 109554534725SMatan Azrad goto error; 109654534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 109754534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 109854534725SMatan Azrad table_key.direction = 0; 109954534725SMatan Azrad table_key.domain = 1; 110083c2047cSSuanming Mou tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0, 110183c2047cSSuanming Mou SOCKET_ID_ANY); 110254534725SMatan Azrad if (!tbl_data) { 110354534725SMatan 
Azrad err = ENOMEM; 110454534725SMatan Azrad goto error; 110554534725SMatan Azrad } 110654534725SMatan Azrad tbl_data->entry.key = table_key.v64; 110754534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 110854534725SMatan Azrad if (err) 110954534725SMatan Azrad goto error; 111054534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 111154534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 111254534725SMatan Azrad return err; 111354534725SMatan Azrad error: 111454534725SMatan Azrad mlx5_free_table_hash_list(priv); 111554534725SMatan Azrad #endif /* HAVE_MLX5DV_DR */ 111654534725SMatan Azrad return err; 111754534725SMatan Azrad } 111854534725SMatan Azrad 111954534725SMatan Azrad /** 11207be600c8SYongseok Koh * Initialize shared data between primary and secondary process. 11217be600c8SYongseok Koh * 11227be600c8SYongseok Koh * A memzone is reserved by primary process and secondary processes attach to 11237be600c8SYongseok Koh * the memzone. 11247be600c8SYongseok Koh * 11257be600c8SYongseok Koh * @return 11267be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 1127974f1e7eSYongseok Koh */ 11287be600c8SYongseok Koh static int 11297be600c8SYongseok Koh mlx5_init_shared_data(void) 1130974f1e7eSYongseok Koh { 1131974f1e7eSYongseok Koh const struct rte_memzone *mz; 11327be600c8SYongseok Koh int ret = 0; 1133974f1e7eSYongseok Koh 1134974f1e7eSYongseok Koh rte_spinlock_lock(&mlx5_shared_data_lock); 1135974f1e7eSYongseok Koh if (mlx5_shared_data == NULL) { 1136974f1e7eSYongseok Koh if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1137974f1e7eSYongseok Koh /* Allocate shared memory. 
*/ 1138974f1e7eSYongseok Koh mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 1139974f1e7eSYongseok Koh sizeof(*mlx5_shared_data), 1140974f1e7eSYongseok Koh SOCKET_ID_ANY, 0); 11417be600c8SYongseok Koh if (mz == NULL) { 11427be600c8SYongseok Koh DRV_LOG(ERR, 114306fa6988SDekel Peled "Cannot allocate mlx5 shared data"); 11447be600c8SYongseok Koh ret = -rte_errno; 11457be600c8SYongseok Koh goto error; 11467be600c8SYongseok Koh } 11477be600c8SYongseok Koh mlx5_shared_data = mz->addr; 11487be600c8SYongseok Koh memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 11497be600c8SYongseok Koh rte_spinlock_init(&mlx5_shared_data->lock); 1150974f1e7eSYongseok Koh } else { 1151974f1e7eSYongseok Koh /* Lookup allocated shared memory. */ 1152974f1e7eSYongseok Koh mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); 11537be600c8SYongseok Koh if (mz == NULL) { 11547be600c8SYongseok Koh DRV_LOG(ERR, 115506fa6988SDekel Peled "Cannot attach mlx5 shared data"); 11567be600c8SYongseok Koh ret = -rte_errno; 11577be600c8SYongseok Koh goto error; 1158974f1e7eSYongseok Koh } 1159974f1e7eSYongseok Koh mlx5_shared_data = mz->addr; 11607be600c8SYongseok Koh memset(&mlx5_local_data, 0, sizeof(mlx5_local_data)); 11613ebe6580SYongseok Koh } 1162974f1e7eSYongseok Koh } 11637be600c8SYongseok Koh error: 11647be600c8SYongseok Koh rte_spinlock_unlock(&mlx5_shared_data_lock); 11657be600c8SYongseok Koh return ret; 11667be600c8SYongseok Koh } 11677be600c8SYongseok Koh 11687be600c8SYongseok Koh /** 11694d803a72SOlga Shern * Retrieve integer value from environment variable. 11704d803a72SOlga Shern * 11714d803a72SOlga Shern * @param[in] name 11724d803a72SOlga Shern * Environment variable name. 11734d803a72SOlga Shern * 11744d803a72SOlga Shern * @return 11754d803a72SOlga Shern * Integer value, 0 if the variable is not set. 
11764d803a72SOlga Shern */ 11774d803a72SOlga Shern int 11784d803a72SOlga Shern mlx5_getenv_int(const char *name) 11794d803a72SOlga Shern { 11804d803a72SOlga Shern const char *val = getenv(name); 11814d803a72SOlga Shern 11824d803a72SOlga Shern if (val == NULL) 11834d803a72SOlga Shern return 0; 11844d803a72SOlga Shern return atoi(val); 11854d803a72SOlga Shern } 11864d803a72SOlga Shern 11874d803a72SOlga Shern /** 1188c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 1189c9ba7523SRaslan Darawsheh * 1190c9ba7523SRaslan Darawsheh * @param[in] dev 1191c9ba7523SRaslan Darawsheh * A pointer to eth_dev 1192c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 1193c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 1194c9ba7523SRaslan Darawsheh * 1195c9ba7523SRaslan Darawsheh * @return 1196c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 1197c9ba7523SRaslan Darawsheh */ 1198c9ba7523SRaslan Darawsheh int 1199c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 1200c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 1201c9ba7523SRaslan Darawsheh { 12028e46d4e1SAlexander Kozyrev MLX5_ASSERT(udp_tunnel != NULL); 1203c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && 1204c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 1205c9ba7523SRaslan Darawsheh return 0; 1206c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && 1207c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 1208c9ba7523SRaslan Darawsheh return 0; 1209c9ba7523SRaslan Darawsheh return -ENOTSUP; 1210c9ba7523SRaslan Darawsheh } 1211c9ba7523SRaslan Darawsheh 1212c9ba7523SRaslan Darawsheh /** 1213120dc4a7SYongseok Koh * Initialize process private data structure. 1214120dc4a7SYongseok Koh * 1215120dc4a7SYongseok Koh * @param dev 1216120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
1217120dc4a7SYongseok Koh * 1218120dc4a7SYongseok Koh * @return 1219120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 1220120dc4a7SYongseok Koh */ 1221120dc4a7SYongseok Koh int 1222120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 1223120dc4a7SYongseok Koh { 1224120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 1225120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 1226120dc4a7SYongseok Koh size_t ppriv_size; 1227120dc4a7SYongseok Koh 1228120dc4a7SYongseok Koh /* 1229120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 1230120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 1231120dc4a7SYongseok Koh */ 1232120dc4a7SYongseok Koh ppriv_size = 1233120dc4a7SYongseok Koh sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); 12342175c4dcSSuanming Mou ppriv = mlx5_malloc(MLX5_MEM_RTE, ppriv_size, RTE_CACHE_LINE_SIZE, 12352175c4dcSSuanming Mou dev->device->numa_node); 1236120dc4a7SYongseok Koh if (!ppriv) { 1237120dc4a7SYongseok Koh rte_errno = ENOMEM; 1238120dc4a7SYongseok Koh return -rte_errno; 1239120dc4a7SYongseok Koh } 1240120dc4a7SYongseok Koh ppriv->uar_table_sz = ppriv_size; 1241120dc4a7SYongseok Koh dev->process_private = ppriv; 1242120dc4a7SYongseok Koh return 0; 1243120dc4a7SYongseok Koh } 1244120dc4a7SYongseok Koh 1245120dc4a7SYongseok Koh /** 1246120dc4a7SYongseok Koh * Un-initialize process private data structure. 1247120dc4a7SYongseok Koh * 1248120dc4a7SYongseok Koh * @param dev 1249120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	mlx5_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Check if process_private released. */
		if (!dev->process_private)
			return;
		/* Secondary process: drop per-process state only. */
		mlx5_tx_uar_uninit_secondary(dev);
		mlx5_proc_priv_uninit(dev);
		rte_eth_dev_release_port(dev);
		return;
	}
	/* Shared context already gone - nothing left to close. */
	if (!priv->sh)
		return;
	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ?
1289f44b09f9SOphir Munk mlx5_os_get_ctx_device_name(priv->sh->ctx) : "")); 12908db7e3b6SBing Zhao /* 12918db7e3b6SBing Zhao * If default mreg copy action is removed at the stop stage, 12928db7e3b6SBing Zhao * the search will return none and nothing will be done anymore. 12938db7e3b6SBing Zhao */ 12948db7e3b6SBing Zhao mlx5_flow_stop_default(dev); 1295af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 12968db7e3b6SBing Zhao /* 12978db7e3b6SBing Zhao * If all the flows are already flushed in the device stop stage, 12988db7e3b6SBing Zhao * then this will return directly without any action. 12998db7e3b6SBing Zhao */ 13008db7e3b6SBing Zhao mlx5_flow_list_flush(dev, &priv->flows, true); 130102e76468SSuanming Mou mlx5_flow_meter_flush(dev, NULL); 1302e7bfa359SBing Zhao /* Free the intermediate buffers for flow creation. */ 1303e7bfa359SBing Zhao mlx5_flow_free_intermediate(dev); 13042e22920bSAdrien Mazarguil /* Prevent crashes when queues are still in use. */ 13052e22920bSAdrien Mazarguil dev->rx_pkt_burst = removed_rx_burst; 13062e22920bSAdrien Mazarguil dev->tx_pkt_burst = removed_tx_burst; 13072aac5b5dSYongseok Koh rte_wmb(); 13082aac5b5dSYongseok Koh /* Disable datapath on secondary process. */ 13092aac5b5dSYongseok Koh mlx5_mp_req_stop_rxtx(dev); 13101c506404SBing Zhao /* Free the eCPRI flex parser resource. */ 13111c506404SBing Zhao mlx5_flex_parser_ecpri_release(dev); 13122e22920bSAdrien Mazarguil if (priv->rxqs != NULL) { 13132e22920bSAdrien Mazarguil /* XXX race condition if mlx5_rx_burst() is still running. */ 13142e22920bSAdrien Mazarguil usleep(1000); 1315a1366b1aSNélio Laranjeiro for (i = 0; (i != priv->rxqs_n); ++i) 1316af4f09f2SNélio Laranjeiro mlx5_rxq_release(dev, i); 13172e22920bSAdrien Mazarguil priv->rxqs_n = 0; 13182e22920bSAdrien Mazarguil priv->rxqs = NULL; 13192e22920bSAdrien Mazarguil } 13202e22920bSAdrien Mazarguil if (priv->txqs != NULL) { 13212e22920bSAdrien Mazarguil /* XXX race condition if mlx5_tx_burst() is still running. 
*/ 13222e22920bSAdrien Mazarguil usleep(1000); 13236e78005aSNélio Laranjeiro for (i = 0; (i != priv->txqs_n); ++i) 1324af4f09f2SNélio Laranjeiro mlx5_txq_release(dev, i); 13252e22920bSAdrien Mazarguil priv->txqs_n = 0; 13262e22920bSAdrien Mazarguil priv->txqs = NULL; 13272e22920bSAdrien Mazarguil } 1328120dc4a7SYongseok Koh mlx5_proc_priv_uninit(dev); 1329dd3c774fSViacheslav Ovsiienko if (priv->mreg_cp_tbl) 1330dd3c774fSViacheslav Ovsiienko mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); 13317d6bf6b8SYongseok Koh mlx5_mprq_free_mp(dev); 13322eb4d010SOphir Munk mlx5_os_free_shared_dr(priv); 133329c1d8bbSNélio Laranjeiro if (priv->rss_conf.rss_key != NULL) 133483c2047cSSuanming Mou mlx5_free(priv->rss_conf.rss_key); 1335634efbc2SNelio Laranjeiro if (priv->reta_idx != NULL) 133683c2047cSSuanming Mou mlx5_free(priv->reta_idx); 1337ccdcba53SNélio Laranjeiro if (priv->config.vf) 1338f22442cbSMatan Azrad mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev), 1339f22442cbSMatan Azrad dev->data->mac_addrs, 1340f22442cbSMatan Azrad MLX5_MAX_MAC_ADDRESSES, priv->mac_own); 134126c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 134226c08b97SAdrien Mazarguil close(priv->nl_socket_route); 134326c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 134426c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 1345dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 1346dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 134723820a79SDekel Peled ret = mlx5_hrxq_verify(dev); 1348f5479b68SNélio Laranjeiro if (ret) 1349a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some hash Rx queue still remain", 13500f99970bSNélio Laranjeiro dev->data->port_id); 135115c80a12SDekel Peled ret = mlx5_ind_table_obj_verify(dev); 13524c7a0f5fSNélio Laranjeiro if (ret) 1353a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some indirection table still remain", 13540f99970bSNélio Laranjeiro dev->data->port_id); 135593403560SDekel Peled ret = 
mlx5_rxq_obj_verify(dev); 135609cb5b58SNélio Laranjeiro if (ret) 135793403560SDekel Peled DRV_LOG(WARNING, "port %u some Rx queue objects still remain", 13580f99970bSNélio Laranjeiro dev->data->port_id); 1359af4f09f2SNélio Laranjeiro ret = mlx5_rxq_verify(dev); 1360a1366b1aSNélio Laranjeiro if (ret) 1361a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Rx queues still remain", 13620f99970bSNélio Laranjeiro dev->data->port_id); 1363894c4a8eSOri Kam ret = mlx5_txq_obj_verify(dev); 1364faf2667fSNélio Laranjeiro if (ret) 1365a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", 13660f99970bSNélio Laranjeiro dev->data->port_id); 1367af4f09f2SNélio Laranjeiro ret = mlx5_txq_verify(dev); 13686e78005aSNélio Laranjeiro if (ret) 1369a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Tx queues still remain", 13700f99970bSNélio Laranjeiro dev->data->port_id); 1371af4f09f2SNélio Laranjeiro ret = mlx5_flow_verify(dev); 13726af6b973SNélio Laranjeiro if (ret) 1373a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some flows still remain", 1374a170a30dSNélio Laranjeiro dev->data->port_id); 1375772dc0ebSSuanming Mou /* 1376772dc0ebSSuanming Mou * Free the shared context in last turn, because the cleanup 1377772dc0ebSSuanming Mou * routines above may use some shared fields, like 1378772dc0ebSSuanming Mou * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing 1379772dc0ebSSuanming Mou * ifindex if Netlink fails. 
1380772dc0ebSSuanming Mou */ 138191389890SOphir Munk mlx5_free_shared_dev_ctx(priv->sh); 13822b730263SAdrien Mazarguil if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 13832b730263SAdrien Mazarguil unsigned int c = 0; 1384d874a4eeSThomas Monjalon uint16_t port_id; 13852b730263SAdrien Mazarguil 1386fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 1387dbeba4cfSThomas Monjalon struct mlx5_priv *opriv = 1388d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 13892b730263SAdrien Mazarguil 13902b730263SAdrien Mazarguil if (!opriv || 13912b730263SAdrien Mazarguil opriv->domain_id != priv->domain_id || 1392d874a4eeSThomas Monjalon &rte_eth_devices[port_id] == dev) 13932b730263SAdrien Mazarguil continue; 13942b730263SAdrien Mazarguil ++c; 1395f7e95215SViacheslav Ovsiienko break; 13962b730263SAdrien Mazarguil } 13972b730263SAdrien Mazarguil if (!c) 13982b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 13992b730263SAdrien Mazarguil } 1400771fa900SAdrien Mazarguil memset(priv, 0, sizeof(*priv)); 14012b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 140242603bbdSOphir Munk /* 140342603bbdSOphir Munk * Reset mac_addrs to NULL such that it is not freed as part of 140442603bbdSOphir Munk * rte_eth_dev_release_port(). mac_addrs is part of dev_private so 140542603bbdSOphir Munk * it is freed when dev_private is freed. 140642603bbdSOphir Munk */ 140742603bbdSOphir Munk dev->data->mac_addrs = NULL; 1408771fa900SAdrien Mazarguil } 1409771fa900SAdrien Mazarguil 1410e72dd09bSNélio Laranjeiro /** 1411e72dd09bSNélio Laranjeiro * Verify and store value for device argument. 1412e72dd09bSNélio Laranjeiro * 1413e72dd09bSNélio Laranjeiro * @param[in] key 1414e72dd09bSNélio Laranjeiro * Key argument to verify. 1415e72dd09bSNélio Laranjeiro * @param[in] val 1416e72dd09bSNélio Laranjeiro * Value associated with key. 
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long mod;
	signed long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	/* Absolute value; used to reject a zero Tx packet pacing period. */
	mod = tmp >= 0 ? tmp : -tmp;
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
		config->mprq.stride_size_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_max", key);
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
		config->txq_inline_min = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
		/* Only the three defined doorbell mapping modes are valid. */
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "invalid Tx doorbell "
				     "mapping parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_mpw", key);
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TX_PP, key) == 0) {
		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		/* Only the three defined extensive metadata modes are valid. */
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32) {
			DRV_LOG(ERR, "invalid extensive "
				     "metadata parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dv_xmeta_en = tmp;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
		config->max_dump_files_num = tmp;
	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
		config->lro.timeout = tmp;
	} else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
		DRV_LOG(DEBUG, "class argument is %s.", val);
	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
		config->log_hp_size = tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
		config->sys_mem_en = !!tmp;
	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
		config->decap_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	/* Whitelist of recognized devargs keys; NULL-terminated. */
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RXQ_CQE_PAD_EN,
		MLX5_RXQ_PKT_PAD_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQ_INLINE_MIN,
		MLX5_TXQ_INLINE_MAX,
		MLX5_TXQ_INLINE_MPW,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQS_MAX_VEC,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_DB_NC,
		MLX5_TX_PP,
		MLX5_TX_SKEW,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_ESW_EN,
		MLX5_DV_FLOW_EN,
		MLX5_DV_XMETA_EN,
		MLX5_LACP_BY_USER,
		MLX5_MR_EXT_MEMSEG_EN,
		MLX5_REPRESENTOR,
		MLX5_MAX_DUMP_FILES_NUM,
		MLX5_LRO_TIMEOUT_USEC,
		MLX5_CLASS_ARG_NAME,
		MLX5_HP_BUF_SIZE,
		MLX5_RECLAIM_MEM,
		MLX5_SYS_MEM_EN,
		MLX5_DECAP_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	/* No devargs at all is a valid, empty configuration. */
	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per a process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	MLX5_ASSERT(sd);
	/* Serialize initialization against other processes/threads. */
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
					   mlx5_mp_primary_handle);
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		/* ld->init_done is per-process, unlike sd->init_done. */
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
					     mlx5_mp_secondary_handle);
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * Configures the minimal amount of data to inline into WQE
 * while sending packets.
 *
 * - the txq_inline_min has the maximal priority, if this
 *   key is specified in devargs
 * - if DevX is enabled the inline mode is queried from the
 *   device (HCA attributes and NIC vport context if needed).
 * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
 *   and none (0 bytes) for other NICs
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 */
void
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
		    struct mlx5_dev_config *config)
{
	if (config->txq_inline_min != MLX5_ARG_UNSET) {
		/* Application defines size of inlined data explicitly.
*/ 171738b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 171838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 171938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 172038b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 172138b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 172238b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 172338b4b397SViacheslav Ovsiienko "txq_inline_mix aligned to minimal" 172438b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 172538b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 172638b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 172738b4b397SViacheslav Ovsiienko } 172838b4b397SViacheslav Ovsiienko break; 172938b4b397SViacheslav Ovsiienko } 173038b4b397SViacheslav Ovsiienko goto exit; 173138b4b397SViacheslav Ovsiienko } 173238b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 173338b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 173438b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 173538b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 173638b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 173738b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 173838b4b397SViacheslav Ovsiienko goto exit; 173938b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 174038b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
*/ 174138b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 174238b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 174338b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 174438b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 174538b4b397SViacheslav Ovsiienko goto exit; 174638b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 174738b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 174838b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 174938b4b397SViacheslav Ovsiienko break; 175038b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 175138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 175238b4b397SViacheslav Ovsiienko config->txq_inline_min = 175338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 175438b4b397SViacheslav Ovsiienko goto exit; 175538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 175638b4b397SViacheslav Ovsiienko config->txq_inline_min = 175738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 175838b4b397SViacheslav Ovsiienko goto exit; 175938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 176038b4b397SViacheslav Ovsiienko config->txq_inline_min = 176138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 176238b4b397SViacheslav Ovsiienko goto exit; 176338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 176438b4b397SViacheslav Ovsiienko config->txq_inline_min = 176538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 176638b4b397SViacheslav Ovsiienko goto exit; 176738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 176838b4b397SViacheslav Ovsiienko config->txq_inline_min = 176938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 177038b4b397SViacheslav Ovsiienko goto exit; 177138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 177238b4b397SViacheslav Ovsiienko config->txq_inline_min = 177338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 
177438b4b397SViacheslav Ovsiienko goto exit; 177538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 177638b4b397SViacheslav Ovsiienko config->txq_inline_min = 177738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 177838b4b397SViacheslav Ovsiienko goto exit; 177938b4b397SViacheslav Ovsiienko } 178038b4b397SViacheslav Ovsiienko } 178138b4b397SViacheslav Ovsiienko } 178238b4b397SViacheslav Ovsiienko /* 178338b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 178438b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 178538b4b397SViacheslav Ovsiienko * to determine old NICs. 178638b4b397SViacheslav Ovsiienko */ 178738b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 178838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 178938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 179038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 179138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1792614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 179338b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 179438b4b397SViacheslav Ovsiienko break; 179538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 179638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 179738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 179838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 179938b4b397SViacheslav Ovsiienko /* 180038b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 180138b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 180238b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 
180338b4b397SViacheslav Ovsiienko */ 180438b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 180520215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 180638b4b397SViacheslav Ovsiienko break; 180738b4b397SViacheslav Ovsiienko default: 180838b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 180938b4b397SViacheslav Ovsiienko break; 181038b4b397SViacheslav Ovsiienko } 181138b4b397SViacheslav Ovsiienko exit: 181238b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 181338b4b397SViacheslav Ovsiienko } 181438b4b397SViacheslav Ovsiienko 181538b4b397SViacheslav Ovsiienko /** 181639139371SViacheslav Ovsiienko * Configures the metadata mask fields in the shared context. 181739139371SViacheslav Ovsiienko * 181839139371SViacheslav Ovsiienko * @param [in] dev 181939139371SViacheslav Ovsiienko * Pointer to Ethernet device. 182039139371SViacheslav Ovsiienko */ 18212eb4d010SOphir Munk void 182239139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev) 182339139371SViacheslav Ovsiienko { 182439139371SViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private; 18256e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 182639139371SViacheslav Ovsiienko uint32_t meta, mark, reg_c0; 182739139371SViacheslav Ovsiienko 182839139371SViacheslav Ovsiienko reg_c0 = ~priv->vport_meta_mask; 182939139371SViacheslav Ovsiienko switch (priv->config.dv_xmeta_en) { 183039139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_LEGACY: 183139139371SViacheslav Ovsiienko meta = UINT32_MAX; 183239139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 183339139371SViacheslav Ovsiienko break; 183439139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META16: 183539139371SViacheslav Ovsiienko meta = reg_c0 >> rte_bsf32(reg_c0); 183639139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 183739139371SViacheslav Ovsiienko break; 183839139371SViacheslav Ovsiienko case 
MLX5_XMETA_MODE_META32: 183939139371SViacheslav Ovsiienko meta = UINT32_MAX; 184039139371SViacheslav Ovsiienko mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK; 184139139371SViacheslav Ovsiienko break; 184239139371SViacheslav Ovsiienko default: 184339139371SViacheslav Ovsiienko meta = 0; 184439139371SViacheslav Ovsiienko mark = 0; 18458e46d4e1SAlexander Kozyrev MLX5_ASSERT(false); 184639139371SViacheslav Ovsiienko break; 184739139371SViacheslav Ovsiienko } 184839139371SViacheslav Ovsiienko if (sh->dv_mark_mask && sh->dv_mark_mask != mark) 184939139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X", 185039139371SViacheslav Ovsiienko sh->dv_mark_mask, mark); 185139139371SViacheslav Ovsiienko else 185239139371SViacheslav Ovsiienko sh->dv_mark_mask = mark; 185339139371SViacheslav Ovsiienko if (sh->dv_meta_mask && sh->dv_meta_mask != meta) 185439139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X", 185539139371SViacheslav Ovsiienko sh->dv_meta_mask, meta); 185639139371SViacheslav Ovsiienko else 185739139371SViacheslav Ovsiienko sh->dv_meta_mask = meta; 185839139371SViacheslav Ovsiienko if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0) 185939139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X", 186039139371SViacheslav Ovsiienko sh->dv_meta_mask, reg_c0); 186139139371SViacheslav Ovsiienko else 186239139371SViacheslav Ovsiienko sh->dv_regc0_mask = reg_c0; 186339139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en); 186439139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask); 186539139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask); 186639139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask); 186739139371SViacheslav Ovsiienko } 186839139371SViacheslav Ovsiienko 1869efa79e68SOri Kam int 1870efa79e68SOri Kam 
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) 1871efa79e68SOri Kam { 1872efa79e68SOri Kam static const char *const dynf_names[] = { 1873efa79e68SOri Kam RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, 18748f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_METADATA_NAME, 18758f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME 1876efa79e68SOri Kam }; 1877efa79e68SOri Kam unsigned int i; 1878efa79e68SOri Kam 1879efa79e68SOri Kam if (n < RTE_DIM(dynf_names)) 1880efa79e68SOri Kam return -ENOMEM; 1881efa79e68SOri Kam for (i = 0; i < RTE_DIM(dynf_names); i++) { 1882efa79e68SOri Kam if (names[i] == NULL) 1883efa79e68SOri Kam return -EINVAL; 1884efa79e68SOri Kam strcpy(names[i], dynf_names[i]); 1885efa79e68SOri Kam } 1886efa79e68SOri Kam return RTE_DIM(dynf_names); 1887efa79e68SOri Kam } 1888efa79e68SOri Kam 188921cae858SDekel Peled /** 18902eb4d010SOphir Munk * Comparison callback to sort device data. 189192d5dd48SViacheslav Ovsiienko * 18922eb4d010SOphir Munk * This is meant to be used with qsort(). 189392d5dd48SViacheslav Ovsiienko * 18942eb4d010SOphir Munk * @param a[in] 18952eb4d010SOphir Munk * Pointer to pointer to first data object. 18962eb4d010SOphir Munk * @param b[in] 18972eb4d010SOphir Munk * Pointer to pointer to second data object. 189892d5dd48SViacheslav Ovsiienko * 189992d5dd48SViacheslav Ovsiienko * @return 19002eb4d010SOphir Munk * 0 if both objects are equal, less than 0 if the first argument is less 19012eb4d010SOphir Munk * than the second, greater than 0 otherwise. 
190292d5dd48SViacheslav Ovsiienko */ 19032eb4d010SOphir Munk int 190492d5dd48SViacheslav Ovsiienko mlx5_dev_check_sibling_config(struct mlx5_priv *priv, 190592d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *config) 190692d5dd48SViacheslav Ovsiienko { 19076e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 190892d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *sh_conf = NULL; 190992d5dd48SViacheslav Ovsiienko uint16_t port_id; 191092d5dd48SViacheslav Ovsiienko 19118e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 191292d5dd48SViacheslav Ovsiienko /* Nothing to compare for the single/first device. */ 191392d5dd48SViacheslav Ovsiienko if (sh->refcnt == 1) 191492d5dd48SViacheslav Ovsiienko return 0; 191592d5dd48SViacheslav Ovsiienko /* Find the device with shared context. */ 1916fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 191792d5dd48SViacheslav Ovsiienko struct mlx5_priv *opriv = 191892d5dd48SViacheslav Ovsiienko rte_eth_devices[port_id].data->dev_private; 191992d5dd48SViacheslav Ovsiienko 192092d5dd48SViacheslav Ovsiienko if (opriv && opriv != priv && opriv->sh == sh) { 192192d5dd48SViacheslav Ovsiienko sh_conf = &opriv->config; 192292d5dd48SViacheslav Ovsiienko break; 192392d5dd48SViacheslav Ovsiienko } 192492d5dd48SViacheslav Ovsiienko } 192592d5dd48SViacheslav Ovsiienko if (!sh_conf) 192692d5dd48SViacheslav Ovsiienko return 0; 192792d5dd48SViacheslav Ovsiienko if (sh_conf->dv_flow_en ^ config->dv_flow_en) { 192892d5dd48SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch" 192992d5dd48SViacheslav Ovsiienko " for shared %s context", sh->ibdev_name); 193092d5dd48SViacheslav Ovsiienko rte_errno = EINVAL; 193192d5dd48SViacheslav Ovsiienko return rte_errno; 193292d5dd48SViacheslav Ovsiienko } 19332d241515SViacheslav Ovsiienko if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) { 19342d241515SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch" 19352d241515SViacheslav Ovsiienko " for 
shared %s context", sh->ibdev_name); 19362d241515SViacheslav Ovsiienko rte_errno = EINVAL; 19372d241515SViacheslav Ovsiienko return rte_errno; 19382d241515SViacheslav Ovsiienko } 193992d5dd48SViacheslav Ovsiienko return 0; 194092d5dd48SViacheslav Ovsiienko } 1941771fa900SAdrien Mazarguil 1942fbc83412SViacheslav Ovsiienko /** 1943fbc83412SViacheslav Ovsiienko * Look for the ethernet device belonging to mlx5 driver. 1944fbc83412SViacheslav Ovsiienko * 1945fbc83412SViacheslav Ovsiienko * @param[in] port_id 1946fbc83412SViacheslav Ovsiienko * port_id to start looking for device. 1947fbc83412SViacheslav Ovsiienko * @param[in] pci_dev 1948fbc83412SViacheslav Ovsiienko * Pointer to the hint PCI device. When device is being probed 1949fbc83412SViacheslav Ovsiienko * the its siblings (master and preceding representors might 19502eb4d010SOphir Munk * not have assigned driver yet (because the mlx5_os_pci_probe() 1951fbc83412SViacheslav Ovsiienko * is not completed yet, for this case match on hint PCI 1952fbc83412SViacheslav Ovsiienko * device may be used to detect sibling device. 1953fbc83412SViacheslav Ovsiienko * 1954fbc83412SViacheslav Ovsiienko * @return 1955fbc83412SViacheslav Ovsiienko * port_id of found device, RTE_MAX_ETHPORT if not found. 
1956fbc83412SViacheslav Ovsiienko */ 1957f7e95215SViacheslav Ovsiienko uint16_t 1958fbc83412SViacheslav Ovsiienko mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev) 1959f7e95215SViacheslav Ovsiienko { 1960f7e95215SViacheslav Ovsiienko while (port_id < RTE_MAX_ETHPORTS) { 1961f7e95215SViacheslav Ovsiienko struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1962f7e95215SViacheslav Ovsiienko 1963f7e95215SViacheslav Ovsiienko if (dev->state != RTE_ETH_DEV_UNUSED && 1964f7e95215SViacheslav Ovsiienko dev->device && 1965fbc83412SViacheslav Ovsiienko (dev->device == &pci_dev->device || 1966fbc83412SViacheslav Ovsiienko (dev->device->driver && 1967f7e95215SViacheslav Ovsiienko dev->device->driver->name && 1968fbc83412SViacheslav Ovsiienko !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME)))) 1969f7e95215SViacheslav Ovsiienko break; 1970f7e95215SViacheslav Ovsiienko port_id++; 1971f7e95215SViacheslav Ovsiienko } 1972f7e95215SViacheslav Ovsiienko if (port_id >= RTE_MAX_ETHPORTS) 1973f7e95215SViacheslav Ovsiienko return RTE_MAX_ETHPORTS; 1974f7e95215SViacheslav Ovsiienko return port_id; 1975f7e95215SViacheslav Ovsiienko } 1976f7e95215SViacheslav Ovsiienko 19773a820742SOphir Munk /** 19783a820742SOphir Munk * DPDK callback to remove a PCI device. 19793a820742SOphir Munk * 19803a820742SOphir Munk * This function removes all Ethernet devices belong to a given PCI device. 19813a820742SOphir Munk * 19823a820742SOphir Munk * @param[in] pci_dev 19833a820742SOphir Munk * Pointer to the PCI device. 19843a820742SOphir Munk * 19853a820742SOphir Munk * @return 19863a820742SOphir Munk * 0 on success, the function cannot fail. 
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
		/*
		 * mlx5_dev_close() is not registered to secondary process,
		 * call the close function explicitly for secondary process.
		 */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			mlx5_dev_close(&rte_eth_devices[port_id]);
		else
			rte_eth_dev_close(port_id);
	}
	return 0;
}

/* PCI vendor/device ID pairs probed by this PMD (ConnectX-4 through
 * ConnectX-6 Lx, PF and VF variants); the zeroed entry terminates the list.
 */
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		.vendor_id = 0
	}
};

/* PCI bus driver descriptor registered with the EAL in rte_mlx5_pmd_init(). */
struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_os_pci_probe,
	.remove = mlx5_pci_remove,
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	.drv_flags = PCI_DRV_FLAGS,
};

/* Initialize driver log type. */
RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/* Register the driver only if the glue library loaded successfully. */
	if (mlx5_glue)
		rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");