18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <stdint.h> 10771fa900SAdrien Mazarguil #include <stdlib.h> 11e72dd09bSNélio Laranjeiro #include <errno.h> 12771fa900SAdrien Mazarguil #include <net/if.h> 1333860cfaSSuanming Mou #include <fcntl.h> 144a984153SXueming Li #include <sys/mman.h> 15ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 16771fa900SAdrien Mazarguil 17771fa900SAdrien Mazarguil /* Verbs header. */ 18771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 19771fa900SAdrien Mazarguil #ifdef PEDANTIC 20fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 21771fa900SAdrien Mazarguil #endif 22771fa900SAdrien Mazarguil #include <infiniband/verbs.h> 23771fa900SAdrien Mazarguil #ifdef PEDANTIC 24fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 25771fa900SAdrien Mazarguil #endif 26771fa900SAdrien Mazarguil 27771fa900SAdrien Mazarguil #include <rte_malloc.h> 28ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 29fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 30771fa900SAdrien Mazarguil #include <rte_pci.h> 31c752998bSGaetan Rivet #include <rte_bus_pci.h> 32771fa900SAdrien Mazarguil #include <rte_common.h> 33e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 34e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 35e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 36f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 37f15db67dSMatan Azrad #include <rte_alarm.h> 38771fa900SAdrien Mazarguil 397b4f1e6bSMatan Azrad #include <mlx5_glue.h> 407b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h> 4193e30982SMatan Azrad #include <mlx5_common.h> 42a4de9586SVu Pham #include <mlx5_common_mp.h> 437b4f1e6bSMatan Azrad 447b4f1e6bSMatan Azrad #include "mlx5_defs.h" 45771fa900SAdrien Mazarguil #include "mlx5.h" 46771fa900SAdrien Mazarguil #include "mlx5_utils.h" 472e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 48771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 49974f1e7eSYongseok Koh #include "mlx5_mr.h" 5084c406e7SOri Kam #include "mlx5_flow.h" 51efa79e68SOri Kam #include "rte_pmd_mlx5.h" 52771fa900SAdrien Mazarguil 5399c12dccSNélio Laranjeiro /* Device parameter to enable RX completion queue compression. */ 5499c12dccSNélio Laranjeiro #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" 5599c12dccSNélio Laranjeiro 56bc91e8dbSYongseok Koh /* Device parameter to enable RX completion entry padding to 128B. */ 57bc91e8dbSYongseok Koh #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" 58bc91e8dbSYongseok Koh 5978c7a16dSYongseok Koh /* Device parameter to enable padding Rx packet to cacheline size. */ 6078c7a16dSYongseok Koh #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" 6178c7a16dSYongseok Koh 627d6bf6b8SYongseok Koh /* Device parameter to enable Multi-Packet Rx queue. */ 637d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_EN "mprq_en" 647d6bf6b8SYongseok Koh 657d6bf6b8SYongseok Koh /* Device parameter to configure log 2 of the number of strides for MPRQ. */ 667d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" 677d6bf6b8SYongseok Koh 68ecb16045SAlexander Kozyrev /* Device parameter to configure log 2 of the stride size for MPRQ. 
*/
69ecb16045SAlexander Kozyrev #define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"
70ecb16045SAlexander Kozyrev
717d6bf6b8SYongseok Koh /* Device parameter to limit the size of memcpy'd packet for MPRQ. */
727d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"
737d6bf6b8SYongseok Koh
747d6bf6b8SYongseok Koh /* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
757d6bf6b8SYongseok Koh #define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"
767d6bf6b8SYongseok Koh
77a6bd4911SViacheslav Ovsiienko /* Device parameter to configure inline send. Deprecated, ignored. */
782a66cf37SYaacov Hazan #define MLX5_TXQ_INLINE "txq_inline"
792a66cf37SYaacov Hazan
80505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with ordinary SEND. */
81505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MAX "txq_inline_max"
82505f1fe4SViacheslav Ovsiienko
83505f1fe4SViacheslav Ovsiienko /* Device parameter to configure minimal data size to inline. */
84505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MIN "txq_inline_min"
85505f1fe4SViacheslav Ovsiienko
86505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with Enhanced MPW. */
87505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"
88505f1fe4SViacheslav Ovsiienko
892a66cf37SYaacov Hazan /*
902a66cf37SYaacov Hazan  * Device parameter to configure the number of TX queues threshold for
912a66cf37SYaacov Hazan  * enabling inline send.
922a66cf37SYaacov Hazan  */
932a66cf37SYaacov Hazan #define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
942a66cf37SYaacov Hazan
9509d8b416SYongseok Koh /*
9609d8b416SYongseok Koh  * Device parameter to configure the number of TX queues threshold for
97a6bd4911SViacheslav Ovsiienko  * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
9809d8b416SYongseok Koh  */
9909d8b416SYongseok Koh #define MLX5_TXQS_MAX_VEC "txqs_max_vec"
10009d8b416SYongseok Koh
101230189d9SNélio Laranjeiro /* Device parameter to enable multi-packet send WQEs. */
102230189d9SNélio Laranjeiro #define MLX5_TXQ_MPW_EN "txq_mpw_en"
103230189d9SNélio Laranjeiro
104a6bd4911SViacheslav Ovsiienko /*
1058409a285SViacheslav Ovsiienko  * Device parameter to force doorbell register mapping
1068409a285SViacheslav Ovsiienko  * to non-cached region eliminating the extra write memory barrier.
1078409a285SViacheslav Ovsiienko  */
1088409a285SViacheslav Ovsiienko #define MLX5_TX_DB_NC "tx_db_nc"
1098409a285SViacheslav Ovsiienko
1108409a285SViacheslav Ovsiienko /*
111a6bd4911SViacheslav Ovsiienko  * Device parameter to include 2 dsegs in the title WQEBB.
112a6bd4911SViacheslav Ovsiienko  * Deprecated, ignored.
113a6bd4911SViacheslav Ovsiienko  */
1146ce84bd8SYongseok Koh #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"
1156ce84bd8SYongseok Koh
116a6bd4911SViacheslav Ovsiienko /*
117a6bd4911SViacheslav Ovsiienko  * Device parameter to limit the size of inlining packet.
118a6bd4911SViacheslav Ovsiienko  * Deprecated, ignored.
119a6bd4911SViacheslav Ovsiienko  */
1206ce84bd8SYongseok Koh #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
1216ce84bd8SYongseok Koh
122a6bd4911SViacheslav Ovsiienko /*
123a6bd4911SViacheslav Ovsiienko  * Device parameter to enable hardware Tx vector.
124a6bd4911SViacheslav Ovsiienko  * Deprecated, ignored (no vectorized Tx routines anymore).
125a6bd4911SViacheslav Ovsiienko */ 1265644d5b9SNelio Laranjeiro #define MLX5_TX_VEC_EN "tx_vec_en" 1275644d5b9SNelio Laranjeiro 1285644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Rx vector. */ 1295644d5b9SNelio Laranjeiro #define MLX5_RX_VEC_EN "rx_vec_en" 1305644d5b9SNelio Laranjeiro 13178a54648SXueming Li /* Allow L3 VXLAN flow creation. */ 13278a54648SXueming Li #define MLX5_L3_VXLAN_EN "l3_vxlan_en" 13378a54648SXueming Li 134e2b4925eSOri Kam /* Activate DV E-Switch flow steering. */ 135e2b4925eSOri Kam #define MLX5_DV_ESW_EN "dv_esw_en" 136e2b4925eSOri Kam 13751e72d38SOri Kam /* Activate DV flow steering. */ 13851e72d38SOri Kam #define MLX5_DV_FLOW_EN "dv_flow_en" 13951e72d38SOri Kam 1402d241515SViacheslav Ovsiienko /* Enable extensive flow metadata support. */ 1412d241515SViacheslav Ovsiienko #define MLX5_DV_XMETA_EN "dv_xmeta_en" 1422d241515SViacheslav Ovsiienko 143db209cc3SNélio Laranjeiro /* Activate Netlink support in VF mode. */ 144db209cc3SNélio Laranjeiro #define MLX5_VF_NL_EN "vf_nl_en" 145db209cc3SNélio Laranjeiro 146dceb5029SYongseok Koh /* Enable extending memsegs when creating a MR. */ 147dceb5029SYongseok Koh #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" 148dceb5029SYongseok Koh 1496de569f5SAdrien Mazarguil /* Select port representors to instantiate. */ 1506de569f5SAdrien Mazarguil #define MLX5_REPRESENTOR "representor" 1516de569f5SAdrien Mazarguil 152066cfecdSMatan Azrad /* Device parameter to configure the maximum number of dump files per queue. */ 153066cfecdSMatan Azrad #define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num" 154066cfecdSMatan Azrad 15521bb6c7eSDekel Peled /* Configure timeout of LRO session (in microseconds). */ 15621bb6c7eSDekel Peled #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec" 15721bb6c7eSDekel Peled 1581ad9a3d0SBing Zhao /* 1591ad9a3d0SBing Zhao * Device parameter to configure the total data buffer size for a single 1601ad9a3d0SBing Zhao * hairpin queue (logarithm value). 1611ad9a3d0SBing Zhao */ 1621ad9a3d0SBing Zhao #define MLX5_HP_BUF_SIZE "hp_buf_log_sz" 1631ad9a3d0SBing Zhao 164a1da6f62SSuanming Mou /* Flow memory reclaim mode. */ 165a1da6f62SSuanming Mou #define MLX5_RECLAIM_MEM "reclaim_mem_mode" 166a1da6f62SSuanming Mou 16743e9d979SShachar Beiser #ifndef HAVE_IBV_MLX5_MOD_MPW 16843e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) 16943e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) 17043e9d979SShachar Beiser #endif 17143e9d979SShachar Beiser 172523f5a74SYongseok Koh #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP 173523f5a74SYongseok Koh #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) 174523f5a74SYongseok Koh #endif 175523f5a74SYongseok Koh 176974f1e7eSYongseok Koh static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 177974f1e7eSYongseok Koh 178974f1e7eSYongseok Koh /* Shared memory between primary and secondary processes. */ 179974f1e7eSYongseok Koh struct mlx5_shared_data *mlx5_shared_data; 180974f1e7eSYongseok Koh 181974f1e7eSYongseok Koh /* Spinlock for mlx5_shared_data allocation. */ 182974f1e7eSYongseok Koh static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 183974f1e7eSYongseok Koh 1847be600c8SYongseok Koh /* Process local data for secondary processes. */ 1857be600c8SYongseok Koh static struct mlx5_local_data mlx5_local_data; 1867be600c8SYongseok Koh 187a170a30dSNélio Laranjeiro /** Driver-specific log messages type. 
*/ 188a170a30dSNélio Laranjeiro int mlx5_logtype; 189a170a30dSNélio Laranjeiro 190ad74bc61SViacheslav Ovsiienko /** Data associated with devices to spawn. */ 191ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data { 192ad74bc61SViacheslav Ovsiienko uint32_t ifindex; /**< Network interface index. */ 193ad74bc61SViacheslav Ovsiienko uint32_t max_port; /**< IB device maximal port index. */ 194ad74bc61SViacheslav Ovsiienko uint32_t ibv_port; /**< IB device physical port index. */ 1952e569a37SViacheslav Ovsiienko int pf_bond; /**< bonding device PF index. < 0 - no bonding */ 196ad74bc61SViacheslav Ovsiienko struct mlx5_switch_info info; /**< Switch information. */ 197ad74bc61SViacheslav Ovsiienko struct ibv_device *ibv_dev; /**< Associated IB device. */ 198ad74bc61SViacheslav Ovsiienko struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */ 199ab3cffcfSViacheslav Ovsiienko struct rte_pci_device *pci_dev; /**< Backend PCI device. */ 200ad74bc61SViacheslav Ovsiienko }; 201ad74bc61SViacheslav Ovsiienko 2026e88bc42SOphir Munk static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER(); 20317e19bc4SViacheslav Ovsiienko static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER; 20417e19bc4SViacheslav Ovsiienko 205014d1cbeSSuanming Mou static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { 206b88341caSSuanming Mou #ifdef HAVE_IBV_FLOW_DV_SUPPORT 207014d1cbeSSuanming Mou { 208014d1cbeSSuanming Mou .size = sizeof(struct mlx5_flow_dv_encap_decap_resource), 209014d1cbeSSuanming Mou .trunk_size = 64, 210014d1cbeSSuanming Mou .grow_trunk = 3, 211014d1cbeSSuanming Mou .grow_shift = 2, 212014d1cbeSSuanming Mou .need_lock = 0, 213014d1cbeSSuanming Mou .release_mem_en = 1, 214014d1cbeSSuanming Mou .malloc = rte_malloc_socket, 215014d1cbeSSuanming Mou .free = rte_free, 216014d1cbeSSuanming Mou .type = "mlx5_encap_decap_ipool", 217014d1cbeSSuanming Mou }, 2188acf8ac9SSuanming Mou { 2198acf8ac9SSuanming Mou .size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource), 2208acf8ac9SSuanming Mou .trunk_size = 64, 2218acf8ac9SSuanming Mou .grow_trunk = 3, 2228acf8ac9SSuanming Mou .grow_shift = 2, 2238acf8ac9SSuanming Mou .need_lock = 0, 2248acf8ac9SSuanming Mou .release_mem_en = 1, 2258acf8ac9SSuanming Mou .malloc = rte_malloc_socket, 2268acf8ac9SSuanming Mou .free = rte_free, 2278acf8ac9SSuanming Mou .type = "mlx5_push_vlan_ipool", 2288acf8ac9SSuanming Mou }, 2295f114269SSuanming Mou { 2305f114269SSuanming Mou .size = sizeof(struct mlx5_flow_dv_tag_resource), 2315f114269SSuanming Mou .trunk_size = 64, 2325f114269SSuanming Mou .grow_trunk = 3, 2335f114269SSuanming Mou .grow_shift = 2, 2345f114269SSuanming Mou .need_lock = 0, 2355f114269SSuanming Mou .release_mem_en = 1, 2365f114269SSuanming Mou .malloc = rte_malloc_socket, 2375f114269SSuanming Mou .free = rte_free, 2385f114269SSuanming Mou .type = "mlx5_tag_ipool", 2395f114269SSuanming Mou }, 240f3faf9eaSSuanming Mou { 241f3faf9eaSSuanming Mou .size = sizeof(struct mlx5_flow_dv_port_id_action_resource), 242f3faf9eaSSuanming Mou .trunk_size = 64, 243f3faf9eaSSuanming Mou .grow_trunk = 3, 244f3faf9eaSSuanming Mou .grow_shift = 2, 245f3faf9eaSSuanming Mou .need_lock = 0, 246f3faf9eaSSuanming Mou .release_mem_en = 1, 247f3faf9eaSSuanming Mou .malloc = rte_malloc_socket, 248f3faf9eaSSuanming Mou .free = rte_free, 249f3faf9eaSSuanming Mou .type = "mlx5_port_id_ipool", 250f3faf9eaSSuanming Mou }, 2517ac99475SSuanming Mou { 2527ac99475SSuanming Mou .size = sizeof(struct mlx5_flow_tbl_data_entry), 2537ac99475SSuanming Mou 
.trunk_size = 64, 2547ac99475SSuanming Mou .grow_trunk = 3, 2557ac99475SSuanming Mou .grow_shift = 2, 2567ac99475SSuanming Mou .need_lock = 0, 2577ac99475SSuanming Mou .release_mem_en = 1, 2587ac99475SSuanming Mou .malloc = rte_malloc_socket, 2597ac99475SSuanming Mou .free = rte_free, 2607ac99475SSuanming Mou .type = "mlx5_jump_ipool", 2617ac99475SSuanming Mou }, 262b88341caSSuanming Mou #endif 263772dc0ebSSuanming Mou { 2648638e2b0SSuanming Mou .size = sizeof(struct mlx5_flow_meter), 2658638e2b0SSuanming Mou .trunk_size = 64, 2668638e2b0SSuanming Mou .grow_trunk = 3, 2678638e2b0SSuanming Mou .grow_shift = 2, 2688638e2b0SSuanming Mou .need_lock = 0, 2698638e2b0SSuanming Mou .release_mem_en = 1, 2708638e2b0SSuanming Mou .malloc = rte_malloc_socket, 2718638e2b0SSuanming Mou .free = rte_free, 2728638e2b0SSuanming Mou .type = "mlx5_meter_ipool", 2738638e2b0SSuanming Mou }, 2748638e2b0SSuanming Mou { 27590e6053aSSuanming Mou .size = sizeof(struct mlx5_flow_mreg_copy_resource), 27690e6053aSSuanming Mou .trunk_size = 64, 27790e6053aSSuanming Mou .grow_trunk = 3, 27890e6053aSSuanming Mou .grow_shift = 2, 27990e6053aSSuanming Mou .need_lock = 0, 28090e6053aSSuanming Mou .release_mem_en = 1, 28190e6053aSSuanming Mou .malloc = rte_malloc_socket, 28290e6053aSSuanming Mou .free = rte_free, 28390e6053aSSuanming Mou .type = "mlx5_mcp_ipool", 28490e6053aSSuanming Mou }, 28590e6053aSSuanming Mou { 286772dc0ebSSuanming Mou .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN), 287772dc0ebSSuanming Mou .trunk_size = 64, 288772dc0ebSSuanming Mou .grow_trunk = 3, 289772dc0ebSSuanming Mou .grow_shift = 2, 290772dc0ebSSuanming Mou .need_lock = 0, 291772dc0ebSSuanming Mou .release_mem_en = 1, 292772dc0ebSSuanming Mou .malloc = rte_malloc_socket, 293772dc0ebSSuanming Mou .free = rte_free, 294772dc0ebSSuanming Mou .type = "mlx5_hrxq_ipool", 295772dc0ebSSuanming Mou }, 296b88341caSSuanming Mou { 297b88341caSSuanming Mou .size = sizeof(struct mlx5_flow_handle), 298b88341caSSuanming Mou .trunk_size = 64, 299b88341caSSuanming Mou .grow_trunk = 3, 300b88341caSSuanming Mou .grow_shift = 2, 301b88341caSSuanming Mou .need_lock = 0, 302b88341caSSuanming Mou .release_mem_en = 1, 303b88341caSSuanming Mou .malloc = rte_malloc_socket, 304b88341caSSuanming Mou .free = rte_free, 305b88341caSSuanming Mou .type = "mlx5_flow_handle_ipool", 306b88341caSSuanming Mou }, 307ab612adcSSuanming Mou { 308ab612adcSSuanming Mou .size = sizeof(struct rte_flow), 309ab612adcSSuanming Mou .trunk_size = 4096, 310ab612adcSSuanming Mou .need_lock = 1, 311ab612adcSSuanming Mou .release_mem_en = 1, 312ab612adcSSuanming Mou .malloc = rte_malloc_socket, 313ab612adcSSuanming Mou .free = rte_free, 314ab612adcSSuanming Mou .type = "rte_flow_ipool", 315ab612adcSSuanming Mou }, 316014d1cbeSSuanming Mou }; 317014d1cbeSSuanming Mou 318014d1cbeSSuanming Mou 319830d2091SOri Kam #define MLX5_FLOW_MIN_ID_POOL_SIZE 512 320830d2091SOri Kam #define MLX5_ID_GENERATION_ARRAY_FACTOR 16 321830d2091SOri Kam 322860897d2SBing Zhao #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096 323e484e403SBing Zhao #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192 324860897d2SBing Zhao 325830d2091SOri Kam /** 326830d2091SOri Kam * Allocate ID pool structure. 327830d2091SOri Kam * 32830a3687dSSuanming Mou * @param[in] max_id 32930a3687dSSuanming Mou * The maximum id can be allocated from the pool. 33030a3687dSSuanming Mou * 331830d2091SOri Kam * @return 332830d2091SOri Kam * Pointer to pool object, NULL value otherwise. 
333830d2091SOri Kam */ 334830d2091SOri Kam struct mlx5_flow_id_pool * 33530a3687dSSuanming Mou mlx5_flow_id_pool_alloc(uint32_t max_id) 336830d2091SOri Kam { 337830d2091SOri Kam struct mlx5_flow_id_pool *pool; 338830d2091SOri Kam void *mem; 339830d2091SOri Kam 340830d2091SOri Kam pool = rte_zmalloc("id pool allocation", sizeof(*pool), 341830d2091SOri Kam RTE_CACHE_LINE_SIZE); 342830d2091SOri Kam if (!pool) { 343830d2091SOri Kam DRV_LOG(ERR, "can't allocate id pool"); 344830d2091SOri Kam rte_errno = ENOMEM; 345830d2091SOri Kam return NULL; 346830d2091SOri Kam } 347830d2091SOri Kam mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), 348830d2091SOri Kam RTE_CACHE_LINE_SIZE); 349830d2091SOri Kam if (!mem) { 350830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 351830d2091SOri Kam rte_errno = ENOMEM; 352830d2091SOri Kam goto error; 353830d2091SOri Kam } 354830d2091SOri Kam pool->free_arr = mem; 355830d2091SOri Kam pool->curr = pool->free_arr; 356830d2091SOri Kam pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; 357830d2091SOri Kam pool->base_index = 0; 35830a3687dSSuanming Mou pool->max_id = max_id; 359830d2091SOri Kam return pool; 360830d2091SOri Kam error: 361830d2091SOri Kam rte_free(pool); 362830d2091SOri Kam return NULL; 363830d2091SOri Kam } 364830d2091SOri Kam 365830d2091SOri Kam /** 366830d2091SOri Kam * Release ID pool structure. 367830d2091SOri Kam * 368830d2091SOri Kam * @param[in] pool 369830d2091SOri Kam * Pointer to flow id pool object to free. 370830d2091SOri Kam */ 371830d2091SOri Kam void 372830d2091SOri Kam mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) 373830d2091SOri Kam { 374830d2091SOri Kam rte_free(pool->free_arr); 375830d2091SOri Kam rte_free(pool); 376830d2091SOri Kam } 377830d2091SOri Kam 378830d2091SOri Kam /** 379830d2091SOri Kam * Generate ID. 380830d2091SOri Kam * 381830d2091SOri Kam * @param[in] pool 382830d2091SOri Kam * Pointer to flow id pool. 383830d2091SOri Kam * @param[out] id 384830d2091SOri Kam * The generated ID. 385830d2091SOri Kam * 386830d2091SOri Kam * @return 387830d2091SOri Kam * 0 on success, error value otherwise. 388830d2091SOri Kam */ 389830d2091SOri Kam uint32_t 390830d2091SOri Kam mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) 391830d2091SOri Kam { 392830d2091SOri Kam if (pool->curr == pool->free_arr) { 39330a3687dSSuanming Mou if (pool->base_index == pool->max_id) { 394830d2091SOri Kam rte_errno = ENOMEM; 395830d2091SOri Kam DRV_LOG(ERR, "no free id"); 396830d2091SOri Kam return -rte_errno; 397830d2091SOri Kam } 398830d2091SOri Kam *id = ++pool->base_index; 399830d2091SOri Kam return 0; 400830d2091SOri Kam } 401830d2091SOri Kam *id = *(--pool->curr); 402830d2091SOri Kam return 0; 403830d2091SOri Kam } 404830d2091SOri Kam 405830d2091SOri Kam /** 406830d2091SOri Kam * Release ID. 407830d2091SOri Kam * 408830d2091SOri Kam * @param[in] pool 409830d2091SOri Kam * Pointer to flow id pool. 410830d2091SOri Kam * @param[out] id 411830d2091SOri Kam * The generated ID. 412830d2091SOri Kam * 413830d2091SOri Kam * @return 414830d2091SOri Kam * 0 on success, error value otherwise. 
415830d2091SOri Kam */ 416830d2091SOri Kam uint32_t 417830d2091SOri Kam mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) 418830d2091SOri Kam { 419830d2091SOri Kam uint32_t size; 420830d2091SOri Kam uint32_t size2; 421830d2091SOri Kam void *mem; 422830d2091SOri Kam 423830d2091SOri Kam if (pool->curr == pool->last) { 424830d2091SOri Kam size = pool->curr - pool->free_arr; 425830d2091SOri Kam size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR; 4268e46d4e1SAlexander Kozyrev MLX5_ASSERT(size2 > size); 427830d2091SOri Kam mem = rte_malloc("", size2 * sizeof(uint32_t), 0); 428830d2091SOri Kam if (!mem) { 429830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 430830d2091SOri Kam rte_errno = ENOMEM; 431830d2091SOri Kam return -rte_errno; 432830d2091SOri Kam } 433830d2091SOri Kam memcpy(mem, pool->free_arr, size * sizeof(uint32_t)); 434830d2091SOri Kam rte_free(pool->free_arr); 435830d2091SOri Kam pool->free_arr = mem; 436830d2091SOri Kam pool->curr = pool->free_arr + size; 437830d2091SOri Kam pool->last = pool->free_arr + size2; 438830d2091SOri Kam } 439830d2091SOri Kam *pool->curr = id; 440830d2091SOri Kam pool->curr++; 441830d2091SOri Kam return 0; 442830d2091SOri Kam } 443830d2091SOri Kam 44417e19bc4SViacheslav Ovsiienko /** 445fa2d01c8SDong Zhou * Initialize the shared aging list information per port. 446fa2d01c8SDong Zhou * 447fa2d01c8SDong Zhou * @param[in] sh 4486e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 449fa2d01c8SDong Zhou */ 450fa2d01c8SDong Zhou static void 4516e88bc42SOphir Munk mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh) 452fa2d01c8SDong Zhou { 453fa2d01c8SDong Zhou uint32_t i; 454fa2d01c8SDong Zhou struct mlx5_age_info *age_info; 455fa2d01c8SDong Zhou 456fa2d01c8SDong Zhou for (i = 0; i < sh->max_port; i++) { 457fa2d01c8SDong Zhou age_info = &sh->port[i].age_info; 458fa2d01c8SDong Zhou age_info->flags = 0; 459fa2d01c8SDong Zhou TAILQ_INIT(&age_info->aged_counters); 460fa2d01c8SDong Zhou rte_spinlock_init(&age_info->aged_sl); 461fa2d01c8SDong Zhou MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER); 462fa2d01c8SDong Zhou } 463fa2d01c8SDong Zhou } 464fa2d01c8SDong Zhou 465fa2d01c8SDong Zhou /** 4665382d28cSMatan Azrad * Initialize the counters management structure. 4675382d28cSMatan Azrad * 4685382d28cSMatan Azrad * @param[in] sh 4696e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free 4705382d28cSMatan Azrad */ 4715382d28cSMatan Azrad static void 4726e88bc42SOphir Munk mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) 4735382d28cSMatan Azrad { 4745af61440SMatan Azrad int i; 4755382d28cSMatan Azrad 4765af61440SMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 4775382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.flow_counters); 4785af61440SMatan Azrad for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { 4795af61440SMatan Azrad TAILQ_INIT(&sh->cmng.ccont[i].pool_list); 4805af61440SMatan Azrad rte_spinlock_init(&sh->cmng.ccont[i].resize_sl); 481fa2d01c8SDong Zhou } 4825382d28cSMatan Azrad } 4835382d28cSMatan Azrad 4845382d28cSMatan Azrad /** 4855382d28cSMatan Azrad * Destroy all the resources allocated for a counter memory management. 4865382d28cSMatan Azrad * 4875382d28cSMatan Azrad * @param[in] mng 4885382d28cSMatan Azrad * Pointer to the memory management structure. 
4895382d28cSMatan Azrad */ 4905382d28cSMatan Azrad static void 4915382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) 4925382d28cSMatan Azrad { 4935382d28cSMatan Azrad uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; 4945382d28cSMatan Azrad 4955382d28cSMatan Azrad LIST_REMOVE(mng, next); 4965382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy(mng->dm)); 4975382d28cSMatan Azrad claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); 4985382d28cSMatan Azrad rte_free(mem); 4995382d28cSMatan Azrad } 5005382d28cSMatan Azrad 5015382d28cSMatan Azrad /** 5025382d28cSMatan Azrad * Close and release all the resources of the counters management. 5035382d28cSMatan Azrad * 5045382d28cSMatan Azrad * @param[in] sh 5056e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free. 5065382d28cSMatan Azrad */ 5075382d28cSMatan Azrad static void 5086e88bc42SOphir Munk mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh) 5095382d28cSMatan Azrad { 5105382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng *mng; 5115af61440SMatan Azrad int i; 5125382d28cSMatan Azrad int j; 513f15db67dSMatan Azrad int retries = 1024; 5145382d28cSMatan Azrad 515f15db67dSMatan Azrad rte_errno = 0; 516f15db67dSMatan Azrad while (--retries) { 517f15db67dSMatan Azrad rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh); 518f15db67dSMatan Azrad if (rte_errno != EINPROGRESS) 519f15db67dSMatan Azrad break; 520f15db67dSMatan Azrad rte_pause(); 521f15db67dSMatan Azrad } 5225af61440SMatan Azrad for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { 5235382d28cSMatan Azrad struct mlx5_flow_counter_pool *pool; 5245af61440SMatan Azrad uint32_t batch = !!(i > 1); 5255382d28cSMatan Azrad 5265af61440SMatan Azrad if (!sh->cmng.ccont[i].pools) 5275382d28cSMatan Azrad continue; 5285af61440SMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 5295382d28cSMatan Azrad while (pool) { 5305af61440SMatan Azrad if (batch && pool->min_dcs) 5315af61440SMatan Azrad claim_zero(mlx5_devx_cmd_destroy 532fa2d01c8SDong Zhou (pool->min_dcs)); 5335382d28cSMatan Azrad for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { 5348d93c830SDong Zhou if (MLX5_POOL_GET_CNT(pool, j)->action) 5355382d28cSMatan Azrad claim_zero 5365382d28cSMatan Azrad (mlx5_glue->destroy_flow_action 537fa2d01c8SDong Zhou (MLX5_POOL_GET_CNT 538fa2d01c8SDong Zhou (pool, j)->action)); 539826b8a87SSuanming Mou if (!batch && MLX5_GET_POOL_CNT_EXT 540826b8a87SSuanming Mou (pool, j)->dcs) 5415382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy 542826b8a87SSuanming Mou (MLX5_GET_POOL_CNT_EXT 543826b8a87SSuanming Mou (pool, j)->dcs)); 5445382d28cSMatan Azrad } 5455af61440SMatan Azrad TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next); 5465382d28cSMatan Azrad rte_free(pool); 5475af61440SMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 5485382d28cSMatan Azrad } 5495af61440SMatan Azrad rte_free(sh->cmng.ccont[i].pools); 5505382d28cSMatan Azrad } 5515382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 5525382d28cSMatan Azrad while (mng) { 5535382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(mng); 5545382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 5555382d28cSMatan Azrad } 5565382d28cSMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 5575382d28cSMatan Azrad } 5585382d28cSMatan Azrad 5595382d28cSMatan Azrad /** 560014d1cbeSSuanming Mou * Initialize the flow resources' indexed mempool. 561014d1cbeSSuanming Mou * 562014d1cbeSSuanming Mou * @param[in] sh 5636e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
564b88341caSSuanming Mou * @param[in] sh 565b88341caSSuanming Mou * Pointer to user dev config. 566014d1cbeSSuanming Mou */ 567014d1cbeSSuanming Mou static void 5686e88bc42SOphir Munk mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh, 569b88341caSSuanming Mou const struct mlx5_dev_config *config __rte_unused) 570014d1cbeSSuanming Mou { 571014d1cbeSSuanming Mou uint8_t i; 572014d1cbeSSuanming Mou 573b88341caSSuanming Mou #ifdef HAVE_IBV_FLOW_DV_SUPPORT 574b88341caSSuanming Mou /* 575b88341caSSuanming Mou * While DV is supported, user chooses the verbs mode, 576b88341caSSuanming Mou * the mlx5 flow handle size is different with the 577b88341caSSuanming Mou * MLX5_FLOW_HANDLE_VERBS_SIZE. 578b88341caSSuanming Mou */ 579b88341caSSuanming Mou if (!config->dv_flow_en) 580b88341caSSuanming Mou mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size = 581b88341caSSuanming Mou MLX5_FLOW_HANDLE_VERBS_SIZE; 582b88341caSSuanming Mou #endif 583a1da6f62SSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) { 584a1da6f62SSuanming Mou if (config->reclaim_mode) 585a1da6f62SSuanming Mou mlx5_ipool_cfg[i].release_mem_en = 1; 586014d1cbeSSuanming Mou sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]); 587014d1cbeSSuanming Mou } 588a1da6f62SSuanming Mou } 589014d1cbeSSuanming Mou 590014d1cbeSSuanming Mou /** 591014d1cbeSSuanming Mou * Release the flow resources' indexed mempool. 592014d1cbeSSuanming Mou * 593014d1cbeSSuanming Mou * @param[in] sh 5946e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 595014d1cbeSSuanming Mou */ 596014d1cbeSSuanming Mou static void 5976e88bc42SOphir Munk mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh) 598014d1cbeSSuanming Mou { 599014d1cbeSSuanming Mou uint8_t i; 600014d1cbeSSuanming Mou 601014d1cbeSSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) 602014d1cbeSSuanming Mou mlx5_ipool_destroy(sh->ipool[i]); 603014d1cbeSSuanming Mou } 604014d1cbeSSuanming Mou 605014d1cbeSSuanming Mou /** 606b9d86122SDekel Peled * Extract pdn of PD object using DV API. 607b9d86122SDekel Peled * 608b9d86122SDekel Peled * @param[in] pd 609b9d86122SDekel Peled * Pointer to the verbs PD object. 610b9d86122SDekel Peled * @param[out] pdn 611b9d86122SDekel Peled * Pointer to the PD object number variable. 612b9d86122SDekel Peled * 613b9d86122SDekel Peled * @return 614b9d86122SDekel Peled * 0 on success, error value otherwise. 
615b9d86122SDekel Peled */ 616b9d86122SDekel Peled #ifdef HAVE_IBV_FLOW_DV_SUPPORT 617b9d86122SDekel Peled static int 618b9d86122SDekel Peled mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused) 619b9d86122SDekel Peled { 620b9d86122SDekel Peled struct mlx5dv_obj obj; 621b9d86122SDekel Peled struct mlx5dv_pd pd_info; 622b9d86122SDekel Peled int ret = 0; 623b9d86122SDekel Peled 624b9d86122SDekel Peled obj.pd.in = pd; 625b9d86122SDekel Peled obj.pd.out = &pd_info; 626b9d86122SDekel Peled ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); 627b9d86122SDekel Peled if (ret) { 628b9d86122SDekel Peled DRV_LOG(DEBUG, "Fail to get PD object info"); 629b9d86122SDekel Peled return ret; 630b9d86122SDekel Peled } 631b9d86122SDekel Peled *pdn = pd_info.pdn; 632b9d86122SDekel Peled return 0; 633b9d86122SDekel Peled } 634b9d86122SDekel Peled #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 635b9d86122SDekel Peled 6368409a285SViacheslav Ovsiienko static int 6378409a285SViacheslav Ovsiienko mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config) 6388409a285SViacheslav Ovsiienko { 6398409a285SViacheslav Ovsiienko char *env; 6408409a285SViacheslav Ovsiienko int value; 6418409a285SViacheslav Ovsiienko 6428e46d4e1SAlexander Kozyrev MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 6438409a285SViacheslav Ovsiienko /* Get environment variable to store. */ 6448409a285SViacheslav Ovsiienko env = getenv(MLX5_SHUT_UP_BF); 6458409a285SViacheslav Ovsiienko value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET; 6468409a285SViacheslav Ovsiienko if (config->dbnc == MLX5_ARG_UNSET) 6478409a285SViacheslav Ovsiienko setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1); 6488409a285SViacheslav Ovsiienko else 649f078ceb6SViacheslav Ovsiienko setenv(MLX5_SHUT_UP_BF, 650f078ceb6SViacheslav Ovsiienko config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1); 6518409a285SViacheslav Ovsiienko return value; 6528409a285SViacheslav Ovsiienko } 6538409a285SViacheslav Ovsiienko 6548409a285SViacheslav Ovsiienko static void 65506f78b5eSViacheslav Ovsiienko mlx5_restore_doorbell_mapping_env(int value) 6568409a285SViacheslav Ovsiienko { 6578e46d4e1SAlexander Kozyrev MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 6588409a285SViacheslav Ovsiienko /* Restore the original environment variable state. */ 6598409a285SViacheslav Ovsiienko if (value == MLX5_ARG_UNSET) 6608409a285SViacheslav Ovsiienko unsetenv(MLX5_SHUT_UP_BF); 6618409a285SViacheslav Ovsiienko else 6628409a285SViacheslav Ovsiienko setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1); 6638409a285SViacheslav Ovsiienko } 6648409a285SViacheslav Ovsiienko 665b9d86122SDekel Peled /** 66633860cfaSSuanming Mou * Install shared asynchronous device events handler. 66733860cfaSSuanming Mou * This function is implemented to support event sharing 66833860cfaSSuanming Mou * between multiple ports of single IB device. 66933860cfaSSuanming Mou * 67033860cfaSSuanming Mou * @param sh 6716e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
67233860cfaSSuanming Mou */ 67333860cfaSSuanming Mou static void 6746e88bc42SOphir Munk mlx5_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 67533860cfaSSuanming Mou { 67633860cfaSSuanming Mou int ret; 67733860cfaSSuanming Mou int flags; 67833860cfaSSuanming Mou 67933860cfaSSuanming Mou sh->intr_handle.fd = -1; 680*f44b09f9SOphir Munk flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL); 681*f44b09f9SOphir Munk ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd, 682*f44b09f9SOphir Munk F_SETFL, flags | O_NONBLOCK); 68333860cfaSSuanming Mou if (ret) { 68433860cfaSSuanming Mou DRV_LOG(INFO, "failed to change file descriptor async event" 68533860cfaSSuanming Mou " queue"); 68633860cfaSSuanming Mou } else { 687*f44b09f9SOphir Munk sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd; 68833860cfaSSuanming Mou sh->intr_handle.type = RTE_INTR_HANDLE_EXT; 68933860cfaSSuanming Mou if (rte_intr_callback_register(&sh->intr_handle, 69033860cfaSSuanming Mou mlx5_dev_interrupt_handler, sh)) { 69133860cfaSSuanming Mou DRV_LOG(INFO, "Fail to install the shared interrupt."); 69233860cfaSSuanming Mou sh->intr_handle.fd = -1; 69333860cfaSSuanming Mou } 69433860cfaSSuanming Mou } 69533860cfaSSuanming Mou if (sh->devx) { 69633860cfaSSuanming Mou #ifdef HAVE_IBV_DEVX_ASYNC 69733860cfaSSuanming Mou sh->intr_handle_devx.fd = -1; 69833860cfaSSuanming Mou sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx); 69933860cfaSSuanming Mou if (!sh->devx_comp) { 70033860cfaSSuanming Mou DRV_LOG(INFO, "failed to allocate devx_comp."); 70133860cfaSSuanming Mou return; 70233860cfaSSuanming Mou } 70333860cfaSSuanming Mou flags = fcntl(sh->devx_comp->fd, F_GETFL); 70433860cfaSSuanming Mou ret = fcntl(sh->devx_comp->fd, F_SETFL, flags | O_NONBLOCK); 70533860cfaSSuanming Mou if (ret) { 70633860cfaSSuanming Mou DRV_LOG(INFO, "failed to change file descriptor" 70733860cfaSSuanming Mou " devx comp"); 70833860cfaSSuanming Mou return; 70933860cfaSSuanming Mou } 71033860cfaSSuanming Mou sh->intr_handle_devx.fd = sh->devx_comp->fd; 71133860cfaSSuanming Mou sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT; 71233860cfaSSuanming Mou if (rte_intr_callback_register(&sh->intr_handle_devx, 71333860cfaSSuanming Mou mlx5_dev_interrupt_handler_devx, sh)) { 71433860cfaSSuanming Mou DRV_LOG(INFO, "Fail to install the devx shared" 71533860cfaSSuanming Mou " interrupt."); 71633860cfaSSuanming Mou sh->intr_handle_devx.fd = -1; 71733860cfaSSuanming Mou } 71833860cfaSSuanming Mou #endif /* HAVE_IBV_DEVX_ASYNC */ 71933860cfaSSuanming Mou } 72033860cfaSSuanming Mou } 72133860cfaSSuanming Mou 72233860cfaSSuanming Mou /** 72333860cfaSSuanming Mou * Uninstall shared asynchronous device events handler. 72433860cfaSSuanming Mou * This function is implemented to support event sharing 72533860cfaSSuanming Mou * between multiple ports of single IB device. 72633860cfaSSuanming Mou * 72733860cfaSSuanming Mou * @param dev 7286e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
72933860cfaSSuanming Mou */ 73033860cfaSSuanming Mou static void 7316e88bc42SOphir Munk mlx5_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) 73233860cfaSSuanming Mou { 73333860cfaSSuanming Mou if (sh->intr_handle.fd >= 0) 73433860cfaSSuanming Mou mlx5_intr_callback_unregister(&sh->intr_handle, 73533860cfaSSuanming Mou mlx5_dev_interrupt_handler, sh); 73633860cfaSSuanming Mou #ifdef HAVE_IBV_DEVX_ASYNC 73733860cfaSSuanming Mou if (sh->intr_handle_devx.fd >= 0) 73833860cfaSSuanming Mou rte_intr_callback_unregister(&sh->intr_handle_devx, 73933860cfaSSuanming Mou mlx5_dev_interrupt_handler_devx, sh); 74033860cfaSSuanming Mou if (sh->devx_comp) 74133860cfaSSuanming Mou mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); 74233860cfaSSuanming Mou #endif 74333860cfaSSuanming Mou } 74433860cfaSSuanming Mou 74533860cfaSSuanming Mou /** 74617e19bc4SViacheslav Ovsiienko * Allocate shared IB device context. If there is multiport device the 74717e19bc4SViacheslav Ovsiienko * master and representors will share this context, if there is single 74817e19bc4SViacheslav Ovsiienko * port dedicated IB device, the context will be used by only given 74917e19bc4SViacheslav Ovsiienko * port due to unification. 75017e19bc4SViacheslav Ovsiienko * 751ae4eb7dcSViacheslav Ovsiienko * Routine first searches the context for the specified IB device name, 75217e19bc4SViacheslav Ovsiienko * if found the shared context assumed and reference counter is incremented. 75317e19bc4SViacheslav Ovsiienko * If no context found the new one is created and initialized with specified 75417e19bc4SViacheslav Ovsiienko * IB device context and parameters. 75517e19bc4SViacheslav Ovsiienko * 75617e19bc4SViacheslav Ovsiienko * @param[in] spawn 75717e19bc4SViacheslav Ovsiienko * Pointer to the IB device attributes (name, port, etc). 7588409a285SViacheslav Ovsiienko * @param[in] config 7598409a285SViacheslav Ovsiienko * Pointer to device configuration structure. 76017e19bc4SViacheslav Ovsiienko * 76117e19bc4SViacheslav Ovsiienko * @return 7626e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object on success, 76317e19bc4SViacheslav Ovsiienko * otherwise NULL and rte_errno is set. 76417e19bc4SViacheslav Ovsiienko */ 7656e88bc42SOphir Munk static struct mlx5_dev_ctx_shared * 7668409a285SViacheslav Ovsiienko mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn, 7678409a285SViacheslav Ovsiienko const struct mlx5_dev_config *config) 76817e19bc4SViacheslav Ovsiienko { 7696e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh; 7708409a285SViacheslav Ovsiienko int dbmap_env; 77117e19bc4SViacheslav Ovsiienko int err = 0; 77253e5a82fSViacheslav Ovsiienko uint32_t i; 773ae18a1aeSOri Kam #ifdef HAVE_IBV_FLOW_DV_SUPPORT 774ae18a1aeSOri Kam struct mlx5_devx_tis_attr tis_attr = { 0 }; 775ae18a1aeSOri Kam #endif 77617e19bc4SViacheslav Ovsiienko 7778e46d4e1SAlexander Kozyrev MLX5_ASSERT(spawn); 77817e19bc4SViacheslav Ovsiienko /* Secondary process should not create the shared context. */ 7798e46d4e1SAlexander Kozyrev MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 78017e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 78117e19bc4SViacheslav Ovsiienko /* Search for IB context by device name. 
*/ 78217e19bc4SViacheslav Ovsiienko LIST_FOREACH(sh, &mlx5_ibv_list, next) { 78317e19bc4SViacheslav Ovsiienko if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) { 78417e19bc4SViacheslav Ovsiienko sh->refcnt++; 78517e19bc4SViacheslav Ovsiienko goto exit; 78617e19bc4SViacheslav Ovsiienko } 78717e19bc4SViacheslav Ovsiienko } 788ae4eb7dcSViacheslav Ovsiienko /* No device found, we have to create new shared context. */ 7898e46d4e1SAlexander Kozyrev MLX5_ASSERT(spawn->max_port); 79017e19bc4SViacheslav Ovsiienko sh = rte_zmalloc("ethdev shared ib context", 7916e88bc42SOphir Munk sizeof(struct mlx5_dev_ctx_shared) + 79217e19bc4SViacheslav Ovsiienko spawn->max_port * 79317e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared_port), 79417e19bc4SViacheslav Ovsiienko RTE_CACHE_LINE_SIZE); 79517e19bc4SViacheslav Ovsiienko if (!sh) { 79617e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "shared context allocation failure"); 79717e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 79817e19bc4SViacheslav Ovsiienko goto exit; 79917e19bc4SViacheslav Ovsiienko } 8008409a285SViacheslav Ovsiienko /* 8018409a285SViacheslav Ovsiienko * Configure environment variable "MLX5_BF_SHUT_UP" 8028409a285SViacheslav Ovsiienko * before the device creation. The rdma_core library 8038409a285SViacheslav Ovsiienko * checks the variable at device creation and 8048409a285SViacheslav Ovsiienko * stores the result internally. 8058409a285SViacheslav Ovsiienko */ 8068409a285SViacheslav Ovsiienko dbmap_env = mlx5_config_doorbell_mapping_env(config); 80717e19bc4SViacheslav Ovsiienko /* Try to open IB device with DV first, then usual Verbs. */ 80817e19bc4SViacheslav Ovsiienko errno = 0; 80917e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev); 81017e19bc4SViacheslav Ovsiienko if (sh->ctx) { 81117e19bc4SViacheslav Ovsiienko sh->devx = 1; 81217e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is supported"); 8138409a285SViacheslav Ovsiienko /* The device is created, no need for environment. */ 81406f78b5eSViacheslav Ovsiienko mlx5_restore_doorbell_mapping_env(dbmap_env); 81517e19bc4SViacheslav Ovsiienko } else { 8168409a285SViacheslav Ovsiienko /* The environment variable is still configured. */ 81717e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->open_device(spawn->ibv_dev); 81817e19bc4SViacheslav Ovsiienko err = errno ? errno : ENODEV; 8198409a285SViacheslav Ovsiienko /* 8208409a285SViacheslav Ovsiienko * The environment variable is not needed anymore, 8218409a285SViacheslav Ovsiienko * all device creation attempts are completed. 
8228409a285SViacheslav Ovsiienko */ 82306f78b5eSViacheslav Ovsiienko mlx5_restore_doorbell_mapping_env(dbmap_env); 82406f78b5eSViacheslav Ovsiienko if (!sh->ctx) 82517e19bc4SViacheslav Ovsiienko goto error; 82617e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is NOT supported"); 82717e19bc4SViacheslav Ovsiienko } 82817e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr); 82917e19bc4SViacheslav Ovsiienko if (err) { 83017e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "ibv_query_device_ex() failed"); 83117e19bc4SViacheslav Ovsiienko goto error; 83217e19bc4SViacheslav Ovsiienko } 83317e19bc4SViacheslav Ovsiienko sh->refcnt = 1; 83417e19bc4SViacheslav Ovsiienko sh->max_port = spawn->max_port; 835*f44b09f9SOphir Munk strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx), 836*f44b09f9SOphir Munk sizeof(sh->ibdev_name) - 1); 837*f44b09f9SOphir Munk strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx), 838*f44b09f9SOphir Munk sizeof(sh->ibdev_path) - 1); 83953e5a82fSViacheslav Ovsiienko /* 84053e5a82fSViacheslav Ovsiienko * Setting port_id to max unallowed value means 84153e5a82fSViacheslav Ovsiienko * there is no interrupt subhandler installed for 84253e5a82fSViacheslav Ovsiienko * the given port index i. 84353e5a82fSViacheslav Ovsiienko */ 84423242063SMatan Azrad for (i = 0; i < sh->max_port; i++) { 84553e5a82fSViacheslav Ovsiienko sh->port[i].ih_port_id = RTE_MAX_ETHPORTS; 84623242063SMatan Azrad sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS; 84723242063SMatan Azrad } 84817e19bc4SViacheslav Ovsiienko sh->pd = mlx5_glue->alloc_pd(sh->ctx); 84917e19bc4SViacheslav Ovsiienko if (sh->pd == NULL) { 85017e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "PD allocation failure"); 85117e19bc4SViacheslav Ovsiienko err = ENOMEM; 85217e19bc4SViacheslav Ovsiienko goto error; 85317e19bc4SViacheslav Ovsiienko } 854b9d86122SDekel Peled #ifdef HAVE_IBV_FLOW_DV_SUPPORT 855ae18a1aeSOri Kam if (sh->devx) { 856b9d86122SDekel Peled err = mlx5_get_pdn(sh->pd, &sh->pdn); 857b9d86122SDekel Peled if (err) { 858b9d86122SDekel Peled DRV_LOG(ERR, "Fail to extract pdn from PD"); 859b9d86122SDekel Peled goto error; 860b9d86122SDekel Peled } 861ae18a1aeSOri Kam sh->td = mlx5_devx_cmd_create_td(sh->ctx); 862ae18a1aeSOri Kam if (!sh->td) { 863ae18a1aeSOri Kam DRV_LOG(ERR, "TD allocation failure"); 864ae18a1aeSOri Kam err = ENOMEM; 865ae18a1aeSOri Kam goto error; 866ae18a1aeSOri Kam } 867ae18a1aeSOri Kam tis_attr.transport_domain = sh->td->id; 868ae18a1aeSOri Kam sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr); 869ae18a1aeSOri Kam if (!sh->tis) { 870ae18a1aeSOri Kam DRV_LOG(ERR, "TIS allocation failure"); 871ae18a1aeSOri Kam err = ENOMEM; 872ae18a1aeSOri Kam goto error; 873ae18a1aeSOri Kam } 874ae18a1aeSOri Kam } 8750136df99SSuanming Mou sh->flow_id_pool = mlx5_flow_id_pool_alloc 8760136df99SSuanming Mou ((1 << HAIRPIN_FLOW_ID_BITS) - 1); 877d85c7b5eSOri Kam if (!sh->flow_id_pool) { 878d85c7b5eSOri Kam DRV_LOG(ERR, "can't create flow id pool"); 879d85c7b5eSOri Kam err = ENOMEM; 880d85c7b5eSOri Kam goto error; 881d85c7b5eSOri Kam } 882b9d86122SDekel Peled #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 883ab3cffcfSViacheslav Ovsiienko /* 884ab3cffcfSViacheslav Ovsiienko * Once the device is added to the list of memory event 885ab3cffcfSViacheslav Ovsiienko * callback, its global MR cache table cannot be expanded 886ab3cffcfSViacheslav Ovsiienko * on the fly because of deadlock. 
If it overflows, lookup 887ab3cffcfSViacheslav Ovsiienko * should be done by searching MR list linearly, which is slow. 888ab3cffcfSViacheslav Ovsiienko * 889ab3cffcfSViacheslav Ovsiienko * At this point the device is not added to the memory 890ab3cffcfSViacheslav Ovsiienko * event list yet, context is just being created. 891ab3cffcfSViacheslav Ovsiienko */ 892b8dc6b0eSVu Pham err = mlx5_mr_btree_init(&sh->share_cache.cache, 893ab3cffcfSViacheslav Ovsiienko MLX5_MR_BTREE_CACHE_N * 2, 89446e10a4cSViacheslav Ovsiienko spawn->pci_dev->device.numa_node); 895ab3cffcfSViacheslav Ovsiienko if (err) { 896ab3cffcfSViacheslav Ovsiienko err = rte_errno; 897ab3cffcfSViacheslav Ovsiienko goto error; 898ab3cffcfSViacheslav Ovsiienko } 89933860cfaSSuanming Mou mlx5_dev_shared_handler_install(sh); 900fa2d01c8SDong Zhou mlx5_flow_aging_init(sh); 9015382d28cSMatan Azrad mlx5_flow_counters_mng_init(sh); 902b88341caSSuanming Mou mlx5_flow_ipool_create(sh, config); 9030e3d0525SViacheslav Ovsiienko /* Add device to memory callback list. */ 9040e3d0525SViacheslav Ovsiienko rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 9050e3d0525SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, 9060e3d0525SViacheslav Ovsiienko sh, mem_event_cb); 9070e3d0525SViacheslav Ovsiienko rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 9080e3d0525SViacheslav Ovsiienko /* Add context to the global device list. */ 90917e19bc4SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next); 91017e19bc4SViacheslav Ovsiienko exit: 91117e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 91217e19bc4SViacheslav Ovsiienko return sh; 91317e19bc4SViacheslav Ovsiienko error: 91417e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 9158e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 916ae18a1aeSOri Kam if (sh->tis) 917ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->tis)); 918ae18a1aeSOri Kam if (sh->td) 919ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->td)); 92017e19bc4SViacheslav Ovsiienko if (sh->pd) 92117e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 92217e19bc4SViacheslav Ovsiienko if (sh->ctx) 92317e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 924d85c7b5eSOri Kam if (sh->flow_id_pool) 925d85c7b5eSOri Kam mlx5_flow_id_pool_release(sh->flow_id_pool); 92617e19bc4SViacheslav Ovsiienko rte_free(sh); 9278e46d4e1SAlexander Kozyrev MLX5_ASSERT(err > 0); 92817e19bc4SViacheslav Ovsiienko rte_errno = err; 92917e19bc4SViacheslav Ovsiienko return NULL; 93017e19bc4SViacheslav Ovsiienko } 93117e19bc4SViacheslav Ovsiienko 93217e19bc4SViacheslav Ovsiienko /** 93317e19bc4SViacheslav Ovsiienko * Free shared IB device context. Decrement counter and if zero free 93417e19bc4SViacheslav Ovsiienko * all allocated resources and close handles. 93517e19bc4SViacheslav Ovsiienko * 93617e19bc4SViacheslav Ovsiienko * @param[in] sh 9376e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object to free 93817e19bc4SViacheslav Ovsiienko */ 93917e19bc4SViacheslav Ovsiienko static void 9406e88bc42SOphir Munk mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh) 94117e19bc4SViacheslav Ovsiienko { 94217e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 9430afacb04SAlexander Kozyrev #ifdef RTE_LIBRTE_MLX5_DEBUG 94417e19bc4SViacheslav Ovsiienko /* Check the object presence in the list. 
*/ 9456e88bc42SOphir Munk struct mlx5_dev_ctx_shared *lctx; 94617e19bc4SViacheslav Ovsiienko 94717e19bc4SViacheslav Ovsiienko LIST_FOREACH(lctx, &mlx5_ibv_list, next) 94817e19bc4SViacheslav Ovsiienko if (lctx == sh) 94917e19bc4SViacheslav Ovsiienko break; 9508e46d4e1SAlexander Kozyrev MLX5_ASSERT(lctx); 95117e19bc4SViacheslav Ovsiienko if (lctx != sh) { 95217e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "Freeing non-existing shared IB context"); 95317e19bc4SViacheslav Ovsiienko goto exit; 95417e19bc4SViacheslav Ovsiienko } 95517e19bc4SViacheslav Ovsiienko #endif 9568e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 9578e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh->refcnt); 95817e19bc4SViacheslav Ovsiienko /* Secondary process should not free the shared context. */ 9598e46d4e1SAlexander Kozyrev MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 96017e19bc4SViacheslav Ovsiienko if (--sh->refcnt) 96117e19bc4SViacheslav Ovsiienko goto exit; 9620e3d0525SViacheslav Ovsiienko /* Remove from memory callback device list. */ 9630e3d0525SViacheslav Ovsiienko rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 9640e3d0525SViacheslav Ovsiienko LIST_REMOVE(sh, mem_event_cb); 9650e3d0525SViacheslav Ovsiienko rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 9664f8e6befSMichael Baum /* Release created Memory Regions. */ 967b8dc6b0eSVu Pham mlx5_mr_release_cache(&sh->share_cache); 9680e3d0525SViacheslav Ovsiienko /* Remove context from the global device list. */ 96917e19bc4SViacheslav Ovsiienko LIST_REMOVE(sh, next); 97053e5a82fSViacheslav Ovsiienko /* 97153e5a82fSViacheslav Ovsiienko * Ensure there is no async event handler installed. 97253e5a82fSViacheslav Ovsiienko * Only primary process handles async device events. 97353e5a82fSViacheslav Ovsiienko **/ 9745382d28cSMatan Azrad mlx5_flow_counters_mng_close(sh); 975014d1cbeSSuanming Mou mlx5_flow_ipool_destroy(sh); 97633860cfaSSuanming Mou mlx5_dev_shared_handler_uninstall(sh); 97717e19bc4SViacheslav Ovsiienko if (sh->pd) 97817e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 979ae18a1aeSOri Kam if (sh->tis) 980ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->tis)); 981ae18a1aeSOri Kam if (sh->td) 982ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->td)); 98317e19bc4SViacheslav Ovsiienko if (sh->ctx) 98417e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 985d85c7b5eSOri Kam if (sh->flow_id_pool) 986d85c7b5eSOri Kam mlx5_flow_id_pool_release(sh->flow_id_pool); 98717e19bc4SViacheslav Ovsiienko rte_free(sh); 98817e19bc4SViacheslav Ovsiienko exit: 98917e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 99017e19bc4SViacheslav Ovsiienko } 99117e19bc4SViacheslav Ovsiienko 992771fa900SAdrien Mazarguil /** 99354534725SMatan Azrad * Destroy table hash list and all the root entries per domain. 99454534725SMatan Azrad * 99554534725SMatan Azrad * @param[in] priv 99654534725SMatan Azrad * Pointer to the private device data structure. 
99754534725SMatan Azrad */ 99854534725SMatan Azrad static void 99954534725SMatan Azrad mlx5_free_table_hash_list(struct mlx5_priv *priv) 100054534725SMatan Azrad { 10016e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 100254534725SMatan Azrad struct mlx5_flow_tbl_data_entry *tbl_data; 100354534725SMatan Azrad union mlx5_flow_tbl_key table_key = { 100454534725SMatan Azrad { 100554534725SMatan Azrad .table_id = 0, 100654534725SMatan Azrad .reserved = 0, 100754534725SMatan Azrad .domain = 0, 100854534725SMatan Azrad .direction = 0, 100954534725SMatan Azrad } 101054534725SMatan Azrad }; 101154534725SMatan Azrad struct mlx5_hlist_entry *pos; 101254534725SMatan Azrad 101354534725SMatan Azrad if (!sh->flow_tbls) 101454534725SMatan Azrad return; 101554534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 101654534725SMatan Azrad if (pos) { 101754534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 101854534725SMatan Azrad entry); 10198e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 102054534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 102154534725SMatan Azrad rte_free(tbl_data); 102254534725SMatan Azrad } 102354534725SMatan Azrad table_key.direction = 1; 102454534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 102554534725SMatan Azrad if (pos) { 102654534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 102754534725SMatan Azrad entry); 10288e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 102954534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 103054534725SMatan Azrad rte_free(tbl_data); 103154534725SMatan Azrad } 103254534725SMatan Azrad table_key.direction = 0; 103354534725SMatan Azrad table_key.domain = 1; 103454534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 103554534725SMatan Azrad if (pos) { 103654534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 103754534725SMatan Azrad entry); 10388e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 103954534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 104054534725SMatan Azrad rte_free(tbl_data); 104154534725SMatan Azrad } 104254534725SMatan Azrad mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL); 104354534725SMatan Azrad } 104454534725SMatan Azrad 104554534725SMatan Azrad /** 104654534725SMatan Azrad * Initialize flow table hash list and create the root tables entry 104754534725SMatan Azrad * for each domain. 104854534725SMatan Azrad * 104954534725SMatan Azrad * @param[in] priv 105054534725SMatan Azrad * Pointer to the private device data structure. 105154534725SMatan Azrad * 105254534725SMatan Azrad * @return 105354534725SMatan Azrad * Zero on success, positive error code otherwise. 
105454534725SMatan Azrad */ 105554534725SMatan Azrad static int 105654534725SMatan Azrad mlx5_alloc_table_hash_list(struct mlx5_priv *priv) 105754534725SMatan Azrad { 10586e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 105954534725SMatan Azrad char s[MLX5_HLIST_NAMESIZE]; 106054534725SMatan Azrad int err = 0; 106154534725SMatan Azrad 10628e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 106354534725SMatan Azrad snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); 106454534725SMatan Azrad sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE); 106554534725SMatan Azrad if (!sh->flow_tbls) { 106654534725SMatan Azrad DRV_LOG(ERR, "flow tables with hash creation failed.\n"); 106754534725SMatan Azrad err = ENOMEM; 106854534725SMatan Azrad return err; 106954534725SMatan Azrad } 107054534725SMatan Azrad #ifndef HAVE_MLX5DV_DR 107154534725SMatan Azrad /* 107254534725SMatan Azrad * In case we have not DR support, the zero tables should be created 107354534725SMatan Azrad * because DV expect to see them even if they cannot be created by 107454534725SMatan Azrad * RDMA-CORE. 107554534725SMatan Azrad */ 107654534725SMatan Azrad union mlx5_flow_tbl_key table_key = { 107754534725SMatan Azrad { 107854534725SMatan Azrad .table_id = 0, 107954534725SMatan Azrad .reserved = 0, 108054534725SMatan Azrad .domain = 0, 108154534725SMatan Azrad .direction = 0, 108254534725SMatan Azrad } 108354534725SMatan Azrad }; 108454534725SMatan Azrad struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL, 108554534725SMatan Azrad sizeof(*tbl_data), 0); 108654534725SMatan Azrad 108754534725SMatan Azrad if (!tbl_data) { 108854534725SMatan Azrad err = ENOMEM; 108954534725SMatan Azrad goto error; 109054534725SMatan Azrad } 109154534725SMatan Azrad tbl_data->entry.key = table_key.v64; 109254534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 109354534725SMatan Azrad if (err) 109454534725SMatan Azrad goto error; 109554534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 109654534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 109754534725SMatan Azrad table_key.direction = 1; 109854534725SMatan Azrad tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0); 109954534725SMatan Azrad if (!tbl_data) { 110054534725SMatan Azrad err = ENOMEM; 110154534725SMatan Azrad goto error; 110254534725SMatan Azrad } 110354534725SMatan Azrad tbl_data->entry.key = table_key.v64; 110454534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 110554534725SMatan Azrad if (err) 110654534725SMatan Azrad goto error; 110754534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 110854534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 110954534725SMatan Azrad table_key.direction = 0; 111054534725SMatan Azrad table_key.domain = 1; 111154534725SMatan Azrad tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0); 111254534725SMatan Azrad if (!tbl_data) { 111354534725SMatan Azrad err = ENOMEM; 111454534725SMatan Azrad goto error; 111554534725SMatan Azrad } 111654534725SMatan Azrad tbl_data->entry.key = table_key.v64; 111754534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 111854534725SMatan Azrad if (err) 111954534725SMatan Azrad goto error; 112054534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 112154534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 112254534725SMatan Azrad return err; 112354534725SMatan Azrad error: 112454534725SMatan Azrad mlx5_free_table_hash_list(priv); 112554534725SMatan Azrad #endif /* 
HAVE_MLX5DV_DR */
112654534725SMatan Azrad 	return err;
112754534725SMatan Azrad }
112854534725SMatan Azrad 
112954534725SMatan Azrad /**
1130b2177648SViacheslav Ovsiienko  * Initialize DR-related data within the private structure.
1131b2177648SViacheslav Ovsiienko  * The routine checks the reference counter and performs the actual
1132ae4eb7dcSViacheslav Ovsiienko  * resource creation/initialization only if the counter is zero.
1133b2177648SViacheslav Ovsiienko  *
1134b2177648SViacheslav Ovsiienko  * @param[in] priv
1135b2177648SViacheslav Ovsiienko  *   Pointer to the private device data structure.
1136b2177648SViacheslav Ovsiienko  *
1137b2177648SViacheslav Ovsiienko  * @return
1138b2177648SViacheslav Ovsiienko  *   Zero on success, positive error code otherwise.
1139b2177648SViacheslav Ovsiienko  */
1140b2177648SViacheslav Ovsiienko static int
1141b2177648SViacheslav Ovsiienko mlx5_alloc_shared_dr(struct mlx5_priv *priv)
1142b2177648SViacheslav Ovsiienko {
11436e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh = priv->sh;
11441ef4cdefSMatan Azrad 	char s[MLX5_HLIST_NAMESIZE];
114568011166SXiaoyu Min 	int err = 0;
114654534725SMatan Azrad 
114768011166SXiaoyu Min 	if (!sh->flow_tbls)
114868011166SXiaoyu Min 		err = mlx5_alloc_table_hash_list(priv);
114968011166SXiaoyu Min 	else
115068011166SXiaoyu Min 		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
115168011166SXiaoyu Min 			(void *)sh->flow_tbls);
115254534725SMatan Azrad 	if (err)
115354534725SMatan Azrad 		return err;
11541ef4cdefSMatan Azrad 	/* Create tags hash list table. */
11551ef4cdefSMatan Azrad 	snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
11561ef4cdefSMatan Azrad 	sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
11571ef4cdefSMatan Azrad 	if (!sh->tag_table) {
11581ef4cdefSMatan Azrad 		DRV_LOG(ERR, "tags hash list creation failed.");
11591ef4cdefSMatan Azrad 		err = ENOMEM;
11601ef4cdefSMatan Azrad 		goto error;
11611ef4cdefSMatan Azrad 	}
1162b2177648SViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR
116354534725SMatan Azrad 	void *domain;
1164b2177648SViacheslav Ovsiienko 
1165b2177648SViacheslav Ovsiienko 	if (sh->dv_refcnt) {
1166b2177648SViacheslav Ovsiienko 		/* Shared DV/DR structures are already initialized. */
1167b2177648SViacheslav Ovsiienko 		sh->dv_refcnt++;
1168b2177648SViacheslav Ovsiienko 		priv->dr_shared = 1;
1169b2177648SViacheslav Ovsiienko 		return 0;
1170b2177648SViacheslav Ovsiienko 	}
1171b2177648SViacheslav Ovsiienko 	/* Reference counter is zero, initialize the structures.
*/ 1172d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain(sh->ctx, 1173d1e64fbfSOri Kam MLX5DV_DR_DOMAIN_TYPE_NIC_RX); 1174d1e64fbfSOri Kam if (!domain) { 1175d1e64fbfSOri Kam DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed"); 1176b2177648SViacheslav Ovsiienko err = errno; 1177b2177648SViacheslav Ovsiienko goto error; 1178b2177648SViacheslav Ovsiienko } 1179d1e64fbfSOri Kam sh->rx_domain = domain; 1180d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain(sh->ctx, 1181d1e64fbfSOri Kam MLX5DV_DR_DOMAIN_TYPE_NIC_TX); 1182d1e64fbfSOri Kam if (!domain) { 1183d1e64fbfSOri Kam DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed"); 1184b2177648SViacheslav Ovsiienko err = errno; 1185b2177648SViacheslav Ovsiienko goto error; 1186b2177648SViacheslav Ovsiienko } 118779e35d0dSViacheslav Ovsiienko pthread_mutex_init(&sh->dv_mutex, NULL); 1188d1e64fbfSOri Kam sh->tx_domain = domain; 1189e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 1190e2b4925eSOri Kam if (priv->config.dv_esw_en) { 1191d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain 1192d1e64fbfSOri Kam (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB); 1193d1e64fbfSOri Kam if (!domain) { 1194d1e64fbfSOri Kam DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed"); 1195e2b4925eSOri Kam err = errno; 1196e2b4925eSOri Kam goto error; 1197e2b4925eSOri Kam } 1198d1e64fbfSOri Kam sh->fdb_domain = domain; 119934fa7c02SOri Kam sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop(); 1200e2b4925eSOri Kam } 1201e2b4925eSOri Kam #endif 1202a1da6f62SSuanming Mou if (priv->config.reclaim_mode == MLX5_RCM_AGGR) { 1203a1da6f62SSuanming Mou mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1); 1204a1da6f62SSuanming Mou mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1); 1205a1da6f62SSuanming Mou if (sh->fdb_domain) 1206a1da6f62SSuanming Mou mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1); 1207a1da6f62SSuanming Mou } 1208b41e47daSMoti Haimovsky sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan(); 12091ef4cdefSMatan Azrad #endif /* HAVE_MLX5DV_DR */ 1210b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 1211b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 1212b2177648SViacheslav Ovsiienko return 0; 1213b2177648SViacheslav Ovsiienko error: 1214b2177648SViacheslav Ovsiienko /* Rollback the created objects. */ 1215d1e64fbfSOri Kam if (sh->rx_domain) { 1216d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->rx_domain); 1217d1e64fbfSOri Kam sh->rx_domain = NULL; 1218b2177648SViacheslav Ovsiienko } 1219d1e64fbfSOri Kam if (sh->tx_domain) { 1220d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->tx_domain); 1221d1e64fbfSOri Kam sh->tx_domain = NULL; 1222b2177648SViacheslav Ovsiienko } 1223d1e64fbfSOri Kam if (sh->fdb_domain) { 1224d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->fdb_domain); 1225d1e64fbfSOri Kam sh->fdb_domain = NULL; 1226e2b4925eSOri Kam } 122734fa7c02SOri Kam if (sh->esw_drop_action) { 122834fa7c02SOri Kam mlx5_glue->destroy_flow_action(sh->esw_drop_action); 122934fa7c02SOri Kam sh->esw_drop_action = NULL; 123034fa7c02SOri Kam } 1231b41e47daSMoti Haimovsky if (sh->pop_vlan_action) { 1232b41e47daSMoti Haimovsky mlx5_glue->destroy_flow_action(sh->pop_vlan_action); 1233b41e47daSMoti Haimovsky sh->pop_vlan_action = NULL; 1234b41e47daSMoti Haimovsky } 12351ef4cdefSMatan Azrad if (sh->tag_table) { 12361ef4cdefSMatan Azrad /* tags should be destroyed with flow before. 
*/ 12371ef4cdefSMatan Azrad mlx5_hlist_destroy(sh->tag_table, NULL, NULL); 12381ef4cdefSMatan Azrad sh->tag_table = NULL; 12391ef4cdefSMatan Azrad } 124054534725SMatan Azrad mlx5_free_table_hash_list(priv); 124154534725SMatan Azrad return err; 1242b2177648SViacheslav Ovsiienko } 1243b2177648SViacheslav Ovsiienko 1244b2177648SViacheslav Ovsiienko /** 1245b2177648SViacheslav Ovsiienko * Destroy DR related data within private structure. 1246b2177648SViacheslav Ovsiienko * 1247b2177648SViacheslav Ovsiienko * @param[in] priv 1248b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 1249b2177648SViacheslav Ovsiienko */ 1250b2177648SViacheslav Ovsiienko static void 1251b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(struct mlx5_priv *priv) 1252b2177648SViacheslav Ovsiienko { 12536e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh; 1254b2177648SViacheslav Ovsiienko 1255b2177648SViacheslav Ovsiienko if (!priv->dr_shared) 1256b2177648SViacheslav Ovsiienko return; 1257b2177648SViacheslav Ovsiienko priv->dr_shared = 0; 1258b2177648SViacheslav Ovsiienko sh = priv->sh; 12598e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 12601ef4cdefSMatan Azrad #ifdef HAVE_MLX5DV_DR 12618e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh->dv_refcnt); 1262b2177648SViacheslav Ovsiienko if (sh->dv_refcnt && --sh->dv_refcnt) 1263b2177648SViacheslav Ovsiienko return; 1264d1e64fbfSOri Kam if (sh->rx_domain) { 1265d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->rx_domain); 1266d1e64fbfSOri Kam sh->rx_domain = NULL; 1267b2177648SViacheslav Ovsiienko } 1268d1e64fbfSOri Kam if (sh->tx_domain) { 1269d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->tx_domain); 1270d1e64fbfSOri Kam sh->tx_domain = NULL; 1271b2177648SViacheslav Ovsiienko } 1272e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 1273d1e64fbfSOri Kam if (sh->fdb_domain) { 1274d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->fdb_domain); 1275d1e64fbfSOri Kam sh->fdb_domain = NULL; 1276e2b4925eSOri Kam } 127734fa7c02SOri Kam if (sh->esw_drop_action) { 127834fa7c02SOri Kam mlx5_glue->destroy_flow_action(sh->esw_drop_action); 127934fa7c02SOri Kam sh->esw_drop_action = NULL; 128034fa7c02SOri Kam } 1281e2b4925eSOri Kam #endif 1282b41e47daSMoti Haimovsky if (sh->pop_vlan_action) { 1283b41e47daSMoti Haimovsky mlx5_glue->destroy_flow_action(sh->pop_vlan_action); 1284b41e47daSMoti Haimovsky sh->pop_vlan_action = NULL; 1285b41e47daSMoti Haimovsky } 128679e35d0dSViacheslav Ovsiienko pthread_mutex_destroy(&sh->dv_mutex); 128754534725SMatan Azrad #endif /* HAVE_MLX5DV_DR */ 12881ef4cdefSMatan Azrad if (sh->tag_table) { 12891ef4cdefSMatan Azrad /* tags should be destroyed with flow before. */ 12901ef4cdefSMatan Azrad mlx5_hlist_destroy(sh->tag_table, NULL, NULL); 12911ef4cdefSMatan Azrad sh->tag_table = NULL; 12921ef4cdefSMatan Azrad } 129354534725SMatan Azrad mlx5_free_table_hash_list(priv); 1294b2177648SViacheslav Ovsiienko } 1295b2177648SViacheslav Ovsiienko 1296b2177648SViacheslav Ovsiienko /** 12977be600c8SYongseok Koh * Initialize shared data between primary and secondary process. 12987be600c8SYongseok Koh * 12997be600c8SYongseok Koh * A memzone is reserved by primary process and secondary processes attach to 13007be600c8SYongseok Koh * the memzone. 13017be600c8SYongseok Koh * 13027be600c8SYongseok Koh * @return 13037be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 
1304974f1e7eSYongseok Koh */ 13057be600c8SYongseok Koh static int 13067be600c8SYongseok Koh mlx5_init_shared_data(void) 1307974f1e7eSYongseok Koh { 1308974f1e7eSYongseok Koh const struct rte_memzone *mz; 13097be600c8SYongseok Koh int ret = 0; 1310974f1e7eSYongseok Koh 1311974f1e7eSYongseok Koh rte_spinlock_lock(&mlx5_shared_data_lock); 1312974f1e7eSYongseok Koh if (mlx5_shared_data == NULL) { 1313974f1e7eSYongseok Koh if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1314974f1e7eSYongseok Koh /* Allocate shared memory. */ 1315974f1e7eSYongseok Koh mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 1316974f1e7eSYongseok Koh sizeof(*mlx5_shared_data), 1317974f1e7eSYongseok Koh SOCKET_ID_ANY, 0); 13187be600c8SYongseok Koh if (mz == NULL) { 13197be600c8SYongseok Koh DRV_LOG(ERR, 132006fa6988SDekel Peled "Cannot allocate mlx5 shared data"); 13217be600c8SYongseok Koh ret = -rte_errno; 13227be600c8SYongseok Koh goto error; 13237be600c8SYongseok Koh } 13247be600c8SYongseok Koh mlx5_shared_data = mz->addr; 13257be600c8SYongseok Koh memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 13267be600c8SYongseok Koh rte_spinlock_init(&mlx5_shared_data->lock); 1327974f1e7eSYongseok Koh } else { 1328974f1e7eSYongseok Koh /* Lookup allocated shared memory. */ 1329974f1e7eSYongseok Koh mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); 13307be600c8SYongseok Koh if (mz == NULL) { 13317be600c8SYongseok Koh DRV_LOG(ERR, 133206fa6988SDekel Peled "Cannot attach mlx5 shared data"); 13337be600c8SYongseok Koh ret = -rte_errno; 13347be600c8SYongseok Koh goto error; 1335974f1e7eSYongseok Koh } 1336974f1e7eSYongseok Koh mlx5_shared_data = mz->addr; 13377be600c8SYongseok Koh memset(&mlx5_local_data, 0, sizeof(mlx5_local_data)); 13383ebe6580SYongseok Koh } 1339974f1e7eSYongseok Koh } 13407be600c8SYongseok Koh error: 13417be600c8SYongseok Koh rte_spinlock_unlock(&mlx5_shared_data_lock); 13427be600c8SYongseok Koh return ret; 13437be600c8SYongseok Koh } 13447be600c8SYongseok Koh 13457be600c8SYongseok Koh /** 13464d803a72SOlga Shern * Retrieve integer value from environment variable. 13474d803a72SOlga Shern * 13484d803a72SOlga Shern * @param[in] name 13494d803a72SOlga Shern * Environment variable name. 13504d803a72SOlga Shern * 13514d803a72SOlga Shern * @return 13524d803a72SOlga Shern * Integer value, 0 if the variable is not set. 13534d803a72SOlga Shern */ 13544d803a72SOlga Shern int 13554d803a72SOlga Shern mlx5_getenv_int(const char *name) 13564d803a72SOlga Shern { 13574d803a72SOlga Shern const char *val = getenv(name); 13584d803a72SOlga Shern 13594d803a72SOlga Shern if (val == NULL) 13604d803a72SOlga Shern return 0; 13614d803a72SOlga Shern return atoi(val); 13624d803a72SOlga Shern } 13634d803a72SOlga Shern 13644d803a72SOlga Shern /** 13651e3a39f7SXueming Li * Verbs callback to allocate a memory. This function should allocate the space 13661e3a39f7SXueming Li * according to the size provided residing inside a huge page. 13671e3a39f7SXueming Li * Please note that all allocation must respect the alignment from libmlx5 13681e3a39f7SXueming Li * (i.e. currently sysconf(_SC_PAGESIZE)). 13691e3a39f7SXueming Li * 13701e3a39f7SXueming Li * @param[in] size 13711e3a39f7SXueming Li * The size in bytes of the memory to allocate. 13721e3a39f7SXueming Li * @param[in] data 13731e3a39f7SXueming Li * A pointer to the callback data. 13741e3a39f7SXueming Li * 13751e3a39f7SXueming Li * @return 1376a6d83b6aSNélio Laranjeiro * Allocated buffer, NULL otherwise and rte_errno is set. 
13771e3a39f7SXueming Li */ 13781e3a39f7SXueming Li static void * 13791e3a39f7SXueming Li mlx5_alloc_verbs_buf(size_t size, void *data) 13801e3a39f7SXueming Li { 1381dbeba4cfSThomas Monjalon struct mlx5_priv *priv = data; 13821e3a39f7SXueming Li void *ret; 13831e3a39f7SXueming Li size_t alignment = sysconf(_SC_PAGESIZE); 1384d10b09dbSOlivier Matz unsigned int socket = SOCKET_ID_ANY; 13851e3a39f7SXueming Li 1386d10b09dbSOlivier Matz if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) { 1387d10b09dbSOlivier Matz const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 1388d10b09dbSOlivier Matz 1389d10b09dbSOlivier Matz socket = ctrl->socket; 1390d10b09dbSOlivier Matz } else if (priv->verbs_alloc_ctx.type == 1391d10b09dbSOlivier Matz MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) { 1392d10b09dbSOlivier Matz const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 1393d10b09dbSOlivier Matz 1394d10b09dbSOlivier Matz socket = ctrl->socket; 1395d10b09dbSOlivier Matz } 13968e46d4e1SAlexander Kozyrev MLX5_ASSERT(data != NULL); 1397d10b09dbSOlivier Matz ret = rte_malloc_socket(__func__, size, alignment, socket); 1398a6d83b6aSNélio Laranjeiro if (!ret && size) 1399a6d83b6aSNélio Laranjeiro rte_errno = ENOMEM; 14001e3a39f7SXueming Li return ret; 14011e3a39f7SXueming Li } 14021e3a39f7SXueming Li 14031e3a39f7SXueming Li /** 14041e3a39f7SXueming Li * Verbs callback to free a memory. 14051e3a39f7SXueming Li * 14061e3a39f7SXueming Li * @param[in] ptr 14071e3a39f7SXueming Li * A pointer to the memory to free. 14081e3a39f7SXueming Li * @param[in] data 14091e3a39f7SXueming Li * A pointer to the callback data. 14101e3a39f7SXueming Li */ 14111e3a39f7SXueming Li static void 14121e3a39f7SXueming Li mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) 14131e3a39f7SXueming Li { 14148e46d4e1SAlexander Kozyrev MLX5_ASSERT(data != NULL); 14151e3a39f7SXueming Li rte_free(ptr); 14161e3a39f7SXueming Li } 14171e3a39f7SXueming Li 14181e3a39f7SXueming Li /** 1419c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 1420c9ba7523SRaslan Darawsheh * 1421c9ba7523SRaslan Darawsheh * @param[in] dev 1422c9ba7523SRaslan Darawsheh * A pointer to eth_dev 1423c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 1424c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 1425c9ba7523SRaslan Darawsheh * 1426c9ba7523SRaslan Darawsheh * @return 1427c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 1428c9ba7523SRaslan Darawsheh */ 1429c9ba7523SRaslan Darawsheh int 1430c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 1431c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 1432c9ba7523SRaslan Darawsheh { 14338e46d4e1SAlexander Kozyrev MLX5_ASSERT(udp_tunnel != NULL); 1434c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && 1435c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 1436c9ba7523SRaslan Darawsheh return 0; 1437c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && 1438c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 1439c9ba7523SRaslan Darawsheh return 0; 1440c9ba7523SRaslan Darawsheh return -ENOTSUP; 1441c9ba7523SRaslan Darawsheh } 1442c9ba7523SRaslan Darawsheh 1443c9ba7523SRaslan Darawsheh /** 1444120dc4a7SYongseok Koh * Initialize process private data structure. 1445120dc4a7SYongseok Koh * 1446120dc4a7SYongseok Koh * @param dev 1447120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
1448120dc4a7SYongseok Koh * 1449120dc4a7SYongseok Koh * @return 1450120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 1451120dc4a7SYongseok Koh */ 1452120dc4a7SYongseok Koh int 1453120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 1454120dc4a7SYongseok Koh { 1455120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 1456120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 1457120dc4a7SYongseok Koh size_t ppriv_size; 1458120dc4a7SYongseok Koh 1459120dc4a7SYongseok Koh /* 1460120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 1461120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 1462120dc4a7SYongseok Koh */ 1463120dc4a7SYongseok Koh ppriv_size = 1464120dc4a7SYongseok Koh sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); 1465120dc4a7SYongseok Koh ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size, 1466120dc4a7SYongseok Koh RTE_CACHE_LINE_SIZE, dev->device->numa_node); 1467120dc4a7SYongseok Koh if (!ppriv) { 1468120dc4a7SYongseok Koh rte_errno = ENOMEM; 1469120dc4a7SYongseok Koh return -rte_errno; 1470120dc4a7SYongseok Koh } 1471120dc4a7SYongseok Koh ppriv->uar_table_sz = ppriv_size; 1472120dc4a7SYongseok Koh dev->process_private = ppriv; 1473120dc4a7SYongseok Koh return 0; 1474120dc4a7SYongseok Koh } 1475120dc4a7SYongseok Koh 1476120dc4a7SYongseok Koh /** 1477120dc4a7SYongseok Koh * Un-initialize process private data structure. 1478120dc4a7SYongseok Koh * 1479120dc4a7SYongseok Koh * @param dev 1480120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 1481120dc4a7SYongseok Koh */ 1482120dc4a7SYongseok Koh static void 1483120dc4a7SYongseok Koh mlx5_proc_priv_uninit(struct rte_eth_dev *dev) 1484120dc4a7SYongseok Koh { 1485120dc4a7SYongseok Koh if (!dev->process_private) 1486120dc4a7SYongseok Koh return; 1487120dc4a7SYongseok Koh rte_free(dev->process_private); 1488120dc4a7SYongseok Koh dev->process_private = NULL; 1489120dc4a7SYongseok Koh } 1490120dc4a7SYongseok Koh 1491120dc4a7SYongseok Koh /** 1492771fa900SAdrien Mazarguil * DPDK callback to close the device. 1493771fa900SAdrien Mazarguil * 1494771fa900SAdrien Mazarguil * Destroy all queues and objects, free memory. 1495771fa900SAdrien Mazarguil * 1496771fa900SAdrien Mazarguil * @param dev 1497771fa900SAdrien Mazarguil * Pointer to Ethernet device structure. 1498771fa900SAdrien Mazarguil */ 1499771fa900SAdrien Mazarguil static void 1500771fa900SAdrien Mazarguil mlx5_dev_close(struct rte_eth_dev *dev) 1501771fa900SAdrien Mazarguil { 1502dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 15032e22920bSAdrien Mazarguil unsigned int i; 15046af6b973SNélio Laranjeiro int ret; 1505771fa900SAdrien Mazarguil 15062786b7bfSSuanming Mou if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 15072786b7bfSSuanming Mou /* Check if process_private released. */ 15082786b7bfSSuanming Mou if (!dev->process_private) 15092786b7bfSSuanming Mou return; 15102786b7bfSSuanming Mou mlx5_tx_uar_uninit_secondary(dev); 15112786b7bfSSuanming Mou mlx5_proc_priv_uninit(dev); 15122786b7bfSSuanming Mou rte_eth_dev_release_port(dev); 15132786b7bfSSuanming Mou return; 15142786b7bfSSuanming Mou } 15152786b7bfSSuanming Mou if (!priv->sh) 15162786b7bfSSuanming Mou return; 1517a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u closing device \"%s\"", 15180f99970bSNélio Laranjeiro dev->data->port_id, 1519*f44b09f9SOphir Munk ((priv->sh->ctx != NULL) ? 
1520*f44b09f9SOphir Munk mlx5_os_get_ctx_device_name(priv->sh->ctx) : "")); 15218db7e3b6SBing Zhao /* 15228db7e3b6SBing Zhao * If default mreg copy action is removed at the stop stage, 15238db7e3b6SBing Zhao * the search will return none and nothing will be done anymore. 15248db7e3b6SBing Zhao */ 15258db7e3b6SBing Zhao mlx5_flow_stop_default(dev); 1526af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 15278db7e3b6SBing Zhao /* 15288db7e3b6SBing Zhao * If all the flows are already flushed in the device stop stage, 15298db7e3b6SBing Zhao * then this will return directly without any action. 15308db7e3b6SBing Zhao */ 15318db7e3b6SBing Zhao mlx5_flow_list_flush(dev, &priv->flows, true); 153202e76468SSuanming Mou mlx5_flow_meter_flush(dev, NULL); 1533e7bfa359SBing Zhao /* Free the intermediate buffers for flow creation. */ 1534e7bfa359SBing Zhao mlx5_flow_free_intermediate(dev); 15352e22920bSAdrien Mazarguil /* Prevent crashes when queues are still in use. */ 15362e22920bSAdrien Mazarguil dev->rx_pkt_burst = removed_rx_burst; 15372e22920bSAdrien Mazarguil dev->tx_pkt_burst = removed_tx_burst; 15382aac5b5dSYongseok Koh rte_wmb(); 15392aac5b5dSYongseok Koh /* Disable datapath on secondary process. */ 15402aac5b5dSYongseok Koh mlx5_mp_req_stop_rxtx(dev); 15412e22920bSAdrien Mazarguil if (priv->rxqs != NULL) { 15422e22920bSAdrien Mazarguil /* XXX race condition if mlx5_rx_burst() is still running. */ 15432e22920bSAdrien Mazarguil usleep(1000); 1544a1366b1aSNélio Laranjeiro for (i = 0; (i != priv->rxqs_n); ++i) 1545af4f09f2SNélio Laranjeiro mlx5_rxq_release(dev, i); 15462e22920bSAdrien Mazarguil priv->rxqs_n = 0; 15472e22920bSAdrien Mazarguil priv->rxqs = NULL; 15482e22920bSAdrien Mazarguil } 15492e22920bSAdrien Mazarguil if (priv->txqs != NULL) { 15502e22920bSAdrien Mazarguil /* XXX race condition if mlx5_tx_burst() is still running. 
*/ 15512e22920bSAdrien Mazarguil usleep(1000); 15526e78005aSNélio Laranjeiro for (i = 0; (i != priv->txqs_n); ++i) 1553af4f09f2SNélio Laranjeiro mlx5_txq_release(dev, i); 15542e22920bSAdrien Mazarguil priv->txqs_n = 0; 15552e22920bSAdrien Mazarguil priv->txqs = NULL; 15562e22920bSAdrien Mazarguil } 1557120dc4a7SYongseok Koh mlx5_proc_priv_uninit(dev); 1558dd3c774fSViacheslav Ovsiienko if (priv->mreg_cp_tbl) 1559dd3c774fSViacheslav Ovsiienko mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); 15607d6bf6b8SYongseok Koh mlx5_mprq_free_mp(dev); 1561b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 156229c1d8bbSNélio Laranjeiro if (priv->rss_conf.rss_key != NULL) 156329c1d8bbSNélio Laranjeiro rte_free(priv->rss_conf.rss_key); 1564634efbc2SNelio Laranjeiro if (priv->reta_idx != NULL) 1565634efbc2SNelio Laranjeiro rte_free(priv->reta_idx); 1566ccdcba53SNélio Laranjeiro if (priv->config.vf) 1567f22442cbSMatan Azrad mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev), 1568f22442cbSMatan Azrad dev->data->mac_addrs, 1569f22442cbSMatan Azrad MLX5_MAX_MAC_ADDRESSES, priv->mac_own); 157026c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 157126c08b97SAdrien Mazarguil close(priv->nl_socket_route); 157226c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 157326c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 1574dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 1575dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 157623820a79SDekel Peled ret = mlx5_hrxq_verify(dev); 1577f5479b68SNélio Laranjeiro if (ret) 1578a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some hash Rx queue still remain", 15790f99970bSNélio Laranjeiro dev->data->port_id); 158015c80a12SDekel Peled ret = mlx5_ind_table_obj_verify(dev); 15814c7a0f5fSNélio Laranjeiro if (ret) 1582a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some indirection table still remain", 15830f99970bSNélio Laranjeiro dev->data->port_id); 158493403560SDekel Peled ret = mlx5_rxq_obj_verify(dev); 158509cb5b58SNélio Laranjeiro if (ret) 158693403560SDekel Peled DRV_LOG(WARNING, "port %u some Rx queue objects still remain", 15870f99970bSNélio Laranjeiro dev->data->port_id); 1588af4f09f2SNélio Laranjeiro ret = mlx5_rxq_verify(dev); 1589a1366b1aSNélio Laranjeiro if (ret) 1590a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Rx queues still remain", 15910f99970bSNélio Laranjeiro dev->data->port_id); 1592894c4a8eSOri Kam ret = mlx5_txq_obj_verify(dev); 1593faf2667fSNélio Laranjeiro if (ret) 1594a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", 15950f99970bSNélio Laranjeiro dev->data->port_id); 1596af4f09f2SNélio Laranjeiro ret = mlx5_txq_verify(dev); 15976e78005aSNélio Laranjeiro if (ret) 1598a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Tx queues still remain", 15990f99970bSNélio Laranjeiro dev->data->port_id); 1600af4f09f2SNélio Laranjeiro ret = mlx5_flow_verify(dev); 16016af6b973SNélio Laranjeiro if (ret) 1602a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some flows still remain", 1603a170a30dSNélio Laranjeiro dev->data->port_id); 1604772dc0ebSSuanming Mou /* 1605772dc0ebSSuanming Mou * Free the shared context in last turn, because the cleanup 1606772dc0ebSSuanming Mou * routines above may use some shared fields, like 1607772dc0ebSSuanming Mou * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing 1608772dc0ebSSuanming Mou * ifindex if Netlink fails. 
1609772dc0ebSSuanming Mou */ 1610772dc0ebSSuanming Mou mlx5_free_shared_ibctx(priv->sh); 16112b730263SAdrien Mazarguil if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 16122b730263SAdrien Mazarguil unsigned int c = 0; 1613d874a4eeSThomas Monjalon uint16_t port_id; 16142b730263SAdrien Mazarguil 1615fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 1616dbeba4cfSThomas Monjalon struct mlx5_priv *opriv = 1617d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 16182b730263SAdrien Mazarguil 16192b730263SAdrien Mazarguil if (!opriv || 16202b730263SAdrien Mazarguil opriv->domain_id != priv->domain_id || 1621d874a4eeSThomas Monjalon &rte_eth_devices[port_id] == dev) 16222b730263SAdrien Mazarguil continue; 16232b730263SAdrien Mazarguil ++c; 1624f7e95215SViacheslav Ovsiienko break; 16252b730263SAdrien Mazarguil } 16262b730263SAdrien Mazarguil if (!c) 16272b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 16282b730263SAdrien Mazarguil } 1629771fa900SAdrien Mazarguil memset(priv, 0, sizeof(*priv)); 16302b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 163142603bbdSOphir Munk /* 163242603bbdSOphir Munk * Reset mac_addrs to NULL such that it is not freed as part of 163342603bbdSOphir Munk * rte_eth_dev_release_port(). mac_addrs is part of dev_private so 163442603bbdSOphir Munk * it is freed when dev_private is freed. 163542603bbdSOphir Munk */ 163642603bbdSOphir Munk dev->data->mac_addrs = NULL; 1637771fa900SAdrien Mazarguil } 1638771fa900SAdrien Mazarguil 16390887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops = { 1640e60fbd5bSAdrien Mazarguil .dev_configure = mlx5_dev_configure, 1641e60fbd5bSAdrien Mazarguil .dev_start = mlx5_dev_start, 1642e60fbd5bSAdrien Mazarguil .dev_stop = mlx5_dev_stop, 164362072098SOr Ami .dev_set_link_down = mlx5_set_link_down, 164462072098SOr Ami .dev_set_link_up = mlx5_set_link_up, 1645771fa900SAdrien Mazarguil .dev_close = mlx5_dev_close, 16461bdbe1afSAdrien Mazarguil .promiscuous_enable = mlx5_promiscuous_enable, 16471bdbe1afSAdrien Mazarguil .promiscuous_disable = mlx5_promiscuous_disable, 16481bdbe1afSAdrien Mazarguil .allmulticast_enable = mlx5_allmulticast_enable, 16491bdbe1afSAdrien Mazarguil .allmulticast_disable = mlx5_allmulticast_disable, 1650cb8faed7SAdrien Mazarguil .link_update = mlx5_link_update, 165187011737SAdrien Mazarguil .stats_get = mlx5_stats_get, 165287011737SAdrien Mazarguil .stats_reset = mlx5_stats_reset, 1653a4193ae3SShahaf Shuler .xstats_get = mlx5_xstats_get, 1654a4193ae3SShahaf Shuler .xstats_reset = mlx5_xstats_reset, 1655a4193ae3SShahaf Shuler .xstats_get_names = mlx5_xstats_get_names, 1656714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 1657e60fbd5bSAdrien Mazarguil .dev_infos_get = mlx5_dev_infos_get, 1658e571ad55STom Barbette .read_clock = mlx5_read_clock, 165978a38edfSJianfeng Tan .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 1660e9086978SAdrien Mazarguil .vlan_filter_set = mlx5_vlan_filter_set, 16612e22920bSAdrien Mazarguil .rx_queue_setup = mlx5_rx_queue_setup, 1662e79c9be9SOri Kam .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 16632e22920bSAdrien Mazarguil .tx_queue_setup = mlx5_tx_queue_setup, 1664ae18a1aeSOri Kam .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 16652e22920bSAdrien Mazarguil .rx_queue_release = mlx5_rx_queue_release, 16662e22920bSAdrien Mazarguil .tx_queue_release = mlx5_tx_queue_release, 166702d75430SAdrien Mazarguil .flow_ctrl_get = 
mlx5_dev_get_flow_ctrl, 166802d75430SAdrien Mazarguil .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 16693318aef7SAdrien Mazarguil .mac_addr_remove = mlx5_mac_addr_remove, 16703318aef7SAdrien Mazarguil .mac_addr_add = mlx5_mac_addr_add, 167186977fccSDavid Marchand .mac_addr_set = mlx5_mac_addr_set, 1672e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 1673cf37ca95SAdrien Mazarguil .mtu_set = mlx5_dev_set_mtu, 1674f3db9489SYaacov Hazan .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 1675f3db9489SYaacov Hazan .vlan_offload_set = mlx5_vlan_offload_set, 1676634efbc2SNelio Laranjeiro .reta_update = mlx5_dev_rss_reta_update, 1677634efbc2SNelio Laranjeiro .reta_query = mlx5_dev_rss_reta_query, 16782f97422eSNelio Laranjeiro .rss_hash_update = mlx5_rss_hash_update, 16792f97422eSNelio Laranjeiro .rss_hash_conf_get = mlx5_rss_hash_conf_get, 168076f5c99eSYaacov Hazan .filter_ctrl = mlx5_dev_filter_ctrl, 16818788fec1SOlivier Matz .rx_descriptor_status = mlx5_rx_descriptor_status, 16828788fec1SOlivier Matz .tx_descriptor_status = mlx5_tx_descriptor_status, 168326f1bae8SAlexander Kozyrev .rxq_info_get = mlx5_rxq_info_get, 168426f1bae8SAlexander Kozyrev .txq_info_get = mlx5_txq_info_get, 168526f1bae8SAlexander Kozyrev .rx_burst_mode_get = mlx5_rx_burst_mode_get, 168626f1bae8SAlexander Kozyrev .tx_burst_mode_get = mlx5_tx_burst_mode_get, 168726f04883STom Barbette .rx_queue_count = mlx5_rx_queue_count, 16883c7d44afSShahaf Shuler .rx_queue_intr_enable = mlx5_rx_intr_enable, 16893c7d44afSShahaf Shuler .rx_queue_intr_disable = mlx5_rx_intr_disable, 1690d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 1691c9ba7523SRaslan Darawsheh .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, 16928a6a09f8SDekel Peled .get_module_info = mlx5_get_module_info, 16938a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 1694b6b3bf86SOri Kam .hairpin_cap_get = mlx5_hairpin_cap_get, 1695d740eb50SSuanming Mou .mtr_ops_get = mlx5_flow_meter_ops_get, 1696771fa900SAdrien Mazarguil }; 1697771fa900SAdrien Mazarguil 1698714bf46eSThomas Monjalon /* Available operations from secondary process. */ 169987ec44ceSXueming Li static const struct eth_dev_ops mlx5_dev_sec_ops = { 170087ec44ceSXueming Li .stats_get = mlx5_stats_get, 170187ec44ceSXueming Li .stats_reset = mlx5_stats_reset, 170287ec44ceSXueming Li .xstats_get = mlx5_xstats_get, 170387ec44ceSXueming Li .xstats_reset = mlx5_xstats_reset, 170487ec44ceSXueming Li .xstats_get_names = mlx5_xstats_get_names, 1705714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 170687ec44ceSXueming Li .dev_infos_get = mlx5_dev_infos_get, 170787ec44ceSXueming Li .rx_descriptor_status = mlx5_rx_descriptor_status, 170887ec44ceSXueming Li .tx_descriptor_status = mlx5_tx_descriptor_status, 170926f1bae8SAlexander Kozyrev .rxq_info_get = mlx5_rxq_info_get, 171026f1bae8SAlexander Kozyrev .txq_info_get = mlx5_txq_info_get, 171126f1bae8SAlexander Kozyrev .rx_burst_mode_get = mlx5_rx_burst_mode_get, 171226f1bae8SAlexander Kozyrev .tx_burst_mode_get = mlx5_tx_burst_mode_get, 17138a6a09f8SDekel Peled .get_module_info = mlx5_get_module_info, 17148a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 171587ec44ceSXueming Li }; 171687ec44ceSXueming Li 1717714bf46eSThomas Monjalon /* Available operations in flow isolated mode. 
*/ 17180887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops_isolate = { 17190887aa7fSNélio Laranjeiro .dev_configure = mlx5_dev_configure, 17200887aa7fSNélio Laranjeiro .dev_start = mlx5_dev_start, 17210887aa7fSNélio Laranjeiro .dev_stop = mlx5_dev_stop, 17220887aa7fSNélio Laranjeiro .dev_set_link_down = mlx5_set_link_down, 17230887aa7fSNélio Laranjeiro .dev_set_link_up = mlx5_set_link_up, 17240887aa7fSNélio Laranjeiro .dev_close = mlx5_dev_close, 172524b068adSYongseok Koh .promiscuous_enable = mlx5_promiscuous_enable, 172624b068adSYongseok Koh .promiscuous_disable = mlx5_promiscuous_disable, 17272547ee74SYongseok Koh .allmulticast_enable = mlx5_allmulticast_enable, 17282547ee74SYongseok Koh .allmulticast_disable = mlx5_allmulticast_disable, 17290887aa7fSNélio Laranjeiro .link_update = mlx5_link_update, 17300887aa7fSNélio Laranjeiro .stats_get = mlx5_stats_get, 17310887aa7fSNélio Laranjeiro .stats_reset = mlx5_stats_reset, 17320887aa7fSNélio Laranjeiro .xstats_get = mlx5_xstats_get, 17330887aa7fSNélio Laranjeiro .xstats_reset = mlx5_xstats_reset, 17340887aa7fSNélio Laranjeiro .xstats_get_names = mlx5_xstats_get_names, 1735714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 17360887aa7fSNélio Laranjeiro .dev_infos_get = mlx5_dev_infos_get, 17370887aa7fSNélio Laranjeiro .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 17380887aa7fSNélio Laranjeiro .vlan_filter_set = mlx5_vlan_filter_set, 17390887aa7fSNélio Laranjeiro .rx_queue_setup = mlx5_rx_queue_setup, 1740e79c9be9SOri Kam .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 17410887aa7fSNélio Laranjeiro .tx_queue_setup = mlx5_tx_queue_setup, 1742ae18a1aeSOri Kam .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 17430887aa7fSNélio Laranjeiro .rx_queue_release = mlx5_rx_queue_release, 17440887aa7fSNélio Laranjeiro .tx_queue_release = mlx5_tx_queue_release, 17450887aa7fSNélio Laranjeiro .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 17460887aa7fSNélio Laranjeiro .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 17470887aa7fSNélio Laranjeiro .mac_addr_remove = mlx5_mac_addr_remove, 17480887aa7fSNélio Laranjeiro .mac_addr_add = mlx5_mac_addr_add, 17490887aa7fSNélio Laranjeiro .mac_addr_set = mlx5_mac_addr_set, 1750e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 17510887aa7fSNélio Laranjeiro .mtu_set = mlx5_dev_set_mtu, 17520887aa7fSNélio Laranjeiro .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 17530887aa7fSNélio Laranjeiro .vlan_offload_set = mlx5_vlan_offload_set, 17540887aa7fSNélio Laranjeiro .filter_ctrl = mlx5_dev_filter_ctrl, 17550887aa7fSNélio Laranjeiro .rx_descriptor_status = mlx5_rx_descriptor_status, 17560887aa7fSNélio Laranjeiro .tx_descriptor_status = mlx5_tx_descriptor_status, 175726f1bae8SAlexander Kozyrev .rxq_info_get = mlx5_rxq_info_get, 175826f1bae8SAlexander Kozyrev .txq_info_get = mlx5_txq_info_get, 175926f1bae8SAlexander Kozyrev .rx_burst_mode_get = mlx5_rx_burst_mode_get, 176026f1bae8SAlexander Kozyrev .tx_burst_mode_get = mlx5_tx_burst_mode_get, 17610887aa7fSNélio Laranjeiro .rx_queue_intr_enable = mlx5_rx_intr_enable, 17620887aa7fSNélio Laranjeiro .rx_queue_intr_disable = mlx5_rx_intr_disable, 1763d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 17648a6a09f8SDekel Peled .get_module_info = mlx5_get_module_info, 17658a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 1766b6b3bf86SOri Kam .hairpin_cap_get = mlx5_hairpin_cap_get, 1767d740eb50SSuanming Mou .mtr_ops_get = mlx5_flow_meter_ops_get, 17680887aa7fSNélio Laranjeiro }; 17690887aa7fSNélio Laranjeiro 
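/*
 * Three eth_dev_ops tables are defined above: mlx5_dev_ops for the primary
 * process, mlx5_dev_sec_ops for secondary processes (control path only, no
 * queue setup), and mlx5_dev_ops_isolate for flow isolated mode.  The actual
 * table selection happens in the device spawn and rte_flow isolate paths
 * outside this excerpt; the sketch below only illustrates the pattern, and
 * the helper name pick_mlx5_dev_ops() is hypothetical:
 *
 *	static const struct eth_dev_ops *
 *	pick_mlx5_dev_ops(bool secondary, bool isolated)
 *	{
 *		if (secondary)
 *			return &mlx5_dev_sec_ops;
 *		return isolated ? &mlx5_dev_ops_isolate : &mlx5_dev_ops;
 *	}
 *
 * e.g. dev->dev_ops = pick_mlx5_dev_ops(rte_eal_process_type() ==
 *				RTE_PROC_SECONDARY, priv->isolated);
 */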
1770e72dd09bSNélio Laranjeiro /** 1771e72dd09bSNélio Laranjeiro * Verify and store value for device argument. 1772e72dd09bSNélio Laranjeiro * 1773e72dd09bSNélio Laranjeiro * @param[in] key 1774e72dd09bSNélio Laranjeiro * Key argument to verify. 1775e72dd09bSNélio Laranjeiro * @param[in] val 1776e72dd09bSNélio Laranjeiro * Value associated with key. 1777e72dd09bSNélio Laranjeiro * @param opaque 1778e72dd09bSNélio Laranjeiro * User data. 1779e72dd09bSNélio Laranjeiro * 1780e72dd09bSNélio Laranjeiro * @return 1781a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1782e72dd09bSNélio Laranjeiro */ 1783e72dd09bSNélio Laranjeiro static int 1784e72dd09bSNélio Laranjeiro mlx5_args_check(const char *key, const char *val, void *opaque) 1785e72dd09bSNélio Laranjeiro { 17867fe24446SShahaf Shuler struct mlx5_dev_config *config = opaque; 178799c12dccSNélio Laranjeiro unsigned long tmp; 1788e72dd09bSNélio Laranjeiro 17896de569f5SAdrien Mazarguil /* No-op, port representors are processed in mlx5_dev_spawn(). */ 17906de569f5SAdrien Mazarguil if (!strcmp(MLX5_REPRESENTOR, key)) 17916de569f5SAdrien Mazarguil return 0; 179299c12dccSNélio Laranjeiro errno = 0; 179399c12dccSNélio Laranjeiro tmp = strtoul(val, NULL, 0); 179499c12dccSNélio Laranjeiro if (errno) { 1795a6d83b6aSNélio Laranjeiro rte_errno = errno; 1796a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); 1797a6d83b6aSNélio Laranjeiro return -rte_errno; 179899c12dccSNélio Laranjeiro } 179999c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 18007fe24446SShahaf Shuler config->cqe_comp = !!tmp; 1801bc91e8dbSYongseok Koh } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { 1802bc91e8dbSYongseok Koh config->cqe_pad = !!tmp; 180378c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 180478c7a16dSYongseok Koh config->hw_padding = !!tmp; 18057d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 18067d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 18077d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 18087d6bf6b8SYongseok Koh config->mprq.stride_num_n = tmp; 1809ecb16045SAlexander Kozyrev } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) { 1810ecb16045SAlexander Kozyrev config->mprq.stride_size_n = tmp; 18117d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 18127d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 18137d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 18147d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 18152a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 1816505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1817505f1fe4SViacheslav Ovsiienko " converted to txq_inline_max", key); 1818505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1819505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { 1820505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1821505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { 1822505f1fe4SViacheslav Ovsiienko config->txq_inline_min = tmp; 1823505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { 1824505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 18252a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 18267fe24446SShahaf Shuler config->txqs_inline = tmp; 182709d8b416SYongseok Koh } else if 
(strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 1828a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 1829230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 1830f9de8718SShahaf Shuler config->mps = !!tmp; 18318409a285SViacheslav Ovsiienko } else if (strcmp(MLX5_TX_DB_NC, key) == 0) { 1832f078ceb6SViacheslav Ovsiienko if (tmp != MLX5_TXDB_CACHED && 1833f078ceb6SViacheslav Ovsiienko tmp != MLX5_TXDB_NCACHED && 1834f078ceb6SViacheslav Ovsiienko tmp != MLX5_TXDB_HEURISTIC) { 1835f078ceb6SViacheslav Ovsiienko DRV_LOG(ERR, "invalid Tx doorbell " 1836f078ceb6SViacheslav Ovsiienko "mapping parameter"); 1837f078ceb6SViacheslav Ovsiienko rte_errno = EINVAL; 1838f078ceb6SViacheslav Ovsiienko return -rte_errno; 1839f078ceb6SViacheslav Ovsiienko } 1840f078ceb6SViacheslav Ovsiienko config->dbnc = tmp; 18416ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 1842a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 18436ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 1844505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1845505f1fe4SViacheslav Ovsiienko " converted to txq_inline_mpw", key); 1846505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 18475644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 1848a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 18495644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { 18507fe24446SShahaf Shuler config->rx_vec_en = !!tmp; 185178a54648SXueming Li } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { 185278a54648SXueming Li config->l3_vxlan_en = !!tmp; 1853db209cc3SNélio Laranjeiro } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { 1854db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 1855e2b4925eSOri Kam } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { 1856e2b4925eSOri Kam config->dv_esw_en = !!tmp; 185751e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 185851e72d38SOri Kam config->dv_flow_en = !!tmp; 18592d241515SViacheslav Ovsiienko } else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) { 18602d241515SViacheslav Ovsiienko if (tmp != MLX5_XMETA_MODE_LEGACY && 18612d241515SViacheslav Ovsiienko tmp != MLX5_XMETA_MODE_META16 && 18622d241515SViacheslav Ovsiienko tmp != MLX5_XMETA_MODE_META32) { 1863f078ceb6SViacheslav Ovsiienko DRV_LOG(ERR, "invalid extensive " 18642d241515SViacheslav Ovsiienko "metadata parameter"); 18652d241515SViacheslav Ovsiienko rte_errno = EINVAL; 18662d241515SViacheslav Ovsiienko return -rte_errno; 18672d241515SViacheslav Ovsiienko } 18682d241515SViacheslav Ovsiienko config->dv_xmeta_en = tmp; 1869dceb5029SYongseok Koh } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { 1870dceb5029SYongseok Koh config->mr_ext_memseg_en = !!tmp; 1871066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { 1872066cfecdSMatan Azrad config->max_dump_files_num = tmp; 187321bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { 187421bb6c7eSDekel Peled config->lro.timeout = tmp; 1875d768f324SMatan Azrad } else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) { 1876d768f324SMatan Azrad DRV_LOG(DEBUG, "class argument is %s.", val); 18771ad9a3d0SBing Zhao } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) { 18781ad9a3d0SBing Zhao config->log_hp_size = tmp; 1879a1da6f62SSuanming Mou } else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) { 1880a1da6f62SSuanming Mou if (tmp != MLX5_RCM_NONE && 
1881a1da6f62SSuanming Mou tmp != MLX5_RCM_LIGHT && 1882a1da6f62SSuanming Mou tmp != MLX5_RCM_AGGR) { 1883a1da6f62SSuanming Mou DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val); 1884a1da6f62SSuanming Mou rte_errno = EINVAL; 1885a1da6f62SSuanming Mou return -rte_errno; 1886a1da6f62SSuanming Mou } 1887a1da6f62SSuanming Mou config->reclaim_mode = tmp; 188899c12dccSNélio Laranjeiro } else { 1889a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: unknown parameter", key); 1890a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1891a6d83b6aSNélio Laranjeiro return -rte_errno; 1892e72dd09bSNélio Laranjeiro } 189399c12dccSNélio Laranjeiro return 0; 189499c12dccSNélio Laranjeiro } 1895e72dd09bSNélio Laranjeiro 1896e72dd09bSNélio Laranjeiro /** 1897e72dd09bSNélio Laranjeiro * Parse device parameters. 1898e72dd09bSNélio Laranjeiro * 18997fe24446SShahaf Shuler * @param config 19007fe24446SShahaf Shuler * Pointer to device configuration structure. 1901e72dd09bSNélio Laranjeiro * @param devargs 1902e72dd09bSNélio Laranjeiro * Device arguments structure. 1903e72dd09bSNélio Laranjeiro * 1904e72dd09bSNélio Laranjeiro * @return 1905a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1906e72dd09bSNélio Laranjeiro */ 1907e72dd09bSNélio Laranjeiro static int 19087fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1909e72dd09bSNélio Laranjeiro { 1910e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 191199c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1912bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 191378c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 19147d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 19157d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 1916ecb16045SAlexander Kozyrev MLX5_RX_MPRQ_LOG_STRIDE_SIZE, 19177d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 19187d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 19192a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1920505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1921505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1922505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 19232a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 192409d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1925230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 19266ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 19276ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 19288409a285SViacheslav Ovsiienko MLX5_TX_DB_NC, 19295644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 19305644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 193178a54648SXueming Li MLX5_L3_VXLAN_EN, 1932db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1933e2b4925eSOri Kam MLX5_DV_ESW_EN, 193451e72d38SOri Kam MLX5_DV_FLOW_EN, 19352d241515SViacheslav Ovsiienko MLX5_DV_XMETA_EN, 1936dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 19376de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1938066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 193921bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1940d768f324SMatan Azrad MLX5_CLASS_ARG_NAME, 19411ad9a3d0SBing Zhao MLX5_HP_BUF_SIZE, 1942a1da6f62SSuanming Mou MLX5_RECLAIM_MEM, 1943e72dd09bSNélio Laranjeiro NULL, 1944e72dd09bSNélio Laranjeiro }; 1945e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1946e72dd09bSNélio Laranjeiro int ret = 0; 1947e72dd09bSNélio Laranjeiro int i; 1948e72dd09bSNélio Laranjeiro 1949e72dd09bSNélio Laranjeiro if (devargs == NULL) 1950e72dd09bSNélio Laranjeiro return 0; 1951e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. 
*/ 1952e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 195315b0ea00SMatan Azrad if (kvlist == NULL) { 195415b0ea00SMatan Azrad rte_errno = EINVAL; 195515b0ea00SMatan Azrad return -rte_errno; 195615b0ea00SMatan Azrad } 1957e72dd09bSNélio Laranjeiro /* Process parameters. */ 1958e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1959e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1960e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 19617fe24446SShahaf Shuler mlx5_args_check, config); 1962a6d83b6aSNélio Laranjeiro if (ret) { 1963a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1964a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1965a6d83b6aSNélio Laranjeiro return -rte_errno; 1966e72dd09bSNélio Laranjeiro } 1967e72dd09bSNélio Laranjeiro } 1968a67323e4SShahaf Shuler } 1969e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1970e72dd09bSNélio Laranjeiro return 0; 1971e72dd09bSNélio Laranjeiro } 1972e72dd09bSNélio Laranjeiro 1973fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver; 1974771fa900SAdrien Mazarguil 19757be600c8SYongseok Koh /** 19767be600c8SYongseok Koh * PMD global initialization. 19777be600c8SYongseok Koh * 19787be600c8SYongseok Koh * Independent from individual device, this function initializes global 19797be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 19807be600c8SYongseok Koh * Hence, each initialization is called once per a process. 19817be600c8SYongseok Koh * 19827be600c8SYongseok Koh * @return 19837be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 19847be600c8SYongseok Koh */ 19857be600c8SYongseok Koh static int 19867be600c8SYongseok Koh mlx5_init_once(void) 19877be600c8SYongseok Koh { 19887be600c8SYongseok Koh struct mlx5_shared_data *sd; 19897be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 1990edf73dd3SAnatoly Burakov int ret = 0; 19917be600c8SYongseok Koh 19927be600c8SYongseok Koh if (mlx5_init_shared_data()) 19937be600c8SYongseok Koh return -rte_errno; 19947be600c8SYongseok Koh sd = mlx5_shared_data; 19958e46d4e1SAlexander Kozyrev MLX5_ASSERT(sd); 19967be600c8SYongseok Koh rte_spinlock_lock(&sd->lock); 19977be600c8SYongseok Koh switch (rte_eal_process_type()) { 19987be600c8SYongseok Koh case RTE_PROC_PRIMARY: 19997be600c8SYongseok Koh if (sd->init_done) 20007be600c8SYongseok Koh break; 20017be600c8SYongseok Koh LIST_INIT(&sd->mem_event_cb_list); 20027be600c8SYongseok Koh rte_rwlock_init(&sd->mem_event_rwlock); 20037be600c8SYongseok Koh rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", 20047be600c8SYongseok Koh mlx5_mr_mem_event_cb, NULL); 2005a4de9586SVu Pham ret = mlx5_mp_init_primary(MLX5_MP_NAME, 2006a4de9586SVu Pham mlx5_mp_primary_handle); 2007edf73dd3SAnatoly Burakov if (ret) 2008edf73dd3SAnatoly Burakov goto out; 20097be600c8SYongseok Koh sd->init_done = true; 20107be600c8SYongseok Koh break; 20117be600c8SYongseok Koh case RTE_PROC_SECONDARY: 20127be600c8SYongseok Koh if (ld->init_done) 20137be600c8SYongseok Koh break; 2014a4de9586SVu Pham ret = mlx5_mp_init_secondary(MLX5_MP_NAME, 2015a4de9586SVu Pham mlx5_mp_secondary_handle); 2016edf73dd3SAnatoly Burakov if (ret) 2017edf73dd3SAnatoly Burakov goto out; 20187be600c8SYongseok Koh ++sd->secondary_cnt; 20197be600c8SYongseok Koh ld->init_done = true; 20207be600c8SYongseok Koh break; 20217be600c8SYongseok Koh default: 20227be600c8SYongseok Koh break; 20237be600c8SYongseok Koh } 2024edf73dd3SAnatoly Burakov out: 
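	/* Success and failure paths both drop the shared-data lock taken
	 * above; ret carries the multi-process IPC initialization status. */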
20257be600c8SYongseok Koh rte_spinlock_unlock(&sd->lock); 2026edf73dd3SAnatoly Burakov return ret; 20277be600c8SYongseok Koh } 20287be600c8SYongseok Koh 20297be600c8SYongseok Koh /** 203038b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE 203138b4b397SViacheslav Ovsiienko * while sending packets. 203238b4b397SViacheslav Ovsiienko * 203338b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 203438b4b397SViacheslav Ovsiienko * key is specified in devargs 203538b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 203638b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 2037ee76bddcSThomas Monjalon * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx 203838b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs 203938b4b397SViacheslav Ovsiienko * 204038b4b397SViacheslav Ovsiienko * @param spawn 204138b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 204238b4b397SViacheslav Ovsiienko * @param config 204338b4b397SViacheslav Ovsiienko * Device configuration parameters. 204438b4b397SViacheslav Ovsiienko */ 204538b4b397SViacheslav Ovsiienko static void 204638b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, 204738b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config) 204838b4b397SViacheslav Ovsiienko { 204938b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) { 205038b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. */ 205138b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 205238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 205338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 205438b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 205538b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 205638b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 205738b4b397SViacheslav Ovsiienko "txq_inline_mix aligned to minimal" 205838b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 205938b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 206038b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 206138b4b397SViacheslav Ovsiienko } 206238b4b397SViacheslav Ovsiienko break; 206338b4b397SViacheslav Ovsiienko } 206438b4b397SViacheslav Ovsiienko goto exit; 206538b4b397SViacheslav Ovsiienko } 206638b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 206738b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 206838b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 206938b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 207038b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 207138b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 207238b4b397SViacheslav Ovsiienko goto exit; 207338b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 207438b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
*/ 207538b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 207638b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 207738b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 207838b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 207938b4b397SViacheslav Ovsiienko goto exit; 208038b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 208138b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 208238b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 208338b4b397SViacheslav Ovsiienko break; 208438b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 208538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 208638b4b397SViacheslav Ovsiienko config->txq_inline_min = 208738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 208838b4b397SViacheslav Ovsiienko goto exit; 208938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 209038b4b397SViacheslav Ovsiienko config->txq_inline_min = 209138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 209238b4b397SViacheslav Ovsiienko goto exit; 209338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 209438b4b397SViacheslav Ovsiienko config->txq_inline_min = 209538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 209638b4b397SViacheslav Ovsiienko goto exit; 209738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 209838b4b397SViacheslav Ovsiienko config->txq_inline_min = 209938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 210038b4b397SViacheslav Ovsiienko goto exit; 210138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 210238b4b397SViacheslav Ovsiienko config->txq_inline_min = 210338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 210438b4b397SViacheslav Ovsiienko goto exit; 210538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 210638b4b397SViacheslav Ovsiienko config->txq_inline_min = 210738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 210838b4b397SViacheslav Ovsiienko goto exit; 210938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 211038b4b397SViacheslav Ovsiienko config->txq_inline_min = 211138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 211238b4b397SViacheslav Ovsiienko goto exit; 211338b4b397SViacheslav Ovsiienko } 211438b4b397SViacheslav Ovsiienko } 211538b4b397SViacheslav Ovsiienko } 211638b4b397SViacheslav Ovsiienko /* 211738b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 211838b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 211938b4b397SViacheslav Ovsiienko * to determine old NICs. 
212038b4b397SViacheslav Ovsiienko 	 */
212138b4b397SViacheslav Ovsiienko 	switch (spawn->pci_dev->id.device_id) {
212238b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
212338b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
212438b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
212538b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2126614de6c8SViacheslav Ovsiienko 		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
212738b4b397SViacheslav Ovsiienko 		config->hw_vlan_insert = 0;
212838b4b397SViacheslav Ovsiienko 		break;
212938b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
213038b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
213138b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
213238b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
213338b4b397SViacheslav Ovsiienko 		/*
213438b4b397SViacheslav Ovsiienko 		 * These NICs support VLAN insertion from WQE and
213538b4b397SViacheslav Ovsiienko 		 * report the wqe_vlan_insert flag, but the feature
213638b4b397SViacheslav Ovsiienko 		 * is buggy and may break PFC, so disable it.
213738b4b397SViacheslav Ovsiienko 		 */
213838b4b397SViacheslav Ovsiienko 		config->hw_vlan_insert = 0;
213920215627SDavid Christensen 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
214038b4b397SViacheslav Ovsiienko 		break;
214138b4b397SViacheslav Ovsiienko 	default:
214238b4b397SViacheslav Ovsiienko 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
214338b4b397SViacheslav Ovsiienko 		break;
214438b4b397SViacheslav Ovsiienko 	}
214538b4b397SViacheslav Ovsiienko exit:
214638b4b397SViacheslav Ovsiienko 	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
214738b4b397SViacheslav Ovsiienko }
214838b4b397SViacheslav Ovsiienko 
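/*
 * The value chosen above is one of the MLX5_INLINE_HSIZE_* levels (NONE,
 * L2, L3, L4 or the INNER_* variants), i.e. how many bytes of packet
 * headers must be copied into the WQE by the Tx routines.  The devargs
 * key txq_inline_min still takes priority over this DevX/PCI-ID deduction.
 * Illustrative invocation only (the PCI address is an example):
 *
 *	testpmd -w 0000:03:00.0,txq_inline_min=18 -- -i
 *
 * where 18 bytes corresponds to the L2 header size assumed for
 * ConnectX-4/4 Lx NICs.
 */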
215439139371SViacheslav Ovsiienko */
215539139371SViacheslav Ovsiienko static void
215639139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev)
215739139371SViacheslav Ovsiienko {
215839139371SViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private;
21596e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh;
216039139371SViacheslav Ovsiienko uint32_t meta, mark, reg_c0;
216139139371SViacheslav Ovsiienko 
216239139371SViacheslav Ovsiienko reg_c0 = ~priv->vport_meta_mask;
216339139371SViacheslav Ovsiienko switch (priv->config.dv_xmeta_en) {
216439139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_LEGACY:
216539139371SViacheslav Ovsiienko meta = UINT32_MAX;
216639139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK;
216739139371SViacheslav Ovsiienko break;
216839139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META16:
216939139371SViacheslav Ovsiienko meta = reg_c0 >> rte_bsf32(reg_c0);
217039139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK;
217139139371SViacheslav Ovsiienko break;
217239139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META32:
217339139371SViacheslav Ovsiienko meta = UINT32_MAX;
217439139371SViacheslav Ovsiienko mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
217539139371SViacheslav Ovsiienko break;
217639139371SViacheslav Ovsiienko default:
217739139371SViacheslav Ovsiienko meta = 0;
217839139371SViacheslav Ovsiienko mark = 0;
21798e46d4e1SAlexander Kozyrev MLX5_ASSERT(false);
218039139371SViacheslav Ovsiienko break;
218139139371SViacheslav Ovsiienko }
218239139371SViacheslav Ovsiienko if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
218339139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
218439139371SViacheslav Ovsiienko sh->dv_mark_mask, mark);
218539139371SViacheslav Ovsiienko else
218639139371SViacheslav Ovsiienko sh->dv_mark_mask = mark;
218739139371SViacheslav Ovsiienko if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
218839139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
218939139371SViacheslav Ovsiienko sh->dv_meta_mask, meta);
219039139371SViacheslav Ovsiienko else
219139139371SViacheslav Ovsiienko sh->dv_meta_mask = meta;
219239139371SViacheslav Ovsiienko if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
219339139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
219439139371SViacheslav Ovsiienko sh->dv_regc0_mask, reg_c0);
219539139371SViacheslav Ovsiienko else
219639139371SViacheslav Ovsiienko sh->dv_regc0_mask = reg_c0;
219739139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en);
219839139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
219939139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
220039139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
220139139371SViacheslav Ovsiienko }
220239139371SViacheslav Ovsiienko 
220339139371SViacheslav Ovsiienko /**
220421cae858SDekel Peled * Allocate page of door-bells and register it using DevX API.
220521cae858SDekel Peled *
220621cae858SDekel Peled * @param [in] dev
220721cae858SDekel Peled * Pointer to Ethernet device.
220821cae858SDekel Peled *
220921cae858SDekel Peled * @return
221021cae858SDekel Peled * Pointer to new page on success, NULL otherwise.
221121cae858SDekel Peled */ 221221cae858SDekel Peled static struct mlx5_devx_dbr_page * 221321cae858SDekel Peled mlx5_alloc_dbr_page(struct rte_eth_dev *dev) 221421cae858SDekel Peled { 221521cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 221621cae858SDekel Peled struct mlx5_devx_dbr_page *page; 221721cae858SDekel Peled 221821cae858SDekel Peled /* Allocate space for door-bell page and management data. */ 221921cae858SDekel Peled page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page), 222021cae858SDekel Peled RTE_CACHE_LINE_SIZE, dev->device->numa_node); 222121cae858SDekel Peled if (!page) { 222221cae858SDekel Peled DRV_LOG(ERR, "port %u cannot allocate dbr page", 222321cae858SDekel Peled dev->data->port_id); 222421cae858SDekel Peled return NULL; 222521cae858SDekel Peled } 222621cae858SDekel Peled /* Register allocated memory. */ 222721cae858SDekel Peled page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs, 222821cae858SDekel Peled MLX5_DBR_PAGE_SIZE, 0); 222921cae858SDekel Peled if (!page->umem) { 223021cae858SDekel Peled DRV_LOG(ERR, "port %u cannot umem reg dbr page", 223121cae858SDekel Peled dev->data->port_id); 223221cae858SDekel Peled rte_free(page); 223321cae858SDekel Peled return NULL; 223421cae858SDekel Peled } 223521cae858SDekel Peled return page; 223621cae858SDekel Peled } 223721cae858SDekel Peled 223821cae858SDekel Peled /** 223921cae858SDekel Peled * Find the next available door-bell, allocate new page if needed. 224021cae858SDekel Peled * 224121cae858SDekel Peled * @param [in] dev 224221cae858SDekel Peled * Pointer to Ethernet device. 224321cae858SDekel Peled * @param [out] dbr_page 224421cae858SDekel Peled * Door-bell page containing the page data. 224521cae858SDekel Peled * 224621cae858SDekel Peled * @return 224721cae858SDekel Peled * Door-bell address offset on success, a negative error value otherwise. 224821cae858SDekel Peled */ 224921cae858SDekel Peled int64_t 225021cae858SDekel Peled mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) 225121cae858SDekel Peled { 225221cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 225321cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 225421cae858SDekel Peled uint32_t i, j; 225521cae858SDekel Peled 225621cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 225721cae858SDekel Peled if (page->dbr_count < MLX5_DBR_PER_PAGE) 225821cae858SDekel Peled break; 225921cae858SDekel Peled if (!page) { /* No page with free door-bell exists. */ 226021cae858SDekel Peled page = mlx5_alloc_dbr_page(dev); 226121cae858SDekel Peled if (!page) /* Failed to allocate new page. */ 226221cae858SDekel Peled return (-1); 226321cae858SDekel Peled LIST_INSERT_HEAD(&priv->dbrpgs, page, next); 226421cae858SDekel Peled } 226521cae858SDekel Peled /* Loop to find bitmap part with clear bit. */ 226621cae858SDekel Peled for (i = 0; 226721cae858SDekel Peled i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX; 226821cae858SDekel Peled i++) 226921cae858SDekel Peled ; /* Empty. */ 227021cae858SDekel Peled /* Find the first clear bit. 
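 * Worked example added for illustration (the values are assumed and not
 * taken from any real run): if dbr_bitmap[0] == 0x7, the three lowest
 * door-bells of the page are already in use, rte_bsf64(~dbr_bitmap[0])
 * yields j == 3, and the offset returned below is
 * ((0 * 64) + 3) * sizeof(uint64_t) == 24 bytes into the page.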
*/ 22719b080425SAsaf Penso MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE); 227221cae858SDekel Peled j = rte_bsf64(~page->dbr_bitmap[i]); 227325a59a30SBing Zhao page->dbr_bitmap[i] |= (UINT64_C(1) << j); 227421cae858SDekel Peled page->dbr_count++; 227521cae858SDekel Peled *dbr_page = page; 227621cae858SDekel Peled return (((i * 64) + j) * sizeof(uint64_t)); 227721cae858SDekel Peled } 227821cae858SDekel Peled 227921cae858SDekel Peled /** 228021cae858SDekel Peled * Release a door-bell record. 228121cae858SDekel Peled * 228221cae858SDekel Peled * @param [in] dev 228321cae858SDekel Peled * Pointer to Ethernet device. 228421cae858SDekel Peled * @param [in] umem_id 228521cae858SDekel Peled * UMEM ID of page containing the door-bell record to release. 228621cae858SDekel Peled * @param [in] offset 228721cae858SDekel Peled * Offset of door-bell record in page. 228821cae858SDekel Peled * 228921cae858SDekel Peled * @return 229021cae858SDekel Peled * 0 on success, a negative error value otherwise. 229121cae858SDekel Peled */ 229221cae858SDekel Peled int32_t 229321cae858SDekel Peled mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) 229421cae858SDekel Peled { 229521cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 229621cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 229721cae858SDekel Peled int ret = 0; 229821cae858SDekel Peled 229921cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 230021cae858SDekel Peled /* Find the page this address belongs to. */ 230121cae858SDekel Peled if (page->umem->umem_id == umem_id) 230221cae858SDekel Peled break; 230321cae858SDekel Peled if (!page) 230421cae858SDekel Peled return -EINVAL; 230521cae858SDekel Peled page->dbr_count--; 230621cae858SDekel Peled if (!page->dbr_count) { 230721cae858SDekel Peled /* Page not used, free it and remove from list. */ 230821cae858SDekel Peled LIST_REMOVE(page, next); 230921cae858SDekel Peled if (page->umem) 231021cae858SDekel Peled ret = -mlx5_glue->devx_umem_dereg(page->umem); 231121cae858SDekel Peled rte_free(page); 231221cae858SDekel Peled } else { 231321cae858SDekel Peled /* Mark in bitmap that this door-bell is not in use. */ 2314a88209b0SDekel Peled offset /= MLX5_DBR_SIZE; 231521cae858SDekel Peled int i = offset / 64; 231621cae858SDekel Peled int j = offset % 64; 231721cae858SDekel Peled 231825a59a30SBing Zhao page->dbr_bitmap[i] &= ~(UINT64_C(1) << j); 231921cae858SDekel Peled } 232021cae858SDekel Peled return ret; 232121cae858SDekel Peled } 232221cae858SDekel Peled 2323efa79e68SOri Kam int 2324efa79e68SOri Kam rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) 2325efa79e68SOri Kam { 2326efa79e68SOri Kam static const char *const dynf_names[] = { 2327efa79e68SOri Kam RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, 2328efa79e68SOri Kam RTE_MBUF_DYNFLAG_METADATA_NAME 2329efa79e68SOri Kam }; 2330efa79e68SOri Kam unsigned int i; 2331efa79e68SOri Kam 2332efa79e68SOri Kam if (n < RTE_DIM(dynf_names)) 2333efa79e68SOri Kam return -ENOMEM; 2334efa79e68SOri Kam for (i = 0; i < RTE_DIM(dynf_names); i++) { 2335efa79e68SOri Kam if (names[i] == NULL) 2336efa79e68SOri Kam return -EINVAL; 2337efa79e68SOri Kam strcpy(names[i], dynf_names[i]); 2338efa79e68SOri Kam } 2339efa79e68SOri Kam return RTE_DIM(dynf_names); 2340efa79e68SOri Kam } 2341efa79e68SOri Kam 234221cae858SDekel Peled /** 234392d5dd48SViacheslav Ovsiienko * Check sibling device configurations. 
234492d5dd48SViacheslav Ovsiienko * 234592d5dd48SViacheslav Ovsiienko * Sibling devices sharing the Infiniband device context 234692d5dd48SViacheslav Ovsiienko * should have compatible configurations. This regards 234792d5dd48SViacheslav Ovsiienko * representors and bonding slaves. 234892d5dd48SViacheslav Ovsiienko * 234992d5dd48SViacheslav Ovsiienko * @param priv 235092d5dd48SViacheslav Ovsiienko * Private device descriptor. 235192d5dd48SViacheslav Ovsiienko * @param config 235292d5dd48SViacheslav Ovsiienko * Configuration of the device is going to be created. 235392d5dd48SViacheslav Ovsiienko * 235492d5dd48SViacheslav Ovsiienko * @return 235592d5dd48SViacheslav Ovsiienko * 0 on success, EINVAL otherwise 235692d5dd48SViacheslav Ovsiienko */ 235792d5dd48SViacheslav Ovsiienko static int 235892d5dd48SViacheslav Ovsiienko mlx5_dev_check_sibling_config(struct mlx5_priv *priv, 235992d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *config) 236092d5dd48SViacheslav Ovsiienko { 23616e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 236292d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *sh_conf = NULL; 236392d5dd48SViacheslav Ovsiienko uint16_t port_id; 236492d5dd48SViacheslav Ovsiienko 23658e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 236692d5dd48SViacheslav Ovsiienko /* Nothing to compare for the single/first device. */ 236792d5dd48SViacheslav Ovsiienko if (sh->refcnt == 1) 236892d5dd48SViacheslav Ovsiienko return 0; 236992d5dd48SViacheslav Ovsiienko /* Find the device with shared context. */ 2370fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 237192d5dd48SViacheslav Ovsiienko struct mlx5_priv *opriv = 237292d5dd48SViacheslav Ovsiienko rte_eth_devices[port_id].data->dev_private; 237392d5dd48SViacheslav Ovsiienko 237492d5dd48SViacheslav Ovsiienko if (opriv && opriv != priv && opriv->sh == sh) { 237592d5dd48SViacheslav Ovsiienko sh_conf = &opriv->config; 237692d5dd48SViacheslav Ovsiienko break; 237792d5dd48SViacheslav Ovsiienko } 237892d5dd48SViacheslav Ovsiienko } 237992d5dd48SViacheslav Ovsiienko if (!sh_conf) 238092d5dd48SViacheslav Ovsiienko return 0; 238192d5dd48SViacheslav Ovsiienko if (sh_conf->dv_flow_en ^ config->dv_flow_en) { 238292d5dd48SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch" 238392d5dd48SViacheslav Ovsiienko " for shared %s context", sh->ibdev_name); 238492d5dd48SViacheslav Ovsiienko rte_errno = EINVAL; 238592d5dd48SViacheslav Ovsiienko return rte_errno; 238692d5dd48SViacheslav Ovsiienko } 23872d241515SViacheslav Ovsiienko if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) { 23882d241515SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch" 23892d241515SViacheslav Ovsiienko " for shared %s context", sh->ibdev_name); 23902d241515SViacheslav Ovsiienko rte_errno = EINVAL; 23912d241515SViacheslav Ovsiienko return rte_errno; 23922d241515SViacheslav Ovsiienko } 239392d5dd48SViacheslav Ovsiienko return 0; 239492d5dd48SViacheslav Ovsiienko } 239592d5dd48SViacheslav Ovsiienko /** 2396f38c5457SAdrien Mazarguil * Spawn an Ethernet device from Verbs information. 2397771fa900SAdrien Mazarguil * 2398f38c5457SAdrien Mazarguil * @param dpdk_dev 2399f38c5457SAdrien Mazarguil * Backing DPDK device. 2400ad74bc61SViacheslav Ovsiienko * @param spawn 2401ad74bc61SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 2402f87bfa8eSYongseok Koh * @param config 2403f87bfa8eSYongseok Koh * Device configuration parameters. 
2404771fa900SAdrien Mazarguil * 2405771fa900SAdrien Mazarguil * @return 2406f38c5457SAdrien Mazarguil * A valid Ethernet device object on success, NULL otherwise and rte_errno 2407206254b7SOphir Munk * is set. The following errors are defined: 24086de569f5SAdrien Mazarguil * 24096de569f5SAdrien Mazarguil * EBUSY: device is not supposed to be spawned. 2410206254b7SOphir Munk * EEXIST: device is already spawned 2411771fa900SAdrien Mazarguil */ 2412f38c5457SAdrien Mazarguil static struct rte_eth_dev * 2413f38c5457SAdrien Mazarguil mlx5_dev_spawn(struct rte_device *dpdk_dev, 2414ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data *spawn, 2415ad74bc61SViacheslav Ovsiienko struct mlx5_dev_config config) 2416771fa900SAdrien Mazarguil { 2417ad74bc61SViacheslav Ovsiienko const struct mlx5_switch_info *switch_info = &spawn->info; 24186e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = NULL; 241968128934SAdrien Mazarguil struct ibv_port_attr port_attr; 24206057a10bSAdrien Mazarguil struct mlx5dv_context dv_attr = { .comp_mask = 0 }; 24219083982cSAdrien Mazarguil struct rte_eth_dev *eth_dev = NULL; 2422dbeba4cfSThomas Monjalon struct mlx5_priv *priv = NULL; 2423771fa900SAdrien Mazarguil int err = 0; 242478c7a16dSYongseok Koh unsigned int hw_padding = 0; 2425e192ef80SYaacov Hazan unsigned int mps; 2426523f5a74SYongseok Koh unsigned int cqe_comp; 2427bc91e8dbSYongseok Koh unsigned int cqe_pad = 0; 2428772d3435SXueming Li unsigned int tunnel_en = 0; 24291f106da2SMatan Azrad unsigned int mpls_en = 0; 24305f8ba81cSXueming Li unsigned int swp = 0; 24317d6bf6b8SYongseok Koh unsigned int mprq = 0; 24327d6bf6b8SYongseok Koh unsigned int mprq_min_stride_size_n = 0; 24337d6bf6b8SYongseok Koh unsigned int mprq_max_stride_size_n = 0; 24347d6bf6b8SYongseok Koh unsigned int mprq_min_stride_num_n = 0; 24357d6bf6b8SYongseok Koh unsigned int mprq_max_stride_num_n = 0; 24366d13ea8eSOlivier Matz struct rte_ether_addr mac; 243768128934SAdrien Mazarguil char name[RTE_ETH_NAME_MAX_LEN]; 24382b730263SAdrien Mazarguil int own_domain_id = 0; 2439206254b7SOphir Munk uint16_t port_id; 24402b730263SAdrien Mazarguil unsigned int i; 2441d5c06b1bSViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR_DEVX_PORT 244239139371SViacheslav Ovsiienko struct mlx5dv_devx_port devx_port = { .comp_mask = 0 }; 2443d5c06b1bSViacheslav Ovsiienko #endif 2444771fa900SAdrien Mazarguil 24456de569f5SAdrien Mazarguil /* Determine if this port representor is supposed to be spawned. */ 24466de569f5SAdrien Mazarguil if (switch_info->representor && dpdk_dev->devargs) { 24476de569f5SAdrien Mazarguil struct rte_eth_devargs eth_da; 24486de569f5SAdrien Mazarguil 24496de569f5SAdrien Mazarguil err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); 24506de569f5SAdrien Mazarguil if (err) { 24516de569f5SAdrien Mazarguil rte_errno = -err; 24526de569f5SAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 24536de569f5SAdrien Mazarguil strerror(rte_errno)); 24546de569f5SAdrien Mazarguil return NULL; 24556de569f5SAdrien Mazarguil } 24566de569f5SAdrien Mazarguil for (i = 0; i < eth_da.nb_representor_ports; ++i) 24576de569f5SAdrien Mazarguil if (eth_da.representor_ports[i] == 24586de569f5SAdrien Mazarguil (uint16_t)switch_info->port_name) 24596de569f5SAdrien Mazarguil break; 24606de569f5SAdrien Mazarguil if (i == eth_da.nb_representor_ports) { 24616de569f5SAdrien Mazarguil rte_errno = EBUSY; 24626de569f5SAdrien Mazarguil return NULL; 24636de569f5SAdrien Mazarguil } 24646de569f5SAdrien Mazarguil } 2465206254b7SOphir Munk /* Build device name. 
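 * For illustration only (hypothetical PCI address, not from the original
 * sources): a single device keeps the backing name, e.g. "0000:03:00.0",
 * its representor with port_name 2 becomes "0000:03:00.0_representor_2",
 * and the bonding branches below additionally embed spawn->ibv_dev->name
 * into the resulting name.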
*/ 246610dadfcbSViacheslav Ovsiienko if (spawn->pf_bond < 0) { 246710dadfcbSViacheslav Ovsiienko /* Single device. */ 2468206254b7SOphir Munk if (!switch_info->representor) 246909c9c4d2SThomas Monjalon strlcpy(name, dpdk_dev->name, sizeof(name)); 2470206254b7SOphir Munk else 2471206254b7SOphir Munk snprintf(name, sizeof(name), "%s_representor_%u", 2472206254b7SOphir Munk dpdk_dev->name, switch_info->port_name); 247310dadfcbSViacheslav Ovsiienko } else { 247410dadfcbSViacheslav Ovsiienko /* Bonding device. */ 247510dadfcbSViacheslav Ovsiienko if (!switch_info->representor) 247610dadfcbSViacheslav Ovsiienko snprintf(name, sizeof(name), "%s_%s", 247710dadfcbSViacheslav Ovsiienko dpdk_dev->name, spawn->ibv_dev->name); 247810dadfcbSViacheslav Ovsiienko else 247910dadfcbSViacheslav Ovsiienko snprintf(name, sizeof(name), "%s_%s_representor_%u", 248010dadfcbSViacheslav Ovsiienko dpdk_dev->name, spawn->ibv_dev->name, 248110dadfcbSViacheslav Ovsiienko switch_info->port_name); 248210dadfcbSViacheslav Ovsiienko } 2483206254b7SOphir Munk /* check if the device is already spawned */ 2484206254b7SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 2485206254b7SOphir Munk rte_errno = EEXIST; 2486206254b7SOphir Munk return NULL; 2487206254b7SOphir Munk } 248817e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 248917e19bc4SViacheslav Ovsiienko if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 2490a4de9586SVu Pham struct mlx5_mp_id mp_id; 2491a4de9586SVu Pham 249217e19bc4SViacheslav Ovsiienko eth_dev = rte_eth_dev_attach_secondary(name); 249317e19bc4SViacheslav Ovsiienko if (eth_dev == NULL) { 249417e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "can not attach rte ethdev"); 249517e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 2496f38c5457SAdrien Mazarguil return NULL; 2497771fa900SAdrien Mazarguil } 249817e19bc4SViacheslav Ovsiienko eth_dev->device = dpdk_dev; 249917e19bc4SViacheslav Ovsiienko eth_dev->dev_ops = &mlx5_dev_sec_ops; 2500120dc4a7SYongseok Koh err = mlx5_proc_priv_init(eth_dev); 2501120dc4a7SYongseok Koh if (err) 2502120dc4a7SYongseok Koh return NULL; 2503a4de9586SVu Pham mp_id.port_id = eth_dev->data->port_id; 2504a4de9586SVu Pham strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 250517e19bc4SViacheslav Ovsiienko /* Receive command fd from primary process */ 2506a4de9586SVu Pham err = mlx5_mp_req_verbs_cmd_fd(&mp_id); 250717e19bc4SViacheslav Ovsiienko if (err < 0) 25082786b7bfSSuanming Mou goto err_secondary; 250917e19bc4SViacheslav Ovsiienko /* Remap UAR for Tx queues. */ 2510120dc4a7SYongseok Koh err = mlx5_tx_uar_init_secondary(eth_dev, err); 251117e19bc4SViacheslav Ovsiienko if (err) 25122786b7bfSSuanming Mou goto err_secondary; 251317e19bc4SViacheslav Ovsiienko /* 251417e19bc4SViacheslav Ovsiienko * Ethdev pointer is still required as input since 251517e19bc4SViacheslav Ovsiienko * the primary device is not accessible from the 251617e19bc4SViacheslav Ovsiienko * secondary process. 
251717e19bc4SViacheslav Ovsiienko */
251817e19bc4SViacheslav Ovsiienko eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
251917e19bc4SViacheslav Ovsiienko eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
252017e19bc4SViacheslav Ovsiienko return eth_dev;
25212786b7bfSSuanming Mou err_secondary:
25222786b7bfSSuanming Mou mlx5_dev_close(eth_dev);
25232786b7bfSSuanming Mou return NULL;
2524f5bf91deSMoti Haimovsky }
25258409a285SViacheslav Ovsiienko /*
25268409a285SViacheslav Ovsiienko * Some parameters ("tx_db_nc" in particular) are needed in
25278409a285SViacheslav Ovsiienko * advance to create dv/verbs device context. We process the
25288409a285SViacheslav Ovsiienko * devargs here to get them, and later process devargs again
25298409a285SViacheslav Ovsiienko * to override some hardware settings.
25308409a285SViacheslav Ovsiienko */
25318409a285SViacheslav Ovsiienko err = mlx5_args(&config, dpdk_dev->devargs);
25328409a285SViacheslav Ovsiienko if (err) {
25338409a285SViacheslav Ovsiienko err = rte_errno;
25348409a285SViacheslav Ovsiienko DRV_LOG(ERR, "failed to process device arguments: %s",
25358409a285SViacheslav Ovsiienko strerror(rte_errno));
25368409a285SViacheslav Ovsiienko goto error;
25378409a285SViacheslav Ovsiienko }
25388409a285SViacheslav Ovsiienko sh = mlx5_alloc_shared_ibctx(spawn, &config);
253917e19bc4SViacheslav Ovsiienko if (!sh)
254017e19bc4SViacheslav Ovsiienko return NULL;
254117e19bc4SViacheslav Ovsiienko config.devx = sh->devx;
25423075bd23SDekel Peled #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
25433075bd23SDekel Peled config.dest_tir = 1;
25443075bd23SDekel Peled #endif
25455f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP
25466057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
25475f8ba81cSXueming Li #endif
254843e9d979SShachar Beiser /*
254943e9d979SShachar Beiser * Multi-packet send is supported by ConnectX-4 Lx PF as well
255043e9d979SShachar Beiser * as all ConnectX-5 devices.
255143e9d979SShachar Beiser */ 2552038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 25536057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; 2554038e7251SShahaf Shuler #endif 25557d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 25566057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; 25577d6bf6b8SYongseok Koh #endif 255817e19bc4SViacheslav Ovsiienko mlx5_glue->dv_query_device(sh->ctx, &dv_attr); 25596057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { 25606057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { 2561a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "enhanced MPW is supported"); 256243e9d979SShachar Beiser mps = MLX5_MPW_ENHANCED; 256343e9d979SShachar Beiser } else { 2564a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW is supported"); 2565e589960cSYongseok Koh mps = MLX5_MPW; 2566e589960cSYongseok Koh } 2567e589960cSYongseok Koh } else { 2568a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW isn't supported"); 256943e9d979SShachar Beiser mps = MLX5_MPW_DISABLED; 257043e9d979SShachar Beiser } 25715f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 25726057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) 25736057a10bSAdrien Mazarguil swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; 25745f8ba81cSXueming Li DRV_LOG(DEBUG, "SWP support: %u", swp); 25755f8ba81cSXueming Li #endif 257668128934SAdrien Mazarguil config.swp = !!swp; 25777d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 25786057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { 25797d6bf6b8SYongseok Koh struct mlx5dv_striding_rq_caps mprq_caps = 25806057a10bSAdrien Mazarguil dv_attr.striding_rq_caps; 25817d6bf6b8SYongseok Koh 25827d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", 25837d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes); 25847d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", 25857d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes); 25867d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", 25877d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides); 25887d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", 25897d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides); 25907d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tsupported_qpts: %d", 25917d6bf6b8SYongseok Koh mprq_caps.supported_qpts); 25927d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); 25937d6bf6b8SYongseok Koh mprq = 1; 25947d6bf6b8SYongseok Koh mprq_min_stride_size_n = 25957d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes; 25967d6bf6b8SYongseok Koh mprq_max_stride_size_n = 25977d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes; 25987d6bf6b8SYongseok Koh mprq_min_stride_num_n = 25997d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides; 26007d6bf6b8SYongseok Koh mprq_max_stride_num_n = 26017d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides; 26027d6bf6b8SYongseok Koh } 26037d6bf6b8SYongseok Koh #endif 2604523f5a74SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128 && 26056057a10bSAdrien Mazarguil !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) 2606523f5a74SYongseok Koh cqe_comp = 0; 2607523f5a74SYongseok Koh else 2608523f5a74SYongseok Koh cqe_comp = 1; 260968128934SAdrien Mazarguil config.cqe_comp = cqe_comp; 
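/*
 * Illustrative note (added for clarity, not part of the original code):
 * the MPRQ capabilities saved above are log2 values. Assuming a device
 * reporting min_single_stride_log_num_of_bytes == 6 and
 * max_single_wqe_log_num_of_strides == 16, the smallest stride would be
 * 1 << 6 = 64 bytes and a WQE could hold up to 1 << 16 = 65536 strides;
 * the config.mprq.stride_size_n/stride_num_n devargs are validated
 * against these bounds later in this function.
 */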
2610bc91e8dbSYongseok Koh #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD 2611bc91e8dbSYongseok Koh /* Whether device supports 128B Rx CQE padding. */ 2612bc91e8dbSYongseok Koh cqe_pad = RTE_CACHE_LINE_SIZE == 128 && 2613bc91e8dbSYongseok Koh (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); 2614bc91e8dbSYongseok Koh #endif 2615038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 26166057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { 26176057a10bSAdrien Mazarguil tunnel_en = ((dv_attr.tunnel_offloads_caps & 2618038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && 26196057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 26204acb96fdSSuanming Mou MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) && 26214acb96fdSSuanming Mou (dv_attr.tunnel_offloads_caps & 26224acb96fdSSuanming Mou MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE)); 2623038e7251SShahaf Shuler } 2624a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "tunnel offloading is %ssupported", 2625a170a30dSNélio Laranjeiro tunnel_en ? "" : "not "); 2626038e7251SShahaf Shuler #else 2627a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 2628a170a30dSNélio Laranjeiro "tunnel offloading disabled due to old OFED/rdma-core version"); 2629038e7251SShahaf Shuler #endif 263068128934SAdrien Mazarguil config.tunnel_en = tunnel_en; 26311f106da2SMatan Azrad #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 26326057a10bSAdrien Mazarguil mpls_en = ((dv_attr.tunnel_offloads_caps & 26331f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && 26346057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 26351f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); 26361f106da2SMatan Azrad DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", 26371f106da2SMatan Azrad mpls_en ? "" : "not "); 26381f106da2SMatan Azrad #else 26391f106da2SMatan Azrad DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" 26401f106da2SMatan Azrad " old OFED/rdma-core version or firmware configuration"); 26411f106da2SMatan Azrad #endif 264268128934SAdrien Mazarguil config.mpls_en = mpls_en; 2643771fa900SAdrien Mazarguil /* Check port status. */ 264417e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); 2645771fa900SAdrien Mazarguil if (err) { 2646a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port query failed: %s", strerror(err)); 26479083982cSAdrien Mazarguil goto error; 2648771fa900SAdrien Mazarguil } 26491371f4dfSOr Ami if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 26509083982cSAdrien Mazarguil DRV_LOG(ERR, "port is not configured in Ethernet mode"); 2651e1c3e305SMatan Azrad err = EINVAL; 26529083982cSAdrien Mazarguil goto error; 26531371f4dfSOr Ami } 2654771fa900SAdrien Mazarguil if (port_attr.state != IBV_PORT_ACTIVE) 26559083982cSAdrien Mazarguil DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 2656a170a30dSNélio Laranjeiro mlx5_glue->port_state_str(port_attr.state), 2657771fa900SAdrien Mazarguil port_attr.state); 265817e19bc4SViacheslav Ovsiienko /* Allocate private eth device data. 
*/ 2659771fa900SAdrien Mazarguil priv = rte_zmalloc("ethdev private structure", 2660771fa900SAdrien Mazarguil sizeof(*priv), 2661771fa900SAdrien Mazarguil RTE_CACHE_LINE_SIZE); 2662771fa900SAdrien Mazarguil if (priv == NULL) { 2663a170a30dSNélio Laranjeiro DRV_LOG(ERR, "priv allocation failure"); 2664771fa900SAdrien Mazarguil err = ENOMEM; 26659083982cSAdrien Mazarguil goto error; 2666771fa900SAdrien Mazarguil } 266717e19bc4SViacheslav Ovsiienko priv->sh = sh; 266817e19bc4SViacheslav Ovsiienko priv->ibv_port = spawn->ibv_port; 266946e10a4cSViacheslav Ovsiienko priv->pci_dev = spawn->pci_dev; 267035b2d13fSOlivier Matz priv->mtu = RTE_ETHER_MTU; 2671a4de9586SVu Pham priv->mp_id.port_id = port_id; 2672a4de9586SVu Pham strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 26736bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64 26746bf10ab6SMoti Haimovsky /* Initialize UAR access locks for 32bit implementations. */ 26756bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock_cq); 26766bf10ab6SMoti Haimovsky for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) 26776bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock[i]); 26786bf10ab6SMoti Haimovsky #endif 267926c08b97SAdrien Mazarguil /* Some internal functions rely on Netlink sockets, open them now. */ 26805366074bSNelio Laranjeiro priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 26815366074bSNelio Laranjeiro priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 26822b730263SAdrien Mazarguil priv->representor = !!switch_info->representor; 2683299d7dc2SViacheslav Ovsiienko priv->master = !!switch_info->master; 26842b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 2685d5c06b1bSViacheslav Ovsiienko priv->vport_meta_tag = 0; 2686d5c06b1bSViacheslav Ovsiienko priv->vport_meta_mask = 0; 2687bee57a0aSViacheslav Ovsiienko priv->pf_bond = spawn->pf_bond; 2688d5c06b1bSViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR_DEVX_PORT 2689299d7dc2SViacheslav Ovsiienko /* 2690d5c06b1bSViacheslav Ovsiienko * The DevX port query API is implemented. E-Switch may use 2691d5c06b1bSViacheslav Ovsiienko * either vport or reg_c[0] metadata register to match on 2692d5c06b1bSViacheslav Ovsiienko * vport index. The engaged part of metadata register is 2693d5c06b1bSViacheslav Ovsiienko * defined by mask. 
2694d5c06b1bSViacheslav Ovsiienko */ 269539139371SViacheslav Ovsiienko if (switch_info->representor || switch_info->master) { 2696d5c06b1bSViacheslav Ovsiienko devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT | 2697d5c06b1bSViacheslav Ovsiienko MLX5DV_DEVX_PORT_MATCH_REG_C_0; 269839139371SViacheslav Ovsiienko err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port, 269939139371SViacheslav Ovsiienko &devx_port); 2700d5c06b1bSViacheslav Ovsiienko if (err) { 270139139371SViacheslav Ovsiienko DRV_LOG(WARNING, 270239139371SViacheslav Ovsiienko "can't query devx port %d on device %s", 2703d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2704d5c06b1bSViacheslav Ovsiienko devx_port.comp_mask = 0; 2705d5c06b1bSViacheslav Ovsiienko } 270639139371SViacheslav Ovsiienko } 2707d5c06b1bSViacheslav Ovsiienko if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) { 2708d5c06b1bSViacheslav Ovsiienko priv->vport_meta_tag = devx_port.reg_c_0.value; 2709d5c06b1bSViacheslav Ovsiienko priv->vport_meta_mask = devx_port.reg_c_0.mask; 2710d5c06b1bSViacheslav Ovsiienko if (!priv->vport_meta_mask) { 2711d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "vport zero mask for port %d" 271206fa6988SDekel Peled " on bonding device %s", 2713d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2714d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 2715d5c06b1bSViacheslav Ovsiienko goto error; 2716d5c06b1bSViacheslav Ovsiienko } 2717d5c06b1bSViacheslav Ovsiienko if (priv->vport_meta_tag & ~priv->vport_meta_mask) { 2718d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "invalid vport tag for port %d" 271906fa6988SDekel Peled " on bonding device %s", 2720d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2721d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 2722d5c06b1bSViacheslav Ovsiienko goto error; 2723d5c06b1bSViacheslav Ovsiienko } 272485c4bcbcSViacheslav Ovsiienko } 272585c4bcbcSViacheslav Ovsiienko if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) { 2726d5c06b1bSViacheslav Ovsiienko priv->vport_id = devx_port.vport_num; 2727d5c06b1bSViacheslav Ovsiienko } else if (spawn->pf_bond >= 0) { 2728d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "can't deduce vport index for port %d" 272906fa6988SDekel Peled " on bonding device %s", 2730d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2731d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 2732d5c06b1bSViacheslav Ovsiienko goto error; 2733d5c06b1bSViacheslav Ovsiienko } else { 2734d5c06b1bSViacheslav Ovsiienko /* Suppose vport index in compatible way. */ 2735d5c06b1bSViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 2736d5c06b1bSViacheslav Ovsiienko switch_info->port_name + 1 : -1; 2737d5c06b1bSViacheslav Ovsiienko } 2738d5c06b1bSViacheslav Ovsiienko #else 2739d5c06b1bSViacheslav Ovsiienko /* 2740d5c06b1bSViacheslav Ovsiienko * Kernel/rdma_core support single E-Switch per PF configurations 2741299d7dc2SViacheslav Ovsiienko * only and vport_id field contains the vport index for 2742299d7dc2SViacheslav Ovsiienko * associated VF, which is deduced from representor port name. 2743ae4eb7dcSViacheslav Ovsiienko * For example, let's have the IB device port 10, it has 2744299d7dc2SViacheslav Ovsiienko * attached network device eth0, which has port name attribute 2745299d7dc2SViacheslav Ovsiienko * pf0vf2, we can deduce the VF number as 2, and set vport index 2746299d7dc2SViacheslav Ovsiienko * as 3 (2+1). 
This assigning schema should be changed if the 2747299d7dc2SViacheslav Ovsiienko * multiple E-Switch instances per PF configurations or/and PCI 2748299d7dc2SViacheslav Ovsiienko * subfunctions are added. 2749299d7dc2SViacheslav Ovsiienko */ 2750299d7dc2SViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 2751299d7dc2SViacheslav Ovsiienko switch_info->port_name + 1 : -1; 2752d5c06b1bSViacheslav Ovsiienko #endif 2753d5c06b1bSViacheslav Ovsiienko /* representor_id field keeps the unmodified VF index. */ 2754299d7dc2SViacheslav Ovsiienko priv->representor_id = switch_info->representor ? 2755299d7dc2SViacheslav Ovsiienko switch_info->port_name : -1; 27562b730263SAdrien Mazarguil /* 27572b730263SAdrien Mazarguil * Look for sibling devices in order to reuse their switch domain 27582b730263SAdrien Mazarguil * if any, otherwise allocate one. 27592b730263SAdrien Mazarguil */ 2760fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 2761dbeba4cfSThomas Monjalon const struct mlx5_priv *opriv = 2762d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 27632b730263SAdrien Mazarguil 27642b730263SAdrien Mazarguil if (!opriv || 2765f7e95215SViacheslav Ovsiienko opriv->sh != priv->sh || 27662b730263SAdrien Mazarguil opriv->domain_id == 27672b730263SAdrien Mazarguil RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 27682b730263SAdrien Mazarguil continue; 27692b730263SAdrien Mazarguil priv->domain_id = opriv->domain_id; 27702b730263SAdrien Mazarguil break; 27712b730263SAdrien Mazarguil } 27722b730263SAdrien Mazarguil if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 27732b730263SAdrien Mazarguil err = rte_eth_switch_domain_alloc(&priv->domain_id); 27742b730263SAdrien Mazarguil if (err) { 27752b730263SAdrien Mazarguil err = rte_errno; 27762b730263SAdrien Mazarguil DRV_LOG(ERR, "unable to allocate switch domain: %s", 27772b730263SAdrien Mazarguil strerror(rte_errno)); 27782b730263SAdrien Mazarguil goto error; 27792b730263SAdrien Mazarguil } 27802b730263SAdrien Mazarguil own_domain_id = 1; 27812b730263SAdrien Mazarguil } 27828409a285SViacheslav Ovsiienko /* Override some values set by hardware configuration. */ 27838409a285SViacheslav Ovsiienko mlx5_args(&config, dpdk_dev->devargs); 278492d5dd48SViacheslav Ovsiienko err = mlx5_dev_check_sibling_config(priv, &config); 278592d5dd48SViacheslav Ovsiienko if (err) 278692d5dd48SViacheslav Ovsiienko goto error; 278717e19bc4SViacheslav Ovsiienko config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & 278817e19bc4SViacheslav Ovsiienko IBV_DEVICE_RAW_IP_CSUM); 2789a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "checksum offloading is %ssupported", 27907fe24446SShahaf Shuler (config.hw_csum ? 
"" : "not ")); 27912dd8b721SViacheslav Ovsiienko #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 27922dd8b721SViacheslav Ovsiienko !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 27932dd8b721SViacheslav Ovsiienko DRV_LOG(DEBUG, "counters are not supported"); 27949a761de8SOri Kam #endif 27950adf23adSDekel Peled #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR) 279658b1312eSYongseok Koh if (config.dv_flow_en) { 279758b1312eSYongseok Koh DRV_LOG(WARNING, "DV flow is not supported"); 279858b1312eSYongseok Koh config.dv_flow_en = 0; 279958b1312eSYongseok Koh } 280058b1312eSYongseok Koh #endif 28017fe24446SShahaf Shuler config.ind_table_max_size = 280217e19bc4SViacheslav Ovsiienko sh->device_attr.rss_caps.max_rwq_indirection_table_size; 280368128934SAdrien Mazarguil /* 280468128934SAdrien Mazarguil * Remove this check once DPDK supports larger/variable 280568128934SAdrien Mazarguil * indirection tables. 280668128934SAdrien Mazarguil */ 280768128934SAdrien Mazarguil if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 28087fe24446SShahaf Shuler config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; 2809a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 28107fe24446SShahaf Shuler config.ind_table_max_size); 281117e19bc4SViacheslav Ovsiienko config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 281243e9d979SShachar Beiser IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 2813a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 28147fe24446SShahaf Shuler (config.hw_vlan_strip ? "" : "not ")); 281517e19bc4SViacheslav Ovsiienko config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 2816cd230a3eSShahaf Shuler IBV_RAW_PACKET_CAP_SCATTER_FCS); 2817a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 28187fe24446SShahaf Shuler (config.hw_fcs_strip ? "" : "not ")); 28192014a7fbSYongseok Koh #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 282017e19bc4SViacheslav Ovsiienko hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 28212014a7fbSYongseok Koh #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 282217e19bc4SViacheslav Ovsiienko hw_padding = !!(sh->device_attr.device_cap_flags_ex & 28232014a7fbSYongseok Koh IBV_DEVICE_PCI_WRITE_END_PADDING); 282443e9d979SShachar Beiser #endif 282578c7a16dSYongseok Koh if (config.hw_padding && !hw_padding) { 282678c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 282778c7a16dSYongseok Koh config.hw_padding = 0; 282878c7a16dSYongseok Koh } else if (config.hw_padding) { 282978c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 283078c7a16dSYongseok Koh } 283117e19bc4SViacheslav Ovsiienko config.tso = (sh->device_attr.tso_caps.max_tso > 0 && 283217e19bc4SViacheslav Ovsiienko (sh->device_attr.tso_caps.supported_qpts & 283343e9d979SShachar Beiser (1 << IBV_QPT_RAW_PACKET))); 28347fe24446SShahaf Shuler if (config.tso) 283517e19bc4SViacheslav Ovsiienko config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; 2836f9de8718SShahaf Shuler /* 2837f9de8718SShahaf Shuler * MPW is disabled by default, while the Enhanced MPW is enabled 2838f9de8718SShahaf Shuler * by default. 2839f9de8718SShahaf Shuler */ 2840f9de8718SShahaf Shuler if (config.mps == MLX5_ARG_UNSET) 2841f9de8718SShahaf Shuler config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 2842f9de8718SShahaf Shuler MLX5_MPW_DISABLED; 2843f9de8718SShahaf Shuler else 2844f9de8718SShahaf Shuler config.mps = config.mps ? 
mps : MLX5_MPW_DISABLED; 2845a170a30dSNélio Laranjeiro DRV_LOG(INFO, "%sMPS is %s", 284682e75f83SViacheslav Ovsiienko config.mps == MLX5_MPW_ENHANCED ? "enhanced " : 284782e75f83SViacheslav Ovsiienko config.mps == MLX5_MPW ? "legacy " : "", 284868128934SAdrien Mazarguil config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 28497fe24446SShahaf Shuler if (config.cqe_comp && !cqe_comp) { 2850a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 28517fe24446SShahaf Shuler config.cqe_comp = 0; 2852523f5a74SYongseok Koh } 2853bc91e8dbSYongseok Koh if (config.cqe_pad && !cqe_pad) { 2854bc91e8dbSYongseok Koh DRV_LOG(WARNING, "Rx CQE padding isn't supported"); 2855bc91e8dbSYongseok Koh config.cqe_pad = 0; 2856bc91e8dbSYongseok Koh } else if (config.cqe_pad) { 2857bc91e8dbSYongseok Koh DRV_LOG(INFO, "Rx CQE padding is enabled"); 2858bc91e8dbSYongseok Koh } 2859175f1c21SDekel Peled if (config.devx) { 2860175f1c21SDekel Peled priv->counter_fallback = 0; 2861175f1c21SDekel Peled err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr); 2862175f1c21SDekel Peled if (err) { 2863175f1c21SDekel Peled err = -err; 2864175f1c21SDekel Peled goto error; 2865175f1c21SDekel Peled } 2866175f1c21SDekel Peled if (!config.hca_attr.flow_counters_dump) 2867175f1c21SDekel Peled priv->counter_fallback = 1; 2868175f1c21SDekel Peled #ifndef HAVE_IBV_DEVX_ASYNC 2869175f1c21SDekel Peled priv->counter_fallback = 1; 2870175f1c21SDekel Peled #endif 2871175f1c21SDekel Peled if (priv->counter_fallback) 287206fa6988SDekel Peled DRV_LOG(INFO, "Use fall-back DV counter management"); 2873175f1c21SDekel Peled /* Check for LRO support. */ 28742eb5dce8SDekel Peled if (config.dest_tir && config.hca_attr.lro_cap && 28752eb5dce8SDekel Peled config.dv_flow_en) { 2876175f1c21SDekel Peled /* TBD check tunnel lro caps. */ 2877175f1c21SDekel Peled config.lro.supported = config.hca_attr.lro_cap; 2878175f1c21SDekel Peled DRV_LOG(DEBUG, "Device supports LRO"); 2879175f1c21SDekel Peled /* 2880175f1c21SDekel Peled * If LRO timeout is not configured by application, 2881175f1c21SDekel Peled * use the minimal supported value. 2882175f1c21SDekel Peled */ 2883175f1c21SDekel Peled if (!config.lro.timeout) 2884175f1c21SDekel Peled config.lro.timeout = 2885175f1c21SDekel Peled config.hca_attr.lro_timer_supported_periods[0]; 2886175f1c21SDekel Peled DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 2887175f1c21SDekel Peled config.lro.timeout); 2888175f1c21SDekel Peled } 28896bc327b9SSuanming Mou #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) 28906bc327b9SSuanming Mou if (config.hca_attr.qos.sup && config.hca_attr.qos.srtcm_sup && 28916bc327b9SSuanming Mou config.dv_flow_en) { 289227efd5deSSuanming Mou uint8_t reg_c_mask = 289327efd5deSSuanming Mou config.hca_attr.qos.flow_meter_reg_c_ids; 289427efd5deSSuanming Mou /* 289527efd5deSSuanming Mou * Meter needs two REG_C's for color match and pre-sfx 289627efd5deSSuanming Mou * flow match. Here get the REG_C for color match. 289727efd5deSSuanming Mou * REG_C_0 and REG_C_1 is reserved for metadata feature. 
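 * Worked example for illustration (assumed capability value, not from
 * the original code): if flow_meter_reg_c_ids reports 0xff, masking
 * with 0xfc below leaves REG_C_2..REG_C_7 available and
 * ffs(0xfc) - 1 + REG_C_0 selects REG_C_2 as the color match register.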
289827efd5deSSuanming Mou */ 289927efd5deSSuanming Mou reg_c_mask &= 0xfc; 290027efd5deSSuanming Mou if (__builtin_popcount(reg_c_mask) < 1) { 290127efd5deSSuanming Mou priv->mtr_en = 0; 290227efd5deSSuanming Mou DRV_LOG(WARNING, "No available register for" 290327efd5deSSuanming Mou " meter."); 290427efd5deSSuanming Mou } else { 290527efd5deSSuanming Mou priv->mtr_color_reg = ffs(reg_c_mask) - 1 + 290627efd5deSSuanming Mou REG_C_0; 29076bc327b9SSuanming Mou priv->mtr_en = 1; 2908792e749eSSuanming Mou priv->mtr_reg_share = 2909792e749eSSuanming Mou config.hca_attr.qos.flow_meter_reg_share; 291027efd5deSSuanming Mou DRV_LOG(DEBUG, "The REG_C meter uses is %d", 291127efd5deSSuanming Mou priv->mtr_color_reg); 291227efd5deSSuanming Mou } 29136bc327b9SSuanming Mou } 29146bc327b9SSuanming Mou #endif 2915175f1c21SDekel Peled } 29165c0e2db6SYongseok Koh if (config.mprq.enabled && mprq) { 2917ecb16045SAlexander Kozyrev if (config.mprq.stride_num_n && 2918ecb16045SAlexander Kozyrev (config.mprq.stride_num_n > mprq_max_stride_num_n || 2919ecb16045SAlexander Kozyrev config.mprq.stride_num_n < mprq_min_stride_num_n)) { 29207d6bf6b8SYongseok Koh config.mprq.stride_num_n = 2921ecb16045SAlexander Kozyrev RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 2922ecb16045SAlexander Kozyrev mprq_min_stride_num_n), 2923ecb16045SAlexander Kozyrev mprq_max_stride_num_n); 29247d6bf6b8SYongseok Koh DRV_LOG(WARNING, 29257d6bf6b8SYongseok Koh "the number of strides" 29267d6bf6b8SYongseok Koh " for Multi-Packet RQ is out of range," 29277d6bf6b8SYongseok Koh " setting default value (%u)", 29287d6bf6b8SYongseok Koh 1 << config.mprq.stride_num_n); 29297d6bf6b8SYongseok Koh } 2930ecb16045SAlexander Kozyrev if (config.mprq.stride_size_n && 2931ecb16045SAlexander Kozyrev (config.mprq.stride_size_n > mprq_max_stride_size_n || 2932ecb16045SAlexander Kozyrev config.mprq.stride_size_n < mprq_min_stride_size_n)) { 2933ecb16045SAlexander Kozyrev config.mprq.stride_size_n = 2934ecb16045SAlexander Kozyrev RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N, 2935ecb16045SAlexander Kozyrev mprq_min_stride_size_n), 2936ecb16045SAlexander Kozyrev mprq_max_stride_size_n); 2937ecb16045SAlexander Kozyrev DRV_LOG(WARNING, 2938ecb16045SAlexander Kozyrev "the size of a stride" 2939ecb16045SAlexander Kozyrev " for Multi-Packet RQ is out of range," 2940ecb16045SAlexander Kozyrev " setting default value (%u)", 2941ecb16045SAlexander Kozyrev 1 << config.mprq.stride_size_n); 2942ecb16045SAlexander Kozyrev } 29437d6bf6b8SYongseok Koh config.mprq.min_stride_size_n = mprq_min_stride_size_n; 29447d6bf6b8SYongseok Koh config.mprq.max_stride_size_n = mprq_max_stride_size_n; 29455c0e2db6SYongseok Koh } else if (config.mprq.enabled && !mprq) { 29465c0e2db6SYongseok Koh DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 29475c0e2db6SYongseok Koh config.mprq.enabled = 0; 29487d6bf6b8SYongseok Koh } 2949066cfecdSMatan Azrad if (config.max_dump_files_num == 0) 2950066cfecdSMatan Azrad config.max_dump_files_num = 128; 2951af4f09f2SNélio Laranjeiro eth_dev = rte_eth_dev_allocate(name); 2952af4f09f2SNélio Laranjeiro if (eth_dev == NULL) { 2953a170a30dSNélio Laranjeiro DRV_LOG(ERR, "can not allocate rte ethdev"); 2954af4f09f2SNélio Laranjeiro err = ENOMEM; 29559083982cSAdrien Mazarguil goto error; 2956af4f09f2SNélio Laranjeiro } 295715febafdSThomas Monjalon /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). 
*/ 295815febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 2959a7d3c627SThomas Monjalon if (priv->representor) { 29602b730263SAdrien Mazarguil eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 2961a7d3c627SThomas Monjalon eth_dev->data->representor_id = priv->representor_id; 2962a7d3c627SThomas Monjalon } 2963fa2e14d4SViacheslav Ovsiienko /* 2964fa2e14d4SViacheslav Ovsiienko * Store associated network device interface index. This index 2965fa2e14d4SViacheslav Ovsiienko * is permanent throughout the lifetime of device. So, we may store 2966fa2e14d4SViacheslav Ovsiienko * the ifindex here and use the cached value further. 2967fa2e14d4SViacheslav Ovsiienko */ 29688e46d4e1SAlexander Kozyrev MLX5_ASSERT(spawn->ifindex); 2969fa2e14d4SViacheslav Ovsiienko priv->if_index = spawn->ifindex; 2970af4f09f2SNélio Laranjeiro eth_dev->data->dev_private = priv; 2971df428ceeSYongseok Koh priv->dev_data = eth_dev->data; 2972af4f09f2SNélio Laranjeiro eth_dev->data->mac_addrs = priv->mac; 2973f38c5457SAdrien Mazarguil eth_dev->device = dpdk_dev; 2974771fa900SAdrien Mazarguil /* Configure the first MAC address by default. */ 2975af4f09f2SNélio Laranjeiro if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 2976a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2977a170a30dSNélio Laranjeiro "port %u cannot get MAC address, is mlx5_en" 2978a170a30dSNélio Laranjeiro " loaded? (errno: %s)", 29798c3c2372SAdrien Mazarguil eth_dev->data->port_id, strerror(rte_errno)); 2980e1c3e305SMatan Azrad err = ENODEV; 29819083982cSAdrien Mazarguil goto error; 2982771fa900SAdrien Mazarguil } 2983a170a30dSNélio Laranjeiro DRV_LOG(INFO, 2984a170a30dSNélio Laranjeiro "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 29850f99970bSNélio Laranjeiro eth_dev->data->port_id, 2986771fa900SAdrien Mazarguil mac.addr_bytes[0], mac.addr_bytes[1], 2987771fa900SAdrien Mazarguil mac.addr_bytes[2], mac.addr_bytes[3], 2988771fa900SAdrien Mazarguil mac.addr_bytes[4], mac.addr_bytes[5]); 29890afacb04SAlexander Kozyrev #ifdef RTE_LIBRTE_MLX5_DEBUG 2990771fa900SAdrien Mazarguil { 2991771fa900SAdrien Mazarguil char ifname[IF_NAMESIZE]; 2992771fa900SAdrien Mazarguil 2993af4f09f2SNélio Laranjeiro if (mlx5_get_ifname(eth_dev, &ifname) == 0) 2994a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 29950f99970bSNélio Laranjeiro eth_dev->data->port_id, ifname); 2996771fa900SAdrien Mazarguil else 2997a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is unknown", 29980f99970bSNélio Laranjeiro eth_dev->data->port_id); 2999771fa900SAdrien Mazarguil } 3000771fa900SAdrien Mazarguil #endif 3001771fa900SAdrien Mazarguil /* Get actual MTU if possible. */ 3002a6d83b6aSNélio Laranjeiro err = mlx5_get_mtu(eth_dev, &priv->mtu); 3003012ad994SShahaf Shuler if (err) { 3004012ad994SShahaf Shuler err = rte_errno; 30059083982cSAdrien Mazarguil goto error; 3006012ad994SShahaf Shuler } 3007a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 3008a170a30dSNélio Laranjeiro priv->mtu); 300968128934SAdrien Mazarguil /* Initialize burst functions to prevent crashes before link-up. */ 3010e313ef4cSShahaf Shuler eth_dev->rx_pkt_burst = removed_rx_burst; 3011e313ef4cSShahaf Shuler eth_dev->tx_pkt_burst = removed_tx_burst; 3012771fa900SAdrien Mazarguil eth_dev->dev_ops = &mlx5_dev_ops; 3013272733b5SNélio Laranjeiro /* Register MAC address. 
*/ 3014272733b5SNélio Laranjeiro claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 3015f87bfa8eSYongseok Koh if (config.vf && config.vf_nl_en) 3016f22442cbSMatan Azrad mlx5_nl_mac_addr_sync(priv->nl_socket_route, 3017f22442cbSMatan Azrad mlx5_ifindex(eth_dev), 3018f22442cbSMatan Azrad eth_dev->data->mac_addrs, 3019f22442cbSMatan Azrad MLX5_MAX_MAC_ADDRESSES); 3020ab612adcSSuanming Mou priv->flows = 0; 3021ab612adcSSuanming Mou priv->ctrl_flows = 0; 30223f373f35SSuanming Mou TAILQ_INIT(&priv->flow_meters); 30233bd26b23SSuanming Mou TAILQ_INIT(&priv->flow_meter_profiles); 30241e3a39f7SXueming Li /* Hint libmlx5 to use PMD allocator for data plane resources */ 30251e3a39f7SXueming Li struct mlx5dv_ctx_allocators alctr = { 30261e3a39f7SXueming Li .alloc = &mlx5_alloc_verbs_buf, 30271e3a39f7SXueming Li .free = &mlx5_free_verbs_buf, 30281e3a39f7SXueming Li .data = priv, 30291e3a39f7SXueming Li }; 303017e19bc4SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 303117e19bc4SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 30321e3a39f7SXueming Li (void *)((uintptr_t)&alctr)); 3033771fa900SAdrien Mazarguil /* Bring Ethernet device up. */ 3034a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 30350f99970bSNélio Laranjeiro eth_dev->data->port_id); 30367ba5320bSNélio Laranjeiro mlx5_set_link_up(eth_dev); 3037a85a606cSShahaf Shuler /* 3038a85a606cSShahaf Shuler * Even though the interrupt handler is not installed yet, 3039ae4eb7dcSViacheslav Ovsiienko * interrupts will still trigger on the async_fd from 3040a85a606cSShahaf Shuler * Verbs context returned by ibv_open_device(). 3041a85a606cSShahaf Shuler */ 3042a85a606cSShahaf Shuler mlx5_link_update(eth_dev, 0); 3043e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 3044e2b4925eSOri Kam if (!(config.hca_attr.eswitch_manager && config.dv_flow_en && 3045e2b4925eSOri Kam (switch_info->representor || switch_info->master))) 3046e2b4925eSOri Kam config.dv_esw_en = 0; 3047e2b4925eSOri Kam #else 3048e2b4925eSOri Kam config.dv_esw_en = 0; 3049e2b4925eSOri Kam #endif 305038b4b397SViacheslav Ovsiienko /* Detect minimal data bytes to inline. */ 305138b4b397SViacheslav Ovsiienko mlx5_set_min_inline(spawn, &config); 30527fe24446SShahaf Shuler /* Store device configuration on private structure. */ 30537fe24446SShahaf Shuler priv->config = config; 3054dfedf3e3SViacheslav Ovsiienko /* Create context for virtual machine VLAN workaround. */ 3055dfedf3e3SViacheslav Ovsiienko priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 3056e2b4925eSOri Kam if (config.dv_flow_en) { 3057e2b4925eSOri Kam err = mlx5_alloc_shared_dr(priv); 3058e2b4925eSOri Kam if (err) 3059e2b4925eSOri Kam goto error; 3060792e749eSSuanming Mou /* 3061792e749eSSuanming Mou * RSS id is shared with meter flow id. Meter flow id can only 3062792e749eSSuanming Mou * use the 24 MSB of the register. 3063792e749eSSuanming Mou */ 3064792e749eSSuanming Mou priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >> 3065792e749eSSuanming Mou MLX5_MTR_COLOR_BITS); 306671e254bcSViacheslav Ovsiienko if (!priv->qrss_id_pool) { 306771e254bcSViacheslav Ovsiienko DRV_LOG(ERR, "can't create flow id pool"); 306871e254bcSViacheslav Ovsiienko err = ENOMEM; 306971e254bcSViacheslav Ovsiienko goto error; 307071e254bcSViacheslav Ovsiienko } 3071e2b4925eSOri Kam } 307278be8852SNelio Laranjeiro /* Supported Verbs flow priority number detection. 
*/
30732815702bSNelio Laranjeiro err = mlx5_flow_discover_priorities(eth_dev);
30744fb27c1dSViacheslav Ovsiienko if (err < 0) {
30754fb27c1dSViacheslav Ovsiienko err = -err;
30769083982cSAdrien Mazarguil goto error;
30774fb27c1dSViacheslav Ovsiienko }
30782815702bSNelio Laranjeiro priv->config.flow_prio = err;
30792d241515SViacheslav Ovsiienko if (!priv->config.dv_esw_en &&
30802d241515SViacheslav Ovsiienko priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
30812d241515SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata mode %u is not supported "
30822d241515SViacheslav Ovsiienko "(no E-Switch)", priv->config.dv_xmeta_en);
30832d241515SViacheslav Ovsiienko priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
30842d241515SViacheslav Ovsiienko }
308539139371SViacheslav Ovsiienko mlx5_set_metadata_mask(eth_dev);
308639139371SViacheslav Ovsiienko if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
308739139371SViacheslav Ovsiienko !priv->sh->dv_regc0_mask) {
308839139371SViacheslav Ovsiienko DRV_LOG(ERR, "metadata mode %u is not supported "
308939139371SViacheslav Ovsiienko "(no metadata reg_c[0] is available)",
309039139371SViacheslav Ovsiienko priv->config.dv_xmeta_en);
309139139371SViacheslav Ovsiienko err = ENOTSUP;
309239139371SViacheslav Ovsiienko goto error;
309339139371SViacheslav Ovsiienko }
3094e7bfa359SBing Zhao /*
3095e7bfa359SBing Zhao * Allocate the buffer for flow creation, just once.
3096e7bfa359SBing Zhao * The allocation must be done before any flow creation.
3097e7bfa359SBing Zhao */
3098e7bfa359SBing Zhao mlx5_flow_alloc_intermediate(eth_dev);
309939139371SViacheslav Ovsiienko /* Query availability of metadata reg_c's. */
310039139371SViacheslav Ovsiienko err = mlx5_flow_discover_mreg_c(eth_dev);
310139139371SViacheslav Ovsiienko if (err < 0) {
310239139371SViacheslav Ovsiienko err = -err;
310339139371SViacheslav Ovsiienko goto error;
310439139371SViacheslav Ovsiienko }
31055e61bcddSViacheslav Ovsiienko if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
31065e61bcddSViacheslav Ovsiienko DRV_LOG(DEBUG,
31075e61bcddSViacheslav Ovsiienko "port %u extensive metadata register is not supported",
31085e61bcddSViacheslav Ovsiienko eth_dev->data->port_id);
31092d241515SViacheslav Ovsiienko if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
31102d241515SViacheslav Ovsiienko DRV_LOG(ERR, "metadata mode %u is not supported "
31112d241515SViacheslav Ovsiienko "(no metadata registers available)",
31122d241515SViacheslav Ovsiienko priv->config.dv_xmeta_en);
31132d241515SViacheslav Ovsiienko err = ENOTSUP;
31142d241515SViacheslav Ovsiienko goto error;
31152d241515SViacheslav Ovsiienko }
31165e61bcddSViacheslav Ovsiienko }
3117dd3c774fSViacheslav Ovsiienko if (priv->config.dv_flow_en &&
3118dd3c774fSViacheslav Ovsiienko priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3119dd3c774fSViacheslav Ovsiienko mlx5_flow_ext_mreg_supported(eth_dev) &&
3120dd3c774fSViacheslav Ovsiienko priv->sh->dv_regc0_mask) {
3121dd3c774fSViacheslav Ovsiienko priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
3122dd3c774fSViacheslav Ovsiienko MLX5_FLOW_MREG_HTABLE_SZ);
3123dd3c774fSViacheslav Ovsiienko if (!priv->mreg_cp_tbl) {
3124dd3c774fSViacheslav Ovsiienko err = ENOMEM;
3125dd3c774fSViacheslav Ovsiienko goto error;
3126dd3c774fSViacheslav Ovsiienko }
3127dd3c774fSViacheslav Ovsiienko }
3128f38c5457SAdrien Mazarguil return eth_dev;
31299083982cSAdrien Mazarguil error:
313026c08b97SAdrien Mazarguil if (priv) {
3131dd3c774fSViacheslav Ovsiienko if (priv->mreg_cp_tbl)
3132dd3c774fSViacheslav
Ovsiienko mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); 3133b2177648SViacheslav Ovsiienko if (priv->sh) 3134b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 313526c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 313626c08b97SAdrien Mazarguil close(priv->nl_socket_route); 313726c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 313826c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 3139dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 3140dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 314171e254bcSViacheslav Ovsiienko if (priv->qrss_id_pool) 314271e254bcSViacheslav Ovsiienko mlx5_flow_id_pool_release(priv->qrss_id_pool); 31432b730263SAdrien Mazarguil if (own_domain_id) 31442b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 3145771fa900SAdrien Mazarguil rte_free(priv); 3146e16adf08SThomas Monjalon if (eth_dev != NULL) 3147e16adf08SThomas Monjalon eth_dev->data->dev_private = NULL; 314826c08b97SAdrien Mazarguil } 3149e16adf08SThomas Monjalon if (eth_dev != NULL) { 3150e16adf08SThomas Monjalon /* mac_addrs must not be freed alone because part of dev_private */ 3151e16adf08SThomas Monjalon eth_dev->data->mac_addrs = NULL; 3152690de285SRaslan Darawsheh rte_eth_dev_release_port(eth_dev); 3153e16adf08SThomas Monjalon } 315417e19bc4SViacheslav Ovsiienko if (sh) 315517e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(sh); 31568e46d4e1SAlexander Kozyrev MLX5_ASSERT(err > 0); 3157a6d83b6aSNélio Laranjeiro rte_errno = err; 3158f38c5457SAdrien Mazarguil return NULL; 3159f38c5457SAdrien Mazarguil } 3160f38c5457SAdrien Mazarguil 3161116f90adSAdrien Mazarguil /** 3162116f90adSAdrien Mazarguil * Comparison callback to sort device data. 3163116f90adSAdrien Mazarguil * 3164116f90adSAdrien Mazarguil * This is meant to be used with qsort(). 3165116f90adSAdrien Mazarguil * 3166116f90adSAdrien Mazarguil * @param a[in] 3167116f90adSAdrien Mazarguil * Pointer to pointer to first data object. 3168116f90adSAdrien Mazarguil * @param b[in] 3169116f90adSAdrien Mazarguil * Pointer to pointer to second data object. 3170116f90adSAdrien Mazarguil * 3171116f90adSAdrien Mazarguil * @return 3172116f90adSAdrien Mazarguil * 0 if both objects are equal, less than 0 if the first argument is less 3173116f90adSAdrien Mazarguil * than the second, greater than 0 otherwise. 3174116f90adSAdrien Mazarguil */ 3175116f90adSAdrien Mazarguil static int 3176116f90adSAdrien Mazarguil mlx5_dev_spawn_data_cmp(const void *a, const void *b) 3177116f90adSAdrien Mazarguil { 3178116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_a = 3179116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)a)->info; 3180116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_b = 3181116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)b)->info; 3182116f90adSAdrien Mazarguil int ret; 3183116f90adSAdrien Mazarguil 3184116f90adSAdrien Mazarguil /* Master device first. */ 3185116f90adSAdrien Mazarguil ret = si_b->master - si_a->master; 3186116f90adSAdrien Mazarguil if (ret) 3187116f90adSAdrien Mazarguil return ret; 3188116f90adSAdrien Mazarguil /* Then representor devices. */ 3189116f90adSAdrien Mazarguil ret = si_b->representor - si_a->representor; 3190116f90adSAdrien Mazarguil if (ret) 3191116f90adSAdrien Mazarguil return ret; 3192116f90adSAdrien Mazarguil /* Unidentified devices come last in no specific order. 
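	 * (Illustrative sketch of the intended ordering: sorting the set
	 * {representor #1, master, representor #0} with this comparator is
	 * expected to yield {master, representor #0, representor #1}.)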
	 */
	if (!si_a->representor)
		return 0;
	/* Order representors by name. */
	return si_a->port_name - si_b->port_name;
}

/**
 * Match PCI information for possible slaves of bonding device.
 *
 * @param[in] ibv_dev
 *   Pointer to Infiniband device structure.
 * @param[in] pci_dev
 *   Pointer to PCI device structure to match PCI address.
 * @param[in] nl_rdma
 *   Netlink RDMA group socket handle.
 *
 * @return
 *   Negative value if no bonding device found, otherwise
 *   positive index of slave PF in bonding.
 */
static int
mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
			   const struct rte_pci_device *pci_dev,
			   int nl_rdma)
{
	char ifname[IF_NAMESIZE + 1];
	unsigned int ifindex;
	unsigned int np, i;
	FILE *file = NULL;
	int pf = -1;

	/*
	 * Try to get master device name. If something goes wrong,
	 * suppose the lack of kernel support and no bonding devices.
	 */
	if (nl_rdma < 0)
		return -1;
	if (!strstr(ibv_dev->name, "bond"))
		return -1;
	np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
	if (!np)
		return -1;
	/*
	 * The master device might not be on the predefined port
	 * (not on port index 1, this is not guaranteed), we have
	 * to scan all Infiniband device ports and find the master.
	 */
	for (i = 1; i <= np; ++i) {
		/* Check whether Infiniband port is populated. */
		ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
		if (!ifindex)
			continue;
		if (!if_indextoname(ifindex, ifname))
			continue;
		/* Try to read bonding slave names from sysfs.
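		 * (Hypothetical example: for ifname "bond_pf0" the path is
		 * "/sys/class/net/bond_pf0/master/bonding/slaves" and its
		 * content is a whitespace-separated slave list such as
		 * "ens1f0 ens1f1"; the interface names here are made up.)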
*/ 32502e569a37SViacheslav Ovsiienko MKSTR(slaves, 32512e569a37SViacheslav Ovsiienko "/sys/class/net/%s/master/bonding/slaves", ifname); 32522e569a37SViacheslav Ovsiienko file = fopen(slaves, "r"); 32532e569a37SViacheslav Ovsiienko if (file) 32542e569a37SViacheslav Ovsiienko break; 32552e569a37SViacheslav Ovsiienko } 32562e569a37SViacheslav Ovsiienko if (!file) 32572e569a37SViacheslav Ovsiienko return -1; 32582e569a37SViacheslav Ovsiienko /* Use safe format to check maximal buffer length. */ 32598e46d4e1SAlexander Kozyrev MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE); 32602e569a37SViacheslav Ovsiienko while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) { 32612e569a37SViacheslav Ovsiienko char tmp_str[IF_NAMESIZE + 32]; 32622e569a37SViacheslav Ovsiienko struct rte_pci_addr pci_addr; 32632e569a37SViacheslav Ovsiienko struct mlx5_switch_info info; 32642e569a37SViacheslav Ovsiienko 32652e569a37SViacheslav Ovsiienko /* Process slave interface names in the loop. */ 32662e569a37SViacheslav Ovsiienko snprintf(tmp_str, sizeof(tmp_str), 32672e569a37SViacheslav Ovsiienko "/sys/class/net/%s", ifname); 32682e569a37SViacheslav Ovsiienko if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) { 32692e569a37SViacheslav Ovsiienko DRV_LOG(WARNING, "can not get PCI address" 32702e569a37SViacheslav Ovsiienko " for netdev \"%s\"", ifname); 32712e569a37SViacheslav Ovsiienko continue; 32722e569a37SViacheslav Ovsiienko } 32732e569a37SViacheslav Ovsiienko if (pci_dev->addr.domain != pci_addr.domain || 32742e569a37SViacheslav Ovsiienko pci_dev->addr.bus != pci_addr.bus || 32752e569a37SViacheslav Ovsiienko pci_dev->addr.devid != pci_addr.devid || 32762e569a37SViacheslav Ovsiienko pci_dev->addr.function != pci_addr.function) 32772e569a37SViacheslav Ovsiienko continue; 32782e569a37SViacheslav Ovsiienko /* Slave interface PCI address match found. */ 32792e569a37SViacheslav Ovsiienko fclose(file); 32802e569a37SViacheslav Ovsiienko snprintf(tmp_str, sizeof(tmp_str), 32812e569a37SViacheslav Ovsiienko "/sys/class/net/%s/phys_port_name", ifname); 32822e569a37SViacheslav Ovsiienko file = fopen(tmp_str, "rb"); 32832e569a37SViacheslav Ovsiienko if (!file) 32842e569a37SViacheslav Ovsiienko break; 32852e569a37SViacheslav Ovsiienko info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; 32862e569a37SViacheslav Ovsiienko if (fscanf(file, "%32s", tmp_str) == 1) 32872e569a37SViacheslav Ovsiienko mlx5_translate_port_name(tmp_str, &info); 32882e569a37SViacheslav Ovsiienko if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY || 32892e569a37SViacheslav Ovsiienko info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) 32902e569a37SViacheslav Ovsiienko pf = info.port_name; 32912e569a37SViacheslav Ovsiienko break; 32922e569a37SViacheslav Ovsiienko } 32932e569a37SViacheslav Ovsiienko if (file) 32942e569a37SViacheslav Ovsiienko fclose(file); 32952e569a37SViacheslav Ovsiienko return pf; 32962e569a37SViacheslav Ovsiienko } 32972e569a37SViacheslav Ovsiienko 32982e569a37SViacheslav Ovsiienko /** 3299f38c5457SAdrien Mazarguil * DPDK callback to register a PCI device. 3300f38c5457SAdrien Mazarguil * 33012b730263SAdrien Mazarguil * This function spawns Ethernet devices out of a given PCI device. 3302f38c5457SAdrien Mazarguil * 3303f38c5457SAdrien Mazarguil * @param[in] pci_drv 3304f38c5457SAdrien Mazarguil * PCI driver structure (mlx5_driver). 3305f38c5457SAdrien Mazarguil * @param[in] pci_dev 3306f38c5457SAdrien Mazarguil * PCI device information. 
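 *
 * (One rte_eth_dev is spawned per matched Verbs port or representor;
 * the number of spawn iterations is computed below as "ns".)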
3307f38c5457SAdrien Mazarguil * 3308f38c5457SAdrien Mazarguil * @return 3309f38c5457SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 3310f38c5457SAdrien Mazarguil */ 3311f38c5457SAdrien Mazarguil static int 3312f38c5457SAdrien Mazarguil mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3313f38c5457SAdrien Mazarguil struct rte_pci_device *pci_dev) 3314f38c5457SAdrien Mazarguil { 3315f38c5457SAdrien Mazarguil struct ibv_device **ibv_list; 3316ad74bc61SViacheslav Ovsiienko /* 3317ad74bc61SViacheslav Ovsiienko * Number of found IB Devices matching with requested PCI BDF. 3318ad74bc61SViacheslav Ovsiienko * nd != 1 means there are multiple IB devices over the same 3319ad74bc61SViacheslav Ovsiienko * PCI device and we have representors and master. 3320ad74bc61SViacheslav Ovsiienko */ 3321ad74bc61SViacheslav Ovsiienko unsigned int nd = 0; 3322ad74bc61SViacheslav Ovsiienko /* 3323ad74bc61SViacheslav Ovsiienko * Number of found IB device Ports. nd = 1 and np = 1..n means 3324ad74bc61SViacheslav Ovsiienko * we have the single multiport IB device, and there may be 3325ad74bc61SViacheslav Ovsiienko * representors attached to some of found ports. 3326ad74bc61SViacheslav Ovsiienko */ 3327ad74bc61SViacheslav Ovsiienko unsigned int np = 0; 3328ad74bc61SViacheslav Ovsiienko /* 3329ad74bc61SViacheslav Ovsiienko * Number of DPDK ethernet devices to Spawn - either over 3330ad74bc61SViacheslav Ovsiienko * multiple IB devices or multiple ports of single IB device. 3331ad74bc61SViacheslav Ovsiienko * Actually this is the number of iterations to spawn. 3332ad74bc61SViacheslav Ovsiienko */ 3333ad74bc61SViacheslav Ovsiienko unsigned int ns = 0; 33342e569a37SViacheslav Ovsiienko /* 33352e569a37SViacheslav Ovsiienko * Bonding device 33362e569a37SViacheslav Ovsiienko * < 0 - no bonding device (single one) 33372e569a37SViacheslav Ovsiienko * >= 0 - bonding device (value is slave PF index) 33382e569a37SViacheslav Ovsiienko */ 33392e569a37SViacheslav Ovsiienko int bd = -1; 3340a62ec991SViacheslav Ovsiienko struct mlx5_dev_spawn_data *list = NULL; 3341f87bfa8eSYongseok Koh struct mlx5_dev_config dev_config; 3342f38c5457SAdrien Mazarguil int ret; 3343f38c5457SAdrien Mazarguil 3344d768f324SMatan Azrad if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) { 3345d768f324SMatan Azrad DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5" 3346d768f324SMatan Azrad " driver."); 3347d768f324SMatan Azrad return 1; 3348d768f324SMatan Azrad } 3349e6cdc54cSXueming Li if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3350e6cdc54cSXueming Li mlx5_pmd_socket_init(); 33517be600c8SYongseok Koh ret = mlx5_init_once(); 33527be600c8SYongseok Koh if (ret) { 33537be600c8SYongseok Koh DRV_LOG(ERR, "unable to init PMD global data: %s", 33547be600c8SYongseok Koh strerror(rte_errno)); 33557be600c8SYongseok Koh return -rte_errno; 33567be600c8SYongseok Koh } 33578e46d4e1SAlexander Kozyrev MLX5_ASSERT(pci_drv == &mlx5_driver); 3358f38c5457SAdrien Mazarguil errno = 0; 3359f38c5457SAdrien Mazarguil ibv_list = mlx5_glue->get_device_list(&ret); 3360f38c5457SAdrien Mazarguil if (!ibv_list) { 3361f38c5457SAdrien Mazarguil rte_errno = errno ? 
errno : ENOSYS; 3362f38c5457SAdrien Mazarguil DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 3363a6d83b6aSNélio Laranjeiro return -rte_errno; 3364a6d83b6aSNélio Laranjeiro } 3365ad74bc61SViacheslav Ovsiienko /* 3366ad74bc61SViacheslav Ovsiienko * First scan the list of all Infiniband devices to find 3367ad74bc61SViacheslav Ovsiienko * matching ones, gathering into the list. 3368ad74bc61SViacheslav Ovsiienko */ 336926c08b97SAdrien Mazarguil struct ibv_device *ibv_match[ret + 1]; 3370a62ec991SViacheslav Ovsiienko int nl_route = mlx5_nl_init(NETLINK_ROUTE); 3371a62ec991SViacheslav Ovsiienko int nl_rdma = mlx5_nl_init(NETLINK_RDMA); 3372ad74bc61SViacheslav Ovsiienko unsigned int i; 337326c08b97SAdrien Mazarguil 3374f38c5457SAdrien Mazarguil while (ret-- > 0) { 3375f38c5457SAdrien Mazarguil struct rte_pci_addr pci_addr; 3376f38c5457SAdrien Mazarguil 3377f38c5457SAdrien Mazarguil DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 33782e569a37SViacheslav Ovsiienko bd = mlx5_device_bond_pci_match 33792e569a37SViacheslav Ovsiienko (ibv_list[ret], pci_dev, nl_rdma); 33802e569a37SViacheslav Ovsiienko if (bd >= 0) { 33812e569a37SViacheslav Ovsiienko /* 33822e569a37SViacheslav Ovsiienko * Bonding device detected. Only one match is allowed, 33832e569a37SViacheslav Ovsiienko * the bonding is supported over multi-port IB device, 33842e569a37SViacheslav Ovsiienko * there should be no matches on representor PCI 33852e569a37SViacheslav Ovsiienko * functions or non VF LAG bonding devices with 33862e569a37SViacheslav Ovsiienko * specified address. 33872e569a37SViacheslav Ovsiienko */ 33882e569a37SViacheslav Ovsiienko if (nd) { 33892e569a37SViacheslav Ovsiienko DRV_LOG(ERR, 33902e569a37SViacheslav Ovsiienko "multiple PCI match on bonding device" 33912e569a37SViacheslav Ovsiienko "\"%s\" found", ibv_list[ret]->name); 33922e569a37SViacheslav Ovsiienko rte_errno = ENOENT; 33932e569a37SViacheslav Ovsiienko ret = -rte_errno; 33942e569a37SViacheslav Ovsiienko goto exit; 33952e569a37SViacheslav Ovsiienko } 33962e569a37SViacheslav Ovsiienko DRV_LOG(INFO, "PCI information matches for" 33972e569a37SViacheslav Ovsiienko " slave %d bonding device \"%s\"", 33982e569a37SViacheslav Ovsiienko bd, ibv_list[ret]->name); 33992e569a37SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 34002e569a37SViacheslav Ovsiienko break; 34012e569a37SViacheslav Ovsiienko } 34025cf5f710SViacheslav Ovsiienko if (mlx5_dev_to_pci_addr 34035cf5f710SViacheslav Ovsiienko (ibv_list[ret]->ibdev_path, &pci_addr)) 3404f38c5457SAdrien Mazarguil continue; 3405f38c5457SAdrien Mazarguil if (pci_dev->addr.domain != pci_addr.domain || 3406f38c5457SAdrien Mazarguil pci_dev->addr.bus != pci_addr.bus || 3407f38c5457SAdrien Mazarguil pci_dev->addr.devid != pci_addr.devid || 3408f38c5457SAdrien Mazarguil pci_dev->addr.function != pci_addr.function) 3409f38c5457SAdrien Mazarguil continue; 341026c08b97SAdrien Mazarguil DRV_LOG(INFO, "PCI information matches for device \"%s\"", 3411f38c5457SAdrien Mazarguil ibv_list[ret]->name); 3412ad74bc61SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 341326c08b97SAdrien Mazarguil } 3414ad74bc61SViacheslav Ovsiienko ibv_match[nd] = NULL; 3415ad74bc61SViacheslav Ovsiienko if (!nd) { 3416ae4eb7dcSViacheslav Ovsiienko /* No device matches, just complain and bail out. 
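		 * (A common cause is that the mlx5_core/mlx5_ib/ib_uverbs
		 * kernel modules are not loaded for this PCI function, as
		 * the log message below hints.)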
*/ 3417ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, 3418ad74bc61SViacheslav Ovsiienko "no Verbs device matches PCI device " PCI_PRI_FMT "," 3419ad74bc61SViacheslav Ovsiienko " are kernel drivers loaded?", 3420ad74bc61SViacheslav Ovsiienko pci_dev->addr.domain, pci_dev->addr.bus, 3421ad74bc61SViacheslav Ovsiienko pci_dev->addr.devid, pci_dev->addr.function); 3422ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 3423ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 3424a62ec991SViacheslav Ovsiienko goto exit; 3425ad74bc61SViacheslav Ovsiienko } 3426ad74bc61SViacheslav Ovsiienko if (nd == 1) { 342726c08b97SAdrien Mazarguil /* 3428ad74bc61SViacheslav Ovsiienko * Found single matching device may have multiple ports. 3429ad74bc61SViacheslav Ovsiienko * Each port may be representor, we have to check the port 3430ad74bc61SViacheslav Ovsiienko * number and check the representors existence. 343126c08b97SAdrien Mazarguil */ 3432ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 3433ad74bc61SViacheslav Ovsiienko np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name); 3434ad74bc61SViacheslav Ovsiienko if (!np) 3435ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, "can not get IB device \"%s\"" 3436ad74bc61SViacheslav Ovsiienko " ports number", ibv_match[0]->name); 34372e569a37SViacheslav Ovsiienko if (bd >= 0 && !np) { 34382e569a37SViacheslav Ovsiienko DRV_LOG(ERR, "can not get ports" 34392e569a37SViacheslav Ovsiienko " for bonding device"); 34402e569a37SViacheslav Ovsiienko rte_errno = ENOENT; 34412e569a37SViacheslav Ovsiienko ret = -rte_errno; 34422e569a37SViacheslav Ovsiienko goto exit; 34432e569a37SViacheslav Ovsiienko } 3444ad74bc61SViacheslav Ovsiienko } 3445790164ceSViacheslav Ovsiienko #ifndef HAVE_MLX5DV_DR_DEVX_PORT 3446790164ceSViacheslav Ovsiienko if (bd >= 0) { 3447790164ceSViacheslav Ovsiienko /* 3448790164ceSViacheslav Ovsiienko * This may happen if there is VF LAG kernel support and 3449790164ceSViacheslav Ovsiienko * application is compiled with older rdma_core library. 3450790164ceSViacheslav Ovsiienko */ 3451790164ceSViacheslav Ovsiienko DRV_LOG(ERR, 3452790164ceSViacheslav Ovsiienko "No kernel/verbs support for VF LAG bonding found."); 3453790164ceSViacheslav Ovsiienko rte_errno = ENOTSUP; 3454790164ceSViacheslav Ovsiienko ret = -rte_errno; 3455790164ceSViacheslav Ovsiienko goto exit; 3456790164ceSViacheslav Ovsiienko } 3457790164ceSViacheslav Ovsiienko #endif 3458ad74bc61SViacheslav Ovsiienko /* 3459ad74bc61SViacheslav Ovsiienko * Now we can determine the maximal 3460ad74bc61SViacheslav Ovsiienko * amount of devices to be spawned. 3461ad74bc61SViacheslav Ovsiienko */ 3462a62ec991SViacheslav Ovsiienko list = rte_zmalloc("device spawn data", 3463a62ec991SViacheslav Ovsiienko sizeof(struct mlx5_dev_spawn_data) * 3464a62ec991SViacheslav Ovsiienko (np ? np : nd), 3465a62ec991SViacheslav Ovsiienko RTE_CACHE_LINE_SIZE); 3466a62ec991SViacheslav Ovsiienko if (!list) { 3467a62ec991SViacheslav Ovsiienko DRV_LOG(ERR, "spawn data array allocation failure"); 3468a62ec991SViacheslav Ovsiienko rte_errno = ENOMEM; 3469a62ec991SViacheslav Ovsiienko ret = -rte_errno; 3470a62ec991SViacheslav Ovsiienko goto exit; 3471a62ec991SViacheslav Ovsiienko } 34722e569a37SViacheslav Ovsiienko if (bd >= 0 || np > 1) { 3473ad74bc61SViacheslav Ovsiienko /* 3474ae4eb7dcSViacheslav Ovsiienko * Single IB device with multiple ports found, 3475ad74bc61SViacheslav Ovsiienko * it may be E-Switch master device and representors. 34765a448a55SMuhammad Bilal * We have to perform identification through the ports. 
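		 * (For each port the loop below queries the ifindex over the
		 * Netlink RDMA socket, then classifies the port as master or
		 * representor via Netlink route or, failing that, via sysfs.)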
3477ad74bc61SViacheslav Ovsiienko */ 34788e46d4e1SAlexander Kozyrev MLX5_ASSERT(nl_rdma >= 0); 34798e46d4e1SAlexander Kozyrev MLX5_ASSERT(ns == 0); 34808e46d4e1SAlexander Kozyrev MLX5_ASSERT(nd == 1); 34818e46d4e1SAlexander Kozyrev MLX5_ASSERT(np); 3482ad74bc61SViacheslav Ovsiienko for (i = 1; i <= np; ++i) { 3483ad74bc61SViacheslav Ovsiienko list[ns].max_port = np; 3484ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = i; 3485ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[0]; 3486ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 3487ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev; 34882e569a37SViacheslav Ovsiienko list[ns].pf_bond = bd; 3489ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 3490ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, i); 3491ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 3492ad74bc61SViacheslav Ovsiienko /* 3493ad74bc61SViacheslav Ovsiienko * No network interface index found for the 3494ad74bc61SViacheslav Ovsiienko * specified port, it means there is no 3495ad74bc61SViacheslav Ovsiienko * representor on this port. It's OK, 3496ad74bc61SViacheslav Ovsiienko * there can be disabled ports, for example 3497ad74bc61SViacheslav Ovsiienko * if sriov_numvfs < sriov_totalvfs. 3498ad74bc61SViacheslav Ovsiienko */ 349926c08b97SAdrien Mazarguil continue; 350026c08b97SAdrien Mazarguil } 3501ad74bc61SViacheslav Ovsiienko ret = -1; 350226c08b97SAdrien Mazarguil if (nl_route >= 0) 3503ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 3504ad74bc61SViacheslav Ovsiienko (nl_route, 3505ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 3506ad74bc61SViacheslav Ovsiienko &list[ns].info); 3507ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 3508ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 3509ad74bc61SViacheslav Ovsiienko /* 3510ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 3511ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 3512ad74bc61SViacheslav Ovsiienko * with sysfs. 
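				 * (mlx5_sysfs_switch_info() presumably reads
				 * the netdev's phys_port_name/phys_switch_id
				 * attributes; the exact attribute names are
				 * an assumption, not verified here.)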
3513ad74bc61SViacheslav Ovsiienko */ 3514ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 3515ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 3516ad74bc61SViacheslav Ovsiienko &list[ns].info); 3517ad74bc61SViacheslav Ovsiienko } 35182e569a37SViacheslav Ovsiienko if (!ret && bd >= 0) { 35192e569a37SViacheslav Ovsiienko switch (list[ns].info.name_type) { 35202e569a37SViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: 35212e569a37SViacheslav Ovsiienko if (list[ns].info.port_name == bd) 35222e569a37SViacheslav Ovsiienko ns++; 35232e569a37SViacheslav Ovsiienko break; 35242e569a37SViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_PFVF: 35252e569a37SViacheslav Ovsiienko if (list[ns].info.pf_num == bd) 35262e569a37SViacheslav Ovsiienko ns++; 35272e569a37SViacheslav Ovsiienko break; 35282e569a37SViacheslav Ovsiienko default: 35292e569a37SViacheslav Ovsiienko break; 35302e569a37SViacheslav Ovsiienko } 35312e569a37SViacheslav Ovsiienko continue; 35322e569a37SViacheslav Ovsiienko } 3533ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 3534ad74bc61SViacheslav Ovsiienko list[ns].info.master)) 3535ad74bc61SViacheslav Ovsiienko ns++; 3536ad74bc61SViacheslav Ovsiienko } 3537ad74bc61SViacheslav Ovsiienko if (!ns) { 353826c08b97SAdrien Mazarguil DRV_LOG(ERR, 3539ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 3540ad74bc61SViacheslav Ovsiienko " on the IB device with multiple ports"); 3541ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 3542ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 3543ad74bc61SViacheslav Ovsiienko goto exit; 3544ad74bc61SViacheslav Ovsiienko } 3545ad74bc61SViacheslav Ovsiienko } else { 3546ad74bc61SViacheslav Ovsiienko /* 3547ad74bc61SViacheslav Ovsiienko * The existence of several matching entries (nd > 1) means 3548ad74bc61SViacheslav Ovsiienko * port representors have been instantiated. No existing Verbs 3549ad74bc61SViacheslav Ovsiienko * call nor sysfs entries can tell them apart, this can only 3550ad74bc61SViacheslav Ovsiienko * be done through Netlink calls assuming kernel drivers are 3551ad74bc61SViacheslav Ovsiienko * recent enough to support them. 3552ad74bc61SViacheslav Ovsiienko * 3553ad74bc61SViacheslav Ovsiienko * In the event of identification failure through Netlink, 3554ad74bc61SViacheslav Ovsiienko * try again through sysfs, then: 3555ad74bc61SViacheslav Ovsiienko * 3556ad74bc61SViacheslav Ovsiienko * 1. A single IB device matches (nd == 1) with single 3557ad74bc61SViacheslav Ovsiienko * port (np=0/1) and is not a representor, assume 3558ad74bc61SViacheslav Ovsiienko * no switch support. 3559ad74bc61SViacheslav Ovsiienko * 3560ad74bc61SViacheslav Ovsiienko * 2. Otherwise no safe assumptions can be made; 3561ad74bc61SViacheslav Ovsiienko * complain louder and bail out. 
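		 * (The second case results in the "unable to recognize
		 * master/representors" error reported further below.)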
		 */
		np = 1;
		for (i = 0; i != nd; ++i) {
			memset(&list[ns].info, 0, sizeof(list[ns].info));
			list[ns].max_port = 1;
			list[ns].ibv_port = 1;
			list[ns].ibv_dev = ibv_match[i];
			list[ns].eth_dev = NULL;
			list[ns].pci_dev = pci_dev;
			list[ns].pf_bond = -1;
			list[ns].ifindex = 0;
			if (nl_rdma >= 0)
				list[ns].ifindex = mlx5_nl_ifindex
					(nl_rdma, list[ns].ibv_dev->name, 1);
			if (!list[ns].ifindex) {
				char ifname[IF_NAMESIZE];

				/*
				 * Netlink failed, it may happen with old
				 * ib_core kernel driver (before 4.16).
				 * We can assume there is an old driver
				 * because here we are processing single-port
				 * IB devices. Let's try sysfs to retrieve
				 * the ifindex. The method works for
				 * master device only.
				 */
				if (nd > 1) {
					/*
					 * Multiple devices found, assume
					 * representors, can not distinguish
					 * master/representor and retrieve
					 * ifindex via sysfs.
					 */
					continue;
				}
				ret = mlx5_get_master_ifname
					(ibv_match[i]->ibdev_path, &ifname);
				if (!ret)
					list[ns].ifindex =
						if_nametoindex(ifname);
				if (!list[ns].ifindex) {
					/*
					 * No network interface index found
					 * for the specified device, it means
					 * it is neither representor
					 * nor master.
					 */
					continue;
				}
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info
						(nl_route,
						 list[ns].ifindex,
						 &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
						(list[ns].ifindex,
						 &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master)) {
				ns++;
			} else if ((nd == 1) &&
				   !list[ns].info.representor &&
				   !list[ns].info.master) {
				/*
				 * Single IB device with
				 * one physical port and
				 * attached network device.
				 * Maybe SR-IOV is not enabled
				 * or there are no representors.
				 */
				DRV_LOG(INFO, "no E-Switch support detected");
				ns++;
				break;
			}
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the multiple IB devices");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	}
	MLX5_ASSERT(ns);
	/*
	 * Sort list to probe devices in natural order for users' convenience
	 * (i.e. master first, then representors from lowest to highest ID).
	 */
	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
	/* Default configuration. */
	dev_config = (struct mlx5_dev_config){
		.hw_padding = 0,
		.mps = MLX5_ARG_UNSET,
		.dbnc = MLX5_ARG_UNSET,
		.rx_vec_en = 1,
		.txq_inline_max = MLX5_ARG_UNSET,
		.txq_inline_min = MLX5_ARG_UNSET,
		.txq_inline_mpw = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.vf_nl_en = 1,
		.mr_ext_memseg_en = 1,
		.mprq = {
			.enabled = 0, /* Disabled by default. */
			.stride_num_n = 0,
			.stride_size_n = 0,
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
		.dv_esw_en = 1,
		.dv_flow_en = 1,
		.log_hp_size = MLX5_ARG_UNSET,
	};
	/* Device specific configuration.
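	 * (VF variants are matched by PCI device ID below so that the
	 * VF-specific default, dev_config.vf = 1, is applied.)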
*/ 3686f38c5457SAdrien Mazarguil switch (pci_dev->id.device_id) { 3687f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 3688f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 3689f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 3690f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 3691a40b734bSViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 3692c930f02cSViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 36935fc66630SRaslan Darawsheh case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF: 3694f87bfa8eSYongseok Koh dev_config.vf = 1; 3695f38c5457SAdrien Mazarguil break; 3696f38c5457SAdrien Mazarguil default: 3697f87bfa8eSYongseok Koh break; 3698f38c5457SAdrien Mazarguil } 3699ad74bc61SViacheslav Ovsiienko for (i = 0; i != ns; ++i) { 37002b730263SAdrien Mazarguil uint32_t restore; 37012b730263SAdrien Mazarguil 3702f87bfa8eSYongseok Koh list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 3703ad74bc61SViacheslav Ovsiienko &list[i], 3704ad74bc61SViacheslav Ovsiienko dev_config); 37056de569f5SAdrien Mazarguil if (!list[i].eth_dev) { 3706206254b7SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 37072b730263SAdrien Mazarguil break; 3708206254b7SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 37096de569f5SAdrien Mazarguil continue; 37106de569f5SAdrien Mazarguil } 3711116f90adSAdrien Mazarguil restore = list[i].eth_dev->data->dev_flags; 3712116f90adSAdrien Mazarguil rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 37132b730263SAdrien Mazarguil /* Restore non-PCI flags cleared by the above call. */ 3714116f90adSAdrien Mazarguil list[i].eth_dev->data->dev_flags |= restore; 3715116f90adSAdrien Mazarguil rte_eth_dev_probing_finish(list[i].eth_dev); 37162b730263SAdrien Mazarguil } 3717ad74bc61SViacheslav Ovsiienko if (i != ns) { 3718f38c5457SAdrien Mazarguil DRV_LOG(ERR, 3719f38c5457SAdrien Mazarguil "probe of PCI device " PCI_PRI_FMT " aborted after" 3720f38c5457SAdrien Mazarguil " encountering an error: %s", 3721f38c5457SAdrien Mazarguil pci_dev->addr.domain, pci_dev->addr.bus, 3722f38c5457SAdrien Mazarguil pci_dev->addr.devid, pci_dev->addr.function, 3723f38c5457SAdrien Mazarguil strerror(rte_errno)); 3724f38c5457SAdrien Mazarguil ret = -rte_errno; 37252b730263SAdrien Mazarguil /* Roll back. */ 37262b730263SAdrien Mazarguil while (i--) { 37276de569f5SAdrien Mazarguil if (!list[i].eth_dev) 37286de569f5SAdrien Mazarguil continue; 3729116f90adSAdrien Mazarguil mlx5_dev_close(list[i].eth_dev); 3730e16adf08SThomas Monjalon /* mac_addrs must not be freed because in dev_private */ 3731e16adf08SThomas Monjalon list[i].eth_dev->data->mac_addrs = NULL; 3732116f90adSAdrien Mazarguil claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 37332b730263SAdrien Mazarguil } 37342b730263SAdrien Mazarguil /* Restore original error. 
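		 * (ret still holds the negated rte_errno captured above; the
		 * rollback calls may have overwritten rte_errno, so put the
		 * original code back before returning.)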
		 */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
exit:
	/*
	 * Do the routine cleanup:
	 * - close opened Netlink sockets
	 * - free allocated spawn data array
	 * - free the Infiniband device list
	 */
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	if (list)
		rte_free(list);
	MLX5_ASSERT(ibv_list);
	mlx5_glue->free_device_list(ibv_list);
	return ret;
}

/**
 * Look for the Ethernet device belonging to mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for device.
 * @param[in] pci_dev
 *   Pointer to the hint PCI device. When a device is being probed,
 *   its siblings (master and preceding representors) might not have
 *   an assigned driver yet (because mlx5_pci_probe() is not completed
 *   yet); in this case a match on the hint PCI device may be used to
 *   detect the sibling device.
 *
 * @return
 *   port_id of found device, RTE_MAX_ETHPORTS if not found.
 */
uint16_t
mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
{
	while (port_id < RTE_MAX_ETHPORTS) {
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		if (dev->state != RTE_ETH_DEV_UNUSED &&
		    dev->device &&
		    (dev->device == &pci_dev->device ||
		     (dev->device->driver &&
		      dev->device->driver->name &&
		      !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
			break;
		port_id++;
	}
	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;
	return port_id;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI device.
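 *
 * Ports are visited with RTE_ETH_FOREACH_DEV_OF(); a secondary process
 * calls mlx5_dev_close() directly because the close routine is not
 * registered for it (see the loop body below).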
37963a820742SOphir Munk * 37973a820742SOphir Munk * @param[in] pci_dev 37983a820742SOphir Munk * Pointer to the PCI device. 37993a820742SOphir Munk * 38003a820742SOphir Munk * @return 38013a820742SOphir Munk * 0 on success, the function cannot fail. 38023a820742SOphir Munk */ 38033a820742SOphir Munk static int 38043a820742SOphir Munk mlx5_pci_remove(struct rte_pci_device *pci_dev) 38053a820742SOphir Munk { 38063a820742SOphir Munk uint16_t port_id; 38073a820742SOphir Munk 38082786b7bfSSuanming Mou RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { 38092786b7bfSSuanming Mou /* 38102786b7bfSSuanming Mou * mlx5_dev_close() is not registered to secondary process, 38112786b7bfSSuanming Mou * call the close function explicitly for secondary process. 38122786b7bfSSuanming Mou */ 38132786b7bfSSuanming Mou if (rte_eal_process_type() == RTE_PROC_SECONDARY) 38142786b7bfSSuanming Mou mlx5_dev_close(&rte_eth_devices[port_id]); 38152786b7bfSSuanming Mou else 38163a820742SOphir Munk rte_eth_dev_close(port_id); 38172786b7bfSSuanming Mou } 38183a820742SOphir Munk return 0; 38193a820742SOphir Munk } 38203a820742SOphir Munk 3821771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = { 3822771fa900SAdrien Mazarguil { 38231d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 38241d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4) 3825771fa900SAdrien Mazarguil }, 3826771fa900SAdrien Mazarguil { 38271d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 38281d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) 3829771fa900SAdrien Mazarguil }, 3830771fa900SAdrien Mazarguil { 38311d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 38321d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) 3833771fa900SAdrien Mazarguil }, 3834771fa900SAdrien Mazarguil { 38351d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 38361d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) 3837771fa900SAdrien Mazarguil }, 3838771fa900SAdrien Mazarguil { 3839528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3840528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5) 3841528a9fbeSYongseok Koh }, 3842528a9fbeSYongseok Koh { 3843528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3844528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) 3845528a9fbeSYongseok Koh }, 3846528a9fbeSYongseok Koh { 3847528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3848528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) 3849528a9fbeSYongseok Koh }, 3850528a9fbeSYongseok Koh { 3851528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3852528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) 3853528a9fbeSYongseok Koh }, 3854528a9fbeSYongseok Koh { 3855dd3331c6SShahaf Shuler RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3856dd3331c6SShahaf Shuler PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) 3857dd3331c6SShahaf Shuler }, 3858dd3331c6SShahaf Shuler { 3859c322c0e5SOri Kam RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3860c322c0e5SOri Kam PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) 3861c322c0e5SOri Kam }, 3862c322c0e5SOri Kam { 3863f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3864f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6) 3865f0354d84SWisam Jaddo }, 3866f0354d84SWisam Jaddo { 3867f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3868f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) 3869f0354d84SWisam Jaddo }, 3870f0354d84SWisam Jaddo { 38715fc66630SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 
38725fc66630SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX6DX) 38735fc66630SRaslan Darawsheh }, 38745fc66630SRaslan Darawsheh { 38755fc66630SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 38765fc66630SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF) 38775fc66630SRaslan Darawsheh }, 38785fc66630SRaslan Darawsheh { 387958b4a2b1SRaslan Darawsheh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 388058b4a2b1SRaslan Darawsheh PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF) 388158b4a2b1SRaslan Darawsheh }, 388258b4a2b1SRaslan Darawsheh { 3883771fa900SAdrien Mazarguil .vendor_id = 0 3884771fa900SAdrien Mazarguil } 3885771fa900SAdrien Mazarguil }; 3886771fa900SAdrien Mazarguil 3887fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver = { 38882f3193cfSJan Viktorin .driver = { 38892f3193cfSJan Viktorin .name = MLX5_DRIVER_NAME 38902f3193cfSJan Viktorin }, 3891771fa900SAdrien Mazarguil .id_table = mlx5_pci_id_map, 3892af424af8SShreyansh Jain .probe = mlx5_pci_probe, 38933a820742SOphir Munk .remove = mlx5_pci_remove, 3894989e999dSShahaf Shuler .dma_map = mlx5_dma_map, 3895989e999dSShahaf Shuler .dma_unmap = mlx5_dma_unmap, 389669c06d0eSYongseok Koh .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV | 3897b76fafb1SDavid Marchand RTE_PCI_DRV_PROBE_AGAIN, 3898771fa900SAdrien Mazarguil }; 3899771fa900SAdrien Mazarguil 3900771fa900SAdrien Mazarguil /** 3901771fa900SAdrien Mazarguil * Driver initialization routine. 3902771fa900SAdrien Mazarguil */ 3903f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init) 3904771fa900SAdrien Mazarguil { 39053d96644aSStephen Hemminger /* Initialize driver log type. */ 39063d96644aSStephen Hemminger mlx5_logtype = rte_log_register("pmd.net.mlx5"); 39073d96644aSStephen Hemminger if (mlx5_logtype >= 0) 39083d96644aSStephen Hemminger rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); 39093d96644aSStephen Hemminger 39105f8ba81cSXueming Li /* Build the static tables for Verbs conversion. */ 3911ea16068cSYongseok Koh mlx5_set_ptype_table(); 39125f8ba81cSXueming Li mlx5_set_cksum_table(); 39135f8ba81cSXueming Li mlx5_set_swp_types_table(); 39147b4f1e6bSMatan Azrad if (mlx5_glue) 39153dcfe039SThomas Monjalon rte_pci_register(&mlx5_driver); 3916771fa900SAdrien Mazarguil } 3917771fa900SAdrien Mazarguil 391801f19227SShreyansh Jain RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); 391901f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); 39200880c401SOlivier Matz RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); 3921