/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

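/*
 * These keys are parsed from the PCI device arguments by mlx5_args_check()
 * below. As a minimal illustration (hypothetical PCI address, not an
 * exhaustive list of keys), they can be passed on the EAL command line as:
 *
 *   -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txqs_min_inline=8
 */
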
/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
	uint32_t ifindex; /**< Network interface index. */
	uint32_t max_port; /**< IB device maximal port index. */
	uint32_t ibv_port; /**< IB device physical port index. */
	int pf_bond; /**< bonding device PF index. < 0 - no bonding */
	struct mlx5_switch_info info; /**< Switch information. */
	struct ibv_device *ibv_dev; /**< Associated IB device. */
	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
	struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to initialize.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
{
	uint8_t i;

	TAILQ_INIT(&sh->cmng.flow_counters);
	for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
}

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
	rte_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	uint8_t i;
	int j;
	int retries = 1024;

	/*
	 * rte_eal_alarm_cancel() leaves rte_errno set to EINPROGRESS while
	 * the callback is still executing; spin until the periodic counter
	 * query alarm is fully cancelled.
	 */
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
		struct mlx5_flow_counter_pool *pool;
		uint32_t batch = !!(i % 2);

		/*
		 * Odd-indexed containers hold batch-allocated pools: a batch
		 * pool shares a single devX object (min_dcs), while non-batch
		 * pools keep one devX object (dcs) per counter.
		 */
		if (!sh->cmng.ccont[i].pools)
			continue;
		pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		while (pool) {
			if (batch) {
				if (pool->min_dcs)
					claim_zero
					(mlx5_devx_cmd_destroy(pool->min_dcs));
			}
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				if (pool->counters_raw[j].action)
					claim_zero
					 (mlx5_glue->destroy_flow_action
					  (pool->counters_raw[j].action));
				if (!batch && pool->counters_raw[j].dcs)
					claim_zero(mlx5_devx_cmd_destroy
						  (pool->counters_raw[j].dcs));
			}
			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
				     next);
			rte_free(pool);
			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		}
		rte_free(sh->cmng.ccont[i].pools);
	}
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Extract pdn of PD object using DV API.
 *
 * @param[in] pd
 *   Pointer to the verbs PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to get PD object info");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */

/**
 * Allocate the shared IB device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single-port
 * dedicated IB device, the context will be used only by the given port due
 * to unification.
 *
 * The routine first searches the list for the specified IB device name;
 * if found, the shared context is reused and its reference counter is
 * incremented. If no context is found, a new one is created and initialized
 * with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
{
	struct mlx5_ibv_shared *sh;
	int err = 0;
	uint32_t i;

	assert(spawn);
	/* Secondary process should not create the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	assert(spawn->max_port);
	sh = rte_zmalloc("ethdev shared ib context",
			 sizeof(struct mlx5_ibv_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_ibv_shared_port),
			 RTE_CACHE_LINE_SIZE);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Try to open IB device with DV first, then usual Verbs. */
	errno = 0;
	sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
	if (sh->ctx) {
		sh->devx = 1;
		DRV_LOG(DEBUG, "DevX is supported");
	} else {
		sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
		if (!sh->ctx) {
			err = errno ? errno : ENODEV;
			goto error;
		}
		DRV_LOG(DEBUG, "DevX is NOT supported");
	}
	err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, sh->ctx->device->name,
		sizeof(sh->ibdev_name));
	strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
		sizeof(sh->ibdev_path));
	pthread_mutex_init(&sh->intr_mutex, NULL);
	/*
	 * Setting port_id to the maximum unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++)
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	err = mlx5_get_pdn(sh->pd, &sh->pdn);
	if (err) {
		DRV_LOG(ERR, "Fail to extract pdn from PD");
		goto error;
	}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->mr.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 spawn->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_flow_counters_mng_init(sh);
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	return sh;
error:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	assert(sh);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
	assert(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifndef NDEBUG
	/* Check the object presence in the list. */
	struct mlx5_ibv_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_ibv_list, next)
		if (lctx == sh)
			break;
	assert(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	assert(sh);
	assert(sh->refcnt);
	/* Secondary process should not free the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	/* Release created Memory Regions. */
	mlx5_mr_release(sh);
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 */
	mlx5_flow_counters_mng_close(sh);
	assert(!sh->intr_cnt);
	if (sh->intr_cnt)
		mlx5_intr_callback_unregister
			(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
	pthread_mutex_destroy(&sh->intr_mutex);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}

/**
 * Initialize DR related data within the private structure.
 * The routine checks the reference counter and performs the actual
 * resource creation/initialization only if the counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh = priv->sh;
	int err = 0;
	void *domain;

	assert(sh);
	if (sh->dv_refcnt) {
		/* Shared DV/DR structures are already initialized. */
		sh->dv_refcnt++;
		priv->dr_shared = 1;
		return 0;
	}
	/* Reference counter is zero, we should initialize structures. */
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!domain) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->rx_domain = domain;
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
	if (!domain) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	pthread_mutex_init(&sh->dv_mutex, NULL);
	sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (priv->config.dv_esw_en) {
		domain = mlx5_glue->dr_create_domain
			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
		if (!domain) {
			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
			err = errno;
			goto error;
		}
		sh->fdb_domain = domain;
		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
	}
#endif
	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
	sh->dv_refcnt++;
	priv->dr_shared = 1;
	return 0;

error:
	/* Rollback the created objects. */
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
	return err;
#else
	(void)priv;
	return 0;
#endif
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh;

	if (!priv->dr_shared)
		return;
	priv->dr_shared = 0;
	sh = priv->sh;
	assert(sh);
	assert(sh->dv_refcnt);
	if (sh->dv_refcnt && --sh->dv_refcnt)
		return;
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
#endif
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
	pthread_mutex_destroy(&sh->dv_mutex);
#else
	(void)priv;
#endif
}

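/*
 * Note: both the shared IB context (mlx5_alloc_shared_ibctx() /
 * mlx5_free_shared_ibctx()) and the DV/DR structures above
 * (mlx5_alloc_shared_dr() / mlx5_free_shared_dr()) are reference counted.
 * They are expected to be acquired from the device spawn path and released
 * from mlx5_dev_close(); only the last release frees the underlying
 * resources.
 */
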
/**
 * Initialize shared data between primary and secondary processes.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided, residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * DPDK callback to add a UDP tunnel port.
 *
 * @param[in] dev
 *   A pointer to the eth_dev.
 * @param[in] udp_tunnel
 *   A pointer to the UDP tunnel.
 *
 * @return
 *   0 on valid UDP ports and tunnels, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	assert(udp_tunnel != NULL);
	/*
	 * Only the default IANA-assigned ports are supported:
	 * 4789 (VXLAN) and 4790 (VXLAN-GPE).
	 */
	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
	    udp_tunnel->udp_port == 4789)
		return 0;
	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
	    udp_tunnel->udp_port == 4790)
		return 0;
	return -ENOTSUP;
}

/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_proc_priv *ppriv;
	size_t ppriv_size;

	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size =
		sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
	ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = ppriv_size;
	dev->process_private = ppriv;
	return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	rte_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	mlx5_flow_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	mlx5_mprq_free_mp(dev);
	mlx5_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->vmwa_context)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
	if (priv->sh) {
		/*
		 * Free the shared context as the last step, because the
		 * cleanup routines above may use some shared fields, like
		 * mlx5_nl_mac_addr_flush() which uses ibdev_path for
		 * retrieving the ifindex if Netlink fails.
		 */
		mlx5_free_shared_ibctx(priv->sh);
		priv->sh = NULL;
	}
	ret = mlx5_hrxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
};

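/*
 * Note: mlx5_dev_ops above is the default table installed for primary
 * processes. The two reduced tables below are alternatives: mlx5_dev_sec_ops
 * is used by secondary processes, and mlx5_dev_ops_isolate is expected to be
 * swapped in when flow isolation mode is enabled (see mlx5_flow_isolate()).
 */
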
/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
Laranjeiro .rx_queue_intr_enable = mlx5_rx_intr_enable, 10570887aa7fSNélio Laranjeiro .rx_queue_intr_disable = mlx5_rx_intr_disable, 1058d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 10598a6a09f8SDekel Peled .get_module_info = mlx5_get_module_info, 10608a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 10610887aa7fSNélio Laranjeiro }; 10620887aa7fSNélio Laranjeiro 1063e72dd09bSNélio Laranjeiro /** 1064e72dd09bSNélio Laranjeiro * Verify and store value for device argument. 1065e72dd09bSNélio Laranjeiro * 1066e72dd09bSNélio Laranjeiro * @param[in] key 1067e72dd09bSNélio Laranjeiro * Key argument to verify. 1068e72dd09bSNélio Laranjeiro * @param[in] val 1069e72dd09bSNélio Laranjeiro * Value associated with key. 1070e72dd09bSNélio Laranjeiro * @param opaque 1071e72dd09bSNélio Laranjeiro * User data. 1072e72dd09bSNélio Laranjeiro * 1073e72dd09bSNélio Laranjeiro * @return 1074a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1075e72dd09bSNélio Laranjeiro */ 1076e72dd09bSNélio Laranjeiro static int 1077e72dd09bSNélio Laranjeiro mlx5_args_check(const char *key, const char *val, void *opaque) 1078e72dd09bSNélio Laranjeiro { 10797fe24446SShahaf Shuler struct mlx5_dev_config *config = opaque; 108099c12dccSNélio Laranjeiro unsigned long tmp; 1081e72dd09bSNélio Laranjeiro 10826de569f5SAdrien Mazarguil /* No-op, port representors are processed in mlx5_dev_spawn(). */ 10836de569f5SAdrien Mazarguil if (!strcmp(MLX5_REPRESENTOR, key)) 10846de569f5SAdrien Mazarguil return 0; 108599c12dccSNélio Laranjeiro errno = 0; 108699c12dccSNélio Laranjeiro tmp = strtoul(val, NULL, 0); 108799c12dccSNélio Laranjeiro if (errno) { 1088a6d83b6aSNélio Laranjeiro rte_errno = errno; 1089a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); 1090a6d83b6aSNélio Laranjeiro return -rte_errno; 109199c12dccSNélio Laranjeiro } 109299c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 10937fe24446SShahaf Shuler config->cqe_comp = !!tmp; 1094bc91e8dbSYongseok Koh } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { 1095bc91e8dbSYongseok Koh config->cqe_pad = !!tmp; 109678c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 109778c7a16dSYongseok Koh config->hw_padding = !!tmp; 10987d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 10997d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 11007d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 11017d6bf6b8SYongseok Koh config->mprq.stride_num_n = tmp; 11027d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 11037d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 11047d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 11057d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 11062a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 1107505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1108505f1fe4SViacheslav Ovsiienko " converted to txq_inline_max", key); 1109505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1110505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { 1111505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1112505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { 1113505f1fe4SViacheslav Ovsiienko config->txq_inline_min = tmp; 1114505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { 
1115505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 11162a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 11177fe24446SShahaf Shuler config->txqs_inline = tmp; 111809d8b416SYongseok Koh } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 1119a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 1120230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 1121f9de8718SShahaf Shuler config->mps = !!tmp; 11226ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 1123a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 11246ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 1125505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1126505f1fe4SViacheslav Ovsiienko " converted to txq_inline_mpw", key); 1127505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 11285644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 1129a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 11305644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { 11317fe24446SShahaf Shuler config->rx_vec_en = !!tmp; 113278a54648SXueming Li } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { 113378a54648SXueming Li config->l3_vxlan_en = !!tmp; 1134db209cc3SNélio Laranjeiro } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { 1135db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 1136e2b4925eSOri Kam } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { 1137e2b4925eSOri Kam config->dv_esw_en = !!tmp; 113851e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 113951e72d38SOri Kam config->dv_flow_en = !!tmp; 1140dceb5029SYongseok Koh } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { 1141dceb5029SYongseok Koh config->mr_ext_memseg_en = !!tmp; 1142066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { 1143066cfecdSMatan Azrad config->max_dump_files_num = tmp; 114421bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { 114521bb6c7eSDekel Peled config->lro.timeout = tmp; 114699c12dccSNélio Laranjeiro } else { 1147a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: unknown parameter", key); 1148a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1149a6d83b6aSNélio Laranjeiro return -rte_errno; 1150e72dd09bSNélio Laranjeiro } 115199c12dccSNélio Laranjeiro return 0; 115299c12dccSNélio Laranjeiro } 1153e72dd09bSNélio Laranjeiro 1154e72dd09bSNélio Laranjeiro /** 1155e72dd09bSNélio Laranjeiro * Parse device parameters. 1156e72dd09bSNélio Laranjeiro * 11577fe24446SShahaf Shuler * @param config 11587fe24446SShahaf Shuler * Pointer to device configuration structure. 1159e72dd09bSNélio Laranjeiro * @param devargs 1160e72dd09bSNélio Laranjeiro * Device arguments structure. 1161e72dd09bSNélio Laranjeiro * 1162e72dd09bSNélio Laranjeiro * @return 1163a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 
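 *
 * Usage note (illustrative; the exact command line below is an assumption):
 * the keys listed in params[] are supplied as a comma-separated key=value
 * list appended to the PCI device argument, e.g. something like
 *   -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=256
 * Each key found in the device arguments is then handed to
 * mlx5_args_check() once per occurrence by rte_kvargs_process().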
1164e72dd09bSNélio Laranjeiro */ 1165e72dd09bSNélio Laranjeiro static int 11667fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1167e72dd09bSNélio Laranjeiro { 1168e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 116999c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1170bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 117178c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 11727d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 11737d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 11747d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 11757d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 11762a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1177505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1178505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1179505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 11802a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 118109d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1182230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 11836ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 11846ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 11855644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 11865644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 118778a54648SXueming Li MLX5_L3_VXLAN_EN, 1188db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1189e2b4925eSOri Kam MLX5_DV_ESW_EN, 119051e72d38SOri Kam MLX5_DV_FLOW_EN, 1191dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 11926de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1193066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 119421bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1195e72dd09bSNélio Laranjeiro NULL, 1196e72dd09bSNélio Laranjeiro }; 1197e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1198e72dd09bSNélio Laranjeiro int ret = 0; 1199e72dd09bSNélio Laranjeiro int i; 1200e72dd09bSNélio Laranjeiro 1201e72dd09bSNélio Laranjeiro if (devargs == NULL) 1202e72dd09bSNélio Laranjeiro return 0; 1203e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. */ 1204e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 120515b0ea00SMatan Azrad if (kvlist == NULL) { 120615b0ea00SMatan Azrad rte_errno = EINVAL; 120715b0ea00SMatan Azrad return -rte_errno; 120815b0ea00SMatan Azrad } 1209e72dd09bSNélio Laranjeiro /* Process parameters. */ 1210e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1211e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1212e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 12137fe24446SShahaf Shuler mlx5_args_check, config); 1214a6d83b6aSNélio Laranjeiro if (ret) { 1215a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1216a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1217a6d83b6aSNélio Laranjeiro return -rte_errno; 1218e72dd09bSNélio Laranjeiro } 1219e72dd09bSNélio Laranjeiro } 1220a67323e4SShahaf Shuler } 1221e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1222e72dd09bSNélio Laranjeiro return 0; 1223e72dd09bSNélio Laranjeiro } 1224e72dd09bSNélio Laranjeiro 1225fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver; 1226771fa900SAdrien Mazarguil 12277be600c8SYongseok Koh /** 12287be600c8SYongseok Koh * PMD global initialization. 12297be600c8SYongseok Koh * 12307be600c8SYongseok Koh * Independent from individual device, this function initializes global 12317be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 12327be600c8SYongseok Koh * Hence, each initialization is called once per a process. 
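 * (For instance, a primary process such as testpmd takes the
 * RTE_PROC_PRIMARY branch below and registers the memory event callback
 * and the multi-process IPC service exactly once, guarded by
 * sd->init_done; a secondary process attaching later only runs
 * mlx5_mp_init_secondary(), increments sd->secondary_cnt and marks its
 * local data as initialized.)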
12337be600c8SYongseok Koh * 12347be600c8SYongseok Koh * @return 12357be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 12367be600c8SYongseok Koh */ 12377be600c8SYongseok Koh static int 12387be600c8SYongseok Koh mlx5_init_once(void) 12397be600c8SYongseok Koh { 12407be600c8SYongseok Koh struct mlx5_shared_data *sd; 12417be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 1242edf73dd3SAnatoly Burakov int ret = 0; 12437be600c8SYongseok Koh 12447be600c8SYongseok Koh if (mlx5_init_shared_data()) 12457be600c8SYongseok Koh return -rte_errno; 12467be600c8SYongseok Koh sd = mlx5_shared_data; 12477be600c8SYongseok Koh assert(sd); 12487be600c8SYongseok Koh rte_spinlock_lock(&sd->lock); 12497be600c8SYongseok Koh switch (rte_eal_process_type()) { 12507be600c8SYongseok Koh case RTE_PROC_PRIMARY: 12517be600c8SYongseok Koh if (sd->init_done) 12527be600c8SYongseok Koh break; 12537be600c8SYongseok Koh LIST_INIT(&sd->mem_event_cb_list); 12547be600c8SYongseok Koh rte_rwlock_init(&sd->mem_event_rwlock); 12557be600c8SYongseok Koh rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", 12567be600c8SYongseok Koh mlx5_mr_mem_event_cb, NULL); 1257edf73dd3SAnatoly Burakov ret = mlx5_mp_init_primary(); 1258edf73dd3SAnatoly Burakov if (ret) 1259edf73dd3SAnatoly Burakov goto out; 12607be600c8SYongseok Koh sd->init_done = true; 12617be600c8SYongseok Koh break; 12627be600c8SYongseok Koh case RTE_PROC_SECONDARY: 12637be600c8SYongseok Koh if (ld->init_done) 12647be600c8SYongseok Koh break; 1265edf73dd3SAnatoly Burakov ret = mlx5_mp_init_secondary(); 1266edf73dd3SAnatoly Burakov if (ret) 1267edf73dd3SAnatoly Burakov goto out; 12687be600c8SYongseok Koh ++sd->secondary_cnt; 12697be600c8SYongseok Koh ld->init_done = true; 12707be600c8SYongseok Koh break; 12717be600c8SYongseok Koh default: 12727be600c8SYongseok Koh break; 12737be600c8SYongseok Koh } 1274edf73dd3SAnatoly Burakov out: 12757be600c8SYongseok Koh rte_spinlock_unlock(&sd->lock); 1276edf73dd3SAnatoly Burakov return ret; 12777be600c8SYongseok Koh } 12787be600c8SYongseok Koh 12797be600c8SYongseok Koh /** 128038b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE 128138b4b397SViacheslav Ovsiienko * while sending packets. 128238b4b397SViacheslav Ovsiienko * 128338b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 128438b4b397SViacheslav Ovsiienko * key is specified in devargs 128538b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 128638b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 128738b4b397SViacheslav Ovsiienko * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX 128838b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs 128938b4b397SViacheslav Ovsiienko * 129038b4b397SViacheslav Ovsiienko * @param spawn 129138b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 129238b4b397SViacheslav Ovsiienko * @param config 129338b4b397SViacheslav Ovsiienko * Device configuration parameters. 
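 *
 * (For reference, and as an assumption not restated from the code: the
 * "18 bytes" of L2 mode mentioned above corresponds to
 * MLX5_INLINE_HSIZE_L2, i.e. a 14-byte Ethernet header plus a 4-byte
 * VLAN tag, which is the minimum ConnectX-4/4LX expects to be inlined
 * into the WQE.)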
129438b4b397SViacheslav Ovsiienko */ 129538b4b397SViacheslav Ovsiienko static void 129638b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, 129738b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config) 129838b4b397SViacheslav Ovsiienko { 129938b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) { 130038b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. */ 130138b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 130238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 130338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 130438b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 130538b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 130638b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 130738b4b397SViacheslav Ovsiienko "txq_inline_min aligned to minimal" 130838b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 130938b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 131038b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 131138b4b397SViacheslav Ovsiienko } 131238b4b397SViacheslav Ovsiienko break; 131338b4b397SViacheslav Ovsiienko } 131438b4b397SViacheslav Ovsiienko goto exit; 131538b4b397SViacheslav Ovsiienko } 131638b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 131738b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 131838b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 131938b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 132038b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 132138b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 132238b4b397SViacheslav Ovsiienko goto exit; 132338b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 132438b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. */ 132538b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 132638b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 132738b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 132838b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 132938b4b397SViacheslav Ovsiienko goto exit; 133038b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 133138b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. 
*/ 133238b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 133338b4b397SViacheslav Ovsiienko break; 133438b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 133538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 133638b4b397SViacheslav Ovsiienko config->txq_inline_min = 133738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 133838b4b397SViacheslav Ovsiienko goto exit; 133938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 134038b4b397SViacheslav Ovsiienko config->txq_inline_min = 134138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 134238b4b397SViacheslav Ovsiienko goto exit; 134338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 134438b4b397SViacheslav Ovsiienko config->txq_inline_min = 134538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 134638b4b397SViacheslav Ovsiienko goto exit; 134738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 134838b4b397SViacheslav Ovsiienko config->txq_inline_min = 134938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 135038b4b397SViacheslav Ovsiienko goto exit; 135138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 135238b4b397SViacheslav Ovsiienko config->txq_inline_min = 135338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 135438b4b397SViacheslav Ovsiienko goto exit; 135538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 135638b4b397SViacheslav Ovsiienko config->txq_inline_min = 135738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 135838b4b397SViacheslav Ovsiienko goto exit; 135938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 136038b4b397SViacheslav Ovsiienko config->txq_inline_min = 136138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 136238b4b397SViacheslav Ovsiienko goto exit; 136338b4b397SViacheslav Ovsiienko } 136438b4b397SViacheslav Ovsiienko } 136538b4b397SViacheslav Ovsiienko } 136638b4b397SViacheslav Ovsiienko /* 136738b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 136838b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 136938b4b397SViacheslav Ovsiienko * to determine old NICs. 137038b4b397SViacheslav Ovsiienko */ 137138b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 137238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 137338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 137438b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 137538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1376614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 137738b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 137838b4b397SViacheslav Ovsiienko break; 137938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 138038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 138138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 138238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 138338b4b397SViacheslav Ovsiienko /* 138438b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 138538b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 138638b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 
138738b4b397SViacheslav Ovsiienko */ 138838b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 138920215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 139038b4b397SViacheslav Ovsiienko break; 139138b4b397SViacheslav Ovsiienko default: 139238b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 139338b4b397SViacheslav Ovsiienko break; 139438b4b397SViacheslav Ovsiienko } 139538b4b397SViacheslav Ovsiienko exit: 139638b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 139738b4b397SViacheslav Ovsiienko } 139838b4b397SViacheslav Ovsiienko 139938b4b397SViacheslav Ovsiienko /** 140021cae858SDekel Peled * Allocate page of door-bells and register it using DevX API. 140121cae858SDekel Peled * 140221cae858SDekel Peled * @param [in] dev 140321cae858SDekel Peled * Pointer to Ethernet device. 140421cae858SDekel Peled * 140521cae858SDekel Peled * @return 140621cae858SDekel Peled * Pointer to new page on success, NULL otherwise. 140721cae858SDekel Peled */ 140821cae858SDekel Peled static struct mlx5_devx_dbr_page * 140921cae858SDekel Peled mlx5_alloc_dbr_page(struct rte_eth_dev *dev) 141021cae858SDekel Peled { 141121cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 141221cae858SDekel Peled struct mlx5_devx_dbr_page *page; 141321cae858SDekel Peled 141421cae858SDekel Peled /* Allocate space for door-bell page and management data. */ 141521cae858SDekel Peled page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page), 141621cae858SDekel Peled RTE_CACHE_LINE_SIZE, dev->device->numa_node); 141721cae858SDekel Peled if (!page) { 141821cae858SDekel Peled DRV_LOG(ERR, "port %u cannot allocate dbr page", 141921cae858SDekel Peled dev->data->port_id); 142021cae858SDekel Peled return NULL; 142121cae858SDekel Peled } 142221cae858SDekel Peled /* Register allocated memory. */ 142321cae858SDekel Peled page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs, 142421cae858SDekel Peled MLX5_DBR_PAGE_SIZE, 0); 142521cae858SDekel Peled if (!page->umem) { 142621cae858SDekel Peled DRV_LOG(ERR, "port %u cannot umem reg dbr page", 142721cae858SDekel Peled dev->data->port_id); 142821cae858SDekel Peled rte_free(page); 142921cae858SDekel Peled return NULL; 143021cae858SDekel Peled } 143121cae858SDekel Peled return page; 143221cae858SDekel Peled } 143321cae858SDekel Peled 143421cae858SDekel Peled /** 143521cae858SDekel Peled * Find the next available door-bell, allocate new page if needed. 143621cae858SDekel Peled * 143721cae858SDekel Peled * @param [in] dev 143821cae858SDekel Peled * Pointer to Ethernet device. 143921cae858SDekel Peled * @param [out] dbr_page 144021cae858SDekel Peled * Door-bell page containing the page data. 144121cae858SDekel Peled * 144221cae858SDekel Peled * @return 144321cae858SDekel Peled * Door-bell address offset on success, a negative error value otherwise. 
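 *
 * Typical usage (an illustrative sketch; the calling code shown here is
 * an assumption and is not part of this file):
 *
 *   struct mlx5_devx_dbr_page *dbr_page;
 *   int64_t offset = mlx5_get_dbr(dev, &dbr_page);
 *
 *   if (offset < 0)
 *           ... handle allocation failure ...
 *   ... program the door-bell located at dbr_page->dbrs + offset ...
 *   mlx5_release_dbr(dev, dbr_page->umem->umem_id, offset);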
*/ 144521cae858SDekel Peled int64_t 144621cae858SDekel Peled mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) 144721cae858SDekel Peled { 144821cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 144921cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 145021cae858SDekel Peled uint32_t i, j; 145121cae858SDekel Peled 145221cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 145321cae858SDekel Peled if (page->dbr_count < MLX5_DBR_PER_PAGE) 145421cae858SDekel Peled break; 145521cae858SDekel Peled if (!page) { /* No page with free door-bell exists. */ 145621cae858SDekel Peled page = mlx5_alloc_dbr_page(dev); 145721cae858SDekel Peled if (!page) /* Failed to allocate new page. */ 145821cae858SDekel Peled return (-1); 145921cae858SDekel Peled LIST_INSERT_HEAD(&priv->dbrpgs, page, next); 146021cae858SDekel Peled } 146121cae858SDekel Peled /* Loop to find bitmap part with clear bit. */ 146221cae858SDekel Peled for (i = 0; 146321cae858SDekel Peled i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX; 146421cae858SDekel Peled i++) 146521cae858SDekel Peled ; /* Empty. */ 146621cae858SDekel Peled /* Find the first clear bit. */ 146721cae858SDekel Peled j = rte_bsf64(~page->dbr_bitmap[i]); 146821cae858SDekel Peled assert(i < (MLX5_DBR_PER_PAGE / 64)); 146921cae858SDekel Peled page->dbr_bitmap[i] |= (1ULL << j); 147021cae858SDekel Peled page->dbr_count++; 147121cae858SDekel Peled *dbr_page = page; 147221cae858SDekel Peled return (((i * 64) + j) * sizeof(uint64_t)); 147321cae858SDekel Peled } 147421cae858SDekel Peled 147521cae858SDekel Peled /** 147621cae858SDekel Peled * Release a door-bell record. 147721cae858SDekel Peled * 147821cae858SDekel Peled * @param [in] dev 147921cae858SDekel Peled * Pointer to Ethernet device. 148021cae858SDekel Peled * @param [in] umem_id 148121cae858SDekel Peled * UMEM ID of page containing the door-bell record to release. 148221cae858SDekel Peled * @param [in] offset 148321cae858SDekel Peled * Offset of door-bell record in page. 148421cae858SDekel Peled * 148521cae858SDekel Peled * @return 148621cae858SDekel Peled * 0 on success, a negative error value otherwise. 148721cae858SDekel Peled */ 148821cae858SDekel Peled int32_t 148921cae858SDekel Peled mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) 149021cae858SDekel Peled { 149121cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 149221cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 149321cae858SDekel Peled int ret = 0; 149421cae858SDekel Peled 149521cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 149621cae858SDekel Peled /* Find the page this address belongs to. */ 149721cae858SDekel Peled if (page->umem->umem_id == umem_id) 149821cae858SDekel Peled break; 149921cae858SDekel Peled if (!page) 150021cae858SDekel Peled return -EINVAL; 150121cae858SDekel Peled page->dbr_count--; 150221cae858SDekel Peled if (!page->dbr_count) { 150321cae858SDekel Peled /* Page not used, free it and remove from list. */ 150421cae858SDekel Peled LIST_REMOVE(page, next); 150521cae858SDekel Peled if (page->umem) 150621cae858SDekel Peled ret = -mlx5_glue->devx_umem_dereg(page->umem); 150721cae858SDekel Peled rte_free(page); 150821cae858SDekel Peled } else { 150921cae858SDekel Peled /* Mark in bitmap that this door-bell is not in use. 
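 * A worked example of the bookkeeping below (illustrative): mlx5_get_dbr()
 * returned ((i * 64) + j) * sizeof(uint64_t), and MLX5_DBR_SIZE is assumed
 * here to equal sizeof(uint64_t), so dividing the offset by MLX5_DBR_SIZE
 * recovers the record index i * 64 + j, from which the bitmap word (i) and
 * the bit within it (j) are rebuilt and cleared.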
*/ 1510a88209b0SDekel Peled offset /= MLX5_DBR_SIZE; 151121cae858SDekel Peled int i = offset / 64; 151221cae858SDekel Peled int j = offset % 64; 151321cae858SDekel Peled 151421cae858SDekel Peled page->dbr_bitmap[i] &= ~(1ULL << j); 151521cae858SDekel Peled } 151621cae858SDekel Peled return ret; 151721cae858SDekel Peled } 151821cae858SDekel Peled 151921cae858SDekel Peled /** 1520f38c5457SAdrien Mazarguil * Spawn an Ethernet device from Verbs information. 1521771fa900SAdrien Mazarguil * 1522f38c5457SAdrien Mazarguil * @param dpdk_dev 1523f38c5457SAdrien Mazarguil * Backing DPDK device. 1524ad74bc61SViacheslav Ovsiienko * @param spawn 1525ad74bc61SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 1526f87bfa8eSYongseok Koh * @param config 1527f87bfa8eSYongseok Koh * Device configuration parameters. 1528771fa900SAdrien Mazarguil * 1529771fa900SAdrien Mazarguil * @return 1530f38c5457SAdrien Mazarguil * A valid Ethernet device object on success, NULL otherwise and rte_errno 1531206254b7SOphir Munk * is set. The following errors are defined: 15326de569f5SAdrien Mazarguil * 15336de569f5SAdrien Mazarguil * EBUSY: device is not supposed to be spawned. 1534206254b7SOphir Munk * EEXIST: device is already spawned 1535771fa900SAdrien Mazarguil */ 1536f38c5457SAdrien Mazarguil static struct rte_eth_dev * 1537f38c5457SAdrien Mazarguil mlx5_dev_spawn(struct rte_device *dpdk_dev, 1538ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data *spawn, 1539ad74bc61SViacheslav Ovsiienko struct mlx5_dev_config config) 1540771fa900SAdrien Mazarguil { 1541ad74bc61SViacheslav Ovsiienko const struct mlx5_switch_info *switch_info = &spawn->info; 154217e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = NULL; 154368128934SAdrien Mazarguil struct ibv_port_attr port_attr; 15446057a10bSAdrien Mazarguil struct mlx5dv_context dv_attr = { .comp_mask = 0 }; 15459083982cSAdrien Mazarguil struct rte_eth_dev *eth_dev = NULL; 1546dbeba4cfSThomas Monjalon struct mlx5_priv *priv = NULL; 1547771fa900SAdrien Mazarguil int err = 0; 154878c7a16dSYongseok Koh unsigned int hw_padding = 0; 1549e192ef80SYaacov Hazan unsigned int mps; 1550523f5a74SYongseok Koh unsigned int cqe_comp; 1551bc91e8dbSYongseok Koh unsigned int cqe_pad = 0; 1552772d3435SXueming Li unsigned int tunnel_en = 0; 15531f106da2SMatan Azrad unsigned int mpls_en = 0; 15545f8ba81cSXueming Li unsigned int swp = 0; 15557d6bf6b8SYongseok Koh unsigned int mprq = 0; 15567d6bf6b8SYongseok Koh unsigned int mprq_min_stride_size_n = 0; 15577d6bf6b8SYongseok Koh unsigned int mprq_max_stride_size_n = 0; 15587d6bf6b8SYongseok Koh unsigned int mprq_min_stride_num_n = 0; 15597d6bf6b8SYongseok Koh unsigned int mprq_max_stride_num_n = 0; 15606d13ea8eSOlivier Matz struct rte_ether_addr mac; 156168128934SAdrien Mazarguil char name[RTE_ETH_NAME_MAX_LEN]; 15622b730263SAdrien Mazarguil int own_domain_id = 0; 1563206254b7SOphir Munk uint16_t port_id; 15642b730263SAdrien Mazarguil unsigned int i; 1565*d5c06b1bSViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR_DEVX_PORT 1566*d5c06b1bSViacheslav Ovsiienko struct mlx5dv_devx_port devx_port; 1567*d5c06b1bSViacheslav Ovsiienko #endif 1568771fa900SAdrien Mazarguil 15696de569f5SAdrien Mazarguil /* Determine if this port representor is supposed to be spawned. 
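 * For example (the devargs string is an assumption, not taken from this
 * file): probing a PF with "representor=[0-2]" in its device arguments
 * makes rte_eth_devargs_parse() below fill eth_da.representor_ports with
 * ports 0, 1 and 2; a representor whose switch port_name is not in that
 * list is rejected with rte_errno set to EBUSY.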
*/ 15706de569f5SAdrien Mazarguil if (switch_info->representor && dpdk_dev->devargs) { 15716de569f5SAdrien Mazarguil struct rte_eth_devargs eth_da; 15726de569f5SAdrien Mazarguil 15736de569f5SAdrien Mazarguil err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da); 15746de569f5SAdrien Mazarguil if (err) { 15756de569f5SAdrien Mazarguil rte_errno = -err; 15766de569f5SAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 15776de569f5SAdrien Mazarguil strerror(rte_errno)); 15786de569f5SAdrien Mazarguil return NULL; 15796de569f5SAdrien Mazarguil } 15806de569f5SAdrien Mazarguil for (i = 0; i < eth_da.nb_representor_ports; ++i) 15816de569f5SAdrien Mazarguil if (eth_da.representor_ports[i] == 15826de569f5SAdrien Mazarguil (uint16_t)switch_info->port_name) 15836de569f5SAdrien Mazarguil break; 15846de569f5SAdrien Mazarguil if (i == eth_da.nb_representor_ports) { 15856de569f5SAdrien Mazarguil rte_errno = EBUSY; 15866de569f5SAdrien Mazarguil return NULL; 15876de569f5SAdrien Mazarguil } 15886de569f5SAdrien Mazarguil } 1589206254b7SOphir Munk /* Build device name. */ 159010dadfcbSViacheslav Ovsiienko if (spawn->pf_bond < 0) { 159110dadfcbSViacheslav Ovsiienko /* Single device. */ 1592206254b7SOphir Munk if (!switch_info->representor) 159309c9c4d2SThomas Monjalon strlcpy(name, dpdk_dev->name, sizeof(name)); 1594206254b7SOphir Munk else 1595206254b7SOphir Munk snprintf(name, sizeof(name), "%s_representor_%u", 1596206254b7SOphir Munk dpdk_dev->name, switch_info->port_name); 159710dadfcbSViacheslav Ovsiienko } else { 159810dadfcbSViacheslav Ovsiienko /* Bonding device. */ 159910dadfcbSViacheslav Ovsiienko if (!switch_info->representor) 160010dadfcbSViacheslav Ovsiienko snprintf(name, sizeof(name), "%s_%s", 160110dadfcbSViacheslav Ovsiienko dpdk_dev->name, spawn->ibv_dev->name); 160210dadfcbSViacheslav Ovsiienko else 160310dadfcbSViacheslav Ovsiienko snprintf(name, sizeof(name), "%s_%s_representor_%u", 160410dadfcbSViacheslav Ovsiienko dpdk_dev->name, spawn->ibv_dev->name, 160510dadfcbSViacheslav Ovsiienko switch_info->port_name); 160610dadfcbSViacheslav Ovsiienko } 1607206254b7SOphir Munk /* check if the device is already spawned */ 1608206254b7SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 1609206254b7SOphir Munk rte_errno = EEXIST; 1610206254b7SOphir Munk return NULL; 1611206254b7SOphir Munk } 161217e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 161317e19bc4SViacheslav Ovsiienko if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 161417e19bc4SViacheslav Ovsiienko eth_dev = rte_eth_dev_attach_secondary(name); 161517e19bc4SViacheslav Ovsiienko if (eth_dev == NULL) { 161617e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "can not attach rte ethdev"); 161717e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 1618f38c5457SAdrien Mazarguil return NULL; 1619771fa900SAdrien Mazarguil } 162017e19bc4SViacheslav Ovsiienko eth_dev->device = dpdk_dev; 162117e19bc4SViacheslav Ovsiienko eth_dev->dev_ops = &mlx5_dev_sec_ops; 1622120dc4a7SYongseok Koh err = mlx5_proc_priv_init(eth_dev); 1623120dc4a7SYongseok Koh if (err) 1624120dc4a7SYongseok Koh return NULL; 162517e19bc4SViacheslav Ovsiienko /* Receive command fd from primary process */ 16269a8ab29bSYongseok Koh err = mlx5_mp_req_verbs_cmd_fd(eth_dev); 162717e19bc4SViacheslav Ovsiienko if (err < 0) 162817e19bc4SViacheslav Ovsiienko return NULL; 162917e19bc4SViacheslav Ovsiienko /* Remap UAR for Tx queues. 
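 * Note that at this point "err" does not hold an error code: on success
 * mlx5_mp_req_verbs_cmd_fd() returned the Verbs command file descriptor
 * received from the primary process, and that descriptor is what is
 * passed as the fd argument of mlx5_tx_uar_init_secondary() below.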
*/ 1630120dc4a7SYongseok Koh err = mlx5_tx_uar_init_secondary(eth_dev, err); 163117e19bc4SViacheslav Ovsiienko if (err) 163217e19bc4SViacheslav Ovsiienko return NULL; 163317e19bc4SViacheslav Ovsiienko /* 163417e19bc4SViacheslav Ovsiienko * Ethdev pointer is still required as input since 163517e19bc4SViacheslav Ovsiienko * the primary device is not accessible from the 163617e19bc4SViacheslav Ovsiienko * secondary process. 163717e19bc4SViacheslav Ovsiienko */ 163817e19bc4SViacheslav Ovsiienko eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); 163917e19bc4SViacheslav Ovsiienko eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); 164017e19bc4SViacheslav Ovsiienko return eth_dev; 1641f5bf91deSMoti Haimovsky } 164217e19bc4SViacheslav Ovsiienko sh = mlx5_alloc_shared_ibctx(spawn); 164317e19bc4SViacheslav Ovsiienko if (!sh) 164417e19bc4SViacheslav Ovsiienko return NULL; 164517e19bc4SViacheslav Ovsiienko config.devx = sh->devx; 16463075bd23SDekel Peled #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR 16473075bd23SDekel Peled config.dest_tir = 1; 16483075bd23SDekel Peled #endif 16495f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 16506057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; 16515f8ba81cSXueming Li #endif 165243e9d979SShachar Beiser /* 165343e9d979SShachar Beiser * Multi-packet send is supported by ConnectX-4 Lx PF as well 165443e9d979SShachar Beiser * as all ConnectX-5 devices. 165543e9d979SShachar Beiser */ 1656038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 16576057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; 1658038e7251SShahaf Shuler #endif 16597d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 16606057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; 16617d6bf6b8SYongseok Koh #endif 166217e19bc4SViacheslav Ovsiienko mlx5_glue->dv_query_device(sh->ctx, &dv_attr); 16636057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { 16646057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { 1665a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "enhanced MPW is supported"); 166643e9d979SShachar Beiser mps = MLX5_MPW_ENHANCED; 166743e9d979SShachar Beiser } else { 1668a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW is supported"); 1669e589960cSYongseok Koh mps = MLX5_MPW; 1670e589960cSYongseok Koh } 1671e589960cSYongseok Koh } else { 1672a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW isn't supported"); 167343e9d979SShachar Beiser mps = MLX5_MPW_DISABLED; 167443e9d979SShachar Beiser } 16755f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 16766057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) 16776057a10bSAdrien Mazarguil swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; 16785f8ba81cSXueming Li DRV_LOG(DEBUG, "SWP support: %u", swp); 16795f8ba81cSXueming Li #endif 168068128934SAdrien Mazarguil config.swp = !!swp; 16817d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 16826057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { 16837d6bf6b8SYongseok Koh struct mlx5dv_striding_rq_caps mprq_caps = 16846057a10bSAdrien Mazarguil dv_attr.striding_rq_caps; 16857d6bf6b8SYongseok Koh 16867d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", 16877d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes); 16887d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", 16897d6bf6b8SYongseok Koh 
mprq_caps.max_single_stride_log_num_of_bytes); 16907d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", 16917d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides); 16927d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", 16937d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides); 16947d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tsupported_qpts: %d", 16957d6bf6b8SYongseok Koh mprq_caps.supported_qpts); 16967d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); 16977d6bf6b8SYongseok Koh mprq = 1; 16987d6bf6b8SYongseok Koh mprq_min_stride_size_n = 16997d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes; 17007d6bf6b8SYongseok Koh mprq_max_stride_size_n = 17017d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes; 17027d6bf6b8SYongseok Koh mprq_min_stride_num_n = 17037d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides; 17047d6bf6b8SYongseok Koh mprq_max_stride_num_n = 17057d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides; 170668128934SAdrien Mazarguil config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 170768128934SAdrien Mazarguil mprq_min_stride_num_n); 17087d6bf6b8SYongseok Koh } 17097d6bf6b8SYongseok Koh #endif 1710523f5a74SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128 && 17116057a10bSAdrien Mazarguil !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) 1712523f5a74SYongseok Koh cqe_comp = 0; 1713523f5a74SYongseok Koh else 1714523f5a74SYongseok Koh cqe_comp = 1; 171568128934SAdrien Mazarguil config.cqe_comp = cqe_comp; 1716bc91e8dbSYongseok Koh #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1717bc91e8dbSYongseok Koh /* Whether device supports 128B Rx CQE padding. */ 1718bc91e8dbSYongseok Koh cqe_pad = RTE_CACHE_LINE_SIZE == 128 && 1719bc91e8dbSYongseok Koh (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); 1720bc91e8dbSYongseok Koh #endif 1721038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 17226057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { 17236057a10bSAdrien Mazarguil tunnel_en = ((dv_attr.tunnel_offloads_caps & 1724038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && 17256057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 1726038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); 1727038e7251SShahaf Shuler } 1728a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "tunnel offloading is %ssupported", 1729a170a30dSNélio Laranjeiro tunnel_en ? "" : "not "); 1730038e7251SShahaf Shuler #else 1731a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 1732a170a30dSNélio Laranjeiro "tunnel offloading disabled due to old OFED/rdma-core version"); 1733038e7251SShahaf Shuler #endif 173468128934SAdrien Mazarguil config.tunnel_en = tunnel_en; 17351f106da2SMatan Azrad #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 17366057a10bSAdrien Mazarguil mpls_en = ((dv_attr.tunnel_offloads_caps & 17371f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && 17386057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 17391f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); 17401f106da2SMatan Azrad DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", 17411f106da2SMatan Azrad mpls_en ? 
"" : "not "); 17421f106da2SMatan Azrad #else 17431f106da2SMatan Azrad DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" 17441f106da2SMatan Azrad " old OFED/rdma-core version or firmware configuration"); 17451f106da2SMatan Azrad #endif 174668128934SAdrien Mazarguil config.mpls_en = mpls_en; 1747771fa900SAdrien Mazarguil /* Check port status. */ 174817e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); 1749771fa900SAdrien Mazarguil if (err) { 1750a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port query failed: %s", strerror(err)); 17519083982cSAdrien Mazarguil goto error; 1752771fa900SAdrien Mazarguil } 17531371f4dfSOr Ami if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 17549083982cSAdrien Mazarguil DRV_LOG(ERR, "port is not configured in Ethernet mode"); 1755e1c3e305SMatan Azrad err = EINVAL; 17569083982cSAdrien Mazarguil goto error; 17571371f4dfSOr Ami } 1758771fa900SAdrien Mazarguil if (port_attr.state != IBV_PORT_ACTIVE) 17599083982cSAdrien Mazarguil DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 1760a170a30dSNélio Laranjeiro mlx5_glue->port_state_str(port_attr.state), 1761771fa900SAdrien Mazarguil port_attr.state); 176217e19bc4SViacheslav Ovsiienko /* Allocate private eth device data. */ 1763771fa900SAdrien Mazarguil priv = rte_zmalloc("ethdev private structure", 1764771fa900SAdrien Mazarguil sizeof(*priv), 1765771fa900SAdrien Mazarguil RTE_CACHE_LINE_SIZE); 1766771fa900SAdrien Mazarguil if (priv == NULL) { 1767a170a30dSNélio Laranjeiro DRV_LOG(ERR, "priv allocation failure"); 1768771fa900SAdrien Mazarguil err = ENOMEM; 17699083982cSAdrien Mazarguil goto error; 1770771fa900SAdrien Mazarguil } 177117e19bc4SViacheslav Ovsiienko priv->sh = sh; 177217e19bc4SViacheslav Ovsiienko priv->ibv_port = spawn->ibv_port; 177346e10a4cSViacheslav Ovsiienko priv->pci_dev = spawn->pci_dev; 177435b2d13fSOlivier Matz priv->mtu = RTE_ETHER_MTU; 17756bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64 17766bf10ab6SMoti Haimovsky /* Initialize UAR access locks for 32bit implementations. */ 17776bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock_cq); 17786bf10ab6SMoti Haimovsky for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) 17796bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock[i]); 17806bf10ab6SMoti Haimovsky #endif 178126c08b97SAdrien Mazarguil /* Some internal functions rely on Netlink sockets, open them now. */ 17825366074bSNelio Laranjeiro priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 17835366074bSNelio Laranjeiro priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 178426c08b97SAdrien Mazarguil priv->nl_sn = 0; 17852b730263SAdrien Mazarguil priv->representor = !!switch_info->representor; 1786299d7dc2SViacheslav Ovsiienko priv->master = !!switch_info->master; 17872b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 1788*d5c06b1bSViacheslav Ovsiienko priv->vport_meta_tag = 0; 1789*d5c06b1bSViacheslav Ovsiienko priv->vport_meta_mask = 0; 1790*d5c06b1bSViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR_DEVX_PORT 1791299d7dc2SViacheslav Ovsiienko /* 1792*d5c06b1bSViacheslav Ovsiienko * The DevX port query API is implemented. E-Switch may use 1793*d5c06b1bSViacheslav Ovsiienko * either vport or reg_c[0] metadata register to match on 1794*d5c06b1bSViacheslav Ovsiienko * vport index. The engaged part of metadata register is 1795*d5c06b1bSViacheslav Ovsiienko * defined by mask. 
1796*d5c06b1bSViacheslav Ovsiienko */ 1797*d5c06b1bSViacheslav Ovsiienko devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT | 1798*d5c06b1bSViacheslav Ovsiienko MLX5DV_DEVX_PORT_MATCH_REG_C_0; 1799*d5c06b1bSViacheslav Ovsiienko err = mlx5dv_query_devx_port(sh->ctx, spawn->ibv_port, &devx_port); 1800*d5c06b1bSViacheslav Ovsiienko if (err) { 1801*d5c06b1bSViacheslav Ovsiienko DRV_LOG(WARNING, "can't query devx port %d on device %s\n", 1802*d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 1803*d5c06b1bSViacheslav Ovsiienko devx_port.comp_mask = 0; 1804*d5c06b1bSViacheslav Ovsiienko } 1805*d5c06b1bSViacheslav Ovsiienko if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) { 1806*d5c06b1bSViacheslav Ovsiienko priv->vport_meta_tag = devx_port.reg_c_0.value; 1807*d5c06b1bSViacheslav Ovsiienko priv->vport_meta_mask = devx_port.reg_c_0.mask; 1808*d5c06b1bSViacheslav Ovsiienko if (!priv->vport_meta_mask) { 1809*d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "vport zero mask for port %d" 1810*d5c06b1bSViacheslav Ovsiienko " on bonding device %s\n", 1811*d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 1812*d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 1813*d5c06b1bSViacheslav Ovsiienko goto error; 1814*d5c06b1bSViacheslav Ovsiienko } 1815*d5c06b1bSViacheslav Ovsiienko if (priv->vport_meta_tag & ~priv->vport_meta_mask) { 1816*d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "invalid vport tag for port %d" 1817*d5c06b1bSViacheslav Ovsiienko " on bonding device %s\n", 1818*d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 1819*d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 1820*d5c06b1bSViacheslav Ovsiienko goto error; 1821*d5c06b1bSViacheslav Ovsiienko } 1822*d5c06b1bSViacheslav Ovsiienko } else if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) { 1823*d5c06b1bSViacheslav Ovsiienko priv->vport_id = devx_port.vport_num; 1824*d5c06b1bSViacheslav Ovsiienko } else if (spawn->pf_bond >= 0) { 1825*d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "can't deduce vport index for port %d" 1826*d5c06b1bSViacheslav Ovsiienko " on bonding device %s\n", 1827*d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 1828*d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 1829*d5c06b1bSViacheslav Ovsiienko goto error; 1830*d5c06b1bSViacheslav Ovsiienko } else { 1831*d5c06b1bSViacheslav Ovsiienko /* Suppose vport index in compatible way. */ 1832*d5c06b1bSViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 1833*d5c06b1bSViacheslav Ovsiienko switch_info->port_name + 1 : -1; 1834*d5c06b1bSViacheslav Ovsiienko } 1835*d5c06b1bSViacheslav Ovsiienko #else 1836*d5c06b1bSViacheslav Ovsiienko /* 1837*d5c06b1bSViacheslav Ovsiienko * Kernel/rdma_core support single E-Switch per PF configurations 1838299d7dc2SViacheslav Ovsiienko * only and vport_id field contains the vport index for 1839299d7dc2SViacheslav Ovsiienko * associated VF, which is deduced from representor port name. 1840ae4eb7dcSViacheslav Ovsiienko * For example, let's have the IB device port 10, it has 1841299d7dc2SViacheslav Ovsiienko * attached network device eth0, which has port name attribute 1842299d7dc2SViacheslav Ovsiienko * pf0vf2, we can deduce the VF number as 2, and set vport index 1843299d7dc2SViacheslav Ovsiienko * as 3 (2+1). This assigning schema should be changed if the 1844299d7dc2SViacheslav Ovsiienko * multiple E-Switch instances per PF configurations or/and PCI 1845299d7dc2SViacheslav Ovsiienko * subfunctions are added. 
1846299d7dc2SViacheslav Ovsiienko */ 1847299d7dc2SViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 1848299d7dc2SViacheslav Ovsiienko switch_info->port_name + 1 : -1; 1849*d5c06b1bSViacheslav Ovsiienko #endif 1850*d5c06b1bSViacheslav Ovsiienko /* representor_id field keeps the unmodified VF index. */ 1851299d7dc2SViacheslav Ovsiienko priv->representor_id = switch_info->representor ? 1852299d7dc2SViacheslav Ovsiienko switch_info->port_name : -1; 18532b730263SAdrien Mazarguil /* 18542b730263SAdrien Mazarguil * Look for sibling devices in order to reuse their switch domain 18552b730263SAdrien Mazarguil * if any, otherwise allocate one. 18562b730263SAdrien Mazarguil */ 1857d874a4eeSThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) { 1858dbeba4cfSThomas Monjalon const struct mlx5_priv *opriv = 1859d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 18602b730263SAdrien Mazarguil 18612b730263SAdrien Mazarguil if (!opriv || 18622b730263SAdrien Mazarguil opriv->domain_id == 18632b730263SAdrien Mazarguil RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 18642b730263SAdrien Mazarguil continue; 18652b730263SAdrien Mazarguil priv->domain_id = opriv->domain_id; 18662b730263SAdrien Mazarguil break; 18672b730263SAdrien Mazarguil } 18682b730263SAdrien Mazarguil if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 18692b730263SAdrien Mazarguil err = rte_eth_switch_domain_alloc(&priv->domain_id); 18702b730263SAdrien Mazarguil if (err) { 18712b730263SAdrien Mazarguil err = rte_errno; 18722b730263SAdrien Mazarguil DRV_LOG(ERR, "unable to allocate switch domain: %s", 18732b730263SAdrien Mazarguil strerror(rte_errno)); 18742b730263SAdrien Mazarguil goto error; 18752b730263SAdrien Mazarguil } 18762b730263SAdrien Mazarguil own_domain_id = 1; 18772b730263SAdrien Mazarguil } 1878f38c5457SAdrien Mazarguil err = mlx5_args(&config, dpdk_dev->devargs); 1879e72dd09bSNélio Laranjeiro if (err) { 1880012ad994SShahaf Shuler err = rte_errno; 188193068a9dSAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 188293068a9dSAdrien Mazarguil strerror(rte_errno)); 18839083982cSAdrien Mazarguil goto error; 1884e72dd09bSNélio Laranjeiro } 188517e19bc4SViacheslav Ovsiienko config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & 188617e19bc4SViacheslav Ovsiienko IBV_DEVICE_RAW_IP_CSUM); 1887a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "checksum offloading is %ssupported", 18887fe24446SShahaf Shuler (config.hw_csum ? "" : "not ")); 18892dd8b721SViacheslav Ovsiienko #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 18902dd8b721SViacheslav Ovsiienko !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 18912dd8b721SViacheslav Ovsiienko DRV_LOG(DEBUG, "counters are not supported"); 18929a761de8SOri Kam #endif 189358b1312eSYongseok Koh #ifndef HAVE_IBV_FLOW_DV_SUPPORT 189458b1312eSYongseok Koh if (config.dv_flow_en) { 189558b1312eSYongseok Koh DRV_LOG(WARNING, "DV flow is not supported"); 189658b1312eSYongseok Koh config.dv_flow_en = 0; 189758b1312eSYongseok Koh } 189858b1312eSYongseok Koh #endif 18997fe24446SShahaf Shuler config.ind_table_max_size = 190017e19bc4SViacheslav Ovsiienko sh->device_attr.rss_caps.max_rwq_indirection_table_size; 190168128934SAdrien Mazarguil /* 190268128934SAdrien Mazarguil * Remove this check once DPDK supports larger/variable 190368128934SAdrien Mazarguil * indirection tables. 
190468128934SAdrien Mazarguil */ 190568128934SAdrien Mazarguil if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 19067fe24446SShahaf Shuler config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; 1907a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 19087fe24446SShahaf Shuler config.ind_table_max_size); 190917e19bc4SViacheslav Ovsiienko config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 191043e9d979SShachar Beiser IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 1911a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 19127fe24446SShahaf Shuler (config.hw_vlan_strip ? "" : "not ")); 191317e19bc4SViacheslav Ovsiienko config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 1914cd230a3eSShahaf Shuler IBV_RAW_PACKET_CAP_SCATTER_FCS); 1915a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 19167fe24446SShahaf Shuler (config.hw_fcs_strip ? "" : "not ")); 19172014a7fbSYongseok Koh #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 191817e19bc4SViacheslav Ovsiienko hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 19192014a7fbSYongseok Koh #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 192017e19bc4SViacheslav Ovsiienko hw_padding = !!(sh->device_attr.device_cap_flags_ex & 19212014a7fbSYongseok Koh IBV_DEVICE_PCI_WRITE_END_PADDING); 192243e9d979SShachar Beiser #endif 192378c7a16dSYongseok Koh if (config.hw_padding && !hw_padding) { 192478c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 192578c7a16dSYongseok Koh config.hw_padding = 0; 192678c7a16dSYongseok Koh } else if (config.hw_padding) { 192778c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 192878c7a16dSYongseok Koh } 192917e19bc4SViacheslav Ovsiienko config.tso = (sh->device_attr.tso_caps.max_tso > 0 && 193017e19bc4SViacheslav Ovsiienko (sh->device_attr.tso_caps.supported_qpts & 193143e9d979SShachar Beiser (1 << IBV_QPT_RAW_PACKET))); 19327fe24446SShahaf Shuler if (config.tso) 193317e19bc4SViacheslav Ovsiienko config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; 1934f9de8718SShahaf Shuler /* 1935f9de8718SShahaf Shuler * MPW is disabled by default, while the Enhanced MPW is enabled 1936f9de8718SShahaf Shuler * by default. 1937f9de8718SShahaf Shuler */ 1938f9de8718SShahaf Shuler if (config.mps == MLX5_ARG_UNSET) 1939f9de8718SShahaf Shuler config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 1940f9de8718SShahaf Shuler MLX5_MPW_DISABLED; 1941f9de8718SShahaf Shuler else 1942f9de8718SShahaf Shuler config.mps = config.mps ? mps : MLX5_MPW_DISABLED; 1943a170a30dSNélio Laranjeiro DRV_LOG(INFO, "%sMPS is %s", 19440f99970bSNélio Laranjeiro config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", 194568128934SAdrien Mazarguil config.mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); 19467fe24446SShahaf Shuler if (config.cqe_comp && !cqe_comp) { 1947a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 19487fe24446SShahaf Shuler config.cqe_comp = 0; 1949523f5a74SYongseok Koh } 1950bc91e8dbSYongseok Koh if (config.cqe_pad && !cqe_pad) { 1951bc91e8dbSYongseok Koh DRV_LOG(WARNING, "Rx CQE padding isn't supported"); 1952bc91e8dbSYongseok Koh config.cqe_pad = 0; 1953bc91e8dbSYongseok Koh } else if (config.cqe_pad) { 1954bc91e8dbSYongseok Koh DRV_LOG(INFO, "Rx CQE padding is enabled"); 1955bc91e8dbSYongseok Koh } 1956175f1c21SDekel Peled if (config.devx) { 1957175f1c21SDekel Peled priv->counter_fallback = 0; 1958175f1c21SDekel Peled err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr); 1959175f1c21SDekel Peled if (err) { 1960175f1c21SDekel Peled err = -err; 1961175f1c21SDekel Peled goto error; 1962175f1c21SDekel Peled } 1963175f1c21SDekel Peled if (!config.hca_attr.flow_counters_dump) 1964175f1c21SDekel Peled priv->counter_fallback = 1; 1965175f1c21SDekel Peled #ifndef HAVE_IBV_DEVX_ASYNC 1966175f1c21SDekel Peled priv->counter_fallback = 1; 1967175f1c21SDekel Peled #endif 1968175f1c21SDekel Peled if (priv->counter_fallback) 1969175f1c21SDekel Peled DRV_LOG(INFO, "Use fall-back DV counter management\n"); 1970175f1c21SDekel Peled /* Check for LRO support. */ 1971bd41389eSMatan Azrad if (config.dest_tir && config.hca_attr.lro_cap) { 1972175f1c21SDekel Peled /* TBD check tunnel lro caps. */ 1973175f1c21SDekel Peled config.lro.supported = config.hca_attr.lro_cap; 1974175f1c21SDekel Peled DRV_LOG(DEBUG, "Device supports LRO"); 1975175f1c21SDekel Peled /* 1976175f1c21SDekel Peled * If LRO timeout is not configured by application, 1977175f1c21SDekel Peled * use the minimal supported value. 
1978175f1c21SDekel Peled */ 1979175f1c21SDekel Peled if (!config.lro.timeout) 1980175f1c21SDekel Peled config.lro.timeout = 1981175f1c21SDekel Peled config.hca_attr.lro_timer_supported_periods[0]; 1982175f1c21SDekel Peled DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 1983175f1c21SDekel Peled config.lro.timeout); 1984175f1c21SDekel Peled } 1985175f1c21SDekel Peled } 19865c0e2db6SYongseok Koh if (config.mprq.enabled && mprq) { 19877d6bf6b8SYongseok Koh if (config.mprq.stride_num_n > mprq_max_stride_num_n || 19887d6bf6b8SYongseok Koh config.mprq.stride_num_n < mprq_min_stride_num_n) { 19897d6bf6b8SYongseok Koh config.mprq.stride_num_n = 19907d6bf6b8SYongseok Koh RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 19917d6bf6b8SYongseok Koh mprq_min_stride_num_n); 19927d6bf6b8SYongseok Koh DRV_LOG(WARNING, 19937d6bf6b8SYongseok Koh "the number of strides" 19947d6bf6b8SYongseok Koh " for Multi-Packet RQ is out of range," 19957d6bf6b8SYongseok Koh " setting default value (%u)", 19967d6bf6b8SYongseok Koh 1 << config.mprq.stride_num_n); 19977d6bf6b8SYongseok Koh } 19987d6bf6b8SYongseok Koh config.mprq.min_stride_size_n = mprq_min_stride_size_n; 19997d6bf6b8SYongseok Koh config.mprq.max_stride_size_n = mprq_max_stride_size_n; 20005c0e2db6SYongseok Koh } else if (config.mprq.enabled && !mprq) { 20015c0e2db6SYongseok Koh DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 20025c0e2db6SYongseok Koh config.mprq.enabled = 0; 20037d6bf6b8SYongseok Koh } 2004066cfecdSMatan Azrad if (config.max_dump_files_num == 0) 2005066cfecdSMatan Azrad config.max_dump_files_num = 128; 2006af4f09f2SNélio Laranjeiro eth_dev = rte_eth_dev_allocate(name); 2007af4f09f2SNélio Laranjeiro if (eth_dev == NULL) { 2008a170a30dSNélio Laranjeiro DRV_LOG(ERR, "can not allocate rte ethdev"); 2009af4f09f2SNélio Laranjeiro err = ENOMEM; 20109083982cSAdrien Mazarguil goto error; 2011af4f09f2SNélio Laranjeiro } 201215febafdSThomas Monjalon /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ 201315febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 2014a7d3c627SThomas Monjalon if (priv->representor) { 20152b730263SAdrien Mazarguil eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 2016a7d3c627SThomas Monjalon eth_dev->data->representor_id = priv->representor_id; 2017a7d3c627SThomas Monjalon } 2018fa2e14d4SViacheslav Ovsiienko /* 2019fa2e14d4SViacheslav Ovsiienko * Store associated network device interface index. This index 2020fa2e14d4SViacheslav Ovsiienko * is permanent throughout the lifetime of device. So, we may store 2021fa2e14d4SViacheslav Ovsiienko * the ifindex here and use the cached value further. 2022fa2e14d4SViacheslav Ovsiienko */ 2023fa2e14d4SViacheslav Ovsiienko assert(spawn->ifindex); 2024fa2e14d4SViacheslav Ovsiienko priv->if_index = spawn->ifindex; 2025af4f09f2SNélio Laranjeiro eth_dev->data->dev_private = priv; 2026df428ceeSYongseok Koh priv->dev_data = eth_dev->data; 2027af4f09f2SNélio Laranjeiro eth_dev->data->mac_addrs = priv->mac; 2028f38c5457SAdrien Mazarguil eth_dev->device = dpdk_dev; 2029771fa900SAdrien Mazarguil /* Configure the first MAC address by default. */ 2030af4f09f2SNélio Laranjeiro if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 2031a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2032a170a30dSNélio Laranjeiro "port %u cannot get MAC address, is mlx5_en" 2033a170a30dSNélio Laranjeiro " loaded? 
(errno: %s)", 20348c3c2372SAdrien Mazarguil eth_dev->data->port_id, strerror(rte_errno)); 2035e1c3e305SMatan Azrad err = ENODEV; 20369083982cSAdrien Mazarguil goto error; 2037771fa900SAdrien Mazarguil } 2038a170a30dSNélio Laranjeiro DRV_LOG(INFO, 2039a170a30dSNélio Laranjeiro "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 20400f99970bSNélio Laranjeiro eth_dev->data->port_id, 2041771fa900SAdrien Mazarguil mac.addr_bytes[0], mac.addr_bytes[1], 2042771fa900SAdrien Mazarguil mac.addr_bytes[2], mac.addr_bytes[3], 2043771fa900SAdrien Mazarguil mac.addr_bytes[4], mac.addr_bytes[5]); 2044771fa900SAdrien Mazarguil #ifndef NDEBUG 2045771fa900SAdrien Mazarguil { 2046771fa900SAdrien Mazarguil char ifname[IF_NAMESIZE]; 2047771fa900SAdrien Mazarguil 2048af4f09f2SNélio Laranjeiro if (mlx5_get_ifname(eth_dev, &ifname) == 0) 2049a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 20500f99970bSNélio Laranjeiro eth_dev->data->port_id, ifname); 2051771fa900SAdrien Mazarguil else 2052a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is unknown", 20530f99970bSNélio Laranjeiro eth_dev->data->port_id); 2054771fa900SAdrien Mazarguil } 2055771fa900SAdrien Mazarguil #endif 2056771fa900SAdrien Mazarguil /* Get actual MTU if possible. */ 2057a6d83b6aSNélio Laranjeiro err = mlx5_get_mtu(eth_dev, &priv->mtu); 2058012ad994SShahaf Shuler if (err) { 2059012ad994SShahaf Shuler err = rte_errno; 20609083982cSAdrien Mazarguil goto error; 2061012ad994SShahaf Shuler } 2062a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 2063a170a30dSNélio Laranjeiro priv->mtu); 206468128934SAdrien Mazarguil /* Initialize burst functions to prevent crashes before link-up. */ 2065e313ef4cSShahaf Shuler eth_dev->rx_pkt_burst = removed_rx_burst; 2066e313ef4cSShahaf Shuler eth_dev->tx_pkt_burst = removed_tx_burst; 2067771fa900SAdrien Mazarguil eth_dev->dev_ops = &mlx5_dev_ops; 2068272733b5SNélio Laranjeiro /* Register MAC address. */ 2069272733b5SNélio Laranjeiro claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 2070f87bfa8eSYongseok Koh if (config.vf && config.vf_nl_en) 2071ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_sync(eth_dev); 2072c8ffb8a9SNélio Laranjeiro TAILQ_INIT(&priv->flows); 20731b37f5d8SNélio Laranjeiro TAILQ_INIT(&priv->ctrl_flows); 20741e3a39f7SXueming Li /* Hint libmlx5 to use PMD allocator for data plane resources */ 20751e3a39f7SXueming Li struct mlx5dv_ctx_allocators alctr = { 20761e3a39f7SXueming Li .alloc = &mlx5_alloc_verbs_buf, 20771e3a39f7SXueming Li .free = &mlx5_free_verbs_buf, 20781e3a39f7SXueming Li .data = priv, 20791e3a39f7SXueming Li }; 208017e19bc4SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 208117e19bc4SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 20821e3a39f7SXueming Li (void *)((uintptr_t)&alctr)); 2083771fa900SAdrien Mazarguil /* Bring Ethernet device up. */ 2084a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 20850f99970bSNélio Laranjeiro eth_dev->data->port_id); 20867ba5320bSNélio Laranjeiro mlx5_set_link_up(eth_dev); 2087a85a606cSShahaf Shuler /* 2088a85a606cSShahaf Shuler * Even though the interrupt handler is not installed yet, 2089ae4eb7dcSViacheslav Ovsiienko * interrupts will still trigger on the async_fd from 2090a85a606cSShahaf Shuler * Verbs context returned by ibv_open_device(). 
2091a85a606cSShahaf Shuler */ 2092a85a606cSShahaf Shuler mlx5_link_update(eth_dev, 0); 2093e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 2094e2b4925eSOri Kam if (!(config.hca_attr.eswitch_manager && config.dv_flow_en && 2095e2b4925eSOri Kam (switch_info->representor || switch_info->master))) 2096e2b4925eSOri Kam config.dv_esw_en = 0; 2097e2b4925eSOri Kam #else 2098e2b4925eSOri Kam config.dv_esw_en = 0; 2099e2b4925eSOri Kam #endif 210038b4b397SViacheslav Ovsiienko /* Detect minimal data bytes to inline. */ 210138b4b397SViacheslav Ovsiienko mlx5_set_min_inline(spawn, &config); 21027fe24446SShahaf Shuler /* Store device configuration on private structure. */ 21037fe24446SShahaf Shuler priv->config = config; 2104dfedf3e3SViacheslav Ovsiienko /* Create context for virtual machine VLAN workaround. */ 2105dfedf3e3SViacheslav Ovsiienko priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 2106e2b4925eSOri Kam if (config.dv_flow_en) { 2107e2b4925eSOri Kam err = mlx5_alloc_shared_dr(priv); 2108e2b4925eSOri Kam if (err) 2109e2b4925eSOri Kam goto error; 2110e2b4925eSOri Kam } 211178be8852SNelio Laranjeiro /* Supported Verbs flow priority number detection. */ 21122815702bSNelio Laranjeiro err = mlx5_flow_discover_priorities(eth_dev); 21134fb27c1dSViacheslav Ovsiienko if (err < 0) { 21144fb27c1dSViacheslav Ovsiienko err = -err; 21159083982cSAdrien Mazarguil goto error; 21164fb27c1dSViacheslav Ovsiienko } 21172815702bSNelio Laranjeiro priv->config.flow_prio = err; 2118f38c5457SAdrien Mazarguil return eth_dev; 21199083982cSAdrien Mazarguil error: 212026c08b97SAdrien Mazarguil if (priv) { 2121b2177648SViacheslav Ovsiienko if (priv->sh) 2122b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 212326c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 212426c08b97SAdrien Mazarguil close(priv->nl_socket_route); 212526c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 212626c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 2127dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 2128dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 21292b730263SAdrien Mazarguil if (own_domain_id) 21302b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 2131771fa900SAdrien Mazarguil rte_free(priv); 2132e16adf08SThomas Monjalon if (eth_dev != NULL) 2133e16adf08SThomas Monjalon eth_dev->data->dev_private = NULL; 213426c08b97SAdrien Mazarguil } 2135e16adf08SThomas Monjalon if (eth_dev != NULL) { 2136e16adf08SThomas Monjalon /* mac_addrs must not be freed alone because part of dev_private */ 2137e16adf08SThomas Monjalon eth_dev->data->mac_addrs = NULL; 2138690de285SRaslan Darawsheh rte_eth_dev_release_port(eth_dev); 2139e16adf08SThomas Monjalon } 214017e19bc4SViacheslav Ovsiienko if (sh) 214117e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(sh); 2142f38c5457SAdrien Mazarguil assert(err > 0); 2143a6d83b6aSNélio Laranjeiro rte_errno = err; 2144f38c5457SAdrien Mazarguil return NULL; 2145f38c5457SAdrien Mazarguil } 2146f38c5457SAdrien Mazarguil 2147116f90adSAdrien Mazarguil /** 2148116f90adSAdrien Mazarguil * Comparison callback to sort device data. 2149116f90adSAdrien Mazarguil * 2150116f90adSAdrien Mazarguil * This is meant to be used with qsort(). 2151116f90adSAdrien Mazarguil * 2152116f90adSAdrien Mazarguil * @param a[in] 2153116f90adSAdrien Mazarguil * Pointer to pointer to first data object. 2154116f90adSAdrien Mazarguil * @param b[in] 2155116f90adSAdrien Mazarguil * Pointer to pointer to second data object. 
2156116f90adSAdrien Mazarguil * 2157116f90adSAdrien Mazarguil * @return 2158116f90adSAdrien Mazarguil * 0 if both objects are equal, less than 0 if the first argument is less 2159116f90adSAdrien Mazarguil * than the second, greater than 0 otherwise. 2160116f90adSAdrien Mazarguil */ 2161116f90adSAdrien Mazarguil static int 2162116f90adSAdrien Mazarguil mlx5_dev_spawn_data_cmp(const void *a, const void *b) 2163116f90adSAdrien Mazarguil { 2164116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_a = 2165116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)a)->info; 2166116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_b = 2167116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)b)->info; 2168116f90adSAdrien Mazarguil int ret; 2169116f90adSAdrien Mazarguil 2170116f90adSAdrien Mazarguil /* Master device first. */ 2171116f90adSAdrien Mazarguil ret = si_b->master - si_a->master; 2172116f90adSAdrien Mazarguil if (ret) 2173116f90adSAdrien Mazarguil return ret; 2174116f90adSAdrien Mazarguil /* Then representor devices. */ 2175116f90adSAdrien Mazarguil ret = si_b->representor - si_a->representor; 2176116f90adSAdrien Mazarguil if (ret) 2177116f90adSAdrien Mazarguil return ret; 2178116f90adSAdrien Mazarguil /* Unidentified devices come last in no specific order. */ 2179116f90adSAdrien Mazarguil if (!si_a->representor) 2180116f90adSAdrien Mazarguil return 0; 2181116f90adSAdrien Mazarguil /* Order representors by name. */ 2182116f90adSAdrien Mazarguil return si_a->port_name - si_b->port_name; 2183116f90adSAdrien Mazarguil } 2184116f90adSAdrien Mazarguil 2185f38c5457SAdrien Mazarguil /** 21862e569a37SViacheslav Ovsiienko * Match PCI information for possible slaves of bonding device. 21872e569a37SViacheslav Ovsiienko * 21882e569a37SViacheslav Ovsiienko * @param[in] ibv_dev 21892e569a37SViacheslav Ovsiienko * Pointer to Infiniband device structure. 21902e569a37SViacheslav Ovsiienko * @param[in] pci_dev 21912e569a37SViacheslav Ovsiienko * Pointer to PCI device structure to match PCI address. 21922e569a37SViacheslav Ovsiienko * @param[in] nl_rdma 21932e569a37SViacheslav Ovsiienko * Netlink RDMA group socket handle. 21942e569a37SViacheslav Ovsiienko * 21952e569a37SViacheslav Ovsiienko * @return 21962e569a37SViacheslav Ovsiienko * negative value if no bonding device found, otherwise 21972e569a37SViacheslav Ovsiienko * positive index of slave PF in bonding. 21982e569a37SViacheslav Ovsiienko */ 21992e569a37SViacheslav Ovsiienko static int 22002e569a37SViacheslav Ovsiienko mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev, 22012e569a37SViacheslav Ovsiienko const struct rte_pci_device *pci_dev, 22022e569a37SViacheslav Ovsiienko int nl_rdma) 22032e569a37SViacheslav Ovsiienko { 22042e569a37SViacheslav Ovsiienko char ifname[IF_NAMESIZE + 1]; 22052e569a37SViacheslav Ovsiienko unsigned int ifindex; 22062e569a37SViacheslav Ovsiienko unsigned int np, i; 22072e569a37SViacheslav Ovsiienko FILE *file = NULL; 22082e569a37SViacheslav Ovsiienko int pf = -1; 22092e569a37SViacheslav Ovsiienko 22102e569a37SViacheslav Ovsiienko /* 22112e569a37SViacheslav Ovsiienko * Try to get master device name. If something goes 22122e569a37SViacheslav Ovsiienko * wrong suppose the lack of kernel support and no 22132e569a37SViacheslav Ovsiienko * bonding devices. 
22142e569a37SViacheslav Ovsiienko */
22152e569a37SViacheslav Ovsiienko if (nl_rdma < 0)
22162e569a37SViacheslav Ovsiienko return -1;
22172e569a37SViacheslav Ovsiienko if (!strstr(ibv_dev->name, "bond"))
22182e569a37SViacheslav Ovsiienko return -1;
22192e569a37SViacheslav Ovsiienko np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
22202e569a37SViacheslav Ovsiienko if (!np)
22212e569a37SViacheslav Ovsiienko return -1;
22222e569a37SViacheslav Ovsiienko /*
22232e569a37SViacheslav Ovsiienko * The Master device might not be on the predefined
22242e569a37SViacheslav Ovsiienko * port (not on port index 1, it is not guaranteed),
22252e569a37SViacheslav Ovsiienko * we have to scan all Infiniband device ports and
22262e569a37SViacheslav Ovsiienko * find master.
22272e569a37SViacheslav Ovsiienko */
22282e569a37SViacheslav Ovsiienko for (i = 1; i <= np; ++i) {
22292e569a37SViacheslav Ovsiienko /* Check whether Infiniband port is populated. */
22302e569a37SViacheslav Ovsiienko ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
22312e569a37SViacheslav Ovsiienko if (!ifindex)
22322e569a37SViacheslav Ovsiienko continue;
22332e569a37SViacheslav Ovsiienko if (!if_indextoname(ifindex, ifname))
22342e569a37SViacheslav Ovsiienko continue;
22352e569a37SViacheslav Ovsiienko /* Try to read bonding slave names from sysfs. */
22362e569a37SViacheslav Ovsiienko MKSTR(slaves,
22372e569a37SViacheslav Ovsiienko "/sys/class/net/%s/master/bonding/slaves", ifname);
22382e569a37SViacheslav Ovsiienko file = fopen(slaves, "r");
22392e569a37SViacheslav Ovsiienko if (file)
22402e569a37SViacheslav Ovsiienko break;
22412e569a37SViacheslav Ovsiienko }
22422e569a37SViacheslav Ovsiienko if (!file)
22432e569a37SViacheslav Ovsiienko return -1;
22442e569a37SViacheslav Ovsiienko /* Use safe format to check maximal buffer length. */
22452e569a37SViacheslav Ovsiienko assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
22462e569a37SViacheslav Ovsiienko while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
22472e569a37SViacheslav Ovsiienko char tmp_str[IF_NAMESIZE + 32];
22482e569a37SViacheslav Ovsiienko struct rte_pci_addr pci_addr;
22492e569a37SViacheslav Ovsiienko struct mlx5_switch_info info;
22502e569a37SViacheslav Ovsiienko
22512e569a37SViacheslav Ovsiienko /* Process slave interface names in the loop. */
22522e569a37SViacheslav Ovsiienko snprintf(tmp_str, sizeof(tmp_str),
22532e569a37SViacheslav Ovsiienko "/sys/class/net/%s", ifname);
22542e569a37SViacheslav Ovsiienko if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
22552e569a37SViacheslav Ovsiienko DRV_LOG(WARNING, "can not get PCI address"
22562e569a37SViacheslav Ovsiienko " for netdev \"%s\"", ifname);
22572e569a37SViacheslav Ovsiienko continue;
22582e569a37SViacheslav Ovsiienko }
22592e569a37SViacheslav Ovsiienko if (pci_dev->addr.domain != pci_addr.domain ||
22602e569a37SViacheslav Ovsiienko pci_dev->addr.bus != pci_addr.bus ||
22612e569a37SViacheslav Ovsiienko pci_dev->addr.devid != pci_addr.devid ||
22622e569a37SViacheslav Ovsiienko pci_dev->addr.function != pci_addr.function)
22632e569a37SViacheslav Ovsiienko continue;
22642e569a37SViacheslav Ovsiienko /* Slave interface PCI address match found.
*/ 22652e569a37SViacheslav Ovsiienko fclose(file); 22662e569a37SViacheslav Ovsiienko snprintf(tmp_str, sizeof(tmp_str), 22672e569a37SViacheslav Ovsiienko "/sys/class/net/%s/phys_port_name", ifname); 22682e569a37SViacheslav Ovsiienko file = fopen(tmp_str, "rb"); 22692e569a37SViacheslav Ovsiienko if (!file) 22702e569a37SViacheslav Ovsiienko break; 22712e569a37SViacheslav Ovsiienko info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; 22722e569a37SViacheslav Ovsiienko if (fscanf(file, "%32s", tmp_str) == 1) 22732e569a37SViacheslav Ovsiienko mlx5_translate_port_name(tmp_str, &info); 22742e569a37SViacheslav Ovsiienko if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY || 22752e569a37SViacheslav Ovsiienko info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) 22762e569a37SViacheslav Ovsiienko pf = info.port_name; 22772e569a37SViacheslav Ovsiienko break; 22782e569a37SViacheslav Ovsiienko } 22792e569a37SViacheslav Ovsiienko if (file) 22802e569a37SViacheslav Ovsiienko fclose(file); 22812e569a37SViacheslav Ovsiienko return pf; 22822e569a37SViacheslav Ovsiienko } 22832e569a37SViacheslav Ovsiienko 22842e569a37SViacheslav Ovsiienko /** 2285f38c5457SAdrien Mazarguil * DPDK callback to register a PCI device. 2286f38c5457SAdrien Mazarguil * 22872b730263SAdrien Mazarguil * This function spawns Ethernet devices out of a given PCI device. 2288f38c5457SAdrien Mazarguil * 2289f38c5457SAdrien Mazarguil * @param[in] pci_drv 2290f38c5457SAdrien Mazarguil * PCI driver structure (mlx5_driver). 2291f38c5457SAdrien Mazarguil * @param[in] pci_dev 2292f38c5457SAdrien Mazarguil * PCI device information. 2293f38c5457SAdrien Mazarguil * 2294f38c5457SAdrien Mazarguil * @return 2295f38c5457SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 2296f38c5457SAdrien Mazarguil */ 2297f38c5457SAdrien Mazarguil static int 2298f38c5457SAdrien Mazarguil mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2299f38c5457SAdrien Mazarguil struct rte_pci_device *pci_dev) 2300f38c5457SAdrien Mazarguil { 2301f38c5457SAdrien Mazarguil struct ibv_device **ibv_list; 2302ad74bc61SViacheslav Ovsiienko /* 2303ad74bc61SViacheslav Ovsiienko * Number of found IB Devices matching with requested PCI BDF. 2304ad74bc61SViacheslav Ovsiienko * nd != 1 means there are multiple IB devices over the same 2305ad74bc61SViacheslav Ovsiienko * PCI device and we have representors and master. 2306ad74bc61SViacheslav Ovsiienko */ 2307ad74bc61SViacheslav Ovsiienko unsigned int nd = 0; 2308ad74bc61SViacheslav Ovsiienko /* 2309ad74bc61SViacheslav Ovsiienko * Number of found IB device Ports. nd = 1 and np = 1..n means 2310ad74bc61SViacheslav Ovsiienko * we have the single multiport IB device, and there may be 2311ad74bc61SViacheslav Ovsiienko * representors attached to some of found ports. 2312ad74bc61SViacheslav Ovsiienko */ 2313ad74bc61SViacheslav Ovsiienko unsigned int np = 0; 2314ad74bc61SViacheslav Ovsiienko /* 2315ad74bc61SViacheslav Ovsiienko * Number of DPDK ethernet devices to Spawn - either over 2316ad74bc61SViacheslav Ovsiienko * multiple IB devices or multiple ports of single IB device. 2317ad74bc61SViacheslav Ovsiienko * Actually this is the number of iterations to spawn. 
2318ad74bc61SViacheslav Ovsiienko */ 2319ad74bc61SViacheslav Ovsiienko unsigned int ns = 0; 23202e569a37SViacheslav Ovsiienko /* 23212e569a37SViacheslav Ovsiienko * Bonding device 23222e569a37SViacheslav Ovsiienko * < 0 - no bonding device (single one) 23232e569a37SViacheslav Ovsiienko * >= 0 - bonding device (value is slave PF index) 23242e569a37SViacheslav Ovsiienko */ 23252e569a37SViacheslav Ovsiienko int bd = -1; 2326a62ec991SViacheslav Ovsiienko struct mlx5_dev_spawn_data *list = NULL; 2327f87bfa8eSYongseok Koh struct mlx5_dev_config dev_config; 2328f38c5457SAdrien Mazarguil int ret; 2329f38c5457SAdrien Mazarguil 23307be600c8SYongseok Koh ret = mlx5_init_once(); 23317be600c8SYongseok Koh if (ret) { 23327be600c8SYongseok Koh DRV_LOG(ERR, "unable to init PMD global data: %s", 23337be600c8SYongseok Koh strerror(rte_errno)); 23347be600c8SYongseok Koh return -rte_errno; 23357be600c8SYongseok Koh } 2336f38c5457SAdrien Mazarguil assert(pci_drv == &mlx5_driver); 2337f38c5457SAdrien Mazarguil errno = 0; 2338f38c5457SAdrien Mazarguil ibv_list = mlx5_glue->get_device_list(&ret); 2339f38c5457SAdrien Mazarguil if (!ibv_list) { 2340f38c5457SAdrien Mazarguil rte_errno = errno ? errno : ENOSYS; 2341f38c5457SAdrien Mazarguil DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 2342a6d83b6aSNélio Laranjeiro return -rte_errno; 2343a6d83b6aSNélio Laranjeiro } 2344ad74bc61SViacheslav Ovsiienko /* 2345ad74bc61SViacheslav Ovsiienko * First scan the list of all Infiniband devices to find 2346ad74bc61SViacheslav Ovsiienko * matching ones, gathering into the list. 2347ad74bc61SViacheslav Ovsiienko */ 234826c08b97SAdrien Mazarguil struct ibv_device *ibv_match[ret + 1]; 2349a62ec991SViacheslav Ovsiienko int nl_route = mlx5_nl_init(NETLINK_ROUTE); 2350a62ec991SViacheslav Ovsiienko int nl_rdma = mlx5_nl_init(NETLINK_RDMA); 2351ad74bc61SViacheslav Ovsiienko unsigned int i; 235226c08b97SAdrien Mazarguil 2353f38c5457SAdrien Mazarguil while (ret-- > 0) { 2354f38c5457SAdrien Mazarguil struct rte_pci_addr pci_addr; 2355f38c5457SAdrien Mazarguil 2356f38c5457SAdrien Mazarguil DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 23572e569a37SViacheslav Ovsiienko bd = mlx5_device_bond_pci_match 23582e569a37SViacheslav Ovsiienko (ibv_list[ret], pci_dev, nl_rdma); 23592e569a37SViacheslav Ovsiienko if (bd >= 0) { 23602e569a37SViacheslav Ovsiienko /* 23612e569a37SViacheslav Ovsiienko * Bonding device detected. Only one match is allowed, 23622e569a37SViacheslav Ovsiienko * the bonding is supported over multi-port IB device, 23632e569a37SViacheslav Ovsiienko * there should be no matches on representor PCI 23642e569a37SViacheslav Ovsiienko * functions or non VF LAG bonding devices with 23652e569a37SViacheslav Ovsiienko * specified address. 
23662e569a37SViacheslav Ovsiienko */ 23672e569a37SViacheslav Ovsiienko if (nd) { 23682e569a37SViacheslav Ovsiienko DRV_LOG(ERR, 23692e569a37SViacheslav Ovsiienko "multiple PCI match on bonding device" 23702e569a37SViacheslav Ovsiienko "\"%s\" found", ibv_list[ret]->name); 23712e569a37SViacheslav Ovsiienko rte_errno = ENOENT; 23722e569a37SViacheslav Ovsiienko ret = -rte_errno; 23732e569a37SViacheslav Ovsiienko goto exit; 23742e569a37SViacheslav Ovsiienko } 23752e569a37SViacheslav Ovsiienko DRV_LOG(INFO, "PCI information matches for" 23762e569a37SViacheslav Ovsiienko " slave %d bonding device \"%s\"", 23772e569a37SViacheslav Ovsiienko bd, ibv_list[ret]->name); 23782e569a37SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 23792e569a37SViacheslav Ovsiienko break; 23802e569a37SViacheslav Ovsiienko } 23815cf5f710SViacheslav Ovsiienko if (mlx5_dev_to_pci_addr 23825cf5f710SViacheslav Ovsiienko (ibv_list[ret]->ibdev_path, &pci_addr)) 2383f38c5457SAdrien Mazarguil continue; 2384f38c5457SAdrien Mazarguil if (pci_dev->addr.domain != pci_addr.domain || 2385f38c5457SAdrien Mazarguil pci_dev->addr.bus != pci_addr.bus || 2386f38c5457SAdrien Mazarguil pci_dev->addr.devid != pci_addr.devid || 2387f38c5457SAdrien Mazarguil pci_dev->addr.function != pci_addr.function) 2388f38c5457SAdrien Mazarguil continue; 238926c08b97SAdrien Mazarguil DRV_LOG(INFO, "PCI information matches for device \"%s\"", 2390f38c5457SAdrien Mazarguil ibv_list[ret]->name); 2391ad74bc61SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 239226c08b97SAdrien Mazarguil } 2393ad74bc61SViacheslav Ovsiienko ibv_match[nd] = NULL; 2394ad74bc61SViacheslav Ovsiienko if (!nd) { 2395ae4eb7dcSViacheslav Ovsiienko /* No device matches, just complain and bail out. */ 2396ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, 2397ad74bc61SViacheslav Ovsiienko "no Verbs device matches PCI device " PCI_PRI_FMT "," 2398ad74bc61SViacheslav Ovsiienko " are kernel drivers loaded?", 2399ad74bc61SViacheslav Ovsiienko pci_dev->addr.domain, pci_dev->addr.bus, 2400ad74bc61SViacheslav Ovsiienko pci_dev->addr.devid, pci_dev->addr.function); 2401ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2402ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2403a62ec991SViacheslav Ovsiienko goto exit; 2404ad74bc61SViacheslav Ovsiienko } 2405ad74bc61SViacheslav Ovsiienko if (nd == 1) { 240626c08b97SAdrien Mazarguil /* 2407ad74bc61SViacheslav Ovsiienko * Found single matching device may have multiple ports. 2408ad74bc61SViacheslav Ovsiienko * Each port may be representor, we have to check the port 2409ad74bc61SViacheslav Ovsiienko * number and check the representors existence. 
241026c08b97SAdrien Mazarguil */
2411ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0)
2412ad74bc61SViacheslav Ovsiienko np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2413ad74bc61SViacheslav Ovsiienko if (!np)
2414ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, "can not get IB device \"%s\""
2415ad74bc61SViacheslav Ovsiienko " ports number", ibv_match[0]->name);
24162e569a37SViacheslav Ovsiienko if (bd >= 0 && !np) {
24172e569a37SViacheslav Ovsiienko DRV_LOG(ERR, "can not get ports"
24182e569a37SViacheslav Ovsiienko " for bonding device");
24192e569a37SViacheslav Ovsiienko rte_errno = ENOENT;
24202e569a37SViacheslav Ovsiienko ret = -rte_errno;
24212e569a37SViacheslav Ovsiienko goto exit;
24222e569a37SViacheslav Ovsiienko }
2423ad74bc61SViacheslav Ovsiienko }
2424790164ceSViacheslav Ovsiienko #ifndef HAVE_MLX5DV_DR_DEVX_PORT
2425790164ceSViacheslav Ovsiienko if (bd >= 0) {
2426790164ceSViacheslav Ovsiienko /*
2427790164ceSViacheslav Ovsiienko * This may happen if there is VF LAG kernel support and
2428790164ceSViacheslav Ovsiienko * application is compiled with older rdma_core library.
2429790164ceSViacheslav Ovsiienko */
2430790164ceSViacheslav Ovsiienko DRV_LOG(ERR,
2431790164ceSViacheslav Ovsiienko "No kernel/verbs support for VF LAG bonding found.");
2432790164ceSViacheslav Ovsiienko rte_errno = ENOTSUP;
2433790164ceSViacheslav Ovsiienko ret = -rte_errno;
2434790164ceSViacheslav Ovsiienko goto exit;
2435790164ceSViacheslav Ovsiienko }
2436790164ceSViacheslav Ovsiienko #endif
2437ad74bc61SViacheslav Ovsiienko /*
2438ad74bc61SViacheslav Ovsiienko * Now we can determine the maximal
2439ad74bc61SViacheslav Ovsiienko * amount of devices to be spawned.
2440ad74bc61SViacheslav Ovsiienko */
2441a62ec991SViacheslav Ovsiienko list = rte_zmalloc("device spawn data",
2442a62ec991SViacheslav Ovsiienko sizeof(struct mlx5_dev_spawn_data) *
2443a62ec991SViacheslav Ovsiienko (np ? np : nd),
2444a62ec991SViacheslav Ovsiienko RTE_CACHE_LINE_SIZE);
2445a62ec991SViacheslav Ovsiienko if (!list) {
2446a62ec991SViacheslav Ovsiienko DRV_LOG(ERR, "spawn data array allocation failure");
2447a62ec991SViacheslav Ovsiienko rte_errno = ENOMEM;
2448a62ec991SViacheslav Ovsiienko ret = -rte_errno;
2449a62ec991SViacheslav Ovsiienko goto exit;
2450a62ec991SViacheslav Ovsiienko }
24512e569a37SViacheslav Ovsiienko if (bd >= 0 || np > 1) {
2452ad74bc61SViacheslav Ovsiienko /*
2453ae4eb7dcSViacheslav Ovsiienko * Single IB device with multiple ports found,
2454ad74bc61SViacheslav Ovsiienko * it may be E-Switch master device and representors.
2455ad74bc61SViacheslav Ovsiienko * We have to perform identification through the ports.
2456ad74bc61SViacheslav Ovsiienko */ 2457ad74bc61SViacheslav Ovsiienko assert(nl_rdma >= 0); 2458ad74bc61SViacheslav Ovsiienko assert(ns == 0); 2459ad74bc61SViacheslav Ovsiienko assert(nd == 1); 24602e569a37SViacheslav Ovsiienko assert(np); 2461ad74bc61SViacheslav Ovsiienko for (i = 1; i <= np; ++i) { 2462ad74bc61SViacheslav Ovsiienko list[ns].max_port = np; 2463ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = i; 2464ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[0]; 2465ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 2466ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev; 24672e569a37SViacheslav Ovsiienko list[ns].pf_bond = bd; 2468ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 2469ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, i); 2470ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 2471ad74bc61SViacheslav Ovsiienko /* 2472ad74bc61SViacheslav Ovsiienko * No network interface index found for the 2473ad74bc61SViacheslav Ovsiienko * specified port, it means there is no 2474ad74bc61SViacheslav Ovsiienko * representor on this port. It's OK, 2475ad74bc61SViacheslav Ovsiienko * there can be disabled ports, for example 2476ad74bc61SViacheslav Ovsiienko * if sriov_numvfs < sriov_totalvfs. 2477ad74bc61SViacheslav Ovsiienko */ 247826c08b97SAdrien Mazarguil continue; 247926c08b97SAdrien Mazarguil } 2480ad74bc61SViacheslav Ovsiienko ret = -1; 248126c08b97SAdrien Mazarguil if (nl_route >= 0) 2482ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 2483ad74bc61SViacheslav Ovsiienko (nl_route, 2484ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 2485ad74bc61SViacheslav Ovsiienko &list[ns].info); 2486ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 2487ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 2488ad74bc61SViacheslav Ovsiienko /* 2489ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 2490ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 2491ad74bc61SViacheslav Ovsiienko * with sysfs. 
2492ad74bc61SViacheslav Ovsiienko */ 2493ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 2494ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 2495ad74bc61SViacheslav Ovsiienko &list[ns].info); 2496ad74bc61SViacheslav Ovsiienko } 24972e569a37SViacheslav Ovsiienko if (!ret && bd >= 0) { 24982e569a37SViacheslav Ovsiienko switch (list[ns].info.name_type) { 24992e569a37SViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: 25002e569a37SViacheslav Ovsiienko if (list[ns].info.port_name == bd) 25012e569a37SViacheslav Ovsiienko ns++; 25022e569a37SViacheslav Ovsiienko break; 25032e569a37SViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_PFVF: 25042e569a37SViacheslav Ovsiienko if (list[ns].info.pf_num == bd) 25052e569a37SViacheslav Ovsiienko ns++; 25062e569a37SViacheslav Ovsiienko break; 25072e569a37SViacheslav Ovsiienko default: 25082e569a37SViacheslav Ovsiienko break; 25092e569a37SViacheslav Ovsiienko } 25102e569a37SViacheslav Ovsiienko continue; 25112e569a37SViacheslav Ovsiienko } 2512ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 2513ad74bc61SViacheslav Ovsiienko list[ns].info.master)) 2514ad74bc61SViacheslav Ovsiienko ns++; 2515ad74bc61SViacheslav Ovsiienko } 2516ad74bc61SViacheslav Ovsiienko if (!ns) { 251726c08b97SAdrien Mazarguil DRV_LOG(ERR, 2518ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 2519ad74bc61SViacheslav Ovsiienko " on the IB device with multiple ports"); 2520ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2521ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2522ad74bc61SViacheslav Ovsiienko goto exit; 2523ad74bc61SViacheslav Ovsiienko } 2524ad74bc61SViacheslav Ovsiienko } else { 2525ad74bc61SViacheslav Ovsiienko /* 2526ad74bc61SViacheslav Ovsiienko * The existence of several matching entries (nd > 1) means 2527ad74bc61SViacheslav Ovsiienko * port representors have been instantiated. No existing Verbs 2528ad74bc61SViacheslav Ovsiienko * call nor sysfs entries can tell them apart, this can only 2529ad74bc61SViacheslav Ovsiienko * be done through Netlink calls assuming kernel drivers are 2530ad74bc61SViacheslav Ovsiienko * recent enough to support them. 2531ad74bc61SViacheslav Ovsiienko * 2532ad74bc61SViacheslav Ovsiienko * In the event of identification failure through Netlink, 2533ad74bc61SViacheslav Ovsiienko * try again through sysfs, then: 2534ad74bc61SViacheslav Ovsiienko * 2535ad74bc61SViacheslav Ovsiienko * 1. A single IB device matches (nd == 1) with single 2536ad74bc61SViacheslav Ovsiienko * port (np=0/1) and is not a representor, assume 2537ad74bc61SViacheslav Ovsiienko * no switch support. 2538ad74bc61SViacheslav Ovsiienko * 2539ad74bc61SViacheslav Ovsiienko * 2. Otherwise no safe assumptions can be made; 2540ad74bc61SViacheslav Ovsiienko * complain louder and bail out. 
2541ad74bc61SViacheslav Ovsiienko */
2542ad74bc61SViacheslav Ovsiienko np = 1;
2543ad74bc61SViacheslav Ovsiienko for (i = 0; i != nd; ++i) {
2544ad74bc61SViacheslav Ovsiienko memset(&list[ns].info, 0, sizeof(list[ns].info));
2545ad74bc61SViacheslav Ovsiienko list[ns].max_port = 1;
2546ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = 1;
2547ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[i];
2548ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL;
2549ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev;
25502e569a37SViacheslav Ovsiienko list[ns].pf_bond = -1;
2551ad74bc61SViacheslav Ovsiienko list[ns].ifindex = 0;
2552ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0)
2553ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex
2554ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, 1);
2555ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) {
25569c2bbd04SViacheslav Ovsiienko char ifname[IF_NAMESIZE];
25579c2bbd04SViacheslav Ovsiienko
2558ad74bc61SViacheslav Ovsiienko /*
25599c2bbd04SViacheslav Ovsiienko * Netlink failed, it may happen with old
25609c2bbd04SViacheslav Ovsiienko * ib_core kernel driver (before 4.16).
25619c2bbd04SViacheslav Ovsiienko * We can assume there is an old driver because
25629c2bbd04SViacheslav Ovsiienko * here we are processing single-port IB
25639c2bbd04SViacheslav Ovsiienko * devices. Let's try sysfs to retrieve
25649c2bbd04SViacheslav Ovsiienko * the ifindex. The method works for
25659c2bbd04SViacheslav Ovsiienko * master device only.
25669c2bbd04SViacheslav Ovsiienko */
25679c2bbd04SViacheslav Ovsiienko if (nd > 1) {
25689c2bbd04SViacheslav Ovsiienko /*
25699c2bbd04SViacheslav Ovsiienko * Multiple devices found, assume
25709c2bbd04SViacheslav Ovsiienko * representors, can not distinguish
25719c2bbd04SViacheslav Ovsiienko * master/representor and retrieve
25729c2bbd04SViacheslav Ovsiienko * ifindex via sysfs.
2573ad74bc61SViacheslav Ovsiienko */
2574ad74bc61SViacheslav Ovsiienko continue;
2575ad74bc61SViacheslav Ovsiienko }
25769c2bbd04SViacheslav Ovsiienko ret = mlx5_get_master_ifname
25779c2bbd04SViacheslav Ovsiienko (ibv_match[i]->ibdev_path, &ifname);
25789c2bbd04SViacheslav Ovsiienko if (!ret)
25799c2bbd04SViacheslav Ovsiienko list[ns].ifindex =
25809c2bbd04SViacheslav Ovsiienko if_nametoindex(ifname);
25819c2bbd04SViacheslav Ovsiienko if (!list[ns].ifindex) {
25829c2bbd04SViacheslav Ovsiienko /*
25839c2bbd04SViacheslav Ovsiienko * No network interface index found
25849c2bbd04SViacheslav Ovsiienko * for the specified device, it means
25859c2bbd04SViacheslav Ovsiienko * it is neither representor
25869c2bbd04SViacheslav Ovsiienko * nor master.
25879c2bbd04SViacheslav Ovsiienko */
25889c2bbd04SViacheslav Ovsiienko continue;
25899c2bbd04SViacheslav Ovsiienko }
25909c2bbd04SViacheslav Ovsiienko }
2591ad74bc61SViacheslav Ovsiienko ret = -1;
2592ad74bc61SViacheslav Ovsiienko if (nl_route >= 0)
2593ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info
2594ad74bc61SViacheslav Ovsiienko (nl_route,
2595ad74bc61SViacheslav Ovsiienko list[ns].ifindex,
2596ad74bc61SViacheslav Ovsiienko &list[ns].info);
2597ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor &&
2598ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) {
2599ad74bc61SViacheslav Ovsiienko /*
2600ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with
2601ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task
2602ad74bc61SViacheslav Ovsiienko * with sysfs.
2603ad74bc61SViacheslav Ovsiienko */
2604ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info
2605ad74bc61SViacheslav Ovsiienko (list[ns].ifindex,
2606ad74bc61SViacheslav Ovsiienko &list[ns].info);
2607ad74bc61SViacheslav Ovsiienko }
2608ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^
2609ad74bc61SViacheslav Ovsiienko list[ns].info.master)) {
2610ad74bc61SViacheslav Ovsiienko ns++;
2611ad74bc61SViacheslav Ovsiienko } else if ((nd == 1) &&
2612ad74bc61SViacheslav Ovsiienko !list[ns].info.representor &&
2613ad74bc61SViacheslav Ovsiienko !list[ns].info.master) {
2614ad74bc61SViacheslav Ovsiienko /*
2615ad74bc61SViacheslav Ovsiienko * Single IB device with
2616ad74bc61SViacheslav Ovsiienko * one physical port and
2617ad74bc61SViacheslav Ovsiienko * attached network device.
2618ad74bc61SViacheslav Ovsiienko * Maybe SR-IOV is not enabled
2619ad74bc61SViacheslav Ovsiienko * or there are no representors.
2620ad74bc61SViacheslav Ovsiienko */
2621ad74bc61SViacheslav Ovsiienko DRV_LOG(INFO, "no E-Switch support detected");
2622ad74bc61SViacheslav Ovsiienko ns++;
2623ad74bc61SViacheslav Ovsiienko break;
262426c08b97SAdrien Mazarguil }
2625f38c5457SAdrien Mazarguil }
2626ad74bc61SViacheslav Ovsiienko if (!ns) {
2627ad74bc61SViacheslav Ovsiienko DRV_LOG(ERR,
2628ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors"
2629ad74bc61SViacheslav Ovsiienko " on the multiple IB devices");
2630ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT;
2631ad74bc61SViacheslav Ovsiienko ret = -rte_errno;
2632ad74bc61SViacheslav Ovsiienko goto exit;
2633ad74bc61SViacheslav Ovsiienko }
2634ad74bc61SViacheslav Ovsiienko }
2635ad74bc61SViacheslav Ovsiienko assert(ns);
2636116f90adSAdrien Mazarguil /*
2637116f90adSAdrien Mazarguil * Sort list to probe devices in natural order for users' convenience
2638116f90adSAdrien Mazarguil * (i.e. master first, then representors from lowest to highest ID).
2639116f90adSAdrien Mazarguil */
2640ad74bc61SViacheslav Ovsiienko qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2641f87bfa8eSYongseok Koh /* Default configuration. */
2642f87bfa8eSYongseok Koh dev_config = (struct mlx5_dev_config){
264378c7a16dSYongseok Koh .hw_padding = 0,
2644f87bfa8eSYongseok Koh .mps = MLX5_ARG_UNSET,
2645f87bfa8eSYongseok Koh .rx_vec_en = 1,
2646505f1fe4SViacheslav Ovsiienko .txq_inline_max = MLX5_ARG_UNSET,
2647505f1fe4SViacheslav Ovsiienko .txq_inline_min = MLX5_ARG_UNSET,
2648505f1fe4SViacheslav Ovsiienko .txq_inline_mpw = MLX5_ARG_UNSET,
2649f87bfa8eSYongseok Koh .txqs_inline = MLX5_ARG_UNSET,
2650f87bfa8eSYongseok Koh .vf_nl_en = 1,
2651dceb5029SYongseok Koh .mr_ext_memseg_en = 1,
2652f87bfa8eSYongseok Koh .mprq = {
2653f87bfa8eSYongseok Koh .enabled = 0, /* Disabled by default. */
2654f87bfa8eSYongseok Koh .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
2655f87bfa8eSYongseok Koh .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
2656f87bfa8eSYongseok Koh .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
2657f87bfa8eSYongseok Koh },
2658e2b4925eSOri Kam .dv_esw_en = 1,
2659f87bfa8eSYongseok Koh };
2660ad74bc61SViacheslav Ovsiienko /* Device specific configuration.
*/ 2661f38c5457SAdrien Mazarguil switch (pci_dev->id.device_id) { 2662f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 2663f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 2664f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 2665f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 2666a40b734bSViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 2667c930f02cSViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 2668f87bfa8eSYongseok Koh dev_config.vf = 1; 2669f38c5457SAdrien Mazarguil break; 2670f38c5457SAdrien Mazarguil default: 2671f87bfa8eSYongseok Koh break; 2672f38c5457SAdrien Mazarguil } 2673ad74bc61SViacheslav Ovsiienko for (i = 0; i != ns; ++i) { 26742b730263SAdrien Mazarguil uint32_t restore; 26752b730263SAdrien Mazarguil 2676f87bfa8eSYongseok Koh list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 2677ad74bc61SViacheslav Ovsiienko &list[i], 2678ad74bc61SViacheslav Ovsiienko dev_config); 26796de569f5SAdrien Mazarguil if (!list[i].eth_dev) { 2680206254b7SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 26812b730263SAdrien Mazarguil break; 2682206254b7SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 26836de569f5SAdrien Mazarguil continue; 26846de569f5SAdrien Mazarguil } 2685116f90adSAdrien Mazarguil restore = list[i].eth_dev->data->dev_flags; 2686116f90adSAdrien Mazarguil rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 26872b730263SAdrien Mazarguil /* Restore non-PCI flags cleared by the above call. */ 2688116f90adSAdrien Mazarguil list[i].eth_dev->data->dev_flags |= restore; 2689116f90adSAdrien Mazarguil rte_eth_dev_probing_finish(list[i].eth_dev); 26902b730263SAdrien Mazarguil } 2691ad74bc61SViacheslav Ovsiienko if (i != ns) { 2692f38c5457SAdrien Mazarguil DRV_LOG(ERR, 2693f38c5457SAdrien Mazarguil "probe of PCI device " PCI_PRI_FMT " aborted after" 2694f38c5457SAdrien Mazarguil " encountering an error: %s", 2695f38c5457SAdrien Mazarguil pci_dev->addr.domain, pci_dev->addr.bus, 2696f38c5457SAdrien Mazarguil pci_dev->addr.devid, pci_dev->addr.function, 2697f38c5457SAdrien Mazarguil strerror(rte_errno)); 2698f38c5457SAdrien Mazarguil ret = -rte_errno; 26992b730263SAdrien Mazarguil /* Roll back. */ 27002b730263SAdrien Mazarguil while (i--) { 27016de569f5SAdrien Mazarguil if (!list[i].eth_dev) 27026de569f5SAdrien Mazarguil continue; 2703116f90adSAdrien Mazarguil mlx5_dev_close(list[i].eth_dev); 2704e16adf08SThomas Monjalon /* mac_addrs must not be freed because in dev_private */ 2705e16adf08SThomas Monjalon list[i].eth_dev->data->mac_addrs = NULL; 2706116f90adSAdrien Mazarguil claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 27072b730263SAdrien Mazarguil } 27082b730263SAdrien Mazarguil /* Restore original error. 
*/
27092b730263SAdrien Mazarguil rte_errno = -ret;
2710f38c5457SAdrien Mazarguil } else {
2711f38c5457SAdrien Mazarguil ret = 0;
2712f38c5457SAdrien Mazarguil }
2713ad74bc61SViacheslav Ovsiienko exit:
2714ad74bc61SViacheslav Ovsiienko /*
2715ad74bc61SViacheslav Ovsiienko * Do the routine cleanup:
2716ad74bc61SViacheslav Ovsiienko * - close opened Netlink sockets
2717a62ec991SViacheslav Ovsiienko * - free allocated spawn data array
2718ad74bc61SViacheslav Ovsiienko * - free the Infiniband device list
2719ad74bc61SViacheslav Ovsiienko */
2720ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0)
2721ad74bc61SViacheslav Ovsiienko close(nl_rdma);
2722ad74bc61SViacheslav Ovsiienko if (nl_route >= 0)
2723ad74bc61SViacheslav Ovsiienko close(nl_route);
2724a62ec991SViacheslav Ovsiienko if (list)
2725a62ec991SViacheslav Ovsiienko rte_free(list);
2726ad74bc61SViacheslav Ovsiienko assert(ibv_list);
2727ad74bc61SViacheslav Ovsiienko mlx5_glue->free_device_list(ibv_list);
2728f38c5457SAdrien Mazarguil return ret;
2729771fa900SAdrien Mazarguil }
2730771fa900SAdrien Mazarguil
27313a820742SOphir Munk /**
27323a820742SOphir Munk * DPDK callback to remove a PCI device.
27333a820742SOphir Munk *
27343a820742SOphir Munk * This function removes all Ethernet devices belonging to a given PCI device.
27353a820742SOphir Munk *
27363a820742SOphir Munk * @param[in] pci_dev
27373a820742SOphir Munk * Pointer to the PCI device.
27383a820742SOphir Munk *
27393a820742SOphir Munk * @return
27403a820742SOphir Munk * 0 on success, the function cannot fail.
27413a820742SOphir Munk */
27423a820742SOphir Munk static int
27433a820742SOphir Munk mlx5_pci_remove(struct rte_pci_device *pci_dev)
27443a820742SOphir Munk {
27453a820742SOphir Munk uint16_t port_id;
27463a820742SOphir Munk
27475294b800SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
27483a820742SOphir Munk rte_eth_dev_close(port_id);
27493a820742SOphir Munk return 0;
27503a820742SOphir Munk }
27513a820742SOphir Munk
2752771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = {
2753771fa900SAdrien Mazarguil {
27541d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
27551d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4)
2756771fa900SAdrien Mazarguil },
2757771fa900SAdrien Mazarguil {
27581d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
27591d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
2760771fa900SAdrien Mazarguil },
2761771fa900SAdrien Mazarguil {
27621d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
27631d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
2764771fa900SAdrien Mazarguil },
2765771fa900SAdrien Mazarguil {
27661d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
27671d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
2768771fa900SAdrien Mazarguil },
2769771fa900SAdrien Mazarguil {
2770528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2771528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5)
2772528a9fbeSYongseok Koh },
2773528a9fbeSYongseok Koh {
2774528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2775528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
2776528a9fbeSYongseok Koh },
2777528a9fbeSYongseok Koh {
2778528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2779528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
2780528a9fbeSYongseok Koh },
2781528a9fbeSYongseok Koh {
2782528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2783528a9fbeSYongseok Koh
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) 2784528a9fbeSYongseok Koh }, 2785528a9fbeSYongseok Koh { 2786dd3331c6SShahaf Shuler RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2787dd3331c6SShahaf Shuler PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) 2788dd3331c6SShahaf Shuler }, 2789dd3331c6SShahaf Shuler { 2790c322c0e5SOri Kam RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2791c322c0e5SOri Kam PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) 2792c322c0e5SOri Kam }, 2793c322c0e5SOri Kam { 2794f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2795f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6) 2796f0354d84SWisam Jaddo }, 2797f0354d84SWisam Jaddo { 2798f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2799f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) 2800f0354d84SWisam Jaddo }, 2801f0354d84SWisam Jaddo { 2802771fa900SAdrien Mazarguil .vendor_id = 0 2803771fa900SAdrien Mazarguil } 2804771fa900SAdrien Mazarguil }; 2805771fa900SAdrien Mazarguil 2806fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver = { 28072f3193cfSJan Viktorin .driver = { 28082f3193cfSJan Viktorin .name = MLX5_DRIVER_NAME 28092f3193cfSJan Viktorin }, 2810771fa900SAdrien Mazarguil .id_table = mlx5_pci_id_map, 2811af424af8SShreyansh Jain .probe = mlx5_pci_probe, 28123a820742SOphir Munk .remove = mlx5_pci_remove, 2813989e999dSShahaf Shuler .dma_map = mlx5_dma_map, 2814989e999dSShahaf Shuler .dma_unmap = mlx5_dma_unmap, 281569c06d0eSYongseok Koh .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV | 2816b76fafb1SDavid Marchand RTE_PCI_DRV_PROBE_AGAIN, 2817771fa900SAdrien Mazarguil }; 2818771fa900SAdrien Mazarguil 281972b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN 282059b91becSAdrien Mazarguil 282159b91becSAdrien Mazarguil /** 282208c028d0SAdrien Mazarguil * Suffix RTE_EAL_PMD_PATH with "-glue". 282308c028d0SAdrien Mazarguil * 282408c028d0SAdrien Mazarguil * This function performs a sanity check on RTE_EAL_PMD_PATH before 282508c028d0SAdrien Mazarguil * suffixing its last component. 282608c028d0SAdrien Mazarguil * 282708c028d0SAdrien Mazarguil * @param buf[out] 282808c028d0SAdrien Mazarguil * Output buffer, should be large enough otherwise NULL is returned. 282908c028d0SAdrien Mazarguil * @param size 283008c028d0SAdrien Mazarguil * Size of @p out. 283108c028d0SAdrien Mazarguil * 283208c028d0SAdrien Mazarguil * @return 283308c028d0SAdrien Mazarguil * Pointer to @p buf or @p NULL in case suffix cannot be appended. 
283408c028d0SAdrien Mazarguil */ 283508c028d0SAdrien Mazarguil static char * 283608c028d0SAdrien Mazarguil mlx5_glue_path(char *buf, size_t size) 283708c028d0SAdrien Mazarguil { 283808c028d0SAdrien Mazarguil static const char *const bad[] = { "/", ".", "..", NULL }; 283908c028d0SAdrien Mazarguil const char *path = RTE_EAL_PMD_PATH; 284008c028d0SAdrien Mazarguil size_t len = strlen(path); 284108c028d0SAdrien Mazarguil size_t off; 284208c028d0SAdrien Mazarguil int i; 284308c028d0SAdrien Mazarguil 284408c028d0SAdrien Mazarguil while (len && path[len - 1] == '/') 284508c028d0SAdrien Mazarguil --len; 284608c028d0SAdrien Mazarguil for (off = len; off && path[off - 1] != '/'; --off) 284708c028d0SAdrien Mazarguil ; 284808c028d0SAdrien Mazarguil for (i = 0; bad[i]; ++i) 284908c028d0SAdrien Mazarguil if (!strncmp(path + off, bad[i], (int)(len - off))) 285008c028d0SAdrien Mazarguil goto error; 285108c028d0SAdrien Mazarguil i = snprintf(buf, size, "%.*s-glue", (int)len, path); 285208c028d0SAdrien Mazarguil if (i == -1 || (size_t)i >= size) 285308c028d0SAdrien Mazarguil goto error; 285408c028d0SAdrien Mazarguil return buf; 285508c028d0SAdrien Mazarguil error: 2856a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2857a170a30dSNélio Laranjeiro "unable to append \"-glue\" to last component of" 285808c028d0SAdrien Mazarguil " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," 285908c028d0SAdrien Mazarguil " please re-configure DPDK"); 286008c028d0SAdrien Mazarguil return NULL; 286108c028d0SAdrien Mazarguil } 286208c028d0SAdrien Mazarguil 286308c028d0SAdrien Mazarguil /** 286459b91becSAdrien Mazarguil * Initialization routine for run-time dependency on rdma-core. 286559b91becSAdrien Mazarguil */ 286659b91becSAdrien Mazarguil static int 286759b91becSAdrien Mazarguil mlx5_glue_init(void) 286859b91becSAdrien Mazarguil { 286908c028d0SAdrien Mazarguil char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; 2870f6242d06SAdrien Mazarguil const char *path[] = { 2871f6242d06SAdrien Mazarguil /* 2872f6242d06SAdrien Mazarguil * A basic security check is necessary before trusting 2873f6242d06SAdrien Mazarguil * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH. 2874f6242d06SAdrien Mazarguil */ 2875f6242d06SAdrien Mazarguil (geteuid() == getuid() && getegid() == getgid() ? 2876f6242d06SAdrien Mazarguil getenv("MLX5_GLUE_PATH") : NULL), 287708c028d0SAdrien Mazarguil /* 287808c028d0SAdrien Mazarguil * When RTE_EAL_PMD_PATH is set, use its glue-suffixed 287908c028d0SAdrien Mazarguil * variant, otherwise let dlopen() look up libraries on its 288008c028d0SAdrien Mazarguil * own. 288108c028d0SAdrien Mazarguil */ 288208c028d0SAdrien Mazarguil (*RTE_EAL_PMD_PATH ? 
288308c028d0SAdrien Mazarguil mlx5_glue_path(glue_path, sizeof(glue_path)) : ""), 2884f6242d06SAdrien Mazarguil }; 2885f6242d06SAdrien Mazarguil unsigned int i = 0; 288659b91becSAdrien Mazarguil void *handle = NULL; 288759b91becSAdrien Mazarguil void **sym; 288859b91becSAdrien Mazarguil const char *dlmsg; 288959b91becSAdrien Mazarguil 2890f6242d06SAdrien Mazarguil while (!handle && i != RTE_DIM(path)) { 2891f6242d06SAdrien Mazarguil const char *end; 2892f6242d06SAdrien Mazarguil size_t len; 2893f6242d06SAdrien Mazarguil int ret; 2894f6242d06SAdrien Mazarguil 2895f6242d06SAdrien Mazarguil if (!path[i]) { 2896f6242d06SAdrien Mazarguil ++i; 2897f6242d06SAdrien Mazarguil continue; 2898f6242d06SAdrien Mazarguil } 2899f6242d06SAdrien Mazarguil end = strpbrk(path[i], ":;"); 2900f6242d06SAdrien Mazarguil if (!end) 2901f6242d06SAdrien Mazarguil end = path[i] + strlen(path[i]); 2902f6242d06SAdrien Mazarguil len = end - path[i]; 2903f6242d06SAdrien Mazarguil ret = 0; 2904f6242d06SAdrien Mazarguil do { 2905f6242d06SAdrien Mazarguil char name[ret + 1]; 2906f6242d06SAdrien Mazarguil 2907f6242d06SAdrien Mazarguil ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE, 2908f6242d06SAdrien Mazarguil (int)len, path[i], 2909f6242d06SAdrien Mazarguil (!len || *(end - 1) == '/') ? "" : "/"); 2910f6242d06SAdrien Mazarguil if (ret == -1) 2911f6242d06SAdrien Mazarguil break; 2912f6242d06SAdrien Mazarguil if (sizeof(name) != (size_t)ret + 1) 2913f6242d06SAdrien Mazarguil continue; 2914a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"", 2915a170a30dSNélio Laranjeiro name); 2916f6242d06SAdrien Mazarguil handle = dlopen(name, RTLD_LAZY); 2917f6242d06SAdrien Mazarguil break; 2918f6242d06SAdrien Mazarguil } while (1); 2919f6242d06SAdrien Mazarguil path[i] = end + 1; 2920f6242d06SAdrien Mazarguil if (!*end) 2921f6242d06SAdrien Mazarguil ++i; 2922f6242d06SAdrien Mazarguil } 292359b91becSAdrien Mazarguil if (!handle) { 292459b91becSAdrien Mazarguil rte_errno = EINVAL; 292559b91becSAdrien Mazarguil dlmsg = dlerror(); 292659b91becSAdrien Mazarguil if (dlmsg) 2927a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg); 292859b91becSAdrien Mazarguil goto glue_error; 292959b91becSAdrien Mazarguil } 293059b91becSAdrien Mazarguil sym = dlsym(handle, "mlx5_glue"); 293159b91becSAdrien Mazarguil if (!sym || !*sym) { 293259b91becSAdrien Mazarguil rte_errno = EINVAL; 293359b91becSAdrien Mazarguil dlmsg = dlerror(); 293459b91becSAdrien Mazarguil if (dlmsg) 2935a170a30dSNélio Laranjeiro DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg); 293659b91becSAdrien Mazarguil goto glue_error; 293759b91becSAdrien Mazarguil } 293859b91becSAdrien Mazarguil mlx5_glue = *sym; 293959b91becSAdrien Mazarguil return 0; 294059b91becSAdrien Mazarguil glue_error: 294159b91becSAdrien Mazarguil if (handle) 294259b91becSAdrien Mazarguil dlclose(handle); 2943a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 2944a170a30dSNélio Laranjeiro "cannot initialize PMD due to missing run-time dependency on" 2945a170a30dSNélio Laranjeiro " rdma-core libraries (libibverbs, libmlx5)"); 294659b91becSAdrien Mazarguil return -rte_errno; 294759b91becSAdrien Mazarguil } 294859b91becSAdrien Mazarguil 294959b91becSAdrien Mazarguil #endif 295059b91becSAdrien Mazarguil 2951771fa900SAdrien Mazarguil /** 2952771fa900SAdrien Mazarguil * Driver initialization routine. 
2953771fa900SAdrien Mazarguil */ 2954f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init) 2955771fa900SAdrien Mazarguil { 29563d96644aSStephen Hemminger /* Initialize driver log type. */ 29573d96644aSStephen Hemminger mlx5_logtype = rte_log_register("pmd.net.mlx5"); 29583d96644aSStephen Hemminger if (mlx5_logtype >= 0) 29593d96644aSStephen Hemminger rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); 29603d96644aSStephen Hemminger 29615f8ba81cSXueming Li /* Build the static tables for Verbs conversion. */ 2962ea16068cSYongseok Koh mlx5_set_ptype_table(); 29635f8ba81cSXueming Li mlx5_set_cksum_table(); 29645f8ba81cSXueming Li mlx5_set_swp_types_table(); 2965771fa900SAdrien Mazarguil /* 2966771fa900SAdrien Mazarguil * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use 2967771fa900SAdrien Mazarguil * huge pages. Calling ibv_fork_init() during init allows 2968771fa900SAdrien Mazarguil * applications to use fork() safely for purposes other than 2969771fa900SAdrien Mazarguil * using this PMD, which is not supported in forked processes. 2970771fa900SAdrien Mazarguil */ 2971771fa900SAdrien Mazarguil setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); 2972161b93e5SYongseok Koh /* Match the size of Rx completion entry to the size of a cacheline. */ 2973161b93e5SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128) 2974161b93e5SYongseok Koh setenv("MLX5_CQE_SIZE", "128", 0); 29751ff30d18SMatan Azrad /* 29761ff30d18SMatan Azrad * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to 29771ff30d18SMatan Azrad * cleanup all the Verbs resources even when the device was removed. 29781ff30d18SMatan Azrad */ 29791ff30d18SMatan Azrad setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1); 298072b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN 298159b91becSAdrien Mazarguil if (mlx5_glue_init()) 298259b91becSAdrien Mazarguil return; 298359b91becSAdrien Mazarguil assert(mlx5_glue); 298459b91becSAdrien Mazarguil #endif 29852a3b0097SAdrien Mazarguil #ifndef NDEBUG 29862a3b0097SAdrien Mazarguil /* Glue structure must not contain any NULL pointers. */ 29872a3b0097SAdrien Mazarguil { 29882a3b0097SAdrien Mazarguil unsigned int i; 29892a3b0097SAdrien Mazarguil 29902a3b0097SAdrien Mazarguil for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i) 29912a3b0097SAdrien Mazarguil assert(((const void *const *)mlx5_glue)[i]); 29922a3b0097SAdrien Mazarguil } 29932a3b0097SAdrien Mazarguil #endif 29946d5df2eaSAdrien Mazarguil if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) { 2995a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2996a170a30dSNélio Laranjeiro "rdma-core glue \"%s\" mismatch: \"%s\" is required", 29976d5df2eaSAdrien Mazarguil mlx5_glue->version, MLX5_GLUE_VERSION); 29986d5df2eaSAdrien Mazarguil return; 29996d5df2eaSAdrien Mazarguil } 30000e83b8e5SNelio Laranjeiro mlx5_glue->fork_init(); 30013dcfe039SThomas Monjalon rte_pci_register(&mlx5_driver); 3002771fa900SAdrien Mazarguil } 3003771fa900SAdrien Mazarguil 300401f19227SShreyansh Jain RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); 300501f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); 30060880c401SOlivier Matz RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); 3007