18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <assert.h> 1059b91becSAdrien Mazarguil #include <dlfcn.h> 11771fa900SAdrien Mazarguil #include <stdint.h> 12771fa900SAdrien Mazarguil #include <stdlib.h> 13e72dd09bSNélio Laranjeiro #include <errno.h> 14771fa900SAdrien Mazarguil #include <net/if.h> 154a984153SXueming Li #include <sys/mman.h> 16ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 17771fa900SAdrien Mazarguil 18771fa900SAdrien Mazarguil /* Verbs header. */ 19771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 20771fa900SAdrien Mazarguil #ifdef PEDANTIC 21fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 22771fa900SAdrien Mazarguil #endif 23771fa900SAdrien Mazarguil #include <infiniband/verbs.h> 24771fa900SAdrien Mazarguil #ifdef PEDANTIC 25fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 26771fa900SAdrien Mazarguil #endif 27771fa900SAdrien Mazarguil 28771fa900SAdrien Mazarguil #include <rte_malloc.h> 29ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 30fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 31771fa900SAdrien Mazarguil #include <rte_pci.h> 32c752998bSGaetan Rivet #include <rte_bus_pci.h> 33771fa900SAdrien Mazarguil #include <rte_common.h> 3459b91becSAdrien Mazarguil #include <rte_config.h> 354a984153SXueming Li #include <rte_eal_memconfig.h> 36e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 37e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 38e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 39f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 40f15db67dSMatan Azrad #include 
<rte_alarm.h> 41771fa900SAdrien Mazarguil 42771fa900SAdrien Mazarguil #include "mlx5.h" 43771fa900SAdrien Mazarguil #include "mlx5_utils.h" 442e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 45771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 4613d57bd5SAdrien Mazarguil #include "mlx5_defs.h" 470e83b8e5SNelio Laranjeiro #include "mlx5_glue.h" 48974f1e7eSYongseok Koh #include "mlx5_mr.h" 4984c406e7SOri Kam #include "mlx5_flow.h" 50771fa900SAdrien Mazarguil 5199c12dccSNélio Laranjeiro /* Device parameter to enable RX completion queue compression. */ 5299c12dccSNélio Laranjeiro #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" 5399c12dccSNélio Laranjeiro 54bc91e8dbSYongseok Koh /* Device parameter to enable RX completion entry padding to 128B. */ 55bc91e8dbSYongseok Koh #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" 56bc91e8dbSYongseok Koh 5778c7a16dSYongseok Koh /* Device parameter to enable padding Rx packet to cacheline size. */ 5878c7a16dSYongseok Koh #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" 5978c7a16dSYongseok Koh 607d6bf6b8SYongseok Koh /* Device parameter to enable Multi-Packet Rx queue. */ 617d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_EN "mprq_en" 627d6bf6b8SYongseok Koh 637d6bf6b8SYongseok Koh /* Device parameter to configure log 2 of the number of strides for MPRQ. */ 647d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" 657d6bf6b8SYongseok Koh 667d6bf6b8SYongseok Koh /* Device parameter to limit the size of memcpy'd packet for MPRQ. */ 677d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" 687d6bf6b8SYongseok Koh 697d6bf6b8SYongseok Koh /* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ 707d6bf6b8SYongseok Koh #define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" 717d6bf6b8SYongseok Koh 72a6bd4911SViacheslav Ovsiienko /* Device parameter to configure inline send. 
Deprecated, ignored.*/ 732a66cf37SYaacov Hazan #define MLX5_TXQ_INLINE "txq_inline" 742a66cf37SYaacov Hazan 75505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with ordinary SEND. */ 76505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MAX "txq_inline_max" 77505f1fe4SViacheslav Ovsiienko 78505f1fe4SViacheslav Ovsiienko /* Device parameter to configure minimal data size to inline. */ 79505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MIN "txq_inline_min" 80505f1fe4SViacheslav Ovsiienko 81505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with Enhanced MPW. */ 82505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MPW "txq_inline_mpw" 83505f1fe4SViacheslav Ovsiienko 842a66cf37SYaacov Hazan /* 852a66cf37SYaacov Hazan * Device parameter to configure the number of TX queues threshold for 862a66cf37SYaacov Hazan * enabling inline send. 872a66cf37SYaacov Hazan */ 882a66cf37SYaacov Hazan #define MLX5_TXQS_MIN_INLINE "txqs_min_inline" 892a66cf37SYaacov Hazan 9009d8b416SYongseok Koh /* 9109d8b416SYongseok Koh * Device parameter to configure the number of TX queues threshold for 92a6bd4911SViacheslav Ovsiienko * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines). 9309d8b416SYongseok Koh */ 9409d8b416SYongseok Koh #define MLX5_TXQS_MAX_VEC "txqs_max_vec" 9509d8b416SYongseok Koh 96230189d9SNélio Laranjeiro /* Device parameter to enable multi-packet send WQEs. */ 97230189d9SNélio Laranjeiro #define MLX5_TXQ_MPW_EN "txq_mpw_en" 98230189d9SNélio Laranjeiro 99a6bd4911SViacheslav Ovsiienko /* 100a6bd4911SViacheslav Ovsiienko * Device parameter to include 2 dsegs in the title WQEBB. 101a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 
102a6bd4911SViacheslav Ovsiienko */ 1036ce84bd8SYongseok Koh #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" 1046ce84bd8SYongseok Koh 105a6bd4911SViacheslav Ovsiienko /* 106a6bd4911SViacheslav Ovsiienko * Device parameter to limit the size of inlining packet. 107a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 108a6bd4911SViacheslav Ovsiienko */ 1096ce84bd8SYongseok Koh #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" 1106ce84bd8SYongseok Koh 111a6bd4911SViacheslav Ovsiienko /* 112a6bd4911SViacheslav Ovsiienko * Device parameter to enable hardware Tx vector. 113a6bd4911SViacheslav Ovsiienko * Deprecated, ignored (no vectorized Tx routines anymore). 114a6bd4911SViacheslav Ovsiienko */ 1155644d5b9SNelio Laranjeiro #define MLX5_TX_VEC_EN "tx_vec_en" 1165644d5b9SNelio Laranjeiro 1175644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Rx vector. */ 1185644d5b9SNelio Laranjeiro #define MLX5_RX_VEC_EN "rx_vec_en" 1195644d5b9SNelio Laranjeiro 12078a54648SXueming Li /* Allow L3 VXLAN flow creation. */ 12178a54648SXueming Li #define MLX5_L3_VXLAN_EN "l3_vxlan_en" 12278a54648SXueming Li 123e2b4925eSOri Kam /* Activate DV E-Switch flow steering. */ 124e2b4925eSOri Kam #define MLX5_DV_ESW_EN "dv_esw_en" 125e2b4925eSOri Kam 12651e72d38SOri Kam /* Activate DV flow steering. */ 12751e72d38SOri Kam #define MLX5_DV_FLOW_EN "dv_flow_en" 12851e72d38SOri Kam 129db209cc3SNélio Laranjeiro /* Activate Netlink support in VF mode. */ 130db209cc3SNélio Laranjeiro #define MLX5_VF_NL_EN "vf_nl_en" 131db209cc3SNélio Laranjeiro 132dceb5029SYongseok Koh /* Enable extending memsegs when creating a MR. */ 133dceb5029SYongseok Koh #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" 134dceb5029SYongseok Koh 1356de569f5SAdrien Mazarguil /* Select port representors to instantiate. */ 1366de569f5SAdrien Mazarguil #define MLX5_REPRESENTOR "representor" 1376de569f5SAdrien Mazarguil 138066cfecdSMatan Azrad /* Device parameter to configure the maximum number of dump files per queue. 
*/ 139066cfecdSMatan Azrad #define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num" 140066cfecdSMatan Azrad 14121bb6c7eSDekel Peled /* Configure timeout of LRO session (in microseconds). */ 14221bb6c7eSDekel Peled #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec" 14321bb6c7eSDekel Peled 14443e9d979SShachar Beiser #ifndef HAVE_IBV_MLX5_MOD_MPW 14543e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) 14643e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) 14743e9d979SShachar Beiser #endif 14843e9d979SShachar Beiser 149523f5a74SYongseok Koh #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP 150523f5a74SYongseok Koh #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) 151523f5a74SYongseok Koh #endif 152523f5a74SYongseok Koh 153974f1e7eSYongseok Koh static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 154974f1e7eSYongseok Koh 155974f1e7eSYongseok Koh /* Shared memory between primary and secondary processes. */ 156974f1e7eSYongseok Koh struct mlx5_shared_data *mlx5_shared_data; 157974f1e7eSYongseok Koh 158974f1e7eSYongseok Koh /* Spinlock for mlx5_shared_data allocation. */ 159974f1e7eSYongseok Koh static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 160974f1e7eSYongseok Koh 1617be600c8SYongseok Koh /* Process local data for secondary processes. */ 1627be600c8SYongseok Koh static struct mlx5_local_data mlx5_local_data; 1637be600c8SYongseok Koh 164a170a30dSNélio Laranjeiro /** Driver-specific log messages type. */ 165a170a30dSNélio Laranjeiro int mlx5_logtype; 166a170a30dSNélio Laranjeiro 167ad74bc61SViacheslav Ovsiienko /** Data associated with devices to spawn. */ 168ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data { 169ad74bc61SViacheslav Ovsiienko uint32_t ifindex; /**< Network interface index. */ 170ad74bc61SViacheslav Ovsiienko uint32_t max_port; /**< IB device maximal port index. */ 171ad74bc61SViacheslav Ovsiienko uint32_t ibv_port; /**< IB device physical port index. 
*/ 172ad74bc61SViacheslav Ovsiienko struct mlx5_switch_info info; /**< Switch information. */ 173ad74bc61SViacheslav Ovsiienko struct ibv_device *ibv_dev; /**< Associated IB device. */ 174ad74bc61SViacheslav Ovsiienko struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */ 175ab3cffcfSViacheslav Ovsiienko struct rte_pci_device *pci_dev; /**< Backend PCI device. */ 176ad74bc61SViacheslav Ovsiienko }; 177ad74bc61SViacheslav Ovsiienko 17817e19bc4SViacheslav Ovsiienko static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER(); 17917e19bc4SViacheslav Ovsiienko static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER; 18017e19bc4SViacheslav Ovsiienko 18117e19bc4SViacheslav Ovsiienko /** 1825382d28cSMatan Azrad * Initialize the counters management structure. 1835382d28cSMatan Azrad * 1845382d28cSMatan Azrad * @param[in] sh 1855382d28cSMatan Azrad * Pointer to mlx5_ibv_shared object to free 1865382d28cSMatan Azrad */ 1875382d28cSMatan Azrad static void 1885382d28cSMatan Azrad mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh) 1895382d28cSMatan Azrad { 1905382d28cSMatan Azrad uint8_t i; 1915382d28cSMatan Azrad 1925382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.flow_counters); 1935382d28cSMatan Azrad for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) 1945382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.ccont[i].pool_list); 1955382d28cSMatan Azrad } 1965382d28cSMatan Azrad 1975382d28cSMatan Azrad /** 1985382d28cSMatan Azrad * Destroy all the resources allocated for a counter memory management. 1995382d28cSMatan Azrad * 2005382d28cSMatan Azrad * @param[in] mng 2015382d28cSMatan Azrad * Pointer to the memory management structure. 
2025382d28cSMatan Azrad */ 2035382d28cSMatan Azrad static void 2045382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) 2055382d28cSMatan Azrad { 2065382d28cSMatan Azrad uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; 2075382d28cSMatan Azrad 2085382d28cSMatan Azrad LIST_REMOVE(mng, next); 2095382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy(mng->dm)); 2105382d28cSMatan Azrad claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); 2115382d28cSMatan Azrad rte_free(mem); 2125382d28cSMatan Azrad } 2135382d28cSMatan Azrad 2145382d28cSMatan Azrad /** 2155382d28cSMatan Azrad * Close and release all the resources of the counters management. 2165382d28cSMatan Azrad * 2175382d28cSMatan Azrad * @param[in] sh 2185382d28cSMatan Azrad * Pointer to mlx5_ibv_shared object to free. 2195382d28cSMatan Azrad */ 2205382d28cSMatan Azrad static void 2215382d28cSMatan Azrad mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh) 2225382d28cSMatan Azrad { 2235382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng *mng; 2245382d28cSMatan Azrad uint8_t i; 2255382d28cSMatan Azrad int j; 226f15db67dSMatan Azrad int retries = 1024; 2275382d28cSMatan Azrad 228f15db67dSMatan Azrad rte_errno = 0; 229f15db67dSMatan Azrad while (--retries) { 230f15db67dSMatan Azrad rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh); 231f15db67dSMatan Azrad if (rte_errno != EINPROGRESS) 232f15db67dSMatan Azrad break; 233f15db67dSMatan Azrad rte_pause(); 234f15db67dSMatan Azrad } 2355382d28cSMatan Azrad for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) { 2365382d28cSMatan Azrad struct mlx5_flow_counter_pool *pool; 2375382d28cSMatan Azrad uint32_t batch = !!(i % 2); 2385382d28cSMatan Azrad 2395382d28cSMatan Azrad if (!sh->cmng.ccont[i].pools) 2405382d28cSMatan Azrad continue; 2415382d28cSMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 2425382d28cSMatan Azrad while (pool) { 2435382d28cSMatan Azrad if (batch) { 2445382d28cSMatan Azrad if (pool->min_dcs) 
2455382d28cSMatan Azrad claim_zero 2465382d28cSMatan Azrad (mlx5_devx_cmd_destroy(pool->min_dcs)); 2475382d28cSMatan Azrad } 2485382d28cSMatan Azrad for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { 2495382d28cSMatan Azrad if (pool->counters_raw[j].action) 2505382d28cSMatan Azrad claim_zero 2515382d28cSMatan Azrad (mlx5_glue->destroy_flow_action 2525382d28cSMatan Azrad (pool->counters_raw[j].action)); 2535382d28cSMatan Azrad if (!batch && pool->counters_raw[j].dcs) 2545382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy 2555382d28cSMatan Azrad (pool->counters_raw[j].dcs)); 2565382d28cSMatan Azrad } 2575382d28cSMatan Azrad TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, 2585382d28cSMatan Azrad next); 2595382d28cSMatan Azrad rte_free(pool); 2605382d28cSMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 2615382d28cSMatan Azrad } 2625382d28cSMatan Azrad rte_free(sh->cmng.ccont[i].pools); 2635382d28cSMatan Azrad } 2645382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 2655382d28cSMatan Azrad while (mng) { 2665382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(mng); 2675382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 2685382d28cSMatan Azrad } 2695382d28cSMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 2705382d28cSMatan Azrad } 2715382d28cSMatan Azrad 2725382d28cSMatan Azrad /** 273b9d86122SDekel Peled * Extract pdn of PD object using DV API. 274b9d86122SDekel Peled * 275b9d86122SDekel Peled * @param[in] pd 276b9d86122SDekel Peled * Pointer to the verbs PD object. 277b9d86122SDekel Peled * @param[out] pdn 278b9d86122SDekel Peled * Pointer to the PD object number variable. 279b9d86122SDekel Peled * 280b9d86122SDekel Peled * @return 281b9d86122SDekel Peled * 0 on success, error value otherwise. 
282b9d86122SDekel Peled */ 283b9d86122SDekel Peled #ifdef HAVE_IBV_FLOW_DV_SUPPORT 284b9d86122SDekel Peled static int 285b9d86122SDekel Peled mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused) 286b9d86122SDekel Peled { 287b9d86122SDekel Peled struct mlx5dv_obj obj; 288b9d86122SDekel Peled struct mlx5dv_pd pd_info; 289b9d86122SDekel Peled int ret = 0; 290b9d86122SDekel Peled 291b9d86122SDekel Peled obj.pd.in = pd; 292b9d86122SDekel Peled obj.pd.out = &pd_info; 293b9d86122SDekel Peled ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); 294b9d86122SDekel Peled if (ret) { 295b9d86122SDekel Peled DRV_LOG(DEBUG, "Fail to get PD object info"); 296b9d86122SDekel Peled return ret; 297b9d86122SDekel Peled } 298b9d86122SDekel Peled *pdn = pd_info.pdn; 299b9d86122SDekel Peled return 0; 300b9d86122SDekel Peled } 301b9d86122SDekel Peled #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 302b9d86122SDekel Peled 303b9d86122SDekel Peled /** 30417e19bc4SViacheslav Ovsiienko * Allocate shared IB device context. If there is multiport device the 30517e19bc4SViacheslav Ovsiienko * master and representors will share this context, if there is single 30617e19bc4SViacheslav Ovsiienko * port dedicated IB device, the context will be used by only given 30717e19bc4SViacheslav Ovsiienko * port due to unification. 30817e19bc4SViacheslav Ovsiienko * 309ae4eb7dcSViacheslav Ovsiienko * Routine first searches the context for the specified IB device name, 31017e19bc4SViacheslav Ovsiienko * if found the shared context assumed and reference counter is incremented. 31117e19bc4SViacheslav Ovsiienko * If no context found the new one is created and initialized with specified 31217e19bc4SViacheslav Ovsiienko * IB device context and parameters. 31317e19bc4SViacheslav Ovsiienko * 31417e19bc4SViacheslav Ovsiienko * @param[in] spawn 31517e19bc4SViacheslav Ovsiienko * Pointer to the IB device attributes (name, port, etc). 
31617e19bc4SViacheslav Ovsiienko * 31717e19bc4SViacheslav Ovsiienko * @return 31817e19bc4SViacheslav Ovsiienko * Pointer to mlx5_ibv_shared object on success, 31917e19bc4SViacheslav Ovsiienko * otherwise NULL and rte_errno is set. 32017e19bc4SViacheslav Ovsiienko */ 32117e19bc4SViacheslav Ovsiienko static struct mlx5_ibv_shared * 32217e19bc4SViacheslav Ovsiienko mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn) 32317e19bc4SViacheslav Ovsiienko { 32417e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh; 32517e19bc4SViacheslav Ovsiienko int err = 0; 32653e5a82fSViacheslav Ovsiienko uint32_t i; 32717e19bc4SViacheslav Ovsiienko 32817e19bc4SViacheslav Ovsiienko assert(spawn); 32917e19bc4SViacheslav Ovsiienko /* Secondary process should not create the shared context. */ 33017e19bc4SViacheslav Ovsiienko assert(rte_eal_process_type() == RTE_PROC_PRIMARY); 33117e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 33217e19bc4SViacheslav Ovsiienko /* Search for IB context by device name. */ 33317e19bc4SViacheslav Ovsiienko LIST_FOREACH(sh, &mlx5_ibv_list, next) { 33417e19bc4SViacheslav Ovsiienko if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) { 33517e19bc4SViacheslav Ovsiienko sh->refcnt++; 33617e19bc4SViacheslav Ovsiienko goto exit; 33717e19bc4SViacheslav Ovsiienko } 33817e19bc4SViacheslav Ovsiienko } 339ae4eb7dcSViacheslav Ovsiienko /* No device found, we have to create new shared context. 
*/ 34017e19bc4SViacheslav Ovsiienko assert(spawn->max_port); 34117e19bc4SViacheslav Ovsiienko sh = rte_zmalloc("ethdev shared ib context", 34217e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared) + 34317e19bc4SViacheslav Ovsiienko spawn->max_port * 34417e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared_port), 34517e19bc4SViacheslav Ovsiienko RTE_CACHE_LINE_SIZE); 34617e19bc4SViacheslav Ovsiienko if (!sh) { 34717e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "shared context allocation failure"); 34817e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 34917e19bc4SViacheslav Ovsiienko goto exit; 35017e19bc4SViacheslav Ovsiienko } 35117e19bc4SViacheslav Ovsiienko /* Try to open IB device with DV first, then usual Verbs. */ 35217e19bc4SViacheslav Ovsiienko errno = 0; 35317e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev); 35417e19bc4SViacheslav Ovsiienko if (sh->ctx) { 35517e19bc4SViacheslav Ovsiienko sh->devx = 1; 35617e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is supported"); 35717e19bc4SViacheslav Ovsiienko } else { 35817e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->open_device(spawn->ibv_dev); 35917e19bc4SViacheslav Ovsiienko if (!sh->ctx) { 36017e19bc4SViacheslav Ovsiienko err = errno ? 
errno : ENODEV; 36117e19bc4SViacheslav Ovsiienko goto error; 36217e19bc4SViacheslav Ovsiienko } 36317e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is NOT supported"); 36417e19bc4SViacheslav Ovsiienko } 36517e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr); 36617e19bc4SViacheslav Ovsiienko if (err) { 36717e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "ibv_query_device_ex() failed"); 36817e19bc4SViacheslav Ovsiienko goto error; 36917e19bc4SViacheslav Ovsiienko } 37017e19bc4SViacheslav Ovsiienko sh->refcnt = 1; 37117e19bc4SViacheslav Ovsiienko sh->max_port = spawn->max_port; 37217e19bc4SViacheslav Ovsiienko strncpy(sh->ibdev_name, sh->ctx->device->name, 37317e19bc4SViacheslav Ovsiienko sizeof(sh->ibdev_name)); 37417e19bc4SViacheslav Ovsiienko strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path, 37517e19bc4SViacheslav Ovsiienko sizeof(sh->ibdev_path)); 376ab3cffcfSViacheslav Ovsiienko sh->pci_dev = spawn->pci_dev; 37753e5a82fSViacheslav Ovsiienko pthread_mutex_init(&sh->intr_mutex, NULL); 37853e5a82fSViacheslav Ovsiienko /* 37953e5a82fSViacheslav Ovsiienko * Setting port_id to max unallowed value means 38053e5a82fSViacheslav Ovsiienko * there is no interrupt subhandler installed for 38153e5a82fSViacheslav Ovsiienko * the given port index i. 
38253e5a82fSViacheslav Ovsiienko */ 38353e5a82fSViacheslav Ovsiienko for (i = 0; i < sh->max_port; i++) 38453e5a82fSViacheslav Ovsiienko sh->port[i].ih_port_id = RTE_MAX_ETHPORTS; 38517e19bc4SViacheslav Ovsiienko sh->pd = mlx5_glue->alloc_pd(sh->ctx); 38617e19bc4SViacheslav Ovsiienko if (sh->pd == NULL) { 38717e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "PD allocation failure"); 38817e19bc4SViacheslav Ovsiienko err = ENOMEM; 38917e19bc4SViacheslav Ovsiienko goto error; 39017e19bc4SViacheslav Ovsiienko } 391b9d86122SDekel Peled #ifdef HAVE_IBV_FLOW_DV_SUPPORT 392b9d86122SDekel Peled err = mlx5_get_pdn(sh->pd, &sh->pdn); 393b9d86122SDekel Peled if (err) { 394b9d86122SDekel Peled DRV_LOG(ERR, "Fail to extract pdn from PD"); 395b9d86122SDekel Peled goto error; 396b9d86122SDekel Peled } 397b9d86122SDekel Peled #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 398ab3cffcfSViacheslav Ovsiienko /* 399ab3cffcfSViacheslav Ovsiienko * Once the device is added to the list of memory event 400ab3cffcfSViacheslav Ovsiienko * callback, its global MR cache table cannot be expanded 401ab3cffcfSViacheslav Ovsiienko * on the fly because of deadlock. If it overflows, lookup 402ab3cffcfSViacheslav Ovsiienko * should be done by searching MR list linearly, which is slow. 403ab3cffcfSViacheslav Ovsiienko * 404ab3cffcfSViacheslav Ovsiienko * At this point the device is not added to the memory 405ab3cffcfSViacheslav Ovsiienko * event list yet, context is just being created. 406ab3cffcfSViacheslav Ovsiienko */ 407ab3cffcfSViacheslav Ovsiienko err = mlx5_mr_btree_init(&sh->mr.cache, 408ab3cffcfSViacheslav Ovsiienko MLX5_MR_BTREE_CACHE_N * 2, 409ab3cffcfSViacheslav Ovsiienko sh->pci_dev->device.numa_node); 410ab3cffcfSViacheslav Ovsiienko if (err) { 411ab3cffcfSViacheslav Ovsiienko err = rte_errno; 412ab3cffcfSViacheslav Ovsiienko goto error; 413ab3cffcfSViacheslav Ovsiienko } 4145382d28cSMatan Azrad mlx5_flow_counters_mng_init(sh); 4150e3d0525SViacheslav Ovsiienko /* Add device to memory callback list. 
*/ 4160e3d0525SViacheslav Ovsiienko rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 4170e3d0525SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, 4180e3d0525SViacheslav Ovsiienko sh, mem_event_cb); 4190e3d0525SViacheslav Ovsiienko rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 4200e3d0525SViacheslav Ovsiienko /* Add context to the global device list. */ 42117e19bc4SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next); 42217e19bc4SViacheslav Ovsiienko exit: 42317e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 42417e19bc4SViacheslav Ovsiienko return sh; 42517e19bc4SViacheslav Ovsiienko error: 42617e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 42717e19bc4SViacheslav Ovsiienko assert(sh); 42817e19bc4SViacheslav Ovsiienko if (sh->pd) 42917e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 43017e19bc4SViacheslav Ovsiienko if (sh->ctx) 43117e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 43217e19bc4SViacheslav Ovsiienko rte_free(sh); 43317e19bc4SViacheslav Ovsiienko assert(err > 0); 43417e19bc4SViacheslav Ovsiienko rte_errno = err; 43517e19bc4SViacheslav Ovsiienko return NULL; 43617e19bc4SViacheslav Ovsiienko } 43717e19bc4SViacheslav Ovsiienko 43817e19bc4SViacheslav Ovsiienko /** 43917e19bc4SViacheslav Ovsiienko * Free shared IB device context. Decrement counter and if zero free 44017e19bc4SViacheslav Ovsiienko * all allocated resources and close handles. 
44117e19bc4SViacheslav Ovsiienko * 44217e19bc4SViacheslav Ovsiienko * @param[in] sh 44317e19bc4SViacheslav Ovsiienko * Pointer to mlx5_ibv_shared object to free 44417e19bc4SViacheslav Ovsiienko */ 44517e19bc4SViacheslav Ovsiienko static void 44617e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) 44717e19bc4SViacheslav Ovsiienko { 44817e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 44917e19bc4SViacheslav Ovsiienko #ifndef NDEBUG 45017e19bc4SViacheslav Ovsiienko /* Check the object presence in the list. */ 45117e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *lctx; 45217e19bc4SViacheslav Ovsiienko 45317e19bc4SViacheslav Ovsiienko LIST_FOREACH(lctx, &mlx5_ibv_list, next) 45417e19bc4SViacheslav Ovsiienko if (lctx == sh) 45517e19bc4SViacheslav Ovsiienko break; 45617e19bc4SViacheslav Ovsiienko assert(lctx); 45717e19bc4SViacheslav Ovsiienko if (lctx != sh) { 45817e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "Freeing non-existing shared IB context"); 45917e19bc4SViacheslav Ovsiienko goto exit; 46017e19bc4SViacheslav Ovsiienko } 46117e19bc4SViacheslav Ovsiienko #endif 46217e19bc4SViacheslav Ovsiienko assert(sh); 46317e19bc4SViacheslav Ovsiienko assert(sh->refcnt); 46417e19bc4SViacheslav Ovsiienko /* Secondary process should not free the shared context. */ 46517e19bc4SViacheslav Ovsiienko assert(rte_eal_process_type() == RTE_PROC_PRIMARY); 46617e19bc4SViacheslav Ovsiienko if (--sh->refcnt) 46717e19bc4SViacheslav Ovsiienko goto exit; 468ab3cffcfSViacheslav Ovsiienko /* Release created Memory Regions. */ 469ab3cffcfSViacheslav Ovsiienko mlx5_mr_release(sh); 4700e3d0525SViacheslav Ovsiienko /* Remove from memory callback device list. 
*/ 4710e3d0525SViacheslav Ovsiienko rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 4720e3d0525SViacheslav Ovsiienko LIST_REMOVE(sh, mem_event_cb); 4730e3d0525SViacheslav Ovsiienko rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 4740e3d0525SViacheslav Ovsiienko /* Remove context from the global device list. */ 47517e19bc4SViacheslav Ovsiienko LIST_REMOVE(sh, next); 47653e5a82fSViacheslav Ovsiienko /* 47753e5a82fSViacheslav Ovsiienko * Ensure there is no async event handler installed. 47853e5a82fSViacheslav Ovsiienko * Only primary process handles async device events. 47953e5a82fSViacheslav Ovsiienko **/ 4805382d28cSMatan Azrad mlx5_flow_counters_mng_close(sh); 48153e5a82fSViacheslav Ovsiienko assert(!sh->intr_cnt); 48253e5a82fSViacheslav Ovsiienko if (sh->intr_cnt) 4835897ac13SViacheslav Ovsiienko mlx5_intr_callback_unregister 48453e5a82fSViacheslav Ovsiienko (&sh->intr_handle, mlx5_dev_interrupt_handler, sh); 48553e5a82fSViacheslav Ovsiienko pthread_mutex_destroy(&sh->intr_mutex); 48617e19bc4SViacheslav Ovsiienko if (sh->pd) 48717e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 48817e19bc4SViacheslav Ovsiienko if (sh->ctx) 48917e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 49017e19bc4SViacheslav Ovsiienko rte_free(sh); 49117e19bc4SViacheslav Ovsiienko exit: 49217e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 49317e19bc4SViacheslav Ovsiienko } 49417e19bc4SViacheslav Ovsiienko 495771fa900SAdrien Mazarguil /** 496b2177648SViacheslav Ovsiienko * Initialize DR related data within private structure. 497b2177648SViacheslav Ovsiienko * Routine checks the reference counter and does actual 498ae4eb7dcSViacheslav Ovsiienko * resources creation/initialization only if counter is zero. 499b2177648SViacheslav Ovsiienko * 500b2177648SViacheslav Ovsiienko * @param[in] priv 501b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 
502b2177648SViacheslav Ovsiienko * 503b2177648SViacheslav Ovsiienko * @return 504b2177648SViacheslav Ovsiienko * Zero on success, positive error code otherwise. 505b2177648SViacheslav Ovsiienko */ 506b2177648SViacheslav Ovsiienko static int 507b2177648SViacheslav Ovsiienko mlx5_alloc_shared_dr(struct mlx5_priv *priv) 508b2177648SViacheslav Ovsiienko { 509b2177648SViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR 510b2177648SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = priv->sh; 511b2177648SViacheslav Ovsiienko int err = 0; 512d1e64fbfSOri Kam void *domain; 513b2177648SViacheslav Ovsiienko 514b2177648SViacheslav Ovsiienko assert(sh); 515b2177648SViacheslav Ovsiienko if (sh->dv_refcnt) { 516b2177648SViacheslav Ovsiienko /* Shared DV/DR structures is already initialized. */ 517b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 518b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 519b2177648SViacheslav Ovsiienko return 0; 520b2177648SViacheslav Ovsiienko } 521b2177648SViacheslav Ovsiienko /* Reference counter is zero, we should initialize structures. 
*/ 522d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain(sh->ctx, 523d1e64fbfSOri Kam MLX5DV_DR_DOMAIN_TYPE_NIC_RX); 524d1e64fbfSOri Kam if (!domain) { 525d1e64fbfSOri Kam DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed"); 526b2177648SViacheslav Ovsiienko err = errno; 527b2177648SViacheslav Ovsiienko goto error; 528b2177648SViacheslav Ovsiienko } 529d1e64fbfSOri Kam sh->rx_domain = domain; 530d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain(sh->ctx, 531d1e64fbfSOri Kam MLX5DV_DR_DOMAIN_TYPE_NIC_TX); 532d1e64fbfSOri Kam if (!domain) { 533d1e64fbfSOri Kam DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed"); 534b2177648SViacheslav Ovsiienko err = errno; 535b2177648SViacheslav Ovsiienko goto error; 536b2177648SViacheslav Ovsiienko } 53779e35d0dSViacheslav Ovsiienko pthread_mutex_init(&sh->dv_mutex, NULL); 538d1e64fbfSOri Kam sh->tx_domain = domain; 539e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 540e2b4925eSOri Kam if (priv->config.dv_esw_en) { 541d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain 542d1e64fbfSOri Kam (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB); 543d1e64fbfSOri Kam if (!domain) { 544d1e64fbfSOri Kam DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed"); 545e2b4925eSOri Kam err = errno; 546e2b4925eSOri Kam goto error; 547e2b4925eSOri Kam } 548d1e64fbfSOri Kam sh->fdb_domain = domain; 54934fa7c02SOri Kam sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop(); 550e2b4925eSOri Kam } 551e2b4925eSOri Kam #endif 552b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 553b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 554b2177648SViacheslav Ovsiienko return 0; 555b2177648SViacheslav Ovsiienko 556b2177648SViacheslav Ovsiienko error: 557b2177648SViacheslav Ovsiienko /* Rollback the created objects. 
*/ 558d1e64fbfSOri Kam if (sh->rx_domain) { 559d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->rx_domain); 560d1e64fbfSOri Kam sh->rx_domain = NULL; 561b2177648SViacheslav Ovsiienko } 562d1e64fbfSOri Kam if (sh->tx_domain) { 563d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->tx_domain); 564d1e64fbfSOri Kam sh->tx_domain = NULL; 565b2177648SViacheslav Ovsiienko } 566d1e64fbfSOri Kam if (sh->fdb_domain) { 567d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->fdb_domain); 568d1e64fbfSOri Kam sh->fdb_domain = NULL; 569e2b4925eSOri Kam } 57034fa7c02SOri Kam if (sh->esw_drop_action) { 57134fa7c02SOri Kam mlx5_glue->destroy_flow_action(sh->esw_drop_action); 57234fa7c02SOri Kam sh->esw_drop_action = NULL; 57334fa7c02SOri Kam } 574b2177648SViacheslav Ovsiienko return err; 575b2177648SViacheslav Ovsiienko #else 576b2177648SViacheslav Ovsiienko (void)priv; 577b2177648SViacheslav Ovsiienko return 0; 578b2177648SViacheslav Ovsiienko #endif 579b2177648SViacheslav Ovsiienko } 580b2177648SViacheslav Ovsiienko 581b2177648SViacheslav Ovsiienko /** 582b2177648SViacheslav Ovsiienko * Destroy DR related data within private structure. 583b2177648SViacheslav Ovsiienko * 584b2177648SViacheslav Ovsiienko * @param[in] priv 585b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 
/**
 * Release the direct-rules (DR) resources held via the shared IB context.
 *
 * The shared context is reference counted; the DR domains, the eswitch
 * drop action and the DV mutex are actually destroyed only when the last
 * port referencing them releases its reference.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *shared;

	if (!priv->dr_shared)
		return;
	priv->dr_shared = 0;
	shared = priv->sh;
	assert(shared);
	assert(shared->dv_refcnt);
	/* Defer the real teardown until the last reference is dropped. */
	if (shared->dv_refcnt && --shared->dv_refcnt)
		return;
	if (shared->rx_domain) {
		mlx5_glue->dr_destroy_domain(shared->rx_domain);
		shared->rx_domain = NULL;
	}
	if (shared->tx_domain) {
		mlx5_glue->dr_destroy_domain(shared->tx_domain);
		shared->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (shared->fdb_domain) {
		mlx5_glue->dr_destroy_domain(shared->fdb_domain);
		shared->fdb_domain = NULL;
	}
	if (shared->esw_drop_action) {
		mlx5_glue->destroy_flow_action(shared->esw_drop_action);
		shared->esw_drop_action = NULL;
	}
#endif
	pthread_mutex_destroy(&shared->dv_mutex);
#else
	(void)priv;
#endif
}
6277be600c8SYongseok Koh * 6287be600c8SYongseok Koh * A memzone is reserved by primary process and secondary processes attach to 6297be600c8SYongseok Koh * the memzone. 6307be600c8SYongseok Koh * 6317be600c8SYongseok Koh * @return 6327be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 633974f1e7eSYongseok Koh */ 6347be600c8SYongseok Koh static int 6357be600c8SYongseok Koh mlx5_init_shared_data(void) 636974f1e7eSYongseok Koh { 637974f1e7eSYongseok Koh const struct rte_memzone *mz; 6387be600c8SYongseok Koh int ret = 0; 639974f1e7eSYongseok Koh 640974f1e7eSYongseok Koh rte_spinlock_lock(&mlx5_shared_data_lock); 641974f1e7eSYongseok Koh if (mlx5_shared_data == NULL) { 642974f1e7eSYongseok Koh if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 643974f1e7eSYongseok Koh /* Allocate shared memory. */ 644974f1e7eSYongseok Koh mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 645974f1e7eSYongseok Koh sizeof(*mlx5_shared_data), 646974f1e7eSYongseok Koh SOCKET_ID_ANY, 0); 6477be600c8SYongseok Koh if (mz == NULL) { 6487be600c8SYongseok Koh DRV_LOG(ERR, 6497be600c8SYongseok Koh "Cannot allocate mlx5 shared data\n"); 6507be600c8SYongseok Koh ret = -rte_errno; 6517be600c8SYongseok Koh goto error; 6527be600c8SYongseok Koh } 6537be600c8SYongseok Koh mlx5_shared_data = mz->addr; 6547be600c8SYongseok Koh memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 6557be600c8SYongseok Koh rte_spinlock_init(&mlx5_shared_data->lock); 656974f1e7eSYongseok Koh } else { 657974f1e7eSYongseok Koh /* Lookup allocated shared memory. 
/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *value = getenv(name);

	return (value == NULL) ? 0 : atoi(value);
}
7011e3a39f7SXueming Li * @param[in] data 7021e3a39f7SXueming Li * A pointer to the callback data. 7031e3a39f7SXueming Li * 7041e3a39f7SXueming Li * @return 705a6d83b6aSNélio Laranjeiro * Allocated buffer, NULL otherwise and rte_errno is set. 7061e3a39f7SXueming Li */ 7071e3a39f7SXueming Li static void * 7081e3a39f7SXueming Li mlx5_alloc_verbs_buf(size_t size, void *data) 7091e3a39f7SXueming Li { 710dbeba4cfSThomas Monjalon struct mlx5_priv *priv = data; 7111e3a39f7SXueming Li void *ret; 7121e3a39f7SXueming Li size_t alignment = sysconf(_SC_PAGESIZE); 713d10b09dbSOlivier Matz unsigned int socket = SOCKET_ID_ANY; 7141e3a39f7SXueming Li 715d10b09dbSOlivier Matz if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) { 716d10b09dbSOlivier Matz const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 717d10b09dbSOlivier Matz 718d10b09dbSOlivier Matz socket = ctrl->socket; 719d10b09dbSOlivier Matz } else if (priv->verbs_alloc_ctx.type == 720d10b09dbSOlivier Matz MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) { 721d10b09dbSOlivier Matz const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 722d10b09dbSOlivier Matz 723d10b09dbSOlivier Matz socket = ctrl->socket; 724d10b09dbSOlivier Matz } 7251e3a39f7SXueming Li assert(data != NULL); 726d10b09dbSOlivier Matz ret = rte_malloc_socket(__func__, size, alignment, socket); 727a6d83b6aSNélio Laranjeiro if (!ret && size) 728a6d83b6aSNélio Laranjeiro rte_errno = ENOMEM; 7291e3a39f7SXueming Li return ret; 7301e3a39f7SXueming Li } 7311e3a39f7SXueming Li 7321e3a39f7SXueming Li /** 7331e3a39f7SXueming Li * Verbs callback to free a memory. 7341e3a39f7SXueming Li * 7351e3a39f7SXueming Li * @param[in] ptr 7361e3a39f7SXueming Li * A pointer to the memory to free. 7371e3a39f7SXueming Li * @param[in] data 7381e3a39f7SXueming Li * A pointer to the callback data. 
7391e3a39f7SXueming Li */ 7401e3a39f7SXueming Li static void 7411e3a39f7SXueming Li mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) 7421e3a39f7SXueming Li { 7431e3a39f7SXueming Li assert(data != NULL); 7441e3a39f7SXueming Li rte_free(ptr); 7451e3a39f7SXueming Li } 7461e3a39f7SXueming Li 7471e3a39f7SXueming Li /** 748*c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 749*c9ba7523SRaslan Darawsheh * 750*c9ba7523SRaslan Darawsheh * @param[in] dev 751*c9ba7523SRaslan Darawsheh * A pointer to eth_dev 752*c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 753*c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 754*c9ba7523SRaslan Darawsheh * 755*c9ba7523SRaslan Darawsheh * @return 756*c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 757*c9ba7523SRaslan Darawsheh */ 758*c9ba7523SRaslan Darawsheh int 759*c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 760*c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 761*c9ba7523SRaslan Darawsheh { 762*c9ba7523SRaslan Darawsheh assert(udp_tunnel != NULL); 763*c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && 764*c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 765*c9ba7523SRaslan Darawsheh return 0; 766*c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && 767*c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 768*c9ba7523SRaslan Darawsheh return 0; 769*c9ba7523SRaslan Darawsheh return -ENOTSUP; 770*c9ba7523SRaslan Darawsheh } 771*c9ba7523SRaslan Darawsheh 772*c9ba7523SRaslan Darawsheh /** 773120dc4a7SYongseok Koh * Initialize process private data structure. 774120dc4a7SYongseok Koh * 775120dc4a7SYongseok Koh * @param dev 776120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 777120dc4a7SYongseok Koh * 778120dc4a7SYongseok Koh * @return 779120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 
780120dc4a7SYongseok Koh */ 781120dc4a7SYongseok Koh int 782120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 783120dc4a7SYongseok Koh { 784120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 785120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 786120dc4a7SYongseok Koh size_t ppriv_size; 787120dc4a7SYongseok Koh 788120dc4a7SYongseok Koh /* 789120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 790120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 791120dc4a7SYongseok Koh */ 792120dc4a7SYongseok Koh ppriv_size = 793120dc4a7SYongseok Koh sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); 794120dc4a7SYongseok Koh ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size, 795120dc4a7SYongseok Koh RTE_CACHE_LINE_SIZE, dev->device->numa_node); 796120dc4a7SYongseok Koh if (!ppriv) { 797120dc4a7SYongseok Koh rte_errno = ENOMEM; 798120dc4a7SYongseok Koh return -rte_errno; 799120dc4a7SYongseok Koh } 800120dc4a7SYongseok Koh ppriv->uar_table_sz = ppriv_size; 801120dc4a7SYongseok Koh dev->process_private = ppriv; 802120dc4a7SYongseok Koh return 0; 803120dc4a7SYongseok Koh } 804120dc4a7SYongseok Koh 805120dc4a7SYongseok Koh /** 806120dc4a7SYongseok Koh * Un-initialize process private data structure. 807120dc4a7SYongseok Koh * 808120dc4a7SYongseok Koh * @param dev 809120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 810120dc4a7SYongseok Koh */ 811120dc4a7SYongseok Koh static void 812120dc4a7SYongseok Koh mlx5_proc_priv_uninit(struct rte_eth_dev *dev) 813120dc4a7SYongseok Koh { 814120dc4a7SYongseok Koh if (!dev->process_private) 815120dc4a7SYongseok Koh return; 816120dc4a7SYongseok Koh rte_free(dev->process_private); 817120dc4a7SYongseok Koh dev->process_private = NULL; 818120dc4a7SYongseok Koh } 819120dc4a7SYongseok Koh 820120dc4a7SYongseok Koh /** 821771fa900SAdrien Mazarguil * DPDK callback to close the device. 
822771fa900SAdrien Mazarguil * 823771fa900SAdrien Mazarguil * Destroy all queues and objects, free memory. 824771fa900SAdrien Mazarguil * 825771fa900SAdrien Mazarguil * @param dev 826771fa900SAdrien Mazarguil * Pointer to Ethernet device structure. 827771fa900SAdrien Mazarguil */ 828771fa900SAdrien Mazarguil static void 829771fa900SAdrien Mazarguil mlx5_dev_close(struct rte_eth_dev *dev) 830771fa900SAdrien Mazarguil { 831dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 8322e22920bSAdrien Mazarguil unsigned int i; 8336af6b973SNélio Laranjeiro int ret; 834771fa900SAdrien Mazarguil 835a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u closing device \"%s\"", 8360f99970bSNélio Laranjeiro dev->data->port_id, 837f048f3d4SViacheslav Ovsiienko ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : "")); 838ecc1c29dSAdrien Mazarguil /* In case mlx5_dev_stop() has not been called. */ 839af4f09f2SNélio Laranjeiro mlx5_dev_interrupt_handler_uninstall(dev); 840af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 841af689f1fSNelio Laranjeiro mlx5_flow_flush(dev, NULL); 8422e22920bSAdrien Mazarguil /* Prevent crashes when queues are still in use. */ 8432e22920bSAdrien Mazarguil dev->rx_pkt_burst = removed_rx_burst; 8442e22920bSAdrien Mazarguil dev->tx_pkt_burst = removed_tx_burst; 8452aac5b5dSYongseok Koh rte_wmb(); 8462aac5b5dSYongseok Koh /* Disable datapath on secondary process. */ 8472aac5b5dSYongseok Koh mlx5_mp_req_stop_rxtx(dev); 8482e22920bSAdrien Mazarguil if (priv->rxqs != NULL) { 8492e22920bSAdrien Mazarguil /* XXX race condition if mlx5_rx_burst() is still running. 
*/ 8502e22920bSAdrien Mazarguil usleep(1000); 851a1366b1aSNélio Laranjeiro for (i = 0; (i != priv->rxqs_n); ++i) 852af4f09f2SNélio Laranjeiro mlx5_rxq_release(dev, i); 8532e22920bSAdrien Mazarguil priv->rxqs_n = 0; 8542e22920bSAdrien Mazarguil priv->rxqs = NULL; 8552e22920bSAdrien Mazarguil } 8562e22920bSAdrien Mazarguil if (priv->txqs != NULL) { 8572e22920bSAdrien Mazarguil /* XXX race condition if mlx5_tx_burst() is still running. */ 8582e22920bSAdrien Mazarguil usleep(1000); 8596e78005aSNélio Laranjeiro for (i = 0; (i != priv->txqs_n); ++i) 860af4f09f2SNélio Laranjeiro mlx5_txq_release(dev, i); 8612e22920bSAdrien Mazarguil priv->txqs_n = 0; 8622e22920bSAdrien Mazarguil priv->txqs = NULL; 8632e22920bSAdrien Mazarguil } 864120dc4a7SYongseok Koh mlx5_proc_priv_uninit(dev); 8657d6bf6b8SYongseok Koh mlx5_mprq_free_mp(dev); 866b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 86729c1d8bbSNélio Laranjeiro if (priv->rss_conf.rss_key != NULL) 86829c1d8bbSNélio Laranjeiro rte_free(priv->rss_conf.rss_key); 869634efbc2SNelio Laranjeiro if (priv->reta_idx != NULL) 870634efbc2SNelio Laranjeiro rte_free(priv->reta_idx); 871ccdcba53SNélio Laranjeiro if (priv->config.vf) 872ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_flush(dev); 87326c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 87426c08b97SAdrien Mazarguil close(priv->nl_socket_route); 87526c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 87626c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 877dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 878dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 879942d13e6SViacheslav Ovsiienko if (priv->sh) { 880942d13e6SViacheslav Ovsiienko /* 881942d13e6SViacheslav Ovsiienko * Free the shared context in last turn, because the cleanup 882942d13e6SViacheslav Ovsiienko * routines above may use some shared fields, like 883942d13e6SViacheslav Ovsiienko * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing 884942d13e6SViacheslav Ovsiienko * 
ifindex if Netlink fails. 885942d13e6SViacheslav Ovsiienko */ 886942d13e6SViacheslav Ovsiienko mlx5_free_shared_ibctx(priv->sh); 887942d13e6SViacheslav Ovsiienko priv->sh = NULL; 888942d13e6SViacheslav Ovsiienko } 88923820a79SDekel Peled ret = mlx5_hrxq_verify(dev); 890f5479b68SNélio Laranjeiro if (ret) 891a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some hash Rx queue still remain", 8920f99970bSNélio Laranjeiro dev->data->port_id); 89315c80a12SDekel Peled ret = mlx5_ind_table_obj_verify(dev); 8944c7a0f5fSNélio Laranjeiro if (ret) 895a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some indirection table still remain", 8960f99970bSNélio Laranjeiro dev->data->port_id); 89793403560SDekel Peled ret = mlx5_rxq_obj_verify(dev); 89809cb5b58SNélio Laranjeiro if (ret) 89993403560SDekel Peled DRV_LOG(WARNING, "port %u some Rx queue objects still remain", 9000f99970bSNélio Laranjeiro dev->data->port_id); 901af4f09f2SNélio Laranjeiro ret = mlx5_rxq_verify(dev); 902a1366b1aSNélio Laranjeiro if (ret) 903a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Rx queues still remain", 9040f99970bSNélio Laranjeiro dev->data->port_id); 905af4f09f2SNélio Laranjeiro ret = mlx5_txq_ibv_verify(dev); 906faf2667fSNélio Laranjeiro if (ret) 907a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", 9080f99970bSNélio Laranjeiro dev->data->port_id); 909af4f09f2SNélio Laranjeiro ret = mlx5_txq_verify(dev); 9106e78005aSNélio Laranjeiro if (ret) 911a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Tx queues still remain", 9120f99970bSNélio Laranjeiro dev->data->port_id); 913af4f09f2SNélio Laranjeiro ret = mlx5_flow_verify(dev); 9146af6b973SNélio Laranjeiro if (ret) 915a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some flows still remain", 916a170a30dSNélio Laranjeiro dev->data->port_id); 9172b730263SAdrien Mazarguil if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 9182b730263SAdrien Mazarguil unsigned int c = 0; 
919d874a4eeSThomas Monjalon uint16_t port_id; 9202b730263SAdrien Mazarguil 921d874a4eeSThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) { 922dbeba4cfSThomas Monjalon struct mlx5_priv *opriv = 923d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 9242b730263SAdrien Mazarguil 9252b730263SAdrien Mazarguil if (!opriv || 9262b730263SAdrien Mazarguil opriv->domain_id != priv->domain_id || 927d874a4eeSThomas Monjalon &rte_eth_devices[port_id] == dev) 9282b730263SAdrien Mazarguil continue; 9292b730263SAdrien Mazarguil ++c; 9302b730263SAdrien Mazarguil } 9312b730263SAdrien Mazarguil if (!c) 9322b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 9332b730263SAdrien Mazarguil } 934771fa900SAdrien Mazarguil memset(priv, 0, sizeof(*priv)); 9352b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 93642603bbdSOphir Munk /* 93742603bbdSOphir Munk * Reset mac_addrs to NULL such that it is not freed as part of 93842603bbdSOphir Munk * rte_eth_dev_release_port(). mac_addrs is part of dev_private so 93942603bbdSOphir Munk * it is freed when dev_private is freed. 
94042603bbdSOphir Munk */ 94142603bbdSOphir Munk dev->data->mac_addrs = NULL; 942771fa900SAdrien Mazarguil } 943771fa900SAdrien Mazarguil 9440887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops = { 945e60fbd5bSAdrien Mazarguil .dev_configure = mlx5_dev_configure, 946e60fbd5bSAdrien Mazarguil .dev_start = mlx5_dev_start, 947e60fbd5bSAdrien Mazarguil .dev_stop = mlx5_dev_stop, 94862072098SOr Ami .dev_set_link_down = mlx5_set_link_down, 94962072098SOr Ami .dev_set_link_up = mlx5_set_link_up, 950771fa900SAdrien Mazarguil .dev_close = mlx5_dev_close, 9511bdbe1afSAdrien Mazarguil .promiscuous_enable = mlx5_promiscuous_enable, 9521bdbe1afSAdrien Mazarguil .promiscuous_disable = mlx5_promiscuous_disable, 9531bdbe1afSAdrien Mazarguil .allmulticast_enable = mlx5_allmulticast_enable, 9541bdbe1afSAdrien Mazarguil .allmulticast_disable = mlx5_allmulticast_disable, 955cb8faed7SAdrien Mazarguil .link_update = mlx5_link_update, 95687011737SAdrien Mazarguil .stats_get = mlx5_stats_get, 95787011737SAdrien Mazarguil .stats_reset = mlx5_stats_reset, 958a4193ae3SShahaf Shuler .xstats_get = mlx5_xstats_get, 959a4193ae3SShahaf Shuler .xstats_reset = mlx5_xstats_reset, 960a4193ae3SShahaf Shuler .xstats_get_names = mlx5_xstats_get_names, 961714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 962e60fbd5bSAdrien Mazarguil .dev_infos_get = mlx5_dev_infos_get, 963e571ad55STom Barbette .read_clock = mlx5_read_clock, 96478a38edfSJianfeng Tan .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 965e9086978SAdrien Mazarguil .vlan_filter_set = mlx5_vlan_filter_set, 9662e22920bSAdrien Mazarguil .rx_queue_setup = mlx5_rx_queue_setup, 9672e22920bSAdrien Mazarguil .tx_queue_setup = mlx5_tx_queue_setup, 9682e22920bSAdrien Mazarguil .rx_queue_release = mlx5_rx_queue_release, 9692e22920bSAdrien Mazarguil .tx_queue_release = mlx5_tx_queue_release, 97002d75430SAdrien Mazarguil .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 97102d75430SAdrien Mazarguil .flow_ctrl_set = 
mlx5_dev_set_flow_ctrl, 9723318aef7SAdrien Mazarguil .mac_addr_remove = mlx5_mac_addr_remove, 9733318aef7SAdrien Mazarguil .mac_addr_add = mlx5_mac_addr_add, 97486977fccSDavid Marchand .mac_addr_set = mlx5_mac_addr_set, 975e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 976cf37ca95SAdrien Mazarguil .mtu_set = mlx5_dev_set_mtu, 977f3db9489SYaacov Hazan .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 978f3db9489SYaacov Hazan .vlan_offload_set = mlx5_vlan_offload_set, 979634efbc2SNelio Laranjeiro .reta_update = mlx5_dev_rss_reta_update, 980634efbc2SNelio Laranjeiro .reta_query = mlx5_dev_rss_reta_query, 9812f97422eSNelio Laranjeiro .rss_hash_update = mlx5_rss_hash_update, 9822f97422eSNelio Laranjeiro .rss_hash_conf_get = mlx5_rss_hash_conf_get, 98376f5c99eSYaacov Hazan .filter_ctrl = mlx5_dev_filter_ctrl, 9848788fec1SOlivier Matz .rx_descriptor_status = mlx5_rx_descriptor_status, 9858788fec1SOlivier Matz .tx_descriptor_status = mlx5_tx_descriptor_status, 98626f04883STom Barbette .rx_queue_count = mlx5_rx_queue_count, 9873c7d44afSShahaf Shuler .rx_queue_intr_enable = mlx5_rx_intr_enable, 9883c7d44afSShahaf Shuler .rx_queue_intr_disable = mlx5_rx_intr_disable, 989d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 990*c9ba7523SRaslan Darawsheh .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, 991771fa900SAdrien Mazarguil }; 992771fa900SAdrien Mazarguil 993714bf46eSThomas Monjalon /* Available operations from secondary process. 
*/ 99487ec44ceSXueming Li static const struct eth_dev_ops mlx5_dev_sec_ops = { 99587ec44ceSXueming Li .stats_get = mlx5_stats_get, 99687ec44ceSXueming Li .stats_reset = mlx5_stats_reset, 99787ec44ceSXueming Li .xstats_get = mlx5_xstats_get, 99887ec44ceSXueming Li .xstats_reset = mlx5_xstats_reset, 99987ec44ceSXueming Li .xstats_get_names = mlx5_xstats_get_names, 1000714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 100187ec44ceSXueming Li .dev_infos_get = mlx5_dev_infos_get, 100287ec44ceSXueming Li .rx_descriptor_status = mlx5_rx_descriptor_status, 100387ec44ceSXueming Li .tx_descriptor_status = mlx5_tx_descriptor_status, 100487ec44ceSXueming Li }; 100587ec44ceSXueming Li 1006714bf46eSThomas Monjalon /* Available operations in flow isolated mode. */ 10070887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops_isolate = { 10080887aa7fSNélio Laranjeiro .dev_configure = mlx5_dev_configure, 10090887aa7fSNélio Laranjeiro .dev_start = mlx5_dev_start, 10100887aa7fSNélio Laranjeiro .dev_stop = mlx5_dev_stop, 10110887aa7fSNélio Laranjeiro .dev_set_link_down = mlx5_set_link_down, 10120887aa7fSNélio Laranjeiro .dev_set_link_up = mlx5_set_link_up, 10130887aa7fSNélio Laranjeiro .dev_close = mlx5_dev_close, 101424b068adSYongseok Koh .promiscuous_enable = mlx5_promiscuous_enable, 101524b068adSYongseok Koh .promiscuous_disable = mlx5_promiscuous_disable, 10162547ee74SYongseok Koh .allmulticast_enable = mlx5_allmulticast_enable, 10172547ee74SYongseok Koh .allmulticast_disable = mlx5_allmulticast_disable, 10180887aa7fSNélio Laranjeiro .link_update = mlx5_link_update, 10190887aa7fSNélio Laranjeiro .stats_get = mlx5_stats_get, 10200887aa7fSNélio Laranjeiro .stats_reset = mlx5_stats_reset, 10210887aa7fSNélio Laranjeiro .xstats_get = mlx5_xstats_get, 10220887aa7fSNélio Laranjeiro .xstats_reset = mlx5_xstats_reset, 10230887aa7fSNélio Laranjeiro .xstats_get_names = mlx5_xstats_get_names, 1024714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 
10250887aa7fSNélio Laranjeiro .dev_infos_get = mlx5_dev_infos_get, 10260887aa7fSNélio Laranjeiro .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 10270887aa7fSNélio Laranjeiro .vlan_filter_set = mlx5_vlan_filter_set, 10280887aa7fSNélio Laranjeiro .rx_queue_setup = mlx5_rx_queue_setup, 10290887aa7fSNélio Laranjeiro .tx_queue_setup = mlx5_tx_queue_setup, 10300887aa7fSNélio Laranjeiro .rx_queue_release = mlx5_rx_queue_release, 10310887aa7fSNélio Laranjeiro .tx_queue_release = mlx5_tx_queue_release, 10320887aa7fSNélio Laranjeiro .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 10330887aa7fSNélio Laranjeiro .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 10340887aa7fSNélio Laranjeiro .mac_addr_remove = mlx5_mac_addr_remove, 10350887aa7fSNélio Laranjeiro .mac_addr_add = mlx5_mac_addr_add, 10360887aa7fSNélio Laranjeiro .mac_addr_set = mlx5_mac_addr_set, 1037e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 10380887aa7fSNélio Laranjeiro .mtu_set = mlx5_dev_set_mtu, 10390887aa7fSNélio Laranjeiro .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 10400887aa7fSNélio Laranjeiro .vlan_offload_set = mlx5_vlan_offload_set, 10410887aa7fSNélio Laranjeiro .filter_ctrl = mlx5_dev_filter_ctrl, 10420887aa7fSNélio Laranjeiro .rx_descriptor_status = mlx5_rx_descriptor_status, 10430887aa7fSNélio Laranjeiro .tx_descriptor_status = mlx5_tx_descriptor_status, 10440887aa7fSNélio Laranjeiro .rx_queue_intr_enable = mlx5_rx_intr_enable, 10450887aa7fSNélio Laranjeiro .rx_queue_intr_disable = mlx5_rx_intr_disable, 1046d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 10470887aa7fSNélio Laranjeiro }; 10480887aa7fSNélio Laranjeiro 1049e72dd09bSNélio Laranjeiro /** 1050e72dd09bSNélio Laranjeiro * Verify and store value for device argument. 1051e72dd09bSNélio Laranjeiro * 1052e72dd09bSNélio Laranjeiro * @param[in] key 1053e72dd09bSNélio Laranjeiro * Key argument to verify. 1054e72dd09bSNélio Laranjeiro * @param[in] val 1055e72dd09bSNélio Laranjeiro * Value associated with key. 
1056e72dd09bSNélio Laranjeiro * @param opaque 1057e72dd09bSNélio Laranjeiro * User data. 1058e72dd09bSNélio Laranjeiro * 1059e72dd09bSNélio Laranjeiro * @return 1060a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1061e72dd09bSNélio Laranjeiro */ 1062e72dd09bSNélio Laranjeiro static int 1063e72dd09bSNélio Laranjeiro mlx5_args_check(const char *key, const char *val, void *opaque) 1064e72dd09bSNélio Laranjeiro { 10657fe24446SShahaf Shuler struct mlx5_dev_config *config = opaque; 106699c12dccSNélio Laranjeiro unsigned long tmp; 1067e72dd09bSNélio Laranjeiro 10686de569f5SAdrien Mazarguil /* No-op, port representors are processed in mlx5_dev_spawn(). */ 10696de569f5SAdrien Mazarguil if (!strcmp(MLX5_REPRESENTOR, key)) 10706de569f5SAdrien Mazarguil return 0; 107199c12dccSNélio Laranjeiro errno = 0; 107299c12dccSNélio Laranjeiro tmp = strtoul(val, NULL, 0); 107399c12dccSNélio Laranjeiro if (errno) { 1074a6d83b6aSNélio Laranjeiro rte_errno = errno; 1075a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); 1076a6d83b6aSNélio Laranjeiro return -rte_errno; 107799c12dccSNélio Laranjeiro } 107899c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 10797fe24446SShahaf Shuler config->cqe_comp = !!tmp; 1080bc91e8dbSYongseok Koh } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { 1081bc91e8dbSYongseok Koh config->cqe_pad = !!tmp; 108278c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 108378c7a16dSYongseok Koh config->hw_padding = !!tmp; 10847d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 10857d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 10867d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 10877d6bf6b8SYongseok Koh config->mprq.stride_num_n = tmp; 10887d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 10897d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 
10907d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 10917d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 10922a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 1093505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1094505f1fe4SViacheslav Ovsiienko " converted to txq_inline_max", key); 1095505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1096505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { 1097505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1098505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { 1099505f1fe4SViacheslav Ovsiienko config->txq_inline_min = tmp; 1100505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { 1101505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 11022a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 11037fe24446SShahaf Shuler config->txqs_inline = tmp; 110409d8b416SYongseok Koh } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 1105a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 1106230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 1107f9de8718SShahaf Shuler config->mps = !!tmp; 11086ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 1109a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 11106ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 1111505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1112505f1fe4SViacheslav Ovsiienko " converted to txq_inline_mpw", key); 1113505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 11145644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 1115a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 11165644d5b9SNelio Laranjeiro } else if 
(strcmp(MLX5_RX_VEC_EN, key) == 0) { 11177fe24446SShahaf Shuler config->rx_vec_en = !!tmp; 111878a54648SXueming Li } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { 111978a54648SXueming Li config->l3_vxlan_en = !!tmp; 1120db209cc3SNélio Laranjeiro } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { 1121db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 1122e2b4925eSOri Kam } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { 1123e2b4925eSOri Kam config->dv_esw_en = !!tmp; 112451e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 112551e72d38SOri Kam config->dv_flow_en = !!tmp; 1126dceb5029SYongseok Koh } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { 1127dceb5029SYongseok Koh config->mr_ext_memseg_en = !!tmp; 1128066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { 1129066cfecdSMatan Azrad config->max_dump_files_num = tmp; 113021bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { 113121bb6c7eSDekel Peled config->lro.timeout = tmp; 113299c12dccSNélio Laranjeiro } else { 1133a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: unknown parameter", key); 1134a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1135a6d83b6aSNélio Laranjeiro return -rte_errno; 1136e72dd09bSNélio Laranjeiro } 113799c12dccSNélio Laranjeiro return 0; 113899c12dccSNélio Laranjeiro } 1139e72dd09bSNélio Laranjeiro 1140e72dd09bSNélio Laranjeiro /** 1141e72dd09bSNélio Laranjeiro * Parse device parameters. 1142e72dd09bSNélio Laranjeiro * 11437fe24446SShahaf Shuler * @param config 11447fe24446SShahaf Shuler * Pointer to device configuration structure. 1145e72dd09bSNélio Laranjeiro * @param devargs 1146e72dd09bSNélio Laranjeiro * Device arguments structure. 1147e72dd09bSNélio Laranjeiro * 1148e72dd09bSNélio Laranjeiro * @return 1149a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 
1150e72dd09bSNélio Laranjeiro */ 1151e72dd09bSNélio Laranjeiro static int 11527fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1153e72dd09bSNélio Laranjeiro { 1154e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 115599c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1156bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 115778c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 11587d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 11597d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 11607d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 11617d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 11622a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1163505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1164505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1165505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 11662a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 116709d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1168230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 11696ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 11706ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 11715644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 11725644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 117378a54648SXueming Li MLX5_L3_VXLAN_EN, 1174db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1175e2b4925eSOri Kam MLX5_DV_ESW_EN, 117651e72d38SOri Kam MLX5_DV_FLOW_EN, 1177dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 11786de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1179066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 118021bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1181e72dd09bSNélio Laranjeiro NULL, 1182e72dd09bSNélio Laranjeiro }; 1183e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1184e72dd09bSNélio Laranjeiro int ret = 0; 1185e72dd09bSNélio Laranjeiro int i; 1186e72dd09bSNélio Laranjeiro 1187e72dd09bSNélio Laranjeiro if (devargs == NULL) 1188e72dd09bSNélio Laranjeiro return 0; 1189e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. 
*/ 1190e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 119115b0ea00SMatan Azrad if (kvlist == NULL) { 119215b0ea00SMatan Azrad rte_errno = EINVAL; 119315b0ea00SMatan Azrad return -rte_errno; 119415b0ea00SMatan Azrad } 1195e72dd09bSNélio Laranjeiro /* Process parameters. */ 1196e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1197e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1198e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 11997fe24446SShahaf Shuler mlx5_args_check, config); 1200a6d83b6aSNélio Laranjeiro if (ret) { 1201a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1202a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1203a6d83b6aSNélio Laranjeiro return -rte_errno; 1204e72dd09bSNélio Laranjeiro } 1205e72dd09bSNélio Laranjeiro } 1206a67323e4SShahaf Shuler } 1207e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1208e72dd09bSNélio Laranjeiro return 0; 1209e72dd09bSNélio Laranjeiro } 1210e72dd09bSNélio Laranjeiro 1211fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver; 1212771fa900SAdrien Mazarguil 12137be600c8SYongseok Koh /** 12147be600c8SYongseok Koh * PMD global initialization. 12157be600c8SYongseok Koh * 12167be600c8SYongseok Koh * Independent from individual device, this function initializes global 12177be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 12187be600c8SYongseok Koh * Hence, each initialization is called once per a process. 12197be600c8SYongseok Koh * 12207be600c8SYongseok Koh * @return 12217be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	/* Map (or create) the memory shared between processes first. */
	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	assert(sd);
	/* The lock guards both init_done flags and the one-time setup below. */
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		/* Primary owns the memory-event callback infrastructure. */
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary();
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		/* Secondary tracks its own per-process flag in local data. */
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary();
		if (ret)
			goto out;
		/* Count attached secondaries in the shared region. */
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	/* Always release the lock, on both success and error paths. */
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * Configures
the minimal amount of data to inline into WQE 126738b4b397SViacheslav Ovsiienko * while sending packets. 126838b4b397SViacheslav Ovsiienko * 126938b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 127038b4b397SViacheslav Ovsiienko * key is specified in devargs 127138b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 127238b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 127338b4b397SViacheslav Ovsiienko * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX 127438b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs 127538b4b397SViacheslav Ovsiienko * 127638b4b397SViacheslav Ovsiienko * @param spawn 127738b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 127838b4b397SViacheslav Ovsiienko * @param config 127938b4b397SViacheslav Ovsiienko * Device configuration parameters. 128038b4b397SViacheslav Ovsiienko */ 128138b4b397SViacheslav Ovsiienko static void 128238b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, 128338b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config) 128438b4b397SViacheslav Ovsiienko { 128538b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) { 128638b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. 
*/ 128738b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 128838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 128938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 129038b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 129138b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 129238b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 129338b4b397SViacheslav Ovsiienko "txq_inline_mix aligned to minimal" 129438b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 129538b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 129638b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 129738b4b397SViacheslav Ovsiienko } 129838b4b397SViacheslav Ovsiienko break; 129938b4b397SViacheslav Ovsiienko } 130038b4b397SViacheslav Ovsiienko goto exit; 130138b4b397SViacheslav Ovsiienko } 130238b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 130338b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 130438b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 130538b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 130638b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 130738b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 130838b4b397SViacheslav Ovsiienko goto exit; 130938b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 131038b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
*/ 131138b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 131238b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 131338b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 131438b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 131538b4b397SViacheslav Ovsiienko goto exit; 131638b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 131738b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 131838b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 131938b4b397SViacheslav Ovsiienko break; 132038b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 132138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 132238b4b397SViacheslav Ovsiienko config->txq_inline_min = 132338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 132438b4b397SViacheslav Ovsiienko goto exit; 132538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 132638b4b397SViacheslav Ovsiienko config->txq_inline_min = 132738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 132838b4b397SViacheslav Ovsiienko goto exit; 132938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 133038b4b397SViacheslav Ovsiienko config->txq_inline_min = 133138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 133238b4b397SViacheslav Ovsiienko goto exit; 133338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 133438b4b397SViacheslav Ovsiienko config->txq_inline_min = 133538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 133638b4b397SViacheslav Ovsiienko goto exit; 133738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 133838b4b397SViacheslav Ovsiienko config->txq_inline_min = 133938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 134038b4b397SViacheslav Ovsiienko goto exit; 134138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 134238b4b397SViacheslav Ovsiienko config->txq_inline_min = 134338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 
134438b4b397SViacheslav Ovsiienko goto exit; 134538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 134638b4b397SViacheslav Ovsiienko config->txq_inline_min = 134738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 134838b4b397SViacheslav Ovsiienko goto exit; 134938b4b397SViacheslav Ovsiienko } 135038b4b397SViacheslav Ovsiienko } 135138b4b397SViacheslav Ovsiienko } 135238b4b397SViacheslav Ovsiienko /* 135338b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 135438b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 135538b4b397SViacheslav Ovsiienko * to determine old NICs. 135638b4b397SViacheslav Ovsiienko */ 135738b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 135838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 135938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 136038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 136138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1362614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 136338b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 136438b4b397SViacheslav Ovsiienko break; 136538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 136638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 136738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 136838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 136938b4b397SViacheslav Ovsiienko /* 137038b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 137138b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 137238b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 
137338b4b397SViacheslav Ovsiienko */ 137438b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 137520215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 137638b4b397SViacheslav Ovsiienko break; 137738b4b397SViacheslav Ovsiienko default: 137838b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 137938b4b397SViacheslav Ovsiienko break; 138038b4b397SViacheslav Ovsiienko } 138138b4b397SViacheslav Ovsiienko exit: 138238b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 138338b4b397SViacheslav Ovsiienko } 138438b4b397SViacheslav Ovsiienko 138538b4b397SViacheslav Ovsiienko /** 138621cae858SDekel Peled * Allocate page of door-bells and register it using DevX API. 138721cae858SDekel Peled * 138821cae858SDekel Peled * @param [in] dev 138921cae858SDekel Peled * Pointer to Ethernet device. 139021cae858SDekel Peled * 139121cae858SDekel Peled * @return 139221cae858SDekel Peled * Pointer to new page on success, NULL otherwise. 139321cae858SDekel Peled */ 139421cae858SDekel Peled static struct mlx5_devx_dbr_page * 139521cae858SDekel Peled mlx5_alloc_dbr_page(struct rte_eth_dev *dev) 139621cae858SDekel Peled { 139721cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 139821cae858SDekel Peled struct mlx5_devx_dbr_page *page; 139921cae858SDekel Peled 140021cae858SDekel Peled /* Allocate space for door-bell page and management data. */ 140121cae858SDekel Peled page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page), 140221cae858SDekel Peled RTE_CACHE_LINE_SIZE, dev->device->numa_node); 140321cae858SDekel Peled if (!page) { 140421cae858SDekel Peled DRV_LOG(ERR, "port %u cannot allocate dbr page", 140521cae858SDekel Peled dev->data->port_id); 140621cae858SDekel Peled return NULL; 140721cae858SDekel Peled } 140821cae858SDekel Peled /* Register allocated memory. 
 */
	page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
					      MLX5_DBR_PAGE_SIZE, 0);
	if (!page->umem) {
		/* Registration failed: release the page before bailing out. */
		DRV_LOG(ERR, "port %u cannot umem reg dbr page",
			dev->data->port_id);
		rte_free(page);
		return NULL;
	}
	return page;
}

/**
 * Find the next available door-bell, allocate new page if needed.
 *
 * @param [in] dev
 *   Pointer to Ethernet device.
 * @param [out] dbr_page
 *   Door-bell page containing the page data.
 *
 * @return
 *   Door-bell address offset on success, a negative error value otherwise.
 */
int64_t
mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_dbr_page *page = NULL;
	uint32_t i, j;

	/* Reuse the first page that still has a free door-bell slot. */
	LIST_FOREACH(page, &priv->dbrpgs, next)
		if (page->dbr_count < MLX5_DBR_PER_PAGE)
			break;
	if (!page) { /* No page with free door-bell exists. */
		page = mlx5_alloc_dbr_page(dev);
		if (!page) /* Failed to allocate new page. */
			return (-1);
		LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
	}
	/* Loop to find bitmap part with clear bit.
*/ 144821cae858SDekel Peled for (i = 0; 144921cae858SDekel Peled i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX; 145021cae858SDekel Peled i++) 145121cae858SDekel Peled ; /* Empty. */ 145221cae858SDekel Peled /* Find the first clear bit. */ 145321cae858SDekel Peled j = rte_bsf64(~page->dbr_bitmap[i]); 145421cae858SDekel Peled assert(i < (MLX5_DBR_PER_PAGE / 64)); 145521cae858SDekel Peled page->dbr_bitmap[i] |= (1 << j); 145621cae858SDekel Peled page->dbr_count++; 145721cae858SDekel Peled *dbr_page = page; 145821cae858SDekel Peled return (((i * 64) + j) * sizeof(uint64_t)); 145921cae858SDekel Peled } 146021cae858SDekel Peled 146121cae858SDekel Peled /** 146221cae858SDekel Peled * Release a door-bell record. 146321cae858SDekel Peled * 146421cae858SDekel Peled * @param [in] dev 146521cae858SDekel Peled * Pointer to Ethernet device. 146621cae858SDekel Peled * @param [in] umem_id 146721cae858SDekel Peled * UMEM ID of page containing the door-bell record to release. 146821cae858SDekel Peled * @param [in] offset 146921cae858SDekel Peled * Offset of door-bell record in page. 147021cae858SDekel Peled * 147121cae858SDekel Peled * @return 147221cae858SDekel Peled * 0 on success, a negative error value otherwise. 147321cae858SDekel Peled */ 147421cae858SDekel Peled int32_t 147521cae858SDekel Peled mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) 147621cae858SDekel Peled { 147721cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 147821cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 147921cae858SDekel Peled int ret = 0; 148021cae858SDekel Peled 148121cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 148221cae858SDekel Peled /* Find the page this address belongs to. 
*/ 148321cae858SDekel Peled if (page->umem->umem_id == umem_id) 148421cae858SDekel Peled break; 148521cae858SDekel Peled if (!page) 148621cae858SDekel Peled return -EINVAL; 148721cae858SDekel Peled page->dbr_count--; 148821cae858SDekel Peled if (!page->dbr_count) { 148921cae858SDekel Peled /* Page not used, free it and remove from list. */ 149021cae858SDekel Peled LIST_REMOVE(page, next); 149121cae858SDekel Peled if (page->umem) 149221cae858SDekel Peled ret = -mlx5_glue->devx_umem_dereg(page->umem); 149321cae858SDekel Peled rte_free(page); 149421cae858SDekel Peled } else { 149521cae858SDekel Peled /* Mark in bitmap that this door-bell is not in use. */ 1496a88209b0SDekel Peled offset /= MLX5_DBR_SIZE; 149721cae858SDekel Peled int i = offset / 64; 149821cae858SDekel Peled int j = offset % 64; 149921cae858SDekel Peled 150021cae858SDekel Peled page->dbr_bitmap[i] &= ~(1 << j); 150121cae858SDekel Peled } 150221cae858SDekel Peled return ret; 150321cae858SDekel Peled } 150421cae858SDekel Peled 150521cae858SDekel Peled /** 1506f38c5457SAdrien Mazarguil * Spawn an Ethernet device from Verbs information. 1507771fa900SAdrien Mazarguil * 1508f38c5457SAdrien Mazarguil * @param dpdk_dev 1509f38c5457SAdrien Mazarguil * Backing DPDK device. 1510ad74bc61SViacheslav Ovsiienko * @param spawn 1511ad74bc61SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 1512f87bfa8eSYongseok Koh * @param config 1513f87bfa8eSYongseok Koh * Device configuration parameters. 1514771fa900SAdrien Mazarguil * 1515771fa900SAdrien Mazarguil * @return 1516f38c5457SAdrien Mazarguil * A valid Ethernet device object on success, NULL otherwise and rte_errno 1517206254b7SOphir Munk * is set. The following errors are defined: 15186de569f5SAdrien Mazarguil * 15196de569f5SAdrien Mazarguil * EBUSY: device is not supposed to be spawned. 
1520206254b7SOphir Munk * EEXIST: device is already spawned 1521771fa900SAdrien Mazarguil */ 1522f38c5457SAdrien Mazarguil static struct rte_eth_dev * 1523f38c5457SAdrien Mazarguil mlx5_dev_spawn(struct rte_device *dpdk_dev, 1524ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data *spawn, 1525ad74bc61SViacheslav Ovsiienko struct mlx5_dev_config config) 1526771fa900SAdrien Mazarguil { 1527ad74bc61SViacheslav Ovsiienko const struct mlx5_switch_info *switch_info = &spawn->info; 152817e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = NULL; 152968128934SAdrien Mazarguil struct ibv_port_attr port_attr; 15306057a10bSAdrien Mazarguil struct mlx5dv_context dv_attr = { .comp_mask = 0 }; 15319083982cSAdrien Mazarguil struct rte_eth_dev *eth_dev = NULL; 1532dbeba4cfSThomas Monjalon struct mlx5_priv *priv = NULL; 1533771fa900SAdrien Mazarguil int err = 0; 153478c7a16dSYongseok Koh unsigned int hw_padding = 0; 1535e192ef80SYaacov Hazan unsigned int mps; 1536523f5a74SYongseok Koh unsigned int cqe_comp; 1537bc91e8dbSYongseok Koh unsigned int cqe_pad = 0; 1538772d3435SXueming Li unsigned int tunnel_en = 0; 15391f106da2SMatan Azrad unsigned int mpls_en = 0; 15405f8ba81cSXueming Li unsigned int swp = 0; 15417d6bf6b8SYongseok Koh unsigned int mprq = 0; 15427d6bf6b8SYongseok Koh unsigned int mprq_min_stride_size_n = 0; 15437d6bf6b8SYongseok Koh unsigned int mprq_max_stride_size_n = 0; 15447d6bf6b8SYongseok Koh unsigned int mprq_min_stride_num_n = 0; 15457d6bf6b8SYongseok Koh unsigned int mprq_max_stride_num_n = 0; 15466d13ea8eSOlivier Matz struct rte_ether_addr mac; 154768128934SAdrien Mazarguil char name[RTE_ETH_NAME_MAX_LEN]; 15482b730263SAdrien Mazarguil int own_domain_id = 0; 1549206254b7SOphir Munk uint16_t port_id; 15502b730263SAdrien Mazarguil unsigned int i; 1551771fa900SAdrien Mazarguil 15526de569f5SAdrien Mazarguil /* Determine if this port representor is supposed to be spawned. 
*/ 15536de569f5SAdrien Mazarguil if (switch_info->representor && dpdk_dev->devargs) { 15546de569f5SAdrien Mazarguil struct rte_eth_devargs eth_da; 15556de569f5SAdrien Mazarguil 15566de569f5SAdrien Mazarguil err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); 15576de569f5SAdrien Mazarguil if (err) { 15586de569f5SAdrien Mazarguil rte_errno = -err; 15596de569f5SAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 15606de569f5SAdrien Mazarguil strerror(rte_errno)); 15616de569f5SAdrien Mazarguil return NULL; 15626de569f5SAdrien Mazarguil } 15636de569f5SAdrien Mazarguil for (i = 0; i < eth_da.nb_representor_ports; ++i) 15646de569f5SAdrien Mazarguil if (eth_da.representor_ports[i] == 15656de569f5SAdrien Mazarguil (uint16_t)switch_info->port_name) 15666de569f5SAdrien Mazarguil break; 15676de569f5SAdrien Mazarguil if (i == eth_da.nb_representor_ports) { 15686de569f5SAdrien Mazarguil rte_errno = EBUSY; 15696de569f5SAdrien Mazarguil return NULL; 15706de569f5SAdrien Mazarguil } 15716de569f5SAdrien Mazarguil } 1572206254b7SOphir Munk /* Build device name. 
*/ 1573206254b7SOphir Munk if (!switch_info->representor) 157409c9c4d2SThomas Monjalon strlcpy(name, dpdk_dev->name, sizeof(name)); 1575206254b7SOphir Munk else 1576206254b7SOphir Munk snprintf(name, sizeof(name), "%s_representor_%u", 1577206254b7SOphir Munk dpdk_dev->name, switch_info->port_name); 1578206254b7SOphir Munk /* check if the device is already spawned */ 1579206254b7SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 1580206254b7SOphir Munk rte_errno = EEXIST; 1581206254b7SOphir Munk return NULL; 1582206254b7SOphir Munk } 158317e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 158417e19bc4SViacheslav Ovsiienko if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 158517e19bc4SViacheslav Ovsiienko eth_dev = rte_eth_dev_attach_secondary(name); 158617e19bc4SViacheslav Ovsiienko if (eth_dev == NULL) { 158717e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "can not attach rte ethdev"); 158817e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 1589f38c5457SAdrien Mazarguil return NULL; 1590771fa900SAdrien Mazarguil } 159117e19bc4SViacheslav Ovsiienko eth_dev->device = dpdk_dev; 159217e19bc4SViacheslav Ovsiienko eth_dev->dev_ops = &mlx5_dev_sec_ops; 1593120dc4a7SYongseok Koh err = mlx5_proc_priv_init(eth_dev); 1594120dc4a7SYongseok Koh if (err) 1595120dc4a7SYongseok Koh return NULL; 159617e19bc4SViacheslav Ovsiienko /* Receive command fd from primary process */ 15979a8ab29bSYongseok Koh err = mlx5_mp_req_verbs_cmd_fd(eth_dev); 159817e19bc4SViacheslav Ovsiienko if (err < 0) 159917e19bc4SViacheslav Ovsiienko return NULL; 160017e19bc4SViacheslav Ovsiienko /* Remap UAR for Tx queues. 
*/ 1601120dc4a7SYongseok Koh err = mlx5_tx_uar_init_secondary(eth_dev, err); 160217e19bc4SViacheslav Ovsiienko if (err) 160317e19bc4SViacheslav Ovsiienko return NULL; 160417e19bc4SViacheslav Ovsiienko /* 160517e19bc4SViacheslav Ovsiienko * Ethdev pointer is still required as input since 160617e19bc4SViacheslav Ovsiienko * the primary device is not accessible from the 160717e19bc4SViacheslav Ovsiienko * secondary process. 160817e19bc4SViacheslav Ovsiienko */ 160917e19bc4SViacheslav Ovsiienko eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); 161017e19bc4SViacheslav Ovsiienko eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); 161117e19bc4SViacheslav Ovsiienko return eth_dev; 1612f5bf91deSMoti Haimovsky } 161317e19bc4SViacheslav Ovsiienko sh = mlx5_alloc_shared_ibctx(spawn); 161417e19bc4SViacheslav Ovsiienko if (!sh) 161517e19bc4SViacheslav Ovsiienko return NULL; 161617e19bc4SViacheslav Ovsiienko config.devx = sh->devx; 16173075bd23SDekel Peled #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR 16183075bd23SDekel Peled config.dest_tir = 1; 16193075bd23SDekel Peled #endif 16205f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 16216057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; 16225f8ba81cSXueming Li #endif 162343e9d979SShachar Beiser /* 162443e9d979SShachar Beiser * Multi-packet send is supported by ConnectX-4 Lx PF as well 162543e9d979SShachar Beiser * as all ConnectX-5 devices. 
162643e9d979SShachar Beiser */ 1627038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 16286057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; 1629038e7251SShahaf Shuler #endif 16307d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 16316057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; 16327d6bf6b8SYongseok Koh #endif 163317e19bc4SViacheslav Ovsiienko mlx5_glue->dv_query_device(sh->ctx, &dv_attr); 16346057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { 16356057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { 1636a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "enhanced MPW is supported"); 163743e9d979SShachar Beiser mps = MLX5_MPW_ENHANCED; 163843e9d979SShachar Beiser } else { 1639a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW is supported"); 1640e589960cSYongseok Koh mps = MLX5_MPW; 1641e589960cSYongseok Koh } 1642e589960cSYongseok Koh } else { 1643a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW isn't supported"); 164443e9d979SShachar Beiser mps = MLX5_MPW_DISABLED; 164543e9d979SShachar Beiser } 16465f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 16476057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) 16486057a10bSAdrien Mazarguil swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; 16495f8ba81cSXueming Li DRV_LOG(DEBUG, "SWP support: %u", swp); 16505f8ba81cSXueming Li #endif 165168128934SAdrien Mazarguil config.swp = !!swp; 16527d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 16536057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { 16547d6bf6b8SYongseok Koh struct mlx5dv_striding_rq_caps mprq_caps = 16556057a10bSAdrien Mazarguil dv_attr.striding_rq_caps; 16567d6bf6b8SYongseok Koh 16577d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", 16587d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes); 16597d6bf6b8SYongseok Koh 
DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", 16607d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes); 16617d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", 16627d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides); 16637d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", 16647d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides); 16657d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tsupported_qpts: %d", 16667d6bf6b8SYongseok Koh mprq_caps.supported_qpts); 16677d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); 16687d6bf6b8SYongseok Koh mprq = 1; 16697d6bf6b8SYongseok Koh mprq_min_stride_size_n = 16707d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes; 16717d6bf6b8SYongseok Koh mprq_max_stride_size_n = 16727d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes; 16737d6bf6b8SYongseok Koh mprq_min_stride_num_n = 16747d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides; 16757d6bf6b8SYongseok Koh mprq_max_stride_num_n = 16767d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides; 167768128934SAdrien Mazarguil config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 167868128934SAdrien Mazarguil mprq_min_stride_num_n); 16797d6bf6b8SYongseok Koh } 16807d6bf6b8SYongseok Koh #endif 1681523f5a74SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128 && 16826057a10bSAdrien Mazarguil !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) 1683523f5a74SYongseok Koh cqe_comp = 0; 1684523f5a74SYongseok Koh else 1685523f5a74SYongseok Koh cqe_comp = 1; 168668128934SAdrien Mazarguil config.cqe_comp = cqe_comp; 1687bc91e8dbSYongseok Koh #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1688bc91e8dbSYongseok Koh /* Whether device supports 128B Rx CQE padding. 
*/ 1689bc91e8dbSYongseok Koh cqe_pad = RTE_CACHE_LINE_SIZE == 128 && 1690bc91e8dbSYongseok Koh (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); 1691bc91e8dbSYongseok Koh #endif 1692038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 16936057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { 16946057a10bSAdrien Mazarguil tunnel_en = ((dv_attr.tunnel_offloads_caps & 1695038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && 16966057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 1697038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); 1698038e7251SShahaf Shuler } 1699a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "tunnel offloading is %ssupported", 1700a170a30dSNélio Laranjeiro tunnel_en ? "" : "not "); 1701038e7251SShahaf Shuler #else 1702a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 1703a170a30dSNélio Laranjeiro "tunnel offloading disabled due to old OFED/rdma-core version"); 1704038e7251SShahaf Shuler #endif 170568128934SAdrien Mazarguil config.tunnel_en = tunnel_en; 17061f106da2SMatan Azrad #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 17076057a10bSAdrien Mazarguil mpls_en = ((dv_attr.tunnel_offloads_caps & 17081f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && 17096057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 17101f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); 17111f106da2SMatan Azrad DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", 17121f106da2SMatan Azrad mpls_en ? "" : "not "); 17131f106da2SMatan Azrad #else 17141f106da2SMatan Azrad DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" 17151f106da2SMatan Azrad " old OFED/rdma-core version or firmware configuration"); 17161f106da2SMatan Azrad #endif 171768128934SAdrien Mazarguil config.mpls_en = mpls_en; 1718771fa900SAdrien Mazarguil /* Check port status. 
*/ 171917e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); 1720771fa900SAdrien Mazarguil if (err) { 1721a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port query failed: %s", strerror(err)); 17229083982cSAdrien Mazarguil goto error; 1723771fa900SAdrien Mazarguil } 17241371f4dfSOr Ami if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 17259083982cSAdrien Mazarguil DRV_LOG(ERR, "port is not configured in Ethernet mode"); 1726e1c3e305SMatan Azrad err = EINVAL; 17279083982cSAdrien Mazarguil goto error; 17281371f4dfSOr Ami } 1729771fa900SAdrien Mazarguil if (port_attr.state != IBV_PORT_ACTIVE) 17309083982cSAdrien Mazarguil DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 1731a170a30dSNélio Laranjeiro mlx5_glue->port_state_str(port_attr.state), 1732771fa900SAdrien Mazarguil port_attr.state); 173317e19bc4SViacheslav Ovsiienko /* Allocate private eth device data. */ 1734771fa900SAdrien Mazarguil priv = rte_zmalloc("ethdev private structure", 1735771fa900SAdrien Mazarguil sizeof(*priv), 1736771fa900SAdrien Mazarguil RTE_CACHE_LINE_SIZE); 1737771fa900SAdrien Mazarguil if (priv == NULL) { 1738a170a30dSNélio Laranjeiro DRV_LOG(ERR, "priv allocation failure"); 1739771fa900SAdrien Mazarguil err = ENOMEM; 17409083982cSAdrien Mazarguil goto error; 1741771fa900SAdrien Mazarguil } 174217e19bc4SViacheslav Ovsiienko priv->sh = sh; 174317e19bc4SViacheslav Ovsiienko priv->ibv_port = spawn->ibv_port; 174435b2d13fSOlivier Matz priv->mtu = RTE_ETHER_MTU; 17456bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64 17466bf10ab6SMoti Haimovsky /* Initialize UAR access locks for 32bit implementations. */ 17476bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock_cq); 17486bf10ab6SMoti Haimovsky for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) 17496bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock[i]); 17506bf10ab6SMoti Haimovsky #endif 175126c08b97SAdrien Mazarguil /* Some internal functions rely on Netlink sockets, open them now. 
*/ 17525366074bSNelio Laranjeiro priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 17535366074bSNelio Laranjeiro priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 175426c08b97SAdrien Mazarguil priv->nl_sn = 0; 17552b730263SAdrien Mazarguil priv->representor = !!switch_info->representor; 1756299d7dc2SViacheslav Ovsiienko priv->master = !!switch_info->master; 17572b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 1758299d7dc2SViacheslav Ovsiienko /* 1759299d7dc2SViacheslav Ovsiienko * Currently we support single E-Switch per PF configurations 1760299d7dc2SViacheslav Ovsiienko * only and vport_id field contains the vport index for 1761299d7dc2SViacheslav Ovsiienko * associated VF, which is deduced from representor port name. 1762ae4eb7dcSViacheslav Ovsiienko * For example, let's have the IB device port 10, it has 1763299d7dc2SViacheslav Ovsiienko * attached network device eth0, which has port name attribute 1764299d7dc2SViacheslav Ovsiienko * pf0vf2, we can deduce the VF number as 2, and set vport index 1765299d7dc2SViacheslav Ovsiienko * as 3 (2+1). This assigning schema should be changed if the 1766299d7dc2SViacheslav Ovsiienko * multiple E-Switch instances per PF configurations or/and PCI 1767299d7dc2SViacheslav Ovsiienko * subfunctions are added. 1768299d7dc2SViacheslav Ovsiienko */ 1769299d7dc2SViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 1770299d7dc2SViacheslav Ovsiienko switch_info->port_name + 1 : -1; 1771299d7dc2SViacheslav Ovsiienko /* representor_id field keeps the unmodified port/VF index. */ 1772299d7dc2SViacheslav Ovsiienko priv->representor_id = switch_info->representor ? 1773299d7dc2SViacheslav Ovsiienko switch_info->port_name : -1; 17742b730263SAdrien Mazarguil /* 17752b730263SAdrien Mazarguil * Look for sibling devices in order to reuse their switch domain 17762b730263SAdrien Mazarguil * if any, otherwise allocate one. 
17772b730263SAdrien Mazarguil */ 1778d874a4eeSThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) { 1779dbeba4cfSThomas Monjalon const struct mlx5_priv *opriv = 1780d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 17812b730263SAdrien Mazarguil 17822b730263SAdrien Mazarguil if (!opriv || 17832b730263SAdrien Mazarguil opriv->domain_id == 17842b730263SAdrien Mazarguil RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 17852b730263SAdrien Mazarguil continue; 17862b730263SAdrien Mazarguil priv->domain_id = opriv->domain_id; 17872b730263SAdrien Mazarguil break; 17882b730263SAdrien Mazarguil } 17892b730263SAdrien Mazarguil if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 17902b730263SAdrien Mazarguil err = rte_eth_switch_domain_alloc(&priv->domain_id); 17912b730263SAdrien Mazarguil if (err) { 17922b730263SAdrien Mazarguil err = rte_errno; 17932b730263SAdrien Mazarguil DRV_LOG(ERR, "unable to allocate switch domain: %s", 17942b730263SAdrien Mazarguil strerror(rte_errno)); 17952b730263SAdrien Mazarguil goto error; 17962b730263SAdrien Mazarguil } 17972b730263SAdrien Mazarguil own_domain_id = 1; 17982b730263SAdrien Mazarguil } 1799f38c5457SAdrien Mazarguil err = mlx5_args(&config, dpdk_dev->devargs); 1800e72dd09bSNélio Laranjeiro if (err) { 1801012ad994SShahaf Shuler err = rte_errno; 180293068a9dSAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 180393068a9dSAdrien Mazarguil strerror(rte_errno)); 18049083982cSAdrien Mazarguil goto error; 1805e72dd09bSNélio Laranjeiro } 180617e19bc4SViacheslav Ovsiienko config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & 180717e19bc4SViacheslav Ovsiienko IBV_DEVICE_RAW_IP_CSUM); 1808a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "checksum offloading is %ssupported", 18097fe24446SShahaf Shuler (config.hw_csum ? 
"" : "not ")); 18102dd8b721SViacheslav Ovsiienko #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 18112dd8b721SViacheslav Ovsiienko !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 18122dd8b721SViacheslav Ovsiienko DRV_LOG(DEBUG, "counters are not supported"); 18139a761de8SOri Kam #endif 181458b1312eSYongseok Koh #ifndef HAVE_IBV_FLOW_DV_SUPPORT 181558b1312eSYongseok Koh if (config.dv_flow_en) { 181658b1312eSYongseok Koh DRV_LOG(WARNING, "DV flow is not supported"); 181758b1312eSYongseok Koh config.dv_flow_en = 0; 181858b1312eSYongseok Koh } 181958b1312eSYongseok Koh #endif 18207fe24446SShahaf Shuler config.ind_table_max_size = 182117e19bc4SViacheslav Ovsiienko sh->device_attr.rss_caps.max_rwq_indirection_table_size; 182268128934SAdrien Mazarguil /* 182368128934SAdrien Mazarguil * Remove this check once DPDK supports larger/variable 182468128934SAdrien Mazarguil * indirection tables. 182568128934SAdrien Mazarguil */ 182668128934SAdrien Mazarguil if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 18277fe24446SShahaf Shuler config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; 1828a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 18297fe24446SShahaf Shuler config.ind_table_max_size); 183017e19bc4SViacheslav Ovsiienko config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 183143e9d979SShachar Beiser IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 1832a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 18337fe24446SShahaf Shuler (config.hw_vlan_strip ? "" : "not ")); 183417e19bc4SViacheslav Ovsiienko config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 1835cd230a3eSShahaf Shuler IBV_RAW_PACKET_CAP_SCATTER_FCS); 1836a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 18377fe24446SShahaf Shuler (config.hw_fcs_strip ? 
"" : "not ")); 18382014a7fbSYongseok Koh #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 183917e19bc4SViacheslav Ovsiienko hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 18402014a7fbSYongseok Koh #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 184117e19bc4SViacheslav Ovsiienko hw_padding = !!(sh->device_attr.device_cap_flags_ex & 18422014a7fbSYongseok Koh IBV_DEVICE_PCI_WRITE_END_PADDING); 184343e9d979SShachar Beiser #endif 184478c7a16dSYongseok Koh if (config.hw_padding && !hw_padding) { 184578c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 184678c7a16dSYongseok Koh config.hw_padding = 0; 184778c7a16dSYongseok Koh } else if (config.hw_padding) { 184878c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 184978c7a16dSYongseok Koh } 185017e19bc4SViacheslav Ovsiienko config.tso = (sh->device_attr.tso_caps.max_tso > 0 && 185117e19bc4SViacheslav Ovsiienko (sh->device_attr.tso_caps.supported_qpts & 185243e9d979SShachar Beiser (1 << IBV_QPT_RAW_PACKET))); 18537fe24446SShahaf Shuler if (config.tso) 185417e19bc4SViacheslav Ovsiienko config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; 1855f9de8718SShahaf Shuler /* 1856f9de8718SShahaf Shuler * MPW is disabled by default, while the Enhanced MPW is enabled 1857f9de8718SShahaf Shuler * by default. 1858f9de8718SShahaf Shuler */ 1859f9de8718SShahaf Shuler if (config.mps == MLX5_ARG_UNSET) 1860f9de8718SShahaf Shuler config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 1861f9de8718SShahaf Shuler MLX5_MPW_DISABLED; 1862f9de8718SShahaf Shuler else 1863f9de8718SShahaf Shuler config.mps = config.mps ? mps : MLX5_MPW_DISABLED; 1864a170a30dSNélio Laranjeiro DRV_LOG(INFO, "%sMPS is %s", 18650f99970bSNélio Laranjeiro config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", 186668128934SAdrien Mazarguil config.mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); 18677fe24446SShahaf Shuler if (config.cqe_comp && !cqe_comp) { 1868a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 18697fe24446SShahaf Shuler config.cqe_comp = 0; 1870523f5a74SYongseok Koh } 1871bc91e8dbSYongseok Koh if (config.cqe_pad && !cqe_pad) { 1872bc91e8dbSYongseok Koh DRV_LOG(WARNING, "Rx CQE padding isn't supported"); 1873bc91e8dbSYongseok Koh config.cqe_pad = 0; 1874bc91e8dbSYongseok Koh } else if (config.cqe_pad) { 1875bc91e8dbSYongseok Koh DRV_LOG(INFO, "Rx CQE padding is enabled"); 1876bc91e8dbSYongseok Koh } 1877175f1c21SDekel Peled if (config.devx) { 1878175f1c21SDekel Peled priv->counter_fallback = 0; 1879175f1c21SDekel Peled err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr); 1880175f1c21SDekel Peled if (err) { 1881175f1c21SDekel Peled err = -err; 1882175f1c21SDekel Peled goto error; 1883175f1c21SDekel Peled } 1884175f1c21SDekel Peled if (!config.hca_attr.flow_counters_dump) 1885175f1c21SDekel Peled priv->counter_fallback = 1; 1886175f1c21SDekel Peled #ifndef HAVE_IBV_DEVX_ASYNC 1887175f1c21SDekel Peled priv->counter_fallback = 1; 1888175f1c21SDekel Peled #endif 1889175f1c21SDekel Peled if (priv->counter_fallback) 1890175f1c21SDekel Peled DRV_LOG(INFO, "Use fall-back DV counter management\n"); 1891175f1c21SDekel Peled /* Check for LRO support. */ 1892bd41389eSMatan Azrad if (config.dest_tir && config.hca_attr.lro_cap) { 1893175f1c21SDekel Peled /* TBD check tunnel lro caps. */ 1894175f1c21SDekel Peled config.lro.supported = config.hca_attr.lro_cap; 1895175f1c21SDekel Peled DRV_LOG(DEBUG, "Device supports LRO"); 1896175f1c21SDekel Peled /* 1897175f1c21SDekel Peled * If LRO timeout is not configured by application, 1898175f1c21SDekel Peled * use the minimal supported value. 
1899175f1c21SDekel Peled */ 1900175f1c21SDekel Peled if (!config.lro.timeout) 1901175f1c21SDekel Peled config.lro.timeout = 1902175f1c21SDekel Peled config.hca_attr.lro_timer_supported_periods[0]; 1903175f1c21SDekel Peled DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 1904175f1c21SDekel Peled config.lro.timeout); 1905175f1c21SDekel Peled } 1906175f1c21SDekel Peled } 19075c0e2db6SYongseok Koh if (config.mprq.enabled && mprq) { 19087d6bf6b8SYongseok Koh if (config.mprq.stride_num_n > mprq_max_stride_num_n || 19097d6bf6b8SYongseok Koh config.mprq.stride_num_n < mprq_min_stride_num_n) { 19107d6bf6b8SYongseok Koh config.mprq.stride_num_n = 19117d6bf6b8SYongseok Koh RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 19127d6bf6b8SYongseok Koh mprq_min_stride_num_n); 19137d6bf6b8SYongseok Koh DRV_LOG(WARNING, 19147d6bf6b8SYongseok Koh "the number of strides" 19157d6bf6b8SYongseok Koh " for Multi-Packet RQ is out of range," 19167d6bf6b8SYongseok Koh " setting default value (%u)", 19177d6bf6b8SYongseok Koh 1 << config.mprq.stride_num_n); 19187d6bf6b8SYongseok Koh } 19197d6bf6b8SYongseok Koh config.mprq.min_stride_size_n = mprq_min_stride_size_n; 19207d6bf6b8SYongseok Koh config.mprq.max_stride_size_n = mprq_max_stride_size_n; 19215c0e2db6SYongseok Koh } else if (config.mprq.enabled && !mprq) { 19225c0e2db6SYongseok Koh DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 19235c0e2db6SYongseok Koh config.mprq.enabled = 0; 19247d6bf6b8SYongseok Koh } 1925066cfecdSMatan Azrad if (config.max_dump_files_num == 0) 1926066cfecdSMatan Azrad config.max_dump_files_num = 128; 1927af4f09f2SNélio Laranjeiro eth_dev = rte_eth_dev_allocate(name); 1928af4f09f2SNélio Laranjeiro if (eth_dev == NULL) { 1929a170a30dSNélio Laranjeiro DRV_LOG(ERR, "can not allocate rte ethdev"); 1930af4f09f2SNélio Laranjeiro err = ENOMEM; 19319083982cSAdrien Mazarguil goto error; 1932af4f09f2SNélio Laranjeiro } 193315febafdSThomas Monjalon /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). 
*/ 193415febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1935a7d3c627SThomas Monjalon if (priv->representor) { 19362b730263SAdrien Mazarguil eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 1937a7d3c627SThomas Monjalon eth_dev->data->representor_id = priv->representor_id; 1938a7d3c627SThomas Monjalon } 1939fa2e14d4SViacheslav Ovsiienko /* 1940fa2e14d4SViacheslav Ovsiienko * Store associated network device interface index. This index 1941fa2e14d4SViacheslav Ovsiienko * is permanent throughout the lifetime of device. So, we may store 1942fa2e14d4SViacheslav Ovsiienko * the ifindex here and use the cached value further. 1943fa2e14d4SViacheslav Ovsiienko */ 1944fa2e14d4SViacheslav Ovsiienko assert(spawn->ifindex); 1945fa2e14d4SViacheslav Ovsiienko priv->if_index = spawn->ifindex; 1946af4f09f2SNélio Laranjeiro eth_dev->data->dev_private = priv; 1947df428ceeSYongseok Koh priv->dev_data = eth_dev->data; 1948af4f09f2SNélio Laranjeiro eth_dev->data->mac_addrs = priv->mac; 1949f38c5457SAdrien Mazarguil eth_dev->device = dpdk_dev; 1950771fa900SAdrien Mazarguil /* Configure the first MAC address by default. */ 1951af4f09f2SNélio Laranjeiro if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 1952a170a30dSNélio Laranjeiro DRV_LOG(ERR, 1953a170a30dSNélio Laranjeiro "port %u cannot get MAC address, is mlx5_en" 1954a170a30dSNélio Laranjeiro " loaded? 
(errno: %s)", 19558c3c2372SAdrien Mazarguil eth_dev->data->port_id, strerror(rte_errno)); 1956e1c3e305SMatan Azrad err = ENODEV; 19579083982cSAdrien Mazarguil goto error; 1958771fa900SAdrien Mazarguil } 1959a170a30dSNélio Laranjeiro DRV_LOG(INFO, 1960a170a30dSNélio Laranjeiro "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 19610f99970bSNélio Laranjeiro eth_dev->data->port_id, 1962771fa900SAdrien Mazarguil mac.addr_bytes[0], mac.addr_bytes[1], 1963771fa900SAdrien Mazarguil mac.addr_bytes[2], mac.addr_bytes[3], 1964771fa900SAdrien Mazarguil mac.addr_bytes[4], mac.addr_bytes[5]); 1965771fa900SAdrien Mazarguil #ifndef NDEBUG 1966771fa900SAdrien Mazarguil { 1967771fa900SAdrien Mazarguil char ifname[IF_NAMESIZE]; 1968771fa900SAdrien Mazarguil 1969af4f09f2SNélio Laranjeiro if (mlx5_get_ifname(eth_dev, &ifname) == 0) 1970a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 19710f99970bSNélio Laranjeiro eth_dev->data->port_id, ifname); 1972771fa900SAdrien Mazarguil else 1973a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is unknown", 19740f99970bSNélio Laranjeiro eth_dev->data->port_id); 1975771fa900SAdrien Mazarguil } 1976771fa900SAdrien Mazarguil #endif 1977771fa900SAdrien Mazarguil /* Get actual MTU if possible. */ 1978a6d83b6aSNélio Laranjeiro err = mlx5_get_mtu(eth_dev, &priv->mtu); 1979012ad994SShahaf Shuler if (err) { 1980012ad994SShahaf Shuler err = rte_errno; 19819083982cSAdrien Mazarguil goto error; 1982012ad994SShahaf Shuler } 1983a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 1984a170a30dSNélio Laranjeiro priv->mtu); 198568128934SAdrien Mazarguil /* Initialize burst functions to prevent crashes before link-up. */ 1986e313ef4cSShahaf Shuler eth_dev->rx_pkt_burst = removed_rx_burst; 1987e313ef4cSShahaf Shuler eth_dev->tx_pkt_burst = removed_tx_burst; 1988771fa900SAdrien Mazarguil eth_dev->dev_ops = &mlx5_dev_ops; 1989272733b5SNélio Laranjeiro /* Register MAC address. 
*/ 1990272733b5SNélio Laranjeiro claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 1991f87bfa8eSYongseok Koh if (config.vf && config.vf_nl_en) 1992ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_sync(eth_dev); 1993c8ffb8a9SNélio Laranjeiro TAILQ_INIT(&priv->flows); 19941b37f5d8SNélio Laranjeiro TAILQ_INIT(&priv->ctrl_flows); 19951e3a39f7SXueming Li /* Hint libmlx5 to use PMD allocator for data plane resources */ 19961e3a39f7SXueming Li struct mlx5dv_ctx_allocators alctr = { 19971e3a39f7SXueming Li .alloc = &mlx5_alloc_verbs_buf, 19981e3a39f7SXueming Li .free = &mlx5_free_verbs_buf, 19991e3a39f7SXueming Li .data = priv, 20001e3a39f7SXueming Li }; 200117e19bc4SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 200217e19bc4SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 20031e3a39f7SXueming Li (void *)((uintptr_t)&alctr)); 2004771fa900SAdrien Mazarguil /* Bring Ethernet device up. */ 2005a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 20060f99970bSNélio Laranjeiro eth_dev->data->port_id); 20077ba5320bSNélio Laranjeiro mlx5_set_link_up(eth_dev); 2008a85a606cSShahaf Shuler /* 2009a85a606cSShahaf Shuler * Even though the interrupt handler is not installed yet, 2010ae4eb7dcSViacheslav Ovsiienko * interrupts will still trigger on the async_fd from 2011a85a606cSShahaf Shuler * Verbs context returned by ibv_open_device(). 2012a85a606cSShahaf Shuler */ 2013a85a606cSShahaf Shuler mlx5_link_update(eth_dev, 0); 2014e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 2015e2b4925eSOri Kam if (!(config.hca_attr.eswitch_manager && config.dv_flow_en && 2016e2b4925eSOri Kam (switch_info->representor || switch_info->master))) 2017e2b4925eSOri Kam config.dv_esw_en = 0; 2018e2b4925eSOri Kam #else 2019e2b4925eSOri Kam config.dv_esw_en = 0; 2020e2b4925eSOri Kam #endif 202138b4b397SViacheslav Ovsiienko /* Detect minimal data bytes to inline. 
*/ 202238b4b397SViacheslav Ovsiienko mlx5_set_min_inline(spawn, &config); 20237fe24446SShahaf Shuler /* Store device configuration on private structure. */ 20247fe24446SShahaf Shuler priv->config = config; 2025dfedf3e3SViacheslav Ovsiienko /* Create context for virtual machine VLAN workaround. */ 2026dfedf3e3SViacheslav Ovsiienko priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 2027e2b4925eSOri Kam if (config.dv_flow_en) { 2028e2b4925eSOri Kam err = mlx5_alloc_shared_dr(priv); 2029e2b4925eSOri Kam if (err) 2030e2b4925eSOri Kam goto error; 2031e2b4925eSOri Kam } 203278be8852SNelio Laranjeiro /* Supported Verbs flow priority number detection. */ 20332815702bSNelio Laranjeiro err = mlx5_flow_discover_priorities(eth_dev); 20344fb27c1dSViacheslav Ovsiienko if (err < 0) { 20354fb27c1dSViacheslav Ovsiienko err = -err; 20369083982cSAdrien Mazarguil goto error; 20374fb27c1dSViacheslav Ovsiienko } 20382815702bSNelio Laranjeiro priv->config.flow_prio = err; 2039f38c5457SAdrien Mazarguil return eth_dev; 20409083982cSAdrien Mazarguil error: 204126c08b97SAdrien Mazarguil if (priv) { 2042b2177648SViacheslav Ovsiienko if (priv->sh) 2043b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 204426c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 204526c08b97SAdrien Mazarguil close(priv->nl_socket_route); 204626c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 204726c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 2048dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 2049dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 20502b730263SAdrien Mazarguil if (own_domain_id) 20512b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 2052771fa900SAdrien Mazarguil rte_free(priv); 2053e16adf08SThomas Monjalon if (eth_dev != NULL) 2054e16adf08SThomas Monjalon eth_dev->data->dev_private = NULL; 205526c08b97SAdrien Mazarguil } 2056e16adf08SThomas Monjalon if (eth_dev != NULL) { 2057e16adf08SThomas Monjalon /* 
mac_addrs must not be freed alone because part of dev_private */ 2058e16adf08SThomas Monjalon eth_dev->data->mac_addrs = NULL; 2059690de285SRaslan Darawsheh rte_eth_dev_release_port(eth_dev); 2060e16adf08SThomas Monjalon } 206117e19bc4SViacheslav Ovsiienko if (sh) 206217e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(sh); 2063f38c5457SAdrien Mazarguil assert(err > 0); 2064a6d83b6aSNélio Laranjeiro rte_errno = err; 2065f38c5457SAdrien Mazarguil return NULL; 2066f38c5457SAdrien Mazarguil } 2067f38c5457SAdrien Mazarguil 2068116f90adSAdrien Mazarguil /** 2069116f90adSAdrien Mazarguil * Comparison callback to sort device data. 2070116f90adSAdrien Mazarguil * 2071116f90adSAdrien Mazarguil * This is meant to be used with qsort(). 2072116f90adSAdrien Mazarguil * 2073116f90adSAdrien Mazarguil * @param a[in] 2074116f90adSAdrien Mazarguil * Pointer to pointer to first data object. 2075116f90adSAdrien Mazarguil * @param b[in] 2076116f90adSAdrien Mazarguil * Pointer to pointer to second data object. 2077116f90adSAdrien Mazarguil * 2078116f90adSAdrien Mazarguil * @return 2079116f90adSAdrien Mazarguil * 0 if both objects are equal, less than 0 if the first argument is less 2080116f90adSAdrien Mazarguil * than the second, greater than 0 otherwise. 2081116f90adSAdrien Mazarguil */ 2082116f90adSAdrien Mazarguil static int 2083116f90adSAdrien Mazarguil mlx5_dev_spawn_data_cmp(const void *a, const void *b) 2084116f90adSAdrien Mazarguil { 2085116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_a = 2086116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)a)->info; 2087116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_b = 2088116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)b)->info; 2089116f90adSAdrien Mazarguil int ret; 2090116f90adSAdrien Mazarguil 2091116f90adSAdrien Mazarguil /* Master device first. 
*/ 2092116f90adSAdrien Mazarguil ret = si_b->master - si_a->master; 2093116f90adSAdrien Mazarguil if (ret) 2094116f90adSAdrien Mazarguil return ret; 2095116f90adSAdrien Mazarguil /* Then representor devices. */ 2096116f90adSAdrien Mazarguil ret = si_b->representor - si_a->representor; 2097116f90adSAdrien Mazarguil if (ret) 2098116f90adSAdrien Mazarguil return ret; 2099116f90adSAdrien Mazarguil /* Unidentified devices come last in no specific order. */ 2100116f90adSAdrien Mazarguil if (!si_a->representor) 2101116f90adSAdrien Mazarguil return 0; 2102116f90adSAdrien Mazarguil /* Order representors by name. */ 2103116f90adSAdrien Mazarguil return si_a->port_name - si_b->port_name; 2104116f90adSAdrien Mazarguil } 2105116f90adSAdrien Mazarguil 2106f38c5457SAdrien Mazarguil /** 2107f38c5457SAdrien Mazarguil * DPDK callback to register a PCI device. 2108f38c5457SAdrien Mazarguil * 21092b730263SAdrien Mazarguil * This function spawns Ethernet devices out of a given PCI device. 2110f38c5457SAdrien Mazarguil * 2111f38c5457SAdrien Mazarguil * @param[in] pci_drv 2112f38c5457SAdrien Mazarguil * PCI driver structure (mlx5_driver). 2113f38c5457SAdrien Mazarguil * @param[in] pci_dev 2114f38c5457SAdrien Mazarguil * PCI device information. 2115f38c5457SAdrien Mazarguil * 2116f38c5457SAdrien Mazarguil * @return 2117f38c5457SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 2118f38c5457SAdrien Mazarguil */ 2119f38c5457SAdrien Mazarguil static int 2120f38c5457SAdrien Mazarguil mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2121f38c5457SAdrien Mazarguil struct rte_pci_device *pci_dev) 2122f38c5457SAdrien Mazarguil { 2123f38c5457SAdrien Mazarguil struct ibv_device **ibv_list; 2124ad74bc61SViacheslav Ovsiienko /* 2125ad74bc61SViacheslav Ovsiienko * Number of found IB Devices matching with requested PCI BDF. 
2126ad74bc61SViacheslav Ovsiienko * nd != 1 means there are multiple IB devices over the same 2127ad74bc61SViacheslav Ovsiienko * PCI device and we have representors and master. 2128ad74bc61SViacheslav Ovsiienko */ 2129ad74bc61SViacheslav Ovsiienko unsigned int nd = 0; 2130ad74bc61SViacheslav Ovsiienko /* 2131ad74bc61SViacheslav Ovsiienko * Number of found IB device Ports. nd = 1 and np = 1..n means 2132ad74bc61SViacheslav Ovsiienko * we have the single multiport IB device, and there may be 2133ad74bc61SViacheslav Ovsiienko * representors attached to some of found ports. 2134ad74bc61SViacheslav Ovsiienko */ 2135ad74bc61SViacheslav Ovsiienko unsigned int np = 0; 2136ad74bc61SViacheslav Ovsiienko /* 2137ad74bc61SViacheslav Ovsiienko * Number of DPDK ethernet devices to Spawn - either over 2138ad74bc61SViacheslav Ovsiienko * multiple IB devices or multiple ports of single IB device. 2139ad74bc61SViacheslav Ovsiienko * Actually this is the number of iterations to spawn. 2140ad74bc61SViacheslav Ovsiienko */ 2141ad74bc61SViacheslav Ovsiienko unsigned int ns = 0; 2142f87bfa8eSYongseok Koh struct mlx5_dev_config dev_config; 2143f38c5457SAdrien Mazarguil int ret; 2144f38c5457SAdrien Mazarguil 21457be600c8SYongseok Koh ret = mlx5_init_once(); 21467be600c8SYongseok Koh if (ret) { 21477be600c8SYongseok Koh DRV_LOG(ERR, "unable to init PMD global data: %s", 21487be600c8SYongseok Koh strerror(rte_errno)); 21497be600c8SYongseok Koh return -rte_errno; 21507be600c8SYongseok Koh } 2151f38c5457SAdrien Mazarguil assert(pci_drv == &mlx5_driver); 2152f38c5457SAdrien Mazarguil errno = 0; 2153f38c5457SAdrien Mazarguil ibv_list = mlx5_glue->get_device_list(&ret); 2154f38c5457SAdrien Mazarguil if (!ibv_list) { 2155f38c5457SAdrien Mazarguil rte_errno = errno ? 
errno : ENOSYS; 2156f38c5457SAdrien Mazarguil DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 2157a6d83b6aSNélio Laranjeiro return -rte_errno; 2158a6d83b6aSNélio Laranjeiro } 2159ad74bc61SViacheslav Ovsiienko /* 2160ad74bc61SViacheslav Ovsiienko * First scan the list of all Infiniband devices to find 2161ad74bc61SViacheslav Ovsiienko * matching ones, gathering into the list. 2162ad74bc61SViacheslav Ovsiienko */ 216326c08b97SAdrien Mazarguil struct ibv_device *ibv_match[ret + 1]; 2164ad74bc61SViacheslav Ovsiienko int nl_route = -1; 2165ad74bc61SViacheslav Ovsiienko int nl_rdma = -1; 2166ad74bc61SViacheslav Ovsiienko unsigned int i; 216726c08b97SAdrien Mazarguil 2168f38c5457SAdrien Mazarguil while (ret-- > 0) { 2169f38c5457SAdrien Mazarguil struct rte_pci_addr pci_addr; 2170f38c5457SAdrien Mazarguil 2171f38c5457SAdrien Mazarguil DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 2172f38c5457SAdrien Mazarguil if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr)) 2173f38c5457SAdrien Mazarguil continue; 2174f38c5457SAdrien Mazarguil if (pci_dev->addr.domain != pci_addr.domain || 2175f38c5457SAdrien Mazarguil pci_dev->addr.bus != pci_addr.bus || 2176f38c5457SAdrien Mazarguil pci_dev->addr.devid != pci_addr.devid || 2177f38c5457SAdrien Mazarguil pci_dev->addr.function != pci_addr.function) 2178f38c5457SAdrien Mazarguil continue; 217926c08b97SAdrien Mazarguil DRV_LOG(INFO, "PCI information matches for device \"%s\"", 2180f38c5457SAdrien Mazarguil ibv_list[ret]->name); 2181ad74bc61SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 218226c08b97SAdrien Mazarguil } 2183ad74bc61SViacheslav Ovsiienko ibv_match[nd] = NULL; 2184ad74bc61SViacheslav Ovsiienko if (!nd) { 2185ae4eb7dcSViacheslav Ovsiienko /* No device matches, just complain and bail out. 
*/ 2186ad74bc61SViacheslav Ovsiienko mlx5_glue->free_device_list(ibv_list); 2187ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, 2188ad74bc61SViacheslav Ovsiienko "no Verbs device matches PCI device " PCI_PRI_FMT "," 2189ad74bc61SViacheslav Ovsiienko " are kernel drivers loaded?", 2190ad74bc61SViacheslav Ovsiienko pci_dev->addr.domain, pci_dev->addr.bus, 2191ad74bc61SViacheslav Ovsiienko pci_dev->addr.devid, pci_dev->addr.function); 2192ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2193ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2194ad74bc61SViacheslav Ovsiienko return ret; 2195ad74bc61SViacheslav Ovsiienko } 2196ad74bc61SViacheslav Ovsiienko nl_route = mlx5_nl_init(NETLINK_ROUTE); 2197ad74bc61SViacheslav Ovsiienko nl_rdma = mlx5_nl_init(NETLINK_RDMA); 2198ad74bc61SViacheslav Ovsiienko if (nd == 1) { 219926c08b97SAdrien Mazarguil /* 2200ad74bc61SViacheslav Ovsiienko * Found single matching device may have multiple ports. 2201ad74bc61SViacheslav Ovsiienko * Each port may be representor, we have to check the port 2202ad74bc61SViacheslav Ovsiienko * number and check the representors existence. 220326c08b97SAdrien Mazarguil */ 2204ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 2205ad74bc61SViacheslav Ovsiienko np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name); 2206ad74bc61SViacheslav Ovsiienko if (!np) 2207ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, "can not get IB device \"%s\"" 2208ad74bc61SViacheslav Ovsiienko " ports number", ibv_match[0]->name); 2209ad74bc61SViacheslav Ovsiienko } 2210ad74bc61SViacheslav Ovsiienko /* 2211ad74bc61SViacheslav Ovsiienko * Now we can determine the maximal 2212ad74bc61SViacheslav Ovsiienko * amount of devices to be spawned. 2213ad74bc61SViacheslav Ovsiienko */ 2214ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data list[np ? 
np : nd]; 2215ad74bc61SViacheslav Ovsiienko 2216ad74bc61SViacheslav Ovsiienko if (np > 1) { 2217ad74bc61SViacheslav Ovsiienko /* 2218ae4eb7dcSViacheslav Ovsiienko * Single IB device with multiple ports found, 2219ad74bc61SViacheslav Ovsiienko * it may be E-Switch master device and representors. 2220ad74bc61SViacheslav Ovsiienko * We have to perform identification trough the ports. 2221ad74bc61SViacheslav Ovsiienko */ 2222ad74bc61SViacheslav Ovsiienko assert(nl_rdma >= 0); 2223ad74bc61SViacheslav Ovsiienko assert(ns == 0); 2224ad74bc61SViacheslav Ovsiienko assert(nd == 1); 2225ad74bc61SViacheslav Ovsiienko for (i = 1; i <= np; ++i) { 2226ad74bc61SViacheslav Ovsiienko list[ns].max_port = np; 2227ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = i; 2228ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[0]; 2229ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 2230ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev; 2231ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 2232ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, i); 2233ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 2234ad74bc61SViacheslav Ovsiienko /* 2235ad74bc61SViacheslav Ovsiienko * No network interface index found for the 2236ad74bc61SViacheslav Ovsiienko * specified port, it means there is no 2237ad74bc61SViacheslav Ovsiienko * representor on this port. It's OK, 2238ad74bc61SViacheslav Ovsiienko * there can be disabled ports, for example 2239ad74bc61SViacheslav Ovsiienko * if sriov_numvfs < sriov_totalvfs. 
2240ad74bc61SViacheslav Ovsiienko */ 224126c08b97SAdrien Mazarguil continue; 224226c08b97SAdrien Mazarguil } 2243ad74bc61SViacheslav Ovsiienko ret = -1; 224426c08b97SAdrien Mazarguil if (nl_route >= 0) 2245ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 2246ad74bc61SViacheslav Ovsiienko (nl_route, 2247ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 2248ad74bc61SViacheslav Ovsiienko &list[ns].info); 2249ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 2250ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 2251ad74bc61SViacheslav Ovsiienko /* 2252ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 2253ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 2254ad74bc61SViacheslav Ovsiienko * with sysfs. 2255ad74bc61SViacheslav Ovsiienko */ 2256ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 2257ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 2258ad74bc61SViacheslav Ovsiienko &list[ns].info); 2259ad74bc61SViacheslav Ovsiienko } 2260ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 2261ad74bc61SViacheslav Ovsiienko list[ns].info.master)) 2262ad74bc61SViacheslav Ovsiienko ns++; 2263ad74bc61SViacheslav Ovsiienko } 2264ad74bc61SViacheslav Ovsiienko if (!ns) { 226526c08b97SAdrien Mazarguil DRV_LOG(ERR, 2266ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 2267ad74bc61SViacheslav Ovsiienko " on the IB device with multiple ports"); 2268ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2269ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2270ad74bc61SViacheslav Ovsiienko goto exit; 2271ad74bc61SViacheslav Ovsiienko } 2272ad74bc61SViacheslav Ovsiienko } else { 2273ad74bc61SViacheslav Ovsiienko /* 2274ad74bc61SViacheslav Ovsiienko * The existence of several matching entries (nd > 1) means 2275ad74bc61SViacheslav Ovsiienko * port representors have been instantiated. 
No existing Verbs 2276ad74bc61SViacheslav Ovsiienko * call nor sysfs entries can tell them apart, this can only 2277ad74bc61SViacheslav Ovsiienko * be done through Netlink calls assuming kernel drivers are 2278ad74bc61SViacheslav Ovsiienko * recent enough to support them. 2279ad74bc61SViacheslav Ovsiienko * 2280ad74bc61SViacheslav Ovsiienko * In the event of identification failure through Netlink, 2281ad74bc61SViacheslav Ovsiienko * try again through sysfs, then: 2282ad74bc61SViacheslav Ovsiienko * 2283ad74bc61SViacheslav Ovsiienko * 1. A single IB device matches (nd == 1) with single 2284ad74bc61SViacheslav Ovsiienko * port (np=0/1) and is not a representor, assume 2285ad74bc61SViacheslav Ovsiienko * no switch support. 2286ad74bc61SViacheslav Ovsiienko * 2287ad74bc61SViacheslav Ovsiienko * 2. Otherwise no safe assumptions can be made; 2288ad74bc61SViacheslav Ovsiienko * complain louder and bail out. 2289ad74bc61SViacheslav Ovsiienko */ 2290ad74bc61SViacheslav Ovsiienko np = 1; 2291ad74bc61SViacheslav Ovsiienko for (i = 0; i != nd; ++i) { 2292ad74bc61SViacheslav Ovsiienko memset(&list[ns].info, 0, sizeof(list[ns].info)); 2293ad74bc61SViacheslav Ovsiienko list[ns].max_port = 1; 2294ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = 1; 2295ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[i]; 2296ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 2297ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev; 2298ad74bc61SViacheslav Ovsiienko list[ns].ifindex = 0; 2299ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 2300ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 2301ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, 1); 2302ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 23039c2bbd04SViacheslav Ovsiienko char ifname[IF_NAMESIZE]; 23049c2bbd04SViacheslav Ovsiienko 2305ad74bc61SViacheslav Ovsiienko /* 23069c2bbd04SViacheslav Ovsiienko * Netlink failed, it may happen with old 23079c2bbd04SViacheslav Ovsiienko * ib_core 
kernel driver (before 4.16). 23089c2bbd04SViacheslav Ovsiienko * We can assume there is old driver because 23099c2bbd04SViacheslav Ovsiienko * here we are processing single ports IB 23109c2bbd04SViacheslav Ovsiienko * devices. Let's try sysfs to retrieve 23119c2bbd04SViacheslav Ovsiienko * the ifindex. The method works for 23129c2bbd04SViacheslav Ovsiienko * master device only. 23139c2bbd04SViacheslav Ovsiienko */ 23149c2bbd04SViacheslav Ovsiienko if (nd > 1) { 23159c2bbd04SViacheslav Ovsiienko /* 23169c2bbd04SViacheslav Ovsiienko * Multiple devices found, assume 23179c2bbd04SViacheslav Ovsiienko * representors, can not distinguish 23189c2bbd04SViacheslav Ovsiienko * master/representor and retrieve 23199c2bbd04SViacheslav Ovsiienko * ifindex via sysfs. 2320ad74bc61SViacheslav Ovsiienko */ 2321ad74bc61SViacheslav Ovsiienko continue; 2322ad74bc61SViacheslav Ovsiienko } 23239c2bbd04SViacheslav Ovsiienko ret = mlx5_get_master_ifname 23249c2bbd04SViacheslav Ovsiienko (ibv_match[i]->ibdev_path, &ifname); 23259c2bbd04SViacheslav Ovsiienko if (!ret) 23269c2bbd04SViacheslav Ovsiienko list[ns].ifindex = 23279c2bbd04SViacheslav Ovsiienko if_nametoindex(ifname); 23289c2bbd04SViacheslav Ovsiienko if (!list[ns].ifindex) { 23299c2bbd04SViacheslav Ovsiienko /* 23309c2bbd04SViacheslav Ovsiienko * No network interface index found 23319c2bbd04SViacheslav Ovsiienko * for the specified device, it means 23329c2bbd04SViacheslav Ovsiienko * there it is neither representor 23339c2bbd04SViacheslav Ovsiienko * nor master. 
23349c2bbd04SViacheslav Ovsiienko */ 23359c2bbd04SViacheslav Ovsiienko continue; 23369c2bbd04SViacheslav Ovsiienko } 23379c2bbd04SViacheslav Ovsiienko } 2338ad74bc61SViacheslav Ovsiienko ret = -1; 2339ad74bc61SViacheslav Ovsiienko if (nl_route >= 0) 2340ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 2341ad74bc61SViacheslav Ovsiienko (nl_route, 2342ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 2343ad74bc61SViacheslav Ovsiienko &list[ns].info); 2344ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 2345ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 2346ad74bc61SViacheslav Ovsiienko /* 2347ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 2348ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 2349ad74bc61SViacheslav Ovsiienko * with sysfs. 2350ad74bc61SViacheslav Ovsiienko */ 2351ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 2352ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 2353ad74bc61SViacheslav Ovsiienko &list[ns].info); 2354ad74bc61SViacheslav Ovsiienko } 2355ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 2356ad74bc61SViacheslav Ovsiienko list[ns].info.master)) { 2357ad74bc61SViacheslav Ovsiienko ns++; 2358ad74bc61SViacheslav Ovsiienko } else if ((nd == 1) && 2359ad74bc61SViacheslav Ovsiienko !list[ns].info.representor && 2360ad74bc61SViacheslav Ovsiienko !list[ns].info.master) { 2361ad74bc61SViacheslav Ovsiienko /* 2362ad74bc61SViacheslav Ovsiienko * Single IB device with 2363ad74bc61SViacheslav Ovsiienko * one physical port and 2364ad74bc61SViacheslav Ovsiienko * attached network device. 2365ad74bc61SViacheslav Ovsiienko * May be SRIOV is not enabled 2366ad74bc61SViacheslav Ovsiienko * or there is no representors. 
2367ad74bc61SViacheslav Ovsiienko */ 2368ad74bc61SViacheslav Ovsiienko DRV_LOG(INFO, "no E-Switch support detected"); 2369ad74bc61SViacheslav Ovsiienko ns++; 2370ad74bc61SViacheslav Ovsiienko break; 237126c08b97SAdrien Mazarguil } 2372f38c5457SAdrien Mazarguil } 2373ad74bc61SViacheslav Ovsiienko if (!ns) { 2374ad74bc61SViacheslav Ovsiienko DRV_LOG(ERR, 2375ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 2376ad74bc61SViacheslav Ovsiienko " on the multiple IB devices"); 2377ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2378ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2379ad74bc61SViacheslav Ovsiienko goto exit; 2380ad74bc61SViacheslav Ovsiienko } 2381ad74bc61SViacheslav Ovsiienko } 2382ad74bc61SViacheslav Ovsiienko assert(ns); 2383116f90adSAdrien Mazarguil /* 2384116f90adSAdrien Mazarguil * Sort list to probe devices in natural order for users convenience 2385116f90adSAdrien Mazarguil * (i.e. master first, then representors from lowest to highest ID). 2386116f90adSAdrien Mazarguil */ 2387ad74bc61SViacheslav Ovsiienko qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp); 2388f87bfa8eSYongseok Koh /* Default configuration. */ 2389f87bfa8eSYongseok Koh dev_config = (struct mlx5_dev_config){ 239078c7a16dSYongseok Koh .hw_padding = 0, 2391f87bfa8eSYongseok Koh .mps = MLX5_ARG_UNSET, 2392f87bfa8eSYongseok Koh .rx_vec_en = 1, 2393505f1fe4SViacheslav Ovsiienko .txq_inline_max = MLX5_ARG_UNSET, 2394505f1fe4SViacheslav Ovsiienko .txq_inline_min = MLX5_ARG_UNSET, 2395505f1fe4SViacheslav Ovsiienko .txq_inline_mpw = MLX5_ARG_UNSET, 2396f87bfa8eSYongseok Koh .txqs_inline = MLX5_ARG_UNSET, 2397f87bfa8eSYongseok Koh .vf_nl_en = 1, 2398dceb5029SYongseok Koh .mr_ext_memseg_en = 1, 2399f87bfa8eSYongseok Koh .mprq = { 2400f87bfa8eSYongseok Koh .enabled = 0, /* Disabled by default. 
*/ 2401f87bfa8eSYongseok Koh .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N, 2402f87bfa8eSYongseok Koh .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, 2403f87bfa8eSYongseok Koh .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, 2404f87bfa8eSYongseok Koh }, 2405e2b4925eSOri Kam .dv_esw_en = 1, 2406f87bfa8eSYongseok Koh }; 2407ad74bc61SViacheslav Ovsiienko /* Device specific configuration. */ 2408f38c5457SAdrien Mazarguil switch (pci_dev->id.device_id) { 2409f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 2410f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 2411f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 2412f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 2413f87bfa8eSYongseok Koh dev_config.vf = 1; 2414f38c5457SAdrien Mazarguil break; 2415f38c5457SAdrien Mazarguil default: 2416f87bfa8eSYongseok Koh break; 2417f38c5457SAdrien Mazarguil } 2418ad74bc61SViacheslav Ovsiienko for (i = 0; i != ns; ++i) { 24192b730263SAdrien Mazarguil uint32_t restore; 24202b730263SAdrien Mazarguil 2421f87bfa8eSYongseok Koh list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 2422ad74bc61SViacheslav Ovsiienko &list[i], 2423ad74bc61SViacheslav Ovsiienko dev_config); 24246de569f5SAdrien Mazarguil if (!list[i].eth_dev) { 2425206254b7SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 24262b730263SAdrien Mazarguil break; 2427206254b7SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 24286de569f5SAdrien Mazarguil continue; 24296de569f5SAdrien Mazarguil } 2430116f90adSAdrien Mazarguil restore = list[i].eth_dev->data->dev_flags; 2431116f90adSAdrien Mazarguil rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 24322b730263SAdrien Mazarguil /* Restore non-PCI flags cleared by the above call. 
*/ 2433116f90adSAdrien Mazarguil list[i].eth_dev->data->dev_flags |= restore; 2434116f90adSAdrien Mazarguil rte_eth_dev_probing_finish(list[i].eth_dev); 24352b730263SAdrien Mazarguil } 2436ad74bc61SViacheslav Ovsiienko if (i != ns) { 2437f38c5457SAdrien Mazarguil DRV_LOG(ERR, 2438f38c5457SAdrien Mazarguil "probe of PCI device " PCI_PRI_FMT " aborted after" 2439f38c5457SAdrien Mazarguil " encountering an error: %s", 2440f38c5457SAdrien Mazarguil pci_dev->addr.domain, pci_dev->addr.bus, 2441f38c5457SAdrien Mazarguil pci_dev->addr.devid, pci_dev->addr.function, 2442f38c5457SAdrien Mazarguil strerror(rte_errno)); 2443f38c5457SAdrien Mazarguil ret = -rte_errno; 24442b730263SAdrien Mazarguil /* Roll back. */ 24452b730263SAdrien Mazarguil while (i--) { 24466de569f5SAdrien Mazarguil if (!list[i].eth_dev) 24476de569f5SAdrien Mazarguil continue; 2448116f90adSAdrien Mazarguil mlx5_dev_close(list[i].eth_dev); 2449e16adf08SThomas Monjalon /* mac_addrs must not be freed because in dev_private */ 2450e16adf08SThomas Monjalon list[i].eth_dev->data->mac_addrs = NULL; 2451116f90adSAdrien Mazarguil claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 24522b730263SAdrien Mazarguil } 24532b730263SAdrien Mazarguil /* Restore original error. 
*/ 24542b730263SAdrien Mazarguil rte_errno = -ret; 2455f38c5457SAdrien Mazarguil } else { 2456f38c5457SAdrien Mazarguil ret = 0; 2457f38c5457SAdrien Mazarguil } 2458ad74bc61SViacheslav Ovsiienko exit: 2459ad74bc61SViacheslav Ovsiienko /* 2460ad74bc61SViacheslav Ovsiienko * Do the routine cleanup: 2461ad74bc61SViacheslav Ovsiienko * - close opened Netlink sockets 2462ad74bc61SViacheslav Ovsiienko * - free the Infiniband device list 2463ad74bc61SViacheslav Ovsiienko */ 2464ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 2465ad74bc61SViacheslav Ovsiienko close(nl_rdma); 2466ad74bc61SViacheslav Ovsiienko if (nl_route >= 0) 2467ad74bc61SViacheslav Ovsiienko close(nl_route); 2468ad74bc61SViacheslav Ovsiienko assert(ibv_list); 2469ad74bc61SViacheslav Ovsiienko mlx5_glue->free_device_list(ibv_list); 2470f38c5457SAdrien Mazarguil return ret; 2471771fa900SAdrien Mazarguil } 2472771fa900SAdrien Mazarguil 24733a820742SOphir Munk /** 24743a820742SOphir Munk * DPDK callback to remove a PCI device. 24753a820742SOphir Munk * 24763a820742SOphir Munk * This function removes all Ethernet devices belong to a given PCI device. 24773a820742SOphir Munk * 24783a820742SOphir Munk * @param[in] pci_dev 24793a820742SOphir Munk * Pointer to the PCI device. 24803a820742SOphir Munk * 24813a820742SOphir Munk * @return 24823a820742SOphir Munk * 0 on success, the function cannot fail. 
24833a820742SOphir Munk */ 24843a820742SOphir Munk static int 24853a820742SOphir Munk mlx5_pci_remove(struct rte_pci_device *pci_dev) 24863a820742SOphir Munk { 24873a820742SOphir Munk uint16_t port_id; 24883a820742SOphir Munk 24895294b800SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 24903a820742SOphir Munk rte_eth_dev_close(port_id); 24913a820742SOphir Munk return 0; 24923a820742SOphir Munk } 24933a820742SOphir Munk 2494771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = { 2495771fa900SAdrien Mazarguil { 24961d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 24971d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4) 2498771fa900SAdrien Mazarguil }, 2499771fa900SAdrien Mazarguil { 25001d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 25011d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) 2502771fa900SAdrien Mazarguil }, 2503771fa900SAdrien Mazarguil { 25041d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 25051d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) 2506771fa900SAdrien Mazarguil }, 2507771fa900SAdrien Mazarguil { 25081d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 25091d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) 2510771fa900SAdrien Mazarguil }, 2511771fa900SAdrien Mazarguil { 2512528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2513528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5) 2514528a9fbeSYongseok Koh }, 2515528a9fbeSYongseok Koh { 2516528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2517528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) 2518528a9fbeSYongseok Koh }, 2519528a9fbeSYongseok Koh { 2520528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2521528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) 2522528a9fbeSYongseok Koh }, 2523528a9fbeSYongseok Koh { 2524528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2525528a9fbeSYongseok Koh 
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) 2526528a9fbeSYongseok Koh }, 2527528a9fbeSYongseok Koh { 2528dd3331c6SShahaf Shuler RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2529dd3331c6SShahaf Shuler PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) 2530dd3331c6SShahaf Shuler }, 2531dd3331c6SShahaf Shuler { 2532c322c0e5SOri Kam RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2533c322c0e5SOri Kam PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) 2534c322c0e5SOri Kam }, 2535c322c0e5SOri Kam { 2536f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2537f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6) 2538f0354d84SWisam Jaddo }, 2539f0354d84SWisam Jaddo { 2540f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2541f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) 2542f0354d84SWisam Jaddo }, 2543f0354d84SWisam Jaddo { 2544771fa900SAdrien Mazarguil .vendor_id = 0 2545771fa900SAdrien Mazarguil } 2546771fa900SAdrien Mazarguil }; 2547771fa900SAdrien Mazarguil 2548fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver = { 25492f3193cfSJan Viktorin .driver = { 25502f3193cfSJan Viktorin .name = MLX5_DRIVER_NAME 25512f3193cfSJan Viktorin }, 2552771fa900SAdrien Mazarguil .id_table = mlx5_pci_id_map, 2553af424af8SShreyansh Jain .probe = mlx5_pci_probe, 25543a820742SOphir Munk .remove = mlx5_pci_remove, 2555989e999dSShahaf Shuler .dma_map = mlx5_dma_map, 2556989e999dSShahaf Shuler .dma_unmap = mlx5_dma_unmap, 255769c06d0eSYongseok Koh .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV | 2558b76fafb1SDavid Marchand RTE_PCI_DRV_PROBE_AGAIN, 2559771fa900SAdrien Mazarguil }; 2560771fa900SAdrien Mazarguil 256172b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN 256259b91becSAdrien Mazarguil 256359b91becSAdrien Mazarguil /** 256408c028d0SAdrien Mazarguil * Suffix RTE_EAL_PMD_PATH with "-glue". 256508c028d0SAdrien Mazarguil * 256608c028d0SAdrien Mazarguil * This function performs a sanity check on RTE_EAL_PMD_PATH before 256708c028d0SAdrien Mazarguil * suffixing its last component. 
256808c028d0SAdrien Mazarguil * 256908c028d0SAdrien Mazarguil * @param buf[out] 257008c028d0SAdrien Mazarguil * Output buffer, should be large enough otherwise NULL is returned. 257108c028d0SAdrien Mazarguil * @param size 257208c028d0SAdrien Mazarguil * Size of @p out. 257308c028d0SAdrien Mazarguil * 257408c028d0SAdrien Mazarguil * @return 257508c028d0SAdrien Mazarguil * Pointer to @p buf or @p NULL in case suffix cannot be appended. 257608c028d0SAdrien Mazarguil */ 257708c028d0SAdrien Mazarguil static char * 257808c028d0SAdrien Mazarguil mlx5_glue_path(char *buf, size_t size) 257908c028d0SAdrien Mazarguil { 258008c028d0SAdrien Mazarguil static const char *const bad[] = { "/", ".", "..", NULL }; 258108c028d0SAdrien Mazarguil const char *path = RTE_EAL_PMD_PATH; 258208c028d0SAdrien Mazarguil size_t len = strlen(path); 258308c028d0SAdrien Mazarguil size_t off; 258408c028d0SAdrien Mazarguil int i; 258508c028d0SAdrien Mazarguil 258608c028d0SAdrien Mazarguil while (len && path[len - 1] == '/') 258708c028d0SAdrien Mazarguil --len; 258808c028d0SAdrien Mazarguil for (off = len; off && path[off - 1] != '/'; --off) 258908c028d0SAdrien Mazarguil ; 259008c028d0SAdrien Mazarguil for (i = 0; bad[i]; ++i) 259108c028d0SAdrien Mazarguil if (!strncmp(path + off, bad[i], (int)(len - off))) 259208c028d0SAdrien Mazarguil goto error; 259308c028d0SAdrien Mazarguil i = snprintf(buf, size, "%.*s-glue", (int)len, path); 259408c028d0SAdrien Mazarguil if (i == -1 || (size_t)i >= size) 259508c028d0SAdrien Mazarguil goto error; 259608c028d0SAdrien Mazarguil return buf; 259708c028d0SAdrien Mazarguil error: 2598a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2599a170a30dSNélio Laranjeiro "unable to append \"-glue\" to last component of" 260008c028d0SAdrien Mazarguil " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," 260108c028d0SAdrien Mazarguil " please re-configure DPDK"); 260208c028d0SAdrien Mazarguil return NULL; 260308c028d0SAdrien Mazarguil } 260408c028d0SAdrien Mazarguil 260508c028d0SAdrien Mazarguil 
/** 260659b91becSAdrien Mazarguil * Initialization routine for run-time dependency on rdma-core. 260759b91becSAdrien Mazarguil */ 260859b91becSAdrien Mazarguil static int 260959b91becSAdrien Mazarguil mlx5_glue_init(void) 261059b91becSAdrien Mazarguil { 261108c028d0SAdrien Mazarguil char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; 2612f6242d06SAdrien Mazarguil const char *path[] = { 2613f6242d06SAdrien Mazarguil /* 2614f6242d06SAdrien Mazarguil * A basic security check is necessary before trusting 2615f6242d06SAdrien Mazarguil * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH. 2616f6242d06SAdrien Mazarguil */ 2617f6242d06SAdrien Mazarguil (geteuid() == getuid() && getegid() == getgid() ? 2618f6242d06SAdrien Mazarguil getenv("MLX5_GLUE_PATH") : NULL), 261908c028d0SAdrien Mazarguil /* 262008c028d0SAdrien Mazarguil * When RTE_EAL_PMD_PATH is set, use its glue-suffixed 262108c028d0SAdrien Mazarguil * variant, otherwise let dlopen() look up libraries on its 262208c028d0SAdrien Mazarguil * own. 262308c028d0SAdrien Mazarguil */ 262408c028d0SAdrien Mazarguil (*RTE_EAL_PMD_PATH ? 
262508c028d0SAdrien Mazarguil mlx5_glue_path(glue_path, sizeof(glue_path)) : ""), 2626f6242d06SAdrien Mazarguil }; 2627f6242d06SAdrien Mazarguil unsigned int i = 0; 262859b91becSAdrien Mazarguil void *handle = NULL; 262959b91becSAdrien Mazarguil void **sym; 263059b91becSAdrien Mazarguil const char *dlmsg; 263159b91becSAdrien Mazarguil 2632f6242d06SAdrien Mazarguil while (!handle && i != RTE_DIM(path)) { 2633f6242d06SAdrien Mazarguil const char *end; 2634f6242d06SAdrien Mazarguil size_t len; 2635f6242d06SAdrien Mazarguil int ret; 2636f6242d06SAdrien Mazarguil 2637f6242d06SAdrien Mazarguil if (!path[i]) { 2638f6242d06SAdrien Mazarguil ++i; 2639f6242d06SAdrien Mazarguil continue; 2640f6242d06SAdrien Mazarguil } 2641f6242d06SAdrien Mazarguil end = strpbrk(path[i], ":;"); 2642f6242d06SAdrien Mazarguil if (!end) 2643f6242d06SAdrien Mazarguil end = path[i] + strlen(path[i]); 2644f6242d06SAdrien Mazarguil len = end - path[i]; 2645f6242d06SAdrien Mazarguil ret = 0; 2646f6242d06SAdrien Mazarguil do { 2647f6242d06SAdrien Mazarguil char name[ret + 1]; 2648f6242d06SAdrien Mazarguil 2649f6242d06SAdrien Mazarguil ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE, 2650f6242d06SAdrien Mazarguil (int)len, path[i], 2651f6242d06SAdrien Mazarguil (!len || *(end - 1) == '/') ? 
"" : "/"); 2652f6242d06SAdrien Mazarguil if (ret == -1) 2653f6242d06SAdrien Mazarguil break; 2654f6242d06SAdrien Mazarguil if (sizeof(name) != (size_t)ret + 1) 2655f6242d06SAdrien Mazarguil continue; 2656a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"", 2657a170a30dSNélio Laranjeiro name); 2658f6242d06SAdrien Mazarguil handle = dlopen(name, RTLD_LAZY); 2659f6242d06SAdrien Mazarguil break; 2660f6242d06SAdrien Mazarguil } while (1); 2661f6242d06SAdrien Mazarguil path[i] = end + 1; 2662f6242d06SAdrien Mazarguil if (!*end) 2663f6242d06SAdrien Mazarguil ++i; 2664f6242d06SAdrien Mazarguil } 266559b91becSAdrien Mazarguil if (!handle) { 266659b91becSAdrien Mazarguil rte_errno = EINVAL; 266759b91becSAdrien Mazarguil dlmsg = dlerror(); 266859b91becSAdrien Mazarguil if (dlmsg) 2669a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg); 267059b91becSAdrien Mazarguil goto glue_error; 267159b91becSAdrien Mazarguil } 267259b91becSAdrien Mazarguil sym = dlsym(handle, "mlx5_glue"); 267359b91becSAdrien Mazarguil if (!sym || !*sym) { 267459b91becSAdrien Mazarguil rte_errno = EINVAL; 267559b91becSAdrien Mazarguil dlmsg = dlerror(); 267659b91becSAdrien Mazarguil if (dlmsg) 2677a170a30dSNélio Laranjeiro DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg); 267859b91becSAdrien Mazarguil goto glue_error; 267959b91becSAdrien Mazarguil } 268059b91becSAdrien Mazarguil mlx5_glue = *sym; 268159b91becSAdrien Mazarguil return 0; 268259b91becSAdrien Mazarguil glue_error: 268359b91becSAdrien Mazarguil if (handle) 268459b91becSAdrien Mazarguil dlclose(handle); 2685a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 2686a170a30dSNélio Laranjeiro "cannot initialize PMD due to missing run-time dependency on" 2687a170a30dSNélio Laranjeiro " rdma-core libraries (libibverbs, libmlx5)"); 268859b91becSAdrien Mazarguil return -rte_errno; 268959b91becSAdrien Mazarguil } 269059b91becSAdrien Mazarguil 269159b91becSAdrien Mazarguil #endif 269259b91becSAdrien 
Mazarguil 2693771fa900SAdrien Mazarguil /** 2694771fa900SAdrien Mazarguil * Driver initialization routine. 2695771fa900SAdrien Mazarguil */ 2696f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init) 2697771fa900SAdrien Mazarguil { 26983d96644aSStephen Hemminger /* Initialize driver log type. */ 26993d96644aSStephen Hemminger mlx5_logtype = rte_log_register("pmd.net.mlx5"); 27003d96644aSStephen Hemminger if (mlx5_logtype >= 0) 27013d96644aSStephen Hemminger rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); 27023d96644aSStephen Hemminger 27035f8ba81cSXueming Li /* Build the static tables for Verbs conversion. */ 2704ea16068cSYongseok Koh mlx5_set_ptype_table(); 27055f8ba81cSXueming Li mlx5_set_cksum_table(); 27065f8ba81cSXueming Li mlx5_set_swp_types_table(); 2707771fa900SAdrien Mazarguil /* 2708771fa900SAdrien Mazarguil * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use 2709771fa900SAdrien Mazarguil * huge pages. Calling ibv_fork_init() during init allows 2710771fa900SAdrien Mazarguil * applications to use fork() safely for purposes other than 2711771fa900SAdrien Mazarguil * using this PMD, which is not supported in forked processes. 2712771fa900SAdrien Mazarguil */ 2713771fa900SAdrien Mazarguil setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); 2714161b93e5SYongseok Koh /* Match the size of Rx completion entry to the size of a cacheline. */ 2715161b93e5SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128) 2716161b93e5SYongseok Koh setenv("MLX5_CQE_SIZE", "128", 0); 27171ff30d18SMatan Azrad /* 27181ff30d18SMatan Azrad * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to 27191ff30d18SMatan Azrad * cleanup all the Verbs resources even when the device was removed. 
27201ff30d18SMatan Azrad */ 27211ff30d18SMatan Azrad setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1); 272272b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN 272359b91becSAdrien Mazarguil if (mlx5_glue_init()) 272459b91becSAdrien Mazarguil return; 272559b91becSAdrien Mazarguil assert(mlx5_glue); 272659b91becSAdrien Mazarguil #endif 27272a3b0097SAdrien Mazarguil #ifndef NDEBUG 27282a3b0097SAdrien Mazarguil /* Glue structure must not contain any NULL pointers. */ 27292a3b0097SAdrien Mazarguil { 27302a3b0097SAdrien Mazarguil unsigned int i; 27312a3b0097SAdrien Mazarguil 27322a3b0097SAdrien Mazarguil for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i) 27332a3b0097SAdrien Mazarguil assert(((const void *const *)mlx5_glue)[i]); 27342a3b0097SAdrien Mazarguil } 27352a3b0097SAdrien Mazarguil #endif 27366d5df2eaSAdrien Mazarguil if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) { 2737a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2738a170a30dSNélio Laranjeiro "rdma-core glue \"%s\" mismatch: \"%s\" is required", 27396d5df2eaSAdrien Mazarguil mlx5_glue->version, MLX5_GLUE_VERSION); 27406d5df2eaSAdrien Mazarguil return; 27416d5df2eaSAdrien Mazarguil } 27420e83b8e5SNelio Laranjeiro mlx5_glue->fork_init(); 27433dcfe039SThomas Monjalon rte_pci_register(&mlx5_driver); 2744771fa900SAdrien Mazarguil } 2745771fa900SAdrien Mazarguil 274601f19227SShreyansh Jain RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); 274701f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); 27480880c401SOlivier Matz RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); 2749