/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating an MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

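/*
 * Illustration only (not part of the driver): the parameter names above are
 * consumed as device arguments ("devargs") at probe time. For example, an
 * application could be started with a PCI whitelist entry such as
 *
 *   -w 0000:03:00.0,mprq_en=1,rxq_cqe_comp_en=0,txq_inline_max=256
 *
 * and each key=value pair would then be handled by mlx5_args_check() below.
 * The PCI address and values are placeholders; actual settings depend on the
 * deployment.
 */
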
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
	uint32_t ifindex; /**< Network interface index. */
	uint32_t max_port; /**< IB device maximal port index. */
	uint32_t ibv_port; /**< IB device physical port index. */
	struct mlx5_switch_info info; /**< Switch information. */
	struct ibv_device *ibv_dev; /**< Associated IB device. */
	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
	struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
{
	uint8_t i;

	TAILQ_INIT(&sh->cmng.flow_counters);
	for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
}

/**
 * Destroy all the resources allocated for counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
	rte_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	uint8_t i;
	int j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
		struct mlx5_flow_counter_pool *pool;
		uint32_t batch = !!(i % 2);

		if (!sh->cmng.ccont[i].pools)
			continue;
		pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		while (pool) {
			if (batch) {
				if (pool->min_dcs)
					claim_zero
					(mlx5_devx_cmd_destroy(pool->min_dcs));
			}
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				if (pool->counters_raw[j].action)
					claim_zero
					(mlx5_glue->destroy_flow_action
					 (pool->counters_raw[j].action));
				if (!batch && pool->counters_raw[j].dcs)
					claim_zero(mlx5_devx_cmd_destroy
						  (pool->counters_raw[j].dcs));
			}
			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
				     next);
			rte_free(pool);
			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		}
		rte_free(sh->cmng.ccont[i].pools);
	}
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Allocate a shared IB device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single-port
 * dedicated IB device, the context will be used only by the given port due
 * to unification.
 *
 * The routine first searches for the context by the specified IB device name.
 * If found, the shared context is assumed and its reference counter is
 * incremented. If no context is found, a new one is created and initialized
 * with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
{
	struct mlx5_ibv_shared *sh;
	int err = 0;
	uint32_t i;

	assert(spawn);
	/* Secondary process should not create the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create a new shared context. */
	assert(spawn->max_port);
	sh = rte_zmalloc("ethdev shared ib context",
			 sizeof(struct mlx5_ibv_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_ibv_shared_port),
			 RTE_CACHE_LINE_SIZE);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Try to open IB device with DV first, then usual Verbs. */
	errno = 0;
	sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
	if (sh->ctx) {
		sh->devx = 1;
		DRV_LOG(DEBUG, "DevX is supported");
	} else {
		sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
		if (!sh->ctx) {
			err = errno ? errno : ENODEV;
			goto error;
		}
		DRV_LOG(DEBUG, "DevX is NOT supported");
	}
	err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, sh->ctx->device->name,
		sizeof(sh->ibdev_name));
	strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
		sizeof(sh->ibdev_path));
	sh->pci_dev = spawn->pci_dev;
	pthread_mutex_init(&sh->intr_mutex, NULL);
	/*
	 * Setting port_id to the maximum disallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++)
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->mr.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 sh->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_flow_counters_mng_init(sh);
	LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	return sh;
error:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	assert(sh);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
	assert(err > 0);
	rte_errno = err;
	return NULL;
}

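/*
 * Illustration only (not part of the driver): for a dual-port IB device
 * named e.g. "mlx5_0" spawning a master port and a representor, the first
 * call to mlx5_alloc_shared_ibctx() above creates the mlx5_ibv_shared object
 * (refcnt == 1) and the second call merely finds it by name and bumps refcnt
 * to 2; each mlx5_free_shared_ibctx() below then decrements the counter and
 * the resources are released only when it drops back to zero. The device
 * name is a placeholder.
 */
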
/**
 * Free the shared IB device context. Decrement the reference counter and,
 * if it reaches zero, free all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifndef NDEBUG
	/* Check the object presence in the list. */
	struct mlx5_ibv_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_ibv_list, next)
		if (lctx == sh)
			break;
	assert(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	assert(sh);
	assert(sh->refcnt);
	/* Secondary process should not free the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	/* Release created Memory Regions. */
	mlx5_mr_release(sh);
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only the primary process handles async device events.
	 */
	mlx5_flow_counters_mng_close(sh);
	assert(!sh->intr_cnt);
	if (sh->intr_cnt)
		mlx5_intr_callback_unregister
			(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
	pthread_mutex_destroy(&sh->intr_mutex);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}

/**
 * Initialize DR-related data within the private structure.
 * The routine checks the reference counter and performs the actual
 * resource creation/initialization only if the counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh = priv->sh;
	int err = 0;
	void *domain;

	assert(sh);
	if (sh->dv_refcnt) {
		/* Shared DV/DR structures are already initialized. */
		sh->dv_refcnt++;
		priv->dr_shared = 1;
		return 0;
	}
	/* Reference counter is zero, we should initialize structures. */
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!domain) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->rx_domain = domain;
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
	if (!domain) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	pthread_mutex_init(&sh->dv_mutex, NULL);
	sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (priv->config.dv_esw_en) {
		domain = mlx5_glue->dr_create_domain
			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
		if (!domain) {
			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
			err = errno;
			goto error;
		}
		sh->fdb_domain = domain;
		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
	}
#endif
	sh->dv_refcnt++;
	priv->dr_shared = 1;
	return 0;

error:
	/* Rollback the created objects. */
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
	return err;
#else
	(void)priv;
	return 0;
#endif
}

/**
 * Destroy DR-related data within the private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh;

	if (!priv->dr_shared)
		return;
	priv->dr_shared = 0;
	sh = priv->sh;
	assert(sh);
	assert(sh->dv_refcnt);
	if (sh->dv_refcnt && --sh->dv_refcnt)
		return;
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
#endif
	pthread_mutex_destroy(&sh->dv_mutex);
#else
	(void)priv;
#endif
}

/**
 * Initialize shared data between the primary and secondary processes.
 *
 * A memzone is reserved by the primary process and the secondary processes
 * attach to it.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

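/*
 * Illustration only (not part of the driver): a caller can use the helper
 * above to honor a numeric environment variable, e.g.
 *
 *   if (mlx5_getenv_int("MLX5_EXAMPLE_KNOB") > 0)
 *           enable_example_feature();
 *
 * "MLX5_EXAMPLE_KNOB" and enable_example_feature() are placeholders, not
 * names used by this PMD.
 */
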
/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided, residing inside a huge page.
 * Please note that all allocations must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * Initialize the process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_proc_priv *ppriv;
	size_t ppriv_size;

	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size =
		sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
	ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = ppriv_size;
	dev->process_private = ppriv;
	return 0;
}

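/*
 * Illustration only (not part of the driver): with, say, 8 configured Tx
 * queues on a 64-bit build, the allocation above is
 * sizeof(struct mlx5_proc_priv) + 8 * sizeof(void *), i.e. the private
 * header followed by a 64-byte table of per-queue BlueFlame register
 * pointers. The queue count is an arbitrary example value.
 */
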
/**
 * Un-initialize the process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	rte_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	mlx5_flow_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	mlx5_mprq_free_mp(dev);
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	assert(priv->sh);
	LIST_REMOVE(priv->sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	mlx5_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->sh) {
		/*
		 * Free the shared context last, because the cleanup
		 * routines above may use some shared fields, like
		 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
		 * the ifindex if Netlink fails.
		 */
		mlx5_free_shared_ibctx(priv->sh);
		priv->sh = NULL;
	}
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
			" converted to txq_inline_max", key);
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
		config->txq_inline_min = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
			" converted to txq_inline_mpw", key);
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
key) == 0) { 1049db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 1050e2b4925eSOri Kam } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { 1051e2b4925eSOri Kam config->dv_esw_en = !!tmp; 105251e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 105351e72d38SOri Kam config->dv_flow_en = !!tmp; 1054dceb5029SYongseok Koh } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { 1055dceb5029SYongseok Koh config->mr_ext_memseg_en = !!tmp; 1056066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { 1057066cfecdSMatan Azrad config->max_dump_files_num = tmp; 105821bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { 105921bb6c7eSDekel Peled config->lro.timeout = tmp; 106099c12dccSNélio Laranjeiro } else { 1061a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: unknown parameter", key); 1062a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1063a6d83b6aSNélio Laranjeiro return -rte_errno; 1064e72dd09bSNélio Laranjeiro } 106599c12dccSNélio Laranjeiro return 0; 106699c12dccSNélio Laranjeiro } 1067e72dd09bSNélio Laranjeiro 1068e72dd09bSNélio Laranjeiro /** 1069e72dd09bSNélio Laranjeiro * Parse device parameters. 1070e72dd09bSNélio Laranjeiro * 10717fe24446SShahaf Shuler * @param config 10727fe24446SShahaf Shuler * Pointer to device configuration structure. 1073e72dd09bSNélio Laranjeiro * @param devargs 1074e72dd09bSNélio Laranjeiro * Device arguments structure. 1075e72dd09bSNélio Laranjeiro * 1076e72dd09bSNélio Laranjeiro * @return 1077a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1078e72dd09bSNélio Laranjeiro */ 1079e72dd09bSNélio Laranjeiro static int 10807fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1081e72dd09bSNélio Laranjeiro { 1082e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 108399c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1084bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 108578c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 10867d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 10877d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 10887d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 10897d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 10902a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1091505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1092505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1093505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 10942a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 109509d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1096230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 10976ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 10986ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 10995644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 11005644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 110178a54648SXueming Li MLX5_L3_VXLAN_EN, 1102db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1103e2b4925eSOri Kam MLX5_DV_ESW_EN, 110451e72d38SOri Kam MLX5_DV_FLOW_EN, 1105dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 11066de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1107066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 110821bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1109e72dd09bSNélio Laranjeiro NULL, 1110e72dd09bSNélio Laranjeiro }; 1111e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1112e72dd09bSNélio Laranjeiro int ret = 0; 1113e72dd09bSNélio Laranjeiro int i; 1114e72dd09bSNélio Laranjeiro 1115e72dd09bSNélio Laranjeiro if (devargs == NULL) 1116e72dd09bSNélio Laranjeiro return 0; 1117e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. 
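	 * The devargs string itself is a comma separated key=value list given
	 * on the EAL command line for the PCI device, e.g. (hypothetical
	 * address): -w 0000:03:00.0,<key>=<value>[,<key>=<value>...]
	 * where <key> is one of the strings behind the MLX5_* macros listed
	 * in params[] above; values are parsed by mlx5_args_check() with
	 * strtoul().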
*/ 1118e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 111915b0ea00SMatan Azrad if (kvlist == NULL) { 112015b0ea00SMatan Azrad rte_errno = EINVAL; 112115b0ea00SMatan Azrad return -rte_errno; 112215b0ea00SMatan Azrad } 1123e72dd09bSNélio Laranjeiro /* Process parameters. */ 1124e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1125e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1126e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 11277fe24446SShahaf Shuler mlx5_args_check, config); 1128a6d83b6aSNélio Laranjeiro if (ret) { 1129a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1130a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1131a6d83b6aSNélio Laranjeiro return -rte_errno; 1132e72dd09bSNélio Laranjeiro } 1133e72dd09bSNélio Laranjeiro } 1134a67323e4SShahaf Shuler } 1135e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1136e72dd09bSNélio Laranjeiro return 0; 1137e72dd09bSNélio Laranjeiro } 1138e72dd09bSNélio Laranjeiro 1139fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver; 1140771fa900SAdrien Mazarguil 11417be600c8SYongseok Koh /** 11427be600c8SYongseok Koh * PMD global initialization. 11437be600c8SYongseok Koh * 11447be600c8SYongseok Koh * Independent from individual device, this function initializes global 11457be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 11467be600c8SYongseok Koh * Hence, each initialization is called once per a process. 11477be600c8SYongseok Koh * 11487be600c8SYongseok Koh * @return 11497be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 11507be600c8SYongseok Koh */ 11517be600c8SYongseok Koh static int 11527be600c8SYongseok Koh mlx5_init_once(void) 11537be600c8SYongseok Koh { 11547be600c8SYongseok Koh struct mlx5_shared_data *sd; 11557be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 1156edf73dd3SAnatoly Burakov int ret = 0; 11577be600c8SYongseok Koh 11587be600c8SYongseok Koh if (mlx5_init_shared_data()) 11597be600c8SYongseok Koh return -rte_errno; 11607be600c8SYongseok Koh sd = mlx5_shared_data; 11617be600c8SYongseok Koh assert(sd); 11627be600c8SYongseok Koh rte_spinlock_lock(&sd->lock); 11637be600c8SYongseok Koh switch (rte_eal_process_type()) { 11647be600c8SYongseok Koh case RTE_PROC_PRIMARY: 11657be600c8SYongseok Koh if (sd->init_done) 11667be600c8SYongseok Koh break; 11677be600c8SYongseok Koh LIST_INIT(&sd->mem_event_cb_list); 11687be600c8SYongseok Koh rte_rwlock_init(&sd->mem_event_rwlock); 11697be600c8SYongseok Koh rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", 11707be600c8SYongseok Koh mlx5_mr_mem_event_cb, NULL); 1171edf73dd3SAnatoly Burakov ret = mlx5_mp_init_primary(); 1172edf73dd3SAnatoly Burakov if (ret) 1173edf73dd3SAnatoly Burakov goto out; 11747be600c8SYongseok Koh sd->init_done = true; 11757be600c8SYongseok Koh break; 11767be600c8SYongseok Koh case RTE_PROC_SECONDARY: 11777be600c8SYongseok Koh if (ld->init_done) 11787be600c8SYongseok Koh break; 1179edf73dd3SAnatoly Burakov ret = mlx5_mp_init_secondary(); 1180edf73dd3SAnatoly Burakov if (ret) 1181edf73dd3SAnatoly Burakov goto out; 11827be600c8SYongseok Koh ++sd->secondary_cnt; 11837be600c8SYongseok Koh ld->init_done = true; 11847be600c8SYongseok Koh break; 11857be600c8SYongseok Koh default: 11867be600c8SYongseok Koh break; 11877be600c8SYongseok Koh } 1188edf73dd3SAnatoly Burakov out: 11897be600c8SYongseok Koh rte_spinlock_unlock(&sd->lock); 1190edf73dd3SAnatoly Burakov return ret; 
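	/*
	 * Editorial sketch, not part of the driver: a minimal standalone
	 * illustration of the rte_kvargs flow used by mlx5_args() above.
	 * The key name "example_en" and the output variable are hypothetical;
	 * only the <rte_kvargs.h> API already included by this file is
	 * assumed. Kept under "#if 0" because it is illustrative only.
	 */
#if 0
static int
example_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long *out = opaque; /* Hypothetical output location. */

	(void)key;
	errno = 0;
	*out = strtoul(val, NULL, 0);
	return errno ? -errno : 0;
}

static int
example_args_parse(const char *args) /* e.g. "example_en=1" */
{
	const char *const keys[] = { "example_en", NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, keys);
	unsigned long example_en = 0;
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, "example_en",
				 example_args_check, &example_en);
	rte_kvargs_free(kvlist);
	return ret;
}
#endif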
11917be600c8SYongseok Koh } 11927be600c8SYongseok Koh 11937be600c8SYongseok Koh /** 119438b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE 119538b4b397SViacheslav Ovsiienko * while sending packets. 119638b4b397SViacheslav Ovsiienko * 119738b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 119838b4b397SViacheslav Ovsiienko * key is specified in devargs 119938b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 120038b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 120138b4b397SViacheslav Ovsiienko * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX 120238b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs 120338b4b397SViacheslav Ovsiienko * 120438b4b397SViacheslav Ovsiienko * @param spawn 120538b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 120638b4b397SViacheslav Ovsiienko * @param config 120738b4b397SViacheslav Ovsiienko * Device configuration parameters. 120838b4b397SViacheslav Ovsiienko */ 120938b4b397SViacheslav Ovsiienko static void 121038b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, 121138b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config) 121238b4b397SViacheslav Ovsiienko { 121338b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) { 121438b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. */ 121538b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 121638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 121738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 121838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 121938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 122038b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 122138b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 122238b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 122338b4b397SViacheslav Ovsiienko "txq_inline_mix aligned to minimal" 122438b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 122538b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 122638b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 122738b4b397SViacheslav Ovsiienko } 122838b4b397SViacheslav Ovsiienko break; 122938b4b397SViacheslav Ovsiienko } 123038b4b397SViacheslav Ovsiienko goto exit; 123138b4b397SViacheslav Ovsiienko } 123238b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 123338b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 123438b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 123538b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 123638b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 123738b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 123838b4b397SViacheslav Ovsiienko goto exit; 123938b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 124038b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
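			 * (The MLX5_TXQ_INLINE_MIN devargs still takes
			 * priority: it is handled at the top of this function,
			 * so forcing e.g. 18 bytes there keeps L2 inlining
			 * even when the device reports that none is required.)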
*/ 124138b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 124238b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 124338b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 124438b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 124538b4b397SViacheslav Ovsiienko goto exit; 124638b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 124738b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 124838b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 124938b4b397SViacheslav Ovsiienko break; 125038b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 125138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 125238b4b397SViacheslav Ovsiienko config->txq_inline_min = 125338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 125438b4b397SViacheslav Ovsiienko goto exit; 125538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 125638b4b397SViacheslav Ovsiienko config->txq_inline_min = 125738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 125838b4b397SViacheslav Ovsiienko goto exit; 125938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 126038b4b397SViacheslav Ovsiienko config->txq_inline_min = 126138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 126238b4b397SViacheslav Ovsiienko goto exit; 126338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 126438b4b397SViacheslav Ovsiienko config->txq_inline_min = 126538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 126638b4b397SViacheslav Ovsiienko goto exit; 126738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 126838b4b397SViacheslav Ovsiienko config->txq_inline_min = 126938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 127038b4b397SViacheslav Ovsiienko goto exit; 127138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 127238b4b397SViacheslav Ovsiienko config->txq_inline_min = 127338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 127438b4b397SViacheslav Ovsiienko goto exit; 127538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 127638b4b397SViacheslav Ovsiienko config->txq_inline_min = 127738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 127838b4b397SViacheslav Ovsiienko goto exit; 127938b4b397SViacheslav Ovsiienko } 128038b4b397SViacheslav Ovsiienko } 128138b4b397SViacheslav Ovsiienko } 128238b4b397SViacheslav Ovsiienko /* 128338b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 128438b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 128538b4b397SViacheslav Ovsiienko * to determine old NICs. 
128638b4b397SViacheslav Ovsiienko */ 128738b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 128838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 128938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 129038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 129138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 129238b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 129338b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 129438b4b397SViacheslav Ovsiienko break; 129538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 129638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 129738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 129838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 129938b4b397SViacheslav Ovsiienko /* 130038b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 130138b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 130238b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 130338b4b397SViacheslav Ovsiienko */ 130438b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 130538b4b397SViacheslav Ovsiienko break; 130638b4b397SViacheslav Ovsiienko default: 130738b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 130838b4b397SViacheslav Ovsiienko break; 130938b4b397SViacheslav Ovsiienko } 131038b4b397SViacheslav Ovsiienko exit: 131138b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 131238b4b397SViacheslav Ovsiienko } 131338b4b397SViacheslav Ovsiienko 131438b4b397SViacheslav Ovsiienko /** 1315*21cae858SDekel Peled * Allocate page of door-bells and register it using DevX API. 1316*21cae858SDekel Peled * 1317*21cae858SDekel Peled * @param [in] dev 1318*21cae858SDekel Peled * Pointer to Ethernet device. 1319*21cae858SDekel Peled * 1320*21cae858SDekel Peled * @return 1321*21cae858SDekel Peled * Pointer to new page on success, NULL otherwise. 1322*21cae858SDekel Peled */ 1323*21cae858SDekel Peled static struct mlx5_devx_dbr_page * 1324*21cae858SDekel Peled mlx5_alloc_dbr_page(struct rte_eth_dev *dev) 1325*21cae858SDekel Peled { 1326*21cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 1327*21cae858SDekel Peled struct mlx5_devx_dbr_page *page; 1328*21cae858SDekel Peled 1329*21cae858SDekel Peled /* Allocate space for door-bell page and management data. */ 1330*21cae858SDekel Peled page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page), 1331*21cae858SDekel Peled RTE_CACHE_LINE_SIZE, dev->device->numa_node); 1332*21cae858SDekel Peled if (!page) { 1333*21cae858SDekel Peled DRV_LOG(ERR, "port %u cannot allocate dbr page", 1334*21cae858SDekel Peled dev->data->port_id); 1335*21cae858SDekel Peled return NULL; 1336*21cae858SDekel Peled } 1337*21cae858SDekel Peled /* Register allocated memory. 
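	 * The page is MLX5_DBR_PAGE_SIZE bytes and holds MLX5_DBR_PER_PAGE
	 * door-bell records (the offset math in mlx5_get_dbr() assumes 8-byte
	 * records), with dbr_bitmap[] tracking which records are in use.
	 * Registering it as a DevX umem lets DevX-created queue objects refer
	 * to a door-bell by the umem id plus a byte offset into this page.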
*/ 1338*21cae858SDekel Peled page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs, 1339*21cae858SDekel Peled MLX5_DBR_PAGE_SIZE, 0); 1340*21cae858SDekel Peled if (!page->umem) { 1341*21cae858SDekel Peled DRV_LOG(ERR, "port %u cannot umem reg dbr page", 1342*21cae858SDekel Peled dev->data->port_id); 1343*21cae858SDekel Peled rte_free(page); 1344*21cae858SDekel Peled return NULL; 1345*21cae858SDekel Peled } 1346*21cae858SDekel Peled return page; 1347*21cae858SDekel Peled } 1348*21cae858SDekel Peled 1349*21cae858SDekel Peled /** 1350*21cae858SDekel Peled * Find the next available door-bell, allocate new page if needed. 1351*21cae858SDekel Peled * 1352*21cae858SDekel Peled * @param [in] dev 1353*21cae858SDekel Peled * Pointer to Ethernet device. 1354*21cae858SDekel Peled * @param [out] dbr_page 1355*21cae858SDekel Peled * Door-bell page containing the page data. 1356*21cae858SDekel Peled * 1357*21cae858SDekel Peled * @return 1358*21cae858SDekel Peled * Door-bell address offset on success, a negative error value otherwise. 1359*21cae858SDekel Peled */ 1360*21cae858SDekel Peled int64_t 1361*21cae858SDekel Peled mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) 1362*21cae858SDekel Peled { 1363*21cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 1364*21cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 1365*21cae858SDekel Peled uint32_t i, j; 1366*21cae858SDekel Peled 1367*21cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 1368*21cae858SDekel Peled if (page->dbr_count < MLX5_DBR_PER_PAGE) 1369*21cae858SDekel Peled break; 1370*21cae858SDekel Peled if (!page) { /* No page with free door-bell exists. */ 1371*21cae858SDekel Peled page = mlx5_alloc_dbr_page(dev); 1372*21cae858SDekel Peled if (!page) /* Failed to allocate new page. */ 1373*21cae858SDekel Peled return (-1); 1374*21cae858SDekel Peled LIST_INSERT_HEAD(&priv->dbrpgs, page, next); 1375*21cae858SDekel Peled } 1376*21cae858SDekel Peled /* Loop to find bitmap part with clear bit. */ 1377*21cae858SDekel Peled for (i = 0; 1378*21cae858SDekel Peled i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX; 1379*21cae858SDekel Peled i++) 1380*21cae858SDekel Peled ; /* Empty. */ 1381*21cae858SDekel Peled /* Find the first clear bit. */ 1382*21cae858SDekel Peled j = rte_bsf64(~page->dbr_bitmap[i]); 1383*21cae858SDekel Peled assert(i < (MLX5_DBR_PER_PAGE / 64)); 1384*21cae858SDekel Peled page->dbr_bitmap[i] |= (1 << j); 1385*21cae858SDekel Peled page->dbr_count++; 1386*21cae858SDekel Peled *dbr_page = page; 1387*21cae858SDekel Peled return (((i * 64) + j) * sizeof(uint64_t)); 1388*21cae858SDekel Peled } 1389*21cae858SDekel Peled 1390*21cae858SDekel Peled /** 1391*21cae858SDekel Peled * Release a door-bell record. 1392*21cae858SDekel Peled * 1393*21cae858SDekel Peled * @param [in] dev 1394*21cae858SDekel Peled * Pointer to Ethernet device. 1395*21cae858SDekel Peled * @param [in] umem_id 1396*21cae858SDekel Peled * UMEM ID of page containing the door-bell record to release. 1397*21cae858SDekel Peled * @param [in] offset 1398*21cae858SDekel Peled * Offset of door-bell record in page. 1399*21cae858SDekel Peled * 1400*21cae858SDekel Peled * @return 1401*21cae858SDekel Peled * 0 on success, a negative error value otherwise. 
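 *
 * Note (editorial): the offset is the byte offset returned by mlx5_get_dbr(),
 * i.e. bit index * sizeof(uint64_t); for instance the 65th record of a page
 * (bit index 64) sits at byte offset 512. The bitmap update below divides
 * that byte offset by 64 directly, and both this function and mlx5_get_dbr()
 * build the mask with a plain int shift (1 << j), which is undefined once j
 * reaches 31. Converting the offset back to a bit index first
 * (offset / sizeof(uint64_t)) and using 1ULL << j appears necessary for
 * records past the first 64; worth double checking against the allocator.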
1402*21cae858SDekel Peled */ 1403*21cae858SDekel Peled int32_t 1404*21cae858SDekel Peled mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) 1405*21cae858SDekel Peled { 1406*21cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 1407*21cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 1408*21cae858SDekel Peled int ret = 0; 1409*21cae858SDekel Peled 1410*21cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 1411*21cae858SDekel Peled /* Find the page this address belongs to. */ 1412*21cae858SDekel Peled if (page->umem->umem_id == umem_id) 1413*21cae858SDekel Peled break; 1414*21cae858SDekel Peled if (!page) 1415*21cae858SDekel Peled return -EINVAL; 1416*21cae858SDekel Peled page->dbr_count--; 1417*21cae858SDekel Peled if (!page->dbr_count) { 1418*21cae858SDekel Peled /* Page not used, free it and remove from list. */ 1419*21cae858SDekel Peled LIST_REMOVE(page, next); 1420*21cae858SDekel Peled if (page->umem) 1421*21cae858SDekel Peled ret = -mlx5_glue->devx_umem_dereg(page->umem); 1422*21cae858SDekel Peled rte_free(page); 1423*21cae858SDekel Peled } else { 1424*21cae858SDekel Peled /* Mark in bitmap that this door-bell is not in use. */ 1425*21cae858SDekel Peled int i = offset / 64; 1426*21cae858SDekel Peled int j = offset % 64; 1427*21cae858SDekel Peled 1428*21cae858SDekel Peled page->dbr_bitmap[i] &= ~(1 << j); 1429*21cae858SDekel Peled } 1430*21cae858SDekel Peled return ret; 1431*21cae858SDekel Peled } 1432*21cae858SDekel Peled 1433*21cae858SDekel Peled /** 1434f38c5457SAdrien Mazarguil * Spawn an Ethernet device from Verbs information. 1435771fa900SAdrien Mazarguil * 1436f38c5457SAdrien Mazarguil * @param dpdk_dev 1437f38c5457SAdrien Mazarguil * Backing DPDK device. 1438ad74bc61SViacheslav Ovsiienko * @param spawn 1439ad74bc61SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 1440f87bfa8eSYongseok Koh * @param config 1441f87bfa8eSYongseok Koh * Device configuration parameters. 1442771fa900SAdrien Mazarguil * 1443771fa900SAdrien Mazarguil * @return 1444f38c5457SAdrien Mazarguil * A valid Ethernet device object on success, NULL otherwise and rte_errno 1445206254b7SOphir Munk * is set. The following errors are defined: 14466de569f5SAdrien Mazarguil * 14476de569f5SAdrien Mazarguil * EBUSY: device is not supposed to be spawned. 
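 *   (for example, when the representor devargs filter handled at the top of
 *   this function lists specific representor ports and this port's index is
 *   not among them)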
1448206254b7SOphir Munk * EEXIST: device is already spawned 1449771fa900SAdrien Mazarguil */ 1450f38c5457SAdrien Mazarguil static struct rte_eth_dev * 1451f38c5457SAdrien Mazarguil mlx5_dev_spawn(struct rte_device *dpdk_dev, 1452ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data *spawn, 1453ad74bc61SViacheslav Ovsiienko struct mlx5_dev_config config) 1454771fa900SAdrien Mazarguil { 1455ad74bc61SViacheslav Ovsiienko const struct mlx5_switch_info *switch_info = &spawn->info; 145617e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = NULL; 145768128934SAdrien Mazarguil struct ibv_port_attr port_attr; 14586057a10bSAdrien Mazarguil struct mlx5dv_context dv_attr = { .comp_mask = 0 }; 14599083982cSAdrien Mazarguil struct rte_eth_dev *eth_dev = NULL; 1460dbeba4cfSThomas Monjalon struct mlx5_priv *priv = NULL; 1461771fa900SAdrien Mazarguil int err = 0; 146278c7a16dSYongseok Koh unsigned int hw_padding = 0; 1463e192ef80SYaacov Hazan unsigned int mps; 1464523f5a74SYongseok Koh unsigned int cqe_comp; 1465bc91e8dbSYongseok Koh unsigned int cqe_pad = 0; 1466772d3435SXueming Li unsigned int tunnel_en = 0; 14671f106da2SMatan Azrad unsigned int mpls_en = 0; 14685f8ba81cSXueming Li unsigned int swp = 0; 14697d6bf6b8SYongseok Koh unsigned int mprq = 0; 14707d6bf6b8SYongseok Koh unsigned int mprq_min_stride_size_n = 0; 14717d6bf6b8SYongseok Koh unsigned int mprq_max_stride_size_n = 0; 14727d6bf6b8SYongseok Koh unsigned int mprq_min_stride_num_n = 0; 14737d6bf6b8SYongseok Koh unsigned int mprq_max_stride_num_n = 0; 14746d13ea8eSOlivier Matz struct rte_ether_addr mac; 147568128934SAdrien Mazarguil char name[RTE_ETH_NAME_MAX_LEN]; 14762b730263SAdrien Mazarguil int own_domain_id = 0; 1477206254b7SOphir Munk uint16_t port_id; 14782b730263SAdrien Mazarguil unsigned int i; 1479771fa900SAdrien Mazarguil 14806de569f5SAdrien Mazarguil /* Determine if this port representor is supposed to be spawned. */ 14816de569f5SAdrien Mazarguil if (switch_info->representor && dpdk_dev->devargs) { 14826de569f5SAdrien Mazarguil struct rte_eth_devargs eth_da; 14836de569f5SAdrien Mazarguil 14846de569f5SAdrien Mazarguil err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); 14856de569f5SAdrien Mazarguil if (err) { 14866de569f5SAdrien Mazarguil rte_errno = -err; 14876de569f5SAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 14886de569f5SAdrien Mazarguil strerror(rte_errno)); 14896de569f5SAdrien Mazarguil return NULL; 14906de569f5SAdrien Mazarguil } 14916de569f5SAdrien Mazarguil for (i = 0; i < eth_da.nb_representor_ports; ++i) 14926de569f5SAdrien Mazarguil if (eth_da.representor_ports[i] == 14936de569f5SAdrien Mazarguil (uint16_t)switch_info->port_name) 14946de569f5SAdrien Mazarguil break; 14956de569f5SAdrien Mazarguil if (i == eth_da.nb_representor_ports) { 14966de569f5SAdrien Mazarguil rte_errno = EBUSY; 14976de569f5SAdrien Mazarguil return NULL; 14986de569f5SAdrien Mazarguil } 14996de569f5SAdrien Mazarguil } 1500206254b7SOphir Munk /* Build device name. 
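	 * The master/uplink reuses the backing device name while a
	 * representor gets "<name>_representor_<port>", e.g. a hypothetical
	 * PCI device 0000:03:00.0 with representor port 2 becomes
	 * "0000:03:00.0_representor_2"; this name is then used right below to
	 * detect an already spawned port.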
*/ 1501206254b7SOphir Munk if (!switch_info->representor) 150209c9c4d2SThomas Monjalon strlcpy(name, dpdk_dev->name, sizeof(name)); 1503206254b7SOphir Munk else 1504206254b7SOphir Munk snprintf(name, sizeof(name), "%s_representor_%u", 1505206254b7SOphir Munk dpdk_dev->name, switch_info->port_name); 1506206254b7SOphir Munk /* check if the device is already spawned */ 1507206254b7SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 1508206254b7SOphir Munk rte_errno = EEXIST; 1509206254b7SOphir Munk return NULL; 1510206254b7SOphir Munk } 151117e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 151217e19bc4SViacheslav Ovsiienko if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 151317e19bc4SViacheslav Ovsiienko eth_dev = rte_eth_dev_attach_secondary(name); 151417e19bc4SViacheslav Ovsiienko if (eth_dev == NULL) { 151517e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "can not attach rte ethdev"); 151617e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 1517f38c5457SAdrien Mazarguil return NULL; 1518771fa900SAdrien Mazarguil } 151917e19bc4SViacheslav Ovsiienko eth_dev->device = dpdk_dev; 152017e19bc4SViacheslav Ovsiienko eth_dev->dev_ops = &mlx5_dev_sec_ops; 1521120dc4a7SYongseok Koh err = mlx5_proc_priv_init(eth_dev); 1522120dc4a7SYongseok Koh if (err) 1523120dc4a7SYongseok Koh return NULL; 152417e19bc4SViacheslav Ovsiienko /* Receive command fd from primary process */ 15259a8ab29bSYongseok Koh err = mlx5_mp_req_verbs_cmd_fd(eth_dev); 152617e19bc4SViacheslav Ovsiienko if (err < 0) 152717e19bc4SViacheslav Ovsiienko return NULL; 152817e19bc4SViacheslav Ovsiienko /* Remap UAR for Tx queues. */ 1529120dc4a7SYongseok Koh err = mlx5_tx_uar_init_secondary(eth_dev, err); 153017e19bc4SViacheslav Ovsiienko if (err) 153117e19bc4SViacheslav Ovsiienko return NULL; 153217e19bc4SViacheslav Ovsiienko /* 153317e19bc4SViacheslav Ovsiienko * Ethdev pointer is still required as input since 153417e19bc4SViacheslav Ovsiienko * the primary device is not accessible from the 153517e19bc4SViacheslav Ovsiienko * secondary process. 153617e19bc4SViacheslav Ovsiienko */ 153717e19bc4SViacheslav Ovsiienko eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); 153817e19bc4SViacheslav Ovsiienko eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); 153917e19bc4SViacheslav Ovsiienko return eth_dev; 1540f5bf91deSMoti Haimovsky } 154117e19bc4SViacheslav Ovsiienko sh = mlx5_alloc_shared_ibctx(spawn); 154217e19bc4SViacheslav Ovsiienko if (!sh) 154317e19bc4SViacheslav Ovsiienko return NULL; 154417e19bc4SViacheslav Ovsiienko config.devx = sh->devx; 15453075bd23SDekel Peled #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR 15463075bd23SDekel Peled config.dest_tir = 1; 15473075bd23SDekel Peled #endif 15485f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 15496057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; 15505f8ba81cSXueming Li #endif 155143e9d979SShachar Beiser /* 155243e9d979SShachar Beiser * Multi-packet send is supported by ConnectX-4 Lx PF as well 155343e9d979SShachar Beiser * as all ConnectX-5 devices. 
155443e9d979SShachar Beiser */ 1555038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 15566057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; 1557038e7251SShahaf Shuler #endif 15587d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 15596057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; 15607d6bf6b8SYongseok Koh #endif 156117e19bc4SViacheslav Ovsiienko mlx5_glue->dv_query_device(sh->ctx, &dv_attr); 15626057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { 15636057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { 1564a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "enhanced MPW is supported"); 156543e9d979SShachar Beiser mps = MLX5_MPW_ENHANCED; 156643e9d979SShachar Beiser } else { 1567a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW is supported"); 1568e589960cSYongseok Koh mps = MLX5_MPW; 1569e589960cSYongseok Koh } 1570e589960cSYongseok Koh } else { 1571a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW isn't supported"); 157243e9d979SShachar Beiser mps = MLX5_MPW_DISABLED; 157343e9d979SShachar Beiser } 15745f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 15756057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) 15766057a10bSAdrien Mazarguil swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; 15775f8ba81cSXueming Li DRV_LOG(DEBUG, "SWP support: %u", swp); 15785f8ba81cSXueming Li #endif 157968128934SAdrien Mazarguil config.swp = !!swp; 15807d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 15816057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { 15827d6bf6b8SYongseok Koh struct mlx5dv_striding_rq_caps mprq_caps = 15836057a10bSAdrien Mazarguil dv_attr.striding_rq_caps; 15847d6bf6b8SYongseok Koh 15857d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", 15867d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes); 15877d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", 15887d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes); 15897d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", 15907d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides); 15917d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", 15927d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides); 15937d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tsupported_qpts: %d", 15947d6bf6b8SYongseok Koh mprq_caps.supported_qpts); 15957d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); 15967d6bf6b8SYongseok Koh mprq = 1; 15977d6bf6b8SYongseok Koh mprq_min_stride_size_n = 15987d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes; 15997d6bf6b8SYongseok Koh mprq_max_stride_size_n = 16007d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes; 16017d6bf6b8SYongseok Koh mprq_min_stride_num_n = 16027d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides; 16037d6bf6b8SYongseok Koh mprq_max_stride_num_n = 16047d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides; 160568128934SAdrien Mazarguil config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 160668128934SAdrien Mazarguil mprq_min_stride_num_n); 16077d6bf6b8SYongseok Koh } 16087d6bf6b8SYongseok Koh #endif 1609523f5a74SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128 && 16106057a10bSAdrien Mazarguil !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) 1611523f5a74SYongseok Koh cqe_comp = 0; 
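	/*
	 * (On 128-byte cache line targets CQE compression stays off unless
	 * the device reports support for compressed 128B CQEs; otherwise it
	 * defaults to on and can still be disabled through the
	 * MLX5_RXQ_CQE_COMP_EN devargs handled in mlx5_args_check().)
	 */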
1612523f5a74SYongseok Koh else 1613523f5a74SYongseok Koh cqe_comp = 1; 161468128934SAdrien Mazarguil config.cqe_comp = cqe_comp; 1615bc91e8dbSYongseok Koh #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1616bc91e8dbSYongseok Koh /* Whether device supports 128B Rx CQE padding. */ 1617bc91e8dbSYongseok Koh cqe_pad = RTE_CACHE_LINE_SIZE == 128 && 1618bc91e8dbSYongseok Koh (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); 1619bc91e8dbSYongseok Koh #endif 1620038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 16216057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { 16226057a10bSAdrien Mazarguil tunnel_en = ((dv_attr.tunnel_offloads_caps & 1623038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && 16246057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 1625038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); 1626038e7251SShahaf Shuler } 1627a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "tunnel offloading is %ssupported", 1628a170a30dSNélio Laranjeiro tunnel_en ? "" : "not "); 1629038e7251SShahaf Shuler #else 1630a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 1631a170a30dSNélio Laranjeiro "tunnel offloading disabled due to old OFED/rdma-core version"); 1632038e7251SShahaf Shuler #endif 163368128934SAdrien Mazarguil config.tunnel_en = tunnel_en; 16341f106da2SMatan Azrad #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 16356057a10bSAdrien Mazarguil mpls_en = ((dv_attr.tunnel_offloads_caps & 16361f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && 16376057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 16381f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); 16391f106da2SMatan Azrad DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", 16401f106da2SMatan Azrad mpls_en ? "" : "not "); 16411f106da2SMatan Azrad #else 16421f106da2SMatan Azrad DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" 16431f106da2SMatan Azrad " old OFED/rdma-core version or firmware configuration"); 16441f106da2SMatan Azrad #endif 164568128934SAdrien Mazarguil config.mpls_en = mpls_en; 1646771fa900SAdrien Mazarguil /* Check port status. */ 164717e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); 1648771fa900SAdrien Mazarguil if (err) { 1649a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port query failed: %s", strerror(err)); 16509083982cSAdrien Mazarguil goto error; 1651771fa900SAdrien Mazarguil } 16521371f4dfSOr Ami if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 16539083982cSAdrien Mazarguil DRV_LOG(ERR, "port is not configured in Ethernet mode"); 1654e1c3e305SMatan Azrad err = EINVAL; 16559083982cSAdrien Mazarguil goto error; 16561371f4dfSOr Ami } 1657771fa900SAdrien Mazarguil if (port_attr.state != IBV_PORT_ACTIVE) 16589083982cSAdrien Mazarguil DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 1659a170a30dSNélio Laranjeiro mlx5_glue->port_state_str(port_attr.state), 1660771fa900SAdrien Mazarguil port_attr.state); 166117e19bc4SViacheslav Ovsiienko /* Allocate private eth device data. 
*/ 1662771fa900SAdrien Mazarguil priv = rte_zmalloc("ethdev private structure", 1663771fa900SAdrien Mazarguil sizeof(*priv), 1664771fa900SAdrien Mazarguil RTE_CACHE_LINE_SIZE); 1665771fa900SAdrien Mazarguil if (priv == NULL) { 1666a170a30dSNélio Laranjeiro DRV_LOG(ERR, "priv allocation failure"); 1667771fa900SAdrien Mazarguil err = ENOMEM; 16689083982cSAdrien Mazarguil goto error; 1669771fa900SAdrien Mazarguil } 167017e19bc4SViacheslav Ovsiienko priv->sh = sh; 167117e19bc4SViacheslav Ovsiienko priv->ibv_port = spawn->ibv_port; 167235b2d13fSOlivier Matz priv->mtu = RTE_ETHER_MTU; 16736bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64 16746bf10ab6SMoti Haimovsky /* Initialize UAR access locks for 32bit implementations. */ 16756bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock_cq); 16766bf10ab6SMoti Haimovsky for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) 16776bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock[i]); 16786bf10ab6SMoti Haimovsky #endif 167926c08b97SAdrien Mazarguil /* Some internal functions rely on Netlink sockets, open them now. */ 16805366074bSNelio Laranjeiro priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 16815366074bSNelio Laranjeiro priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 168226c08b97SAdrien Mazarguil priv->nl_sn = 0; 16832b730263SAdrien Mazarguil priv->representor = !!switch_info->representor; 1684299d7dc2SViacheslav Ovsiienko priv->master = !!switch_info->master; 16852b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 1686299d7dc2SViacheslav Ovsiienko /* 1687299d7dc2SViacheslav Ovsiienko * Currently we support single E-Switch per PF configurations 1688299d7dc2SViacheslav Ovsiienko * only and vport_id field contains the vport index for 1689299d7dc2SViacheslav Ovsiienko * associated VF, which is deduced from representor port name. 1690ae4eb7dcSViacheslav Ovsiienko * For example, let's have the IB device port 10, it has 1691299d7dc2SViacheslav Ovsiienko * attached network device eth0, which has port name attribute 1692299d7dc2SViacheslav Ovsiienko * pf0vf2, we can deduce the VF number as 2, and set vport index 1693299d7dc2SViacheslav Ovsiienko * as 3 (2+1). This assigning schema should be changed if the 1694299d7dc2SViacheslav Ovsiienko * multiple E-Switch instances per PF configurations or/and PCI 1695299d7dc2SViacheslav Ovsiienko * subfunctions are added. 1696299d7dc2SViacheslav Ovsiienko */ 1697299d7dc2SViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 1698299d7dc2SViacheslav Ovsiienko switch_info->port_name + 1 : -1; 1699299d7dc2SViacheslav Ovsiienko /* representor_id field keeps the unmodified port/VF index. */ 1700299d7dc2SViacheslav Ovsiienko priv->representor_id = switch_info->representor ? 1701299d7dc2SViacheslav Ovsiienko switch_info->port_name : -1; 17022b730263SAdrien Mazarguil /* 17032b730263SAdrien Mazarguil * Look for sibling devices in order to reuse their switch domain 17042b730263SAdrien Mazarguil * if any, otherwise allocate one. 
17052b730263SAdrien Mazarguil */ 1706d874a4eeSThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) { 1707dbeba4cfSThomas Monjalon const struct mlx5_priv *opriv = 1708d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 17092b730263SAdrien Mazarguil 17102b730263SAdrien Mazarguil if (!opriv || 17112b730263SAdrien Mazarguil opriv->domain_id == 17122b730263SAdrien Mazarguil RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 17132b730263SAdrien Mazarguil continue; 17142b730263SAdrien Mazarguil priv->domain_id = opriv->domain_id; 17152b730263SAdrien Mazarguil break; 17162b730263SAdrien Mazarguil } 17172b730263SAdrien Mazarguil if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 17182b730263SAdrien Mazarguil err = rte_eth_switch_domain_alloc(&priv->domain_id); 17192b730263SAdrien Mazarguil if (err) { 17202b730263SAdrien Mazarguil err = rte_errno; 17212b730263SAdrien Mazarguil DRV_LOG(ERR, "unable to allocate switch domain: %s", 17222b730263SAdrien Mazarguil strerror(rte_errno)); 17232b730263SAdrien Mazarguil goto error; 17242b730263SAdrien Mazarguil } 17252b730263SAdrien Mazarguil own_domain_id = 1; 17262b730263SAdrien Mazarguil } 1727f38c5457SAdrien Mazarguil err = mlx5_args(&config, dpdk_dev->devargs); 1728e72dd09bSNélio Laranjeiro if (err) { 1729012ad994SShahaf Shuler err = rte_errno; 173093068a9dSAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 173193068a9dSAdrien Mazarguil strerror(rte_errno)); 17329083982cSAdrien Mazarguil goto error; 1733e72dd09bSNélio Laranjeiro } 173417e19bc4SViacheslav Ovsiienko config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & 173517e19bc4SViacheslav Ovsiienko IBV_DEVICE_RAW_IP_CSUM); 1736a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "checksum offloading is %ssupported", 17377fe24446SShahaf Shuler (config.hw_csum ? "" : "not ")); 17382dd8b721SViacheslav Ovsiienko #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 17392dd8b721SViacheslav Ovsiienko !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 17402dd8b721SViacheslav Ovsiienko DRV_LOG(DEBUG, "counters are not supported"); 17419a761de8SOri Kam #endif 174258b1312eSYongseok Koh #ifndef HAVE_IBV_FLOW_DV_SUPPORT 174358b1312eSYongseok Koh if (config.dv_flow_en) { 174458b1312eSYongseok Koh DRV_LOG(WARNING, "DV flow is not supported"); 174558b1312eSYongseok Koh config.dv_flow_en = 0; 174658b1312eSYongseok Koh } 174758b1312eSYongseok Koh #endif 17487fe24446SShahaf Shuler config.ind_table_max_size = 174917e19bc4SViacheslav Ovsiienko sh->device_attr.rss_caps.max_rwq_indirection_table_size; 175068128934SAdrien Mazarguil /* 175168128934SAdrien Mazarguil * Remove this check once DPDK supports larger/variable 175268128934SAdrien Mazarguil * indirection tables. 175368128934SAdrien Mazarguil */ 175468128934SAdrien Mazarguil if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 17557fe24446SShahaf Shuler config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; 1756a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 17577fe24446SShahaf Shuler config.ind_table_max_size); 175817e19bc4SViacheslav Ovsiienko config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 175943e9d979SShachar Beiser IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 1760a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 17617fe24446SShahaf Shuler (config.hw_vlan_strip ? 
"" : "not ")); 176217e19bc4SViacheslav Ovsiienko config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 1763cd230a3eSShahaf Shuler IBV_RAW_PACKET_CAP_SCATTER_FCS); 1764a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 17657fe24446SShahaf Shuler (config.hw_fcs_strip ? "" : "not ")); 17662014a7fbSYongseok Koh #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 176717e19bc4SViacheslav Ovsiienko hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 17682014a7fbSYongseok Koh #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 176917e19bc4SViacheslav Ovsiienko hw_padding = !!(sh->device_attr.device_cap_flags_ex & 17702014a7fbSYongseok Koh IBV_DEVICE_PCI_WRITE_END_PADDING); 177143e9d979SShachar Beiser #endif 177278c7a16dSYongseok Koh if (config.hw_padding && !hw_padding) { 177378c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 177478c7a16dSYongseok Koh config.hw_padding = 0; 177578c7a16dSYongseok Koh } else if (config.hw_padding) { 177678c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 177778c7a16dSYongseok Koh } 177817e19bc4SViacheslav Ovsiienko config.tso = (sh->device_attr.tso_caps.max_tso > 0 && 177917e19bc4SViacheslav Ovsiienko (sh->device_attr.tso_caps.supported_qpts & 178043e9d979SShachar Beiser (1 << IBV_QPT_RAW_PACKET))); 17817fe24446SShahaf Shuler if (config.tso) 178217e19bc4SViacheslav Ovsiienko config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; 1783f9de8718SShahaf Shuler /* 1784f9de8718SShahaf Shuler * MPW is disabled by default, while the Enhanced MPW is enabled 1785f9de8718SShahaf Shuler * by default. 1786f9de8718SShahaf Shuler */ 1787f9de8718SShahaf Shuler if (config.mps == MLX5_ARG_UNSET) 1788f9de8718SShahaf Shuler config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 1789f9de8718SShahaf Shuler MLX5_MPW_DISABLED; 1790f9de8718SShahaf Shuler else 1791f9de8718SShahaf Shuler config.mps = config.mps ? mps : MLX5_MPW_DISABLED; 1792a170a30dSNélio Laranjeiro DRV_LOG(INFO, "%sMPS is %s", 17930f99970bSNélio Laranjeiro config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", 179468128934SAdrien Mazarguil config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 17957fe24446SShahaf Shuler if (config.cqe_comp && !cqe_comp) { 1796a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 17977fe24446SShahaf Shuler config.cqe_comp = 0; 1798523f5a74SYongseok Koh } 1799bc91e8dbSYongseok Koh if (config.cqe_pad && !cqe_pad) { 1800bc91e8dbSYongseok Koh DRV_LOG(WARNING, "Rx CQE padding isn't supported"); 1801bc91e8dbSYongseok Koh config.cqe_pad = 0; 1802bc91e8dbSYongseok Koh } else if (config.cqe_pad) { 1803bc91e8dbSYongseok Koh DRV_LOG(INFO, "Rx CQE padding is enabled"); 1804bc91e8dbSYongseok Koh } 1805175f1c21SDekel Peled if (config.devx) { 1806175f1c21SDekel Peled priv->counter_fallback = 0; 1807175f1c21SDekel Peled err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr); 1808175f1c21SDekel Peled if (err) { 1809175f1c21SDekel Peled err = -err; 1810175f1c21SDekel Peled goto error; 1811175f1c21SDekel Peled } 1812175f1c21SDekel Peled if (!config.hca_attr.flow_counters_dump) 1813175f1c21SDekel Peled priv->counter_fallback = 1; 1814175f1c21SDekel Peled #ifndef HAVE_IBV_DEVX_ASYNC 1815175f1c21SDekel Peled priv->counter_fallback = 1; 1816175f1c21SDekel Peled #endif 1817175f1c21SDekel Peled if (priv->counter_fallback) 1818175f1c21SDekel Peled DRV_LOG(INFO, "Use fall-back DV counter management\n"); 1819175f1c21SDekel Peled /* Check for LRO support. 
*/ 1820175f1c21SDekel Peled if (config.dest_tir && mprq && config.hca_attr.lro_cap) { 1821175f1c21SDekel Peled /* TBD check tunnel lro caps. */ 1822175f1c21SDekel Peled config.lro.supported = config.hca_attr.lro_cap; 1823175f1c21SDekel Peled DRV_LOG(DEBUG, "Device supports LRO"); 1824175f1c21SDekel Peled /* 1825175f1c21SDekel Peled * If LRO timeout is not configured by application, 1826175f1c21SDekel Peled * use the minimal supported value. 1827175f1c21SDekel Peled */ 1828175f1c21SDekel Peled if (!config.lro.timeout) 1829175f1c21SDekel Peled config.lro.timeout = 1830175f1c21SDekel Peled config.hca_attr.lro_timer_supported_periods[0]; 1831175f1c21SDekel Peled DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 1832175f1c21SDekel Peled config.lro.timeout); 1833175f1c21SDekel Peled config.mprq.enabled = 1; 1834175f1c21SDekel Peled DRV_LOG(DEBUG, "Enable MPRQ for LRO use"); 1835175f1c21SDekel Peled } 1836175f1c21SDekel Peled } 18375c0e2db6SYongseok Koh if (config.mprq.enabled && mprq) { 18387d6bf6b8SYongseok Koh if (config.mprq.stride_num_n > mprq_max_stride_num_n || 18397d6bf6b8SYongseok Koh config.mprq.stride_num_n < mprq_min_stride_num_n) { 18407d6bf6b8SYongseok Koh config.mprq.stride_num_n = 18417d6bf6b8SYongseok Koh RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 18427d6bf6b8SYongseok Koh mprq_min_stride_num_n); 18437d6bf6b8SYongseok Koh DRV_LOG(WARNING, 18447d6bf6b8SYongseok Koh "the number of strides" 18457d6bf6b8SYongseok Koh " for Multi-Packet RQ is out of range," 18467d6bf6b8SYongseok Koh " setting default value (%u)", 18477d6bf6b8SYongseok Koh 1 << config.mprq.stride_num_n); 18487d6bf6b8SYongseok Koh } 18497d6bf6b8SYongseok Koh config.mprq.min_stride_size_n = mprq_min_stride_size_n; 18507d6bf6b8SYongseok Koh config.mprq.max_stride_size_n = mprq_max_stride_size_n; 18515c0e2db6SYongseok Koh } else if (config.mprq.enabled && !mprq) { 18525c0e2db6SYongseok Koh DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 18535c0e2db6SYongseok Koh config.mprq.enabled = 0; 18547d6bf6b8SYongseok Koh } 1855066cfecdSMatan Azrad if (config.max_dump_files_num == 0) 1856066cfecdSMatan Azrad config.max_dump_files_num = 128; 1857af4f09f2SNélio Laranjeiro eth_dev = rte_eth_dev_allocate(name); 1858af4f09f2SNélio Laranjeiro if (eth_dev == NULL) { 1859a170a30dSNélio Laranjeiro DRV_LOG(ERR, "can not allocate rte ethdev"); 1860af4f09f2SNélio Laranjeiro err = ENOMEM; 18619083982cSAdrien Mazarguil goto error; 1862af4f09f2SNélio Laranjeiro } 186315febafdSThomas Monjalon /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ 186415febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1865a7d3c627SThomas Monjalon if (priv->representor) { 18662b730263SAdrien Mazarguil eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 1867a7d3c627SThomas Monjalon eth_dev->data->representor_id = priv->representor_id; 1868a7d3c627SThomas Monjalon } 1869fa2e14d4SViacheslav Ovsiienko /* 1870fa2e14d4SViacheslav Ovsiienko * Store associated network device interface index. This index 1871fa2e14d4SViacheslav Ovsiienko * is permanent throughout the lifetime of device. So, we may store 1872fa2e14d4SViacheslav Ovsiienko * the ifindex here and use the cached value further. 
1873fa2e14d4SViacheslav Ovsiienko */ 1874fa2e14d4SViacheslav Ovsiienko assert(spawn->ifindex); 1875fa2e14d4SViacheslav Ovsiienko priv->if_index = spawn->ifindex; 1876af4f09f2SNélio Laranjeiro eth_dev->data->dev_private = priv; 1877df428ceeSYongseok Koh priv->dev_data = eth_dev->data; 1878af4f09f2SNélio Laranjeiro eth_dev->data->mac_addrs = priv->mac; 1879f38c5457SAdrien Mazarguil eth_dev->device = dpdk_dev; 1880771fa900SAdrien Mazarguil /* Configure the first MAC address by default. */ 1881af4f09f2SNélio Laranjeiro if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 1882a170a30dSNélio Laranjeiro DRV_LOG(ERR, 1883a170a30dSNélio Laranjeiro "port %u cannot get MAC address, is mlx5_en" 1884a170a30dSNélio Laranjeiro " loaded? (errno: %s)", 18858c3c2372SAdrien Mazarguil eth_dev->data->port_id, strerror(rte_errno)); 1886e1c3e305SMatan Azrad err = ENODEV; 18879083982cSAdrien Mazarguil goto error; 1888771fa900SAdrien Mazarguil } 1889a170a30dSNélio Laranjeiro DRV_LOG(INFO, 1890a170a30dSNélio Laranjeiro "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 18910f99970bSNélio Laranjeiro eth_dev->data->port_id, 1892771fa900SAdrien Mazarguil mac.addr_bytes[0], mac.addr_bytes[1], 1893771fa900SAdrien Mazarguil mac.addr_bytes[2], mac.addr_bytes[3], 1894771fa900SAdrien Mazarguil mac.addr_bytes[4], mac.addr_bytes[5]); 1895771fa900SAdrien Mazarguil #ifndef NDEBUG 1896771fa900SAdrien Mazarguil { 1897771fa900SAdrien Mazarguil char ifname[IF_NAMESIZE]; 1898771fa900SAdrien Mazarguil 1899af4f09f2SNélio Laranjeiro if (mlx5_get_ifname(eth_dev, &ifname) == 0) 1900a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 19010f99970bSNélio Laranjeiro eth_dev->data->port_id, ifname); 1902771fa900SAdrien Mazarguil else 1903a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is unknown", 19040f99970bSNélio Laranjeiro eth_dev->data->port_id); 1905771fa900SAdrien Mazarguil } 1906771fa900SAdrien Mazarguil #endif 1907771fa900SAdrien Mazarguil /* Get actual MTU if possible. */ 1908a6d83b6aSNélio Laranjeiro err = mlx5_get_mtu(eth_dev, &priv->mtu); 1909012ad994SShahaf Shuler if (err) { 1910012ad994SShahaf Shuler err = rte_errno; 19119083982cSAdrien Mazarguil goto error; 1912012ad994SShahaf Shuler } 1913a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 1914a170a30dSNélio Laranjeiro priv->mtu); 191568128934SAdrien Mazarguil /* Initialize burst functions to prevent crashes before link-up. */ 1916e313ef4cSShahaf Shuler eth_dev->rx_pkt_burst = removed_rx_burst; 1917e313ef4cSShahaf Shuler eth_dev->tx_pkt_burst = removed_tx_burst; 1918771fa900SAdrien Mazarguil eth_dev->dev_ops = &mlx5_dev_ops; 1919272733b5SNélio Laranjeiro /* Register MAC address. 
*/ 1920272733b5SNélio Laranjeiro claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 1921f87bfa8eSYongseok Koh if (config.vf && config.vf_nl_en) 1922ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_sync(eth_dev); 1923c8ffb8a9SNélio Laranjeiro TAILQ_INIT(&priv->flows); 19241b37f5d8SNélio Laranjeiro TAILQ_INIT(&priv->ctrl_flows); 19251e3a39f7SXueming Li /* Hint libmlx5 to use PMD allocator for data plane resources */ 19261e3a39f7SXueming Li struct mlx5dv_ctx_allocators alctr = { 19271e3a39f7SXueming Li .alloc = &mlx5_alloc_verbs_buf, 19281e3a39f7SXueming Li .free = &mlx5_free_verbs_buf, 19291e3a39f7SXueming Li .data = priv, 19301e3a39f7SXueming Li }; 193117e19bc4SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 193217e19bc4SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 19331e3a39f7SXueming Li (void *)((uintptr_t)&alctr)); 1934771fa900SAdrien Mazarguil /* Bring Ethernet device up. */ 1935a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 19360f99970bSNélio Laranjeiro eth_dev->data->port_id); 19377ba5320bSNélio Laranjeiro mlx5_set_link_up(eth_dev); 1938a85a606cSShahaf Shuler /* 1939a85a606cSShahaf Shuler * Even though the interrupt handler is not installed yet, 1940ae4eb7dcSViacheslav Ovsiienko * interrupts will still trigger on the async_fd from 1941a85a606cSShahaf Shuler * Verbs context returned by ibv_open_device(). 1942a85a606cSShahaf Shuler */ 1943a85a606cSShahaf Shuler mlx5_link_update(eth_dev, 0); 1944e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 1945e2b4925eSOri Kam if (!(config.hca_attr.eswitch_manager && config.dv_flow_en && 1946e2b4925eSOri Kam (switch_info->representor || switch_info->master))) 1947e2b4925eSOri Kam config.dv_esw_en = 0; 1948e2b4925eSOri Kam #else 1949e2b4925eSOri Kam config.dv_esw_en = 0; 1950e2b4925eSOri Kam #endif 195138b4b397SViacheslav Ovsiienko /* Detect minimal data bytes to inline. */ 195238b4b397SViacheslav Ovsiienko mlx5_set_min_inline(spawn, &config); 19537fe24446SShahaf Shuler /* Store device configuration on private structure. */ 19547fe24446SShahaf Shuler priv->config = config; 1955e2b4925eSOri Kam if (config.dv_flow_en) { 1956e2b4925eSOri Kam err = mlx5_alloc_shared_dr(priv); 1957e2b4925eSOri Kam if (err) 1958e2b4925eSOri Kam goto error; 1959e2b4925eSOri Kam } 196078be8852SNelio Laranjeiro /* Supported Verbs flow priority number detection. */ 19612815702bSNelio Laranjeiro err = mlx5_flow_discover_priorities(eth_dev); 19624fb27c1dSViacheslav Ovsiienko if (err < 0) { 19634fb27c1dSViacheslav Ovsiienko err = -err; 19649083982cSAdrien Mazarguil goto error; 19654fb27c1dSViacheslav Ovsiienko } 19662815702bSNelio Laranjeiro priv->config.flow_prio = err; 1967e89c15b6SAdrien Mazarguil /* Add device to memory callback list. 
*/ 1968e89c15b6SAdrien Mazarguil rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 1969e89c15b6SAdrien Mazarguil LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, 1970ccb38153SViacheslav Ovsiienko sh, mem_event_cb); 1971e89c15b6SAdrien Mazarguil rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 1972f38c5457SAdrien Mazarguil return eth_dev; 19739083982cSAdrien Mazarguil error: 197426c08b97SAdrien Mazarguil if (priv) { 1975b2177648SViacheslav Ovsiienko if (priv->sh) 1976b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 197726c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 197826c08b97SAdrien Mazarguil close(priv->nl_socket_route); 197926c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 198026c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 19812b730263SAdrien Mazarguil if (own_domain_id) 19822b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 1983771fa900SAdrien Mazarguil rte_free(priv); 1984e16adf08SThomas Monjalon if (eth_dev != NULL) 1985e16adf08SThomas Monjalon eth_dev->data->dev_private = NULL; 198626c08b97SAdrien Mazarguil } 1987e16adf08SThomas Monjalon if (eth_dev != NULL) { 1988e16adf08SThomas Monjalon /* mac_addrs must not be freed alone because part of dev_private */ 1989e16adf08SThomas Monjalon eth_dev->data->mac_addrs = NULL; 1990690de285SRaslan Darawsheh rte_eth_dev_release_port(eth_dev); 1991e16adf08SThomas Monjalon } 199217e19bc4SViacheslav Ovsiienko if (sh) 199317e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(sh); 1994f38c5457SAdrien Mazarguil assert(err > 0); 1995a6d83b6aSNélio Laranjeiro rte_errno = err; 1996f38c5457SAdrien Mazarguil return NULL; 1997f38c5457SAdrien Mazarguil } 1998f38c5457SAdrien Mazarguil 1999116f90adSAdrien Mazarguil /** 2000116f90adSAdrien Mazarguil * Comparison callback to sort device data. 2001116f90adSAdrien Mazarguil * 2002116f90adSAdrien Mazarguil * This is meant to be used with qsort(). 2003116f90adSAdrien Mazarguil * 2004116f90adSAdrien Mazarguil * @param a[in] 2005116f90adSAdrien Mazarguil * Pointer to pointer to first data object. 2006116f90adSAdrien Mazarguil * @param b[in] 2007116f90adSAdrien Mazarguil * Pointer to pointer to second data object. 2008116f90adSAdrien Mazarguil * 2009116f90adSAdrien Mazarguil * @return 2010116f90adSAdrien Mazarguil * 0 if both objects are equal, less than 0 if the first argument is less 2011116f90adSAdrien Mazarguil * than the second, greater than 0 otherwise. 2012116f90adSAdrien Mazarguil */ 2013116f90adSAdrien Mazarguil static int 2014116f90adSAdrien Mazarguil mlx5_dev_spawn_data_cmp(const void *a, const void *b) 2015116f90adSAdrien Mazarguil { 2016116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_a = 2017116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)a)->info; 2018116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_b = 2019116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)b)->info; 2020116f90adSAdrien Mazarguil int ret; 2021116f90adSAdrien Mazarguil 2022116f90adSAdrien Mazarguil /* Master device first. */ 2023116f90adSAdrien Mazarguil ret = si_b->master - si_a->master; 2024116f90adSAdrien Mazarguil if (ret) 2025116f90adSAdrien Mazarguil return ret; 2026116f90adSAdrien Mazarguil /* Then representor devices. */ 2027116f90adSAdrien Mazarguil ret = si_b->representor - si_a->representor; 2028116f90adSAdrien Mazarguil if (ret) 2029116f90adSAdrien Mazarguil return ret; 2030116f90adSAdrien Mazarguil /* Unidentified devices come last in no specific order. 
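	 * With qsort() this yields, for example: the E-Switch master first,
	 * then representors in ascending port_name order, then any entries
	 * that are neither master nor representor in no particular order.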
*/ 2031116f90adSAdrien Mazarguil if (!si_a->representor) 2032116f90adSAdrien Mazarguil return 0; 2033116f90adSAdrien Mazarguil /* Order representors by name. */ 2034116f90adSAdrien Mazarguil return si_a->port_name - si_b->port_name; 2035116f90adSAdrien Mazarguil } 2036116f90adSAdrien Mazarguil 2037f38c5457SAdrien Mazarguil /** 2038f38c5457SAdrien Mazarguil * DPDK callback to register a PCI device. 2039f38c5457SAdrien Mazarguil * 20402b730263SAdrien Mazarguil * This function spawns Ethernet devices out of a given PCI device. 2041f38c5457SAdrien Mazarguil * 2042f38c5457SAdrien Mazarguil * @param[in] pci_drv 2043f38c5457SAdrien Mazarguil * PCI driver structure (mlx5_driver). 2044f38c5457SAdrien Mazarguil * @param[in] pci_dev 2045f38c5457SAdrien Mazarguil * PCI device information. 2046f38c5457SAdrien Mazarguil * 2047f38c5457SAdrien Mazarguil * @return 2048f38c5457SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 2049f38c5457SAdrien Mazarguil */ 2050f38c5457SAdrien Mazarguil static int 2051f38c5457SAdrien Mazarguil mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2052f38c5457SAdrien Mazarguil struct rte_pci_device *pci_dev) 2053f38c5457SAdrien Mazarguil { 2054f38c5457SAdrien Mazarguil struct ibv_device **ibv_list; 2055ad74bc61SViacheslav Ovsiienko /* 2056ad74bc61SViacheslav Ovsiienko * Number of found IB Devices matching with requested PCI BDF. 2057ad74bc61SViacheslav Ovsiienko * nd != 1 means there are multiple IB devices over the same 2058ad74bc61SViacheslav Ovsiienko * PCI device and we have representors and master. 2059ad74bc61SViacheslav Ovsiienko */ 2060ad74bc61SViacheslav Ovsiienko unsigned int nd = 0; 2061ad74bc61SViacheslav Ovsiienko /* 2062ad74bc61SViacheslav Ovsiienko * Number of found IB device Ports. nd = 1 and np = 1..n means 2063ad74bc61SViacheslav Ovsiienko * we have the single multiport IB device, and there may be 2064ad74bc61SViacheslav Ovsiienko * representors attached to some of found ports. 2065ad74bc61SViacheslav Ovsiienko */ 2066ad74bc61SViacheslav Ovsiienko unsigned int np = 0; 2067ad74bc61SViacheslav Ovsiienko /* 2068ad74bc61SViacheslav Ovsiienko * Number of DPDK ethernet devices to Spawn - either over 2069ad74bc61SViacheslav Ovsiienko * multiple IB devices or multiple ports of single IB device. 2070ad74bc61SViacheslav Ovsiienko * Actually this is the number of iterations to spawn. 2071ad74bc61SViacheslav Ovsiienko */ 2072ad74bc61SViacheslav Ovsiienko unsigned int ns = 0; 2073f87bfa8eSYongseok Koh struct mlx5_dev_config dev_config; 2074f38c5457SAdrien Mazarguil int ret; 2075f38c5457SAdrien Mazarguil 20767be600c8SYongseok Koh ret = mlx5_init_once(); 20777be600c8SYongseok Koh if (ret) { 20787be600c8SYongseok Koh DRV_LOG(ERR, "unable to init PMD global data: %s", 20797be600c8SYongseok Koh strerror(rte_errno)); 20807be600c8SYongseok Koh return -rte_errno; 20817be600c8SYongseok Koh } 2082f38c5457SAdrien Mazarguil assert(pci_drv == &mlx5_driver); 2083f38c5457SAdrien Mazarguil errno = 0; 2084f38c5457SAdrien Mazarguil ibv_list = mlx5_glue->get_device_list(&ret); 2085f38c5457SAdrien Mazarguil if (!ibv_list) { 2086f38c5457SAdrien Mazarguil rte_errno = errno ? 
		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * First scan the list of all InfiniBand devices to find
	 * matching ones and gather them into a list.
	 */
	struct ibv_device *ibv_match[ret + 1];
	int nl_route = -1;
	int nl_rdma = -1;
	unsigned int i;

	while (ret-- > 0) {
		struct rte_pci_addr pci_addr;

		DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
		if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
			continue;
		if (pci_dev->addr.domain != pci_addr.domain ||
		    pci_dev->addr.bus != pci_addr.bus ||
		    pci_dev->addr.devid != pci_addr.devid ||
		    pci_dev->addr.function != pci_addr.function)
			continue;
		DRV_LOG(INFO, "PCI information matches for device \"%s\"",
			ibv_list[ret]->name);
		ibv_match[nd++] = ibv_list[ret];
	}
	ibv_match[nd] = NULL;
	if (!nd) {
		/* No device matches, just complain and bail out. */
		mlx5_glue->free_device_list(ibv_list);
		DRV_LOG(WARNING,
			"no Verbs device matches PCI device " PCI_PRI_FMT ","
			" are kernel drivers loaded?",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
		return ret;
	}
	nl_route = mlx5_nl_init(NETLINK_ROUTE);
	nl_rdma = mlx5_nl_init(NETLINK_RDMA);
	if (nd == 1) {
		/*
		 * The single matching device may have multiple ports.
		 * Each port may be a representor, so check the number
		 * of ports and whether representors exist on them.
		 */
		if (nl_rdma >= 0)
			np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
		if (!np)
			DRV_LOG(WARNING, "can not get IB device \"%s\""
				" ports number", ibv_match[0]->name);
	}
	/*
	 * Now we can determine the maximal
	 * number of devices to be spawned.
	 */
	struct mlx5_dev_spawn_data list[np ? np : nd];

	if (np > 1) {
		/*
		 * A single IB device with multiple ports was found; it
		 * may be the E-Switch master device with representors.
		 * We have to perform identification through the ports.
		 */
		assert(nl_rdma >= 0);
		assert(ns == 0);
		assert(nd == 1);
		for (i = 1; i <= np; ++i) {
			list[ns].max_port = np;
			list[ns].ibv_port = i;
			list[ns].ibv_dev = ibv_match[0];
			list[ns].eth_dev = NULL;
			list[ns].pci_dev = pci_dev;
			list[ns].ifindex = mlx5_nl_ifindex
					(nl_rdma, list[ns].ibv_dev->name, i);
			if (!list[ns].ifindex) {
				/*
				 * No network interface index found for the
				 * specified port, which means there is no
				 * representor on this port. That is OK,
				 * there can be disabled ports, for example
				 * if sriov_numvfs < sriov_totalvfs.
				 */
				continue;
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info
						(nl_route,
						 list[ns].ifindex,
						 &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
						(list[ns].ifindex,
						 &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master))
				ns++;
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the IB device with multiple ports");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	} else {
		/*
		 * The existence of several matching entries (nd > 1) means
		 * port representors have been instantiated. No existing Verbs
		 * call nor sysfs entries can tell them apart; this can only
		 * be done through Netlink calls, assuming kernel drivers are
		 * recent enough to support them.
		 *
		 * In the event of identification failure through Netlink,
		 * try again through sysfs, then:
		 *
		 * 1. A single IB device matches (nd == 1) with a single
		 *    port (np = 0/1) and is not a representor: assume
		 *    no switch support.
		 *
		 * 2. Otherwise no safe assumptions can be made;
		 *    complain louder and bail out.
		 */
		np = 1;
		for (i = 0; i != nd; ++i) {
			memset(&list[ns].info, 0, sizeof(list[ns].info));
			list[ns].max_port = 1;
			list[ns].ibv_port = 1;
			list[ns].ibv_dev = ibv_match[i];
			list[ns].eth_dev = NULL;
			list[ns].pci_dev = pci_dev;
			list[ns].ifindex = 0;
			if (nl_rdma >= 0)
				list[ns].ifindex = mlx5_nl_ifindex
					(nl_rdma, list[ns].ibv_dev->name, 1);
			if (!list[ns].ifindex) {
				char ifname[IF_NAMESIZE];

				/*
				 * Netlink failed, which may happen with an
				 * old ib_core kernel driver (before 4.16).
				 * We can assume the driver is old because
				 * here we are processing single-port IB
				 * devices. Let's try sysfs to retrieve
				 * the ifindex; this method works for the
				 * master device only.
				 */
				if (nd > 1) {
					/*
					 * Multiple devices found: assume
					 * representors, we cannot
					 * distinguish master/representor
					 * nor retrieve the ifindex via
					 * sysfs.
					 */
					continue;
				}
				ret = mlx5_get_master_ifname
					(ibv_match[i]->ibdev_path, &ifname);
				if (!ret)
					list[ns].ifindex =
						if_nametoindex(ifname);
				if (!list[ns].ifindex) {
					/*
					 * No network interface index found
					 * for the specified device, which
					 * means it is neither a representor
					 * nor the master.
					 */
					continue;
				}
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info
						(nl_route,
						 list[ns].ifindex,
						 &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
						(list[ns].ifindex,
						 &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master)) {
				ns++;
			} else if ((nd == 1) &&
				   !list[ns].info.representor &&
				   !list[ns].info.master) {
				/*
				 * Single IB device with one physical
				 * port and an attached network device.
				 * Maybe SR-IOV is not enabled or there
				 * are no representors.
				 */
				DRV_LOG(INFO, "no E-Switch support detected");
				ns++;
				break;
			}
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the multiple IB devices");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	}
	assert(ns);
	/*
	 * Sort list to probe devices in natural order for the user's
	 * convenience (i.e. master first, then representors from lowest
	 * to highest ID).
	 */
	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
	/* Default configuration. */
	dev_config = (struct mlx5_dev_config){
		.hw_padding = 0,
		.mps = MLX5_ARG_UNSET,
		.rx_vec_en = 1,
		.txq_inline_max = MLX5_ARG_UNSET,
		.txq_inline_min = MLX5_ARG_UNSET,
		.txq_inline_mpw = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.vf_nl_en = 1,
		.mr_ext_memseg_en = 1,
		.mprq = {
			.enabled = 0, /* Disabled by default. */
			.stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
		.dv_esw_en = 1,
	};
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		dev_config.vf = 1;
		break;
	default:
		break;
	}
	for (i = 0; i != ns; ++i) {
		uint32_t restore;

		list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
						 &list[i],
						 dev_config);
		if (!list[i].eth_dev) {
			if (rte_errno != EBUSY && rte_errno != EEXIST)
				break;
			/* Device is disabled or already spawned. Ignore it. */
			continue;
		}
		restore = list[i].eth_dev->data->dev_flags;
		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
		/* Restore non-PCI flags cleared by the above call. */
		list[i].eth_dev->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(list[i].eth_dev);
	}
	if (i != ns) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			if (!list[i].eth_dev)
				continue;
			mlx5_dev_close(list[i].eth_dev);
			/* mac_addrs must not be freed, it is in dev_private. */
			list[i].eth_dev->data->mac_addrs = NULL;
			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
exit:
	/*
	 * Do the routine cleanup:
	 * - close opened Netlink sockets
	 * - free the InfiniBand device list
	 */
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	assert(ibv_list);
	mlx5_glue->free_device_list(ibv_list);
	return ret;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	return 0;
}

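/*
 * PCI ID table of the ConnectX-4/4 Lx/5/5 Ex/BlueField/6 physical and
 * virtual functions handled by this PMD; the zeroed .vendor_id entry
 * terminates the table.
 */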
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.remove = mlx5_pci_remove,
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
		     RTE_PCI_DRV_PROBE_AGAIN,
};

#ifdef RTE_IBVERBS_LINK_DLOPEN

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param[out] buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}
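
/*
 * Example (hypothetical path, for illustration only): with
 * RTE_EAL_PMD_PATH defined as "/usr/local/lib/dpdk/pmds",
 * mlx5_glue_path() fills the buffer with
 * "/usr/local/lib/dpdk/pmds-glue"; a last path component of "/", "."
 * or ".." makes it return NULL instead.
 */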

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Initialize driver log type. */
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);

	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
	/*
	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
	 * clean up all the Verbs resources even when the device was removed.
	 */
	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_IBVERBS_LINK_DLOPEN
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");