18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <assert.h> 1059b91becSAdrien Mazarguil #include <dlfcn.h> 11771fa900SAdrien Mazarguil #include <stdint.h> 12771fa900SAdrien Mazarguil #include <stdlib.h> 13e72dd09bSNélio Laranjeiro #include <errno.h> 14771fa900SAdrien Mazarguil #include <net/if.h> 154a984153SXueming Li #include <sys/mman.h> 16ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 17771fa900SAdrien Mazarguil 18771fa900SAdrien Mazarguil /* Verbs header. */ 19771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 20771fa900SAdrien Mazarguil #ifdef PEDANTIC 21fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 22771fa900SAdrien Mazarguil #endif 23771fa900SAdrien Mazarguil #include <infiniband/verbs.h> 24771fa900SAdrien Mazarguil #ifdef PEDANTIC 25fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 26771fa900SAdrien Mazarguil #endif 27771fa900SAdrien Mazarguil 28771fa900SAdrien Mazarguil #include <rte_malloc.h> 29ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 30fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 31771fa900SAdrien Mazarguil #include <rte_pci.h> 32c752998bSGaetan Rivet #include <rte_bus_pci.h> 33771fa900SAdrien Mazarguil #include <rte_common.h> 3459b91becSAdrien Mazarguil #include <rte_config.h> 35e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 36e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 37e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 38f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 39f15db67dSMatan Azrad #include <rte_alarm.h> 40771fa900SAdrien Mazarguil 41771fa900SAdrien Mazarguil #include "mlx5.h" 42771fa900SAdrien Mazarguil #include "mlx5_utils.h" 432e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 44771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 4513d57bd5SAdrien Mazarguil #include "mlx5_defs.h" 460e83b8e5SNelio Laranjeiro #include "mlx5_glue.h" 47974f1e7eSYongseok Koh #include "mlx5_mr.h" 4884c406e7SOri Kam #include "mlx5_flow.h" 49771fa900SAdrien Mazarguil 5099c12dccSNélio Laranjeiro /* Device parameter to enable RX completion queue compression. */ 5199c12dccSNélio Laranjeiro #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" 5299c12dccSNélio Laranjeiro 53bc91e8dbSYongseok Koh /* Device parameter to enable RX completion entry padding to 128B. */ 54bc91e8dbSYongseok Koh #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" 55bc91e8dbSYongseok Koh 5678c7a16dSYongseok Koh /* Device parameter to enable padding Rx packet to cacheline size. */ 5778c7a16dSYongseok Koh #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" 5878c7a16dSYongseok Koh 597d6bf6b8SYongseok Koh /* Device parameter to enable Multi-Packet Rx queue. */ 607d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_EN "mprq_en" 617d6bf6b8SYongseok Koh 627d6bf6b8SYongseok Koh /* Device parameter to configure log 2 of the number of strides for MPRQ. */ 637d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" 647d6bf6b8SYongseok Koh 657d6bf6b8SYongseok Koh /* Device parameter to limit the size of memcpy'd packet for MPRQ. 
*/ 667d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" 677d6bf6b8SYongseok Koh 687d6bf6b8SYongseok Koh /* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ 697d6bf6b8SYongseok Koh #define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" 707d6bf6b8SYongseok Koh 71a6bd4911SViacheslav Ovsiienko /* Device parameter to configure inline send. Deprecated, ignored.*/ 722a66cf37SYaacov Hazan #define MLX5_TXQ_INLINE "txq_inline" 732a66cf37SYaacov Hazan 74505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with ordinary SEND. */ 75505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MAX "txq_inline_max" 76505f1fe4SViacheslav Ovsiienko 77505f1fe4SViacheslav Ovsiienko /* Device parameter to configure minimal data size to inline. */ 78505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MIN "txq_inline_min" 79505f1fe4SViacheslav Ovsiienko 80505f1fe4SViacheslav Ovsiienko /* Device parameter to limit packet size to inline with Enhanced MPW. */ 81505f1fe4SViacheslav Ovsiienko #define MLX5_TXQ_INLINE_MPW "txq_inline_mpw" 82505f1fe4SViacheslav Ovsiienko 832a66cf37SYaacov Hazan /* 842a66cf37SYaacov Hazan * Device parameter to configure the number of TX queues threshold for 852a66cf37SYaacov Hazan * enabling inline send. 862a66cf37SYaacov Hazan */ 872a66cf37SYaacov Hazan #define MLX5_TXQS_MIN_INLINE "txqs_min_inline" 882a66cf37SYaacov Hazan 8909d8b416SYongseok Koh /* 9009d8b416SYongseok Koh * Device parameter to configure the number of TX queues threshold for 91a6bd4911SViacheslav Ovsiienko * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines). 9209d8b416SYongseok Koh */ 9309d8b416SYongseok Koh #define MLX5_TXQS_MAX_VEC "txqs_max_vec" 9409d8b416SYongseok Koh 95230189d9SNélio Laranjeiro /* Device parameter to enable multi-packet send WQEs. */ 96230189d9SNélio Laranjeiro #define MLX5_TXQ_MPW_EN "txq_mpw_en" 97230189d9SNélio Laranjeiro 98a6bd4911SViacheslav Ovsiienko /* 99a6bd4911SViacheslav Ovsiienko * Device parameter to include 2 dsegs in the title WQEBB. 100a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 101a6bd4911SViacheslav Ovsiienko */ 1026ce84bd8SYongseok Koh #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" 1036ce84bd8SYongseok Koh 104a6bd4911SViacheslav Ovsiienko /* 105a6bd4911SViacheslav Ovsiienko * Device parameter to limit the size of inlining packet. 106a6bd4911SViacheslav Ovsiienko * Deprecated, ignored. 107a6bd4911SViacheslav Ovsiienko */ 1086ce84bd8SYongseok Koh #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" 1096ce84bd8SYongseok Koh 110a6bd4911SViacheslav Ovsiienko /* 111a6bd4911SViacheslav Ovsiienko * Device parameter to enable hardware Tx vector. 112a6bd4911SViacheslav Ovsiienko * Deprecated, ignored (no vectorized Tx routines anymore). 113a6bd4911SViacheslav Ovsiienko */ 1145644d5b9SNelio Laranjeiro #define MLX5_TX_VEC_EN "tx_vec_en" 1155644d5b9SNelio Laranjeiro 1165644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Rx vector. */ 1175644d5b9SNelio Laranjeiro #define MLX5_RX_VEC_EN "rx_vec_en" 1185644d5b9SNelio Laranjeiro 11978a54648SXueming Li /* Allow L3 VXLAN flow creation. */ 12078a54648SXueming Li #define MLX5_L3_VXLAN_EN "l3_vxlan_en" 12178a54648SXueming Li 122e2b4925eSOri Kam /* Activate DV E-Switch flow steering. */ 123e2b4925eSOri Kam #define MLX5_DV_ESW_EN "dv_esw_en" 124e2b4925eSOri Kam 12551e72d38SOri Kam /* Activate DV flow steering. 
*/ 12651e72d38SOri Kam #define MLX5_DV_FLOW_EN "dv_flow_en" 12751e72d38SOri Kam 128db209cc3SNélio Laranjeiro /* Activate Netlink support in VF mode. */ 129db209cc3SNélio Laranjeiro #define MLX5_VF_NL_EN "vf_nl_en" 130db209cc3SNélio Laranjeiro 131dceb5029SYongseok Koh /* Enable extending memsegs when creating a MR. */ 132dceb5029SYongseok Koh #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" 133dceb5029SYongseok Koh 1346de569f5SAdrien Mazarguil /* Select port representors to instantiate. */ 1356de569f5SAdrien Mazarguil #define MLX5_REPRESENTOR "representor" 1366de569f5SAdrien Mazarguil 137066cfecdSMatan Azrad /* Device parameter to configure the maximum number of dump files per queue. */ 138066cfecdSMatan Azrad #define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num" 139066cfecdSMatan Azrad 14021bb6c7eSDekel Peled /* Configure timeout of LRO session (in microseconds). */ 14121bb6c7eSDekel Peled #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec" 14221bb6c7eSDekel Peled 14343e9d979SShachar Beiser #ifndef HAVE_IBV_MLX5_MOD_MPW 14443e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) 14543e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) 14643e9d979SShachar Beiser #endif 14743e9d979SShachar Beiser 148523f5a74SYongseok Koh #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP 149523f5a74SYongseok Koh #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) 150523f5a74SYongseok Koh #endif 151523f5a74SYongseok Koh 152974f1e7eSYongseok Koh static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 153974f1e7eSYongseok Koh 154974f1e7eSYongseok Koh /* Shared memory between primary and secondary processes. */ 155974f1e7eSYongseok Koh struct mlx5_shared_data *mlx5_shared_data; 156974f1e7eSYongseok Koh 157974f1e7eSYongseok Koh /* Spinlock for mlx5_shared_data allocation. */ 158974f1e7eSYongseok Koh static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 159974f1e7eSYongseok Koh 1607be600c8SYongseok Koh /* Process local data for secondary processes. */ 1617be600c8SYongseok Koh static struct mlx5_local_data mlx5_local_data; 1627be600c8SYongseok Koh 163a170a30dSNélio Laranjeiro /** Driver-specific log messages type. */ 164a170a30dSNélio Laranjeiro int mlx5_logtype; 165a170a30dSNélio Laranjeiro 166ad74bc61SViacheslav Ovsiienko /** Data associated with devices to spawn. */ 167ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data { 168ad74bc61SViacheslav Ovsiienko uint32_t ifindex; /**< Network interface index. */ 169ad74bc61SViacheslav Ovsiienko uint32_t max_port; /**< IB device maximal port index. */ 170ad74bc61SViacheslav Ovsiienko uint32_t ibv_port; /**< IB device physical port index. */ 1712e569a37SViacheslav Ovsiienko int pf_bond; /**< bonding device PF index. < 0 - no bonding */ 172ad74bc61SViacheslav Ovsiienko struct mlx5_switch_info info; /**< Switch information. */ 173ad74bc61SViacheslav Ovsiienko struct ibv_device *ibv_dev; /**< Associated IB device. */ 174ad74bc61SViacheslav Ovsiienko struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */ 175ab3cffcfSViacheslav Ovsiienko struct rte_pci_device *pci_dev; /**< Backend PCI device. 
*/ 176ad74bc61SViacheslav Ovsiienko }; 177ad74bc61SViacheslav Ovsiienko 17817e19bc4SViacheslav Ovsiienko static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER(); 17917e19bc4SViacheslav Ovsiienko static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER; 18017e19bc4SViacheslav Ovsiienko 181830d2091SOri Kam #define MLX5_FLOW_MIN_ID_POOL_SIZE 512 182830d2091SOri Kam #define MLX5_ID_GENERATION_ARRAY_FACTOR 16 183830d2091SOri Kam 184830d2091SOri Kam /** 185830d2091SOri Kam * Allocate ID pool structure. 186830d2091SOri Kam * 187830d2091SOri Kam * @return 188830d2091SOri Kam * Pointer to pool object, NULL value otherwise. 189830d2091SOri Kam */ 190830d2091SOri Kam struct mlx5_flow_id_pool * 191830d2091SOri Kam mlx5_flow_id_pool_alloc(void) 192830d2091SOri Kam { 193830d2091SOri Kam struct mlx5_flow_id_pool *pool; 194830d2091SOri Kam void *mem; 195830d2091SOri Kam 196830d2091SOri Kam pool = rte_zmalloc("id pool allocation", sizeof(*pool), 197830d2091SOri Kam RTE_CACHE_LINE_SIZE); 198830d2091SOri Kam if (!pool) { 199830d2091SOri Kam DRV_LOG(ERR, "can't allocate id pool"); 200830d2091SOri Kam rte_errno = ENOMEM; 201830d2091SOri Kam return NULL; 202830d2091SOri Kam } 203830d2091SOri Kam mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), 204830d2091SOri Kam RTE_CACHE_LINE_SIZE); 205830d2091SOri Kam if (!mem) { 206830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 207830d2091SOri Kam rte_errno = ENOMEM; 208830d2091SOri Kam goto error; 209830d2091SOri Kam } 210830d2091SOri Kam pool->free_arr = mem; 211830d2091SOri Kam pool->curr = pool->free_arr; 212830d2091SOri Kam pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; 213830d2091SOri Kam pool->base_index = 0; 214830d2091SOri Kam return pool; 215830d2091SOri Kam error: 216830d2091SOri Kam rte_free(pool); 217830d2091SOri Kam return NULL; 218830d2091SOri Kam } 219830d2091SOri Kam 220830d2091SOri Kam /** 221830d2091SOri Kam * Release ID pool structure. 222830d2091SOri Kam * 223830d2091SOri Kam * @param[in] pool 224830d2091SOri Kam * Pointer to flow id pool object to free. 225830d2091SOri Kam */ 226830d2091SOri Kam void 227830d2091SOri Kam mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) 228830d2091SOri Kam { 229830d2091SOri Kam rte_free(pool->free_arr); 230830d2091SOri Kam rte_free(pool); 231830d2091SOri Kam } 232830d2091SOri Kam 233830d2091SOri Kam /** 234830d2091SOri Kam * Generate ID. 235830d2091SOri Kam * 236830d2091SOri Kam * @param[in] pool 237830d2091SOri Kam * Pointer to flow id pool. 238830d2091SOri Kam * @param[out] id 239830d2091SOri Kam * The generated ID. 240830d2091SOri Kam * 241830d2091SOri Kam * @return 242830d2091SOri Kam * 0 on success, error value otherwise. 243830d2091SOri Kam */ 244830d2091SOri Kam uint32_t 245830d2091SOri Kam mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) 246830d2091SOri Kam { 247830d2091SOri Kam if (pool->curr == pool->free_arr) { 248830d2091SOri Kam if (pool->base_index == UINT32_MAX) { 249830d2091SOri Kam rte_errno = ENOMEM; 250830d2091SOri Kam DRV_LOG(ERR, "no free id"); 251830d2091SOri Kam return -rte_errno; 252830d2091SOri Kam } 253830d2091SOri Kam *id = ++pool->base_index; 254830d2091SOri Kam return 0; 255830d2091SOri Kam } 256830d2091SOri Kam *id = *(--pool->curr); 257830d2091SOri Kam return 0; 258830d2091SOri Kam } 259830d2091SOri Kam 260830d2091SOri Kam /** 261830d2091SOri Kam * Release ID. 262830d2091SOri Kam * 263830d2091SOri Kam * @param[in] pool 264830d2091SOri Kam * Pointer to flow id pool. 
265830d2091SOri Kam * @param[out] id 266830d2091SOri Kam * The generated ID. 267830d2091SOri Kam * 268830d2091SOri Kam * @return 269830d2091SOri Kam * 0 on success, error value otherwise. 270830d2091SOri Kam */ 271830d2091SOri Kam uint32_t 272830d2091SOri Kam mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) 273830d2091SOri Kam { 274830d2091SOri Kam uint32_t size; 275830d2091SOri Kam uint32_t size2; 276830d2091SOri Kam void *mem; 277830d2091SOri Kam 278830d2091SOri Kam if (pool->curr == pool->last) { 279830d2091SOri Kam size = pool->curr - pool->free_arr; 280830d2091SOri Kam size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR; 281830d2091SOri Kam assert(size2 > size); 282830d2091SOri Kam mem = rte_malloc("", size2 * sizeof(uint32_t), 0); 283830d2091SOri Kam if (!mem) { 284830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 285830d2091SOri Kam rte_errno = ENOMEM; 286830d2091SOri Kam return -rte_errno; 287830d2091SOri Kam } 288830d2091SOri Kam memcpy(mem, pool->free_arr, size * sizeof(uint32_t)); 289830d2091SOri Kam rte_free(pool->free_arr); 290830d2091SOri Kam pool->free_arr = mem; 291830d2091SOri Kam pool->curr = pool->free_arr + size; 292830d2091SOri Kam pool->last = pool->free_arr + size2; 293830d2091SOri Kam } 294830d2091SOri Kam *pool->curr = id; 295830d2091SOri Kam pool->curr++; 296830d2091SOri Kam return 0; 297830d2091SOri Kam } 298830d2091SOri Kam 29917e19bc4SViacheslav Ovsiienko /** 3005382d28cSMatan Azrad * Initialize the counters management structure. 3015382d28cSMatan Azrad * 3025382d28cSMatan Azrad * @param[in] sh 3035382d28cSMatan Azrad * Pointer to mlx5_ibv_shared object to free 3045382d28cSMatan Azrad */ 3055382d28cSMatan Azrad static void 3065382d28cSMatan Azrad mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh) 3075382d28cSMatan Azrad { 3085382d28cSMatan Azrad uint8_t i; 3095382d28cSMatan Azrad 3105382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.flow_counters); 3115382d28cSMatan Azrad for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) 3125382d28cSMatan Azrad TAILQ_INIT(&sh->cmng.ccont[i].pool_list); 3135382d28cSMatan Azrad } 3145382d28cSMatan Azrad 3155382d28cSMatan Azrad /** 3165382d28cSMatan Azrad * Destroy all the resources allocated for a counter memory management. 3175382d28cSMatan Azrad * 3185382d28cSMatan Azrad * @param[in] mng 3195382d28cSMatan Azrad * Pointer to the memory management structure. 3205382d28cSMatan Azrad */ 3215382d28cSMatan Azrad static void 3225382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) 3235382d28cSMatan Azrad { 3245382d28cSMatan Azrad uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; 3255382d28cSMatan Azrad 3265382d28cSMatan Azrad LIST_REMOVE(mng, next); 3275382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy(mng->dm)); 3285382d28cSMatan Azrad claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); 3295382d28cSMatan Azrad rte_free(mem); 3305382d28cSMatan Azrad } 3315382d28cSMatan Azrad 3325382d28cSMatan Azrad /** 3335382d28cSMatan Azrad * Close and release all the resources of the counters management. 3345382d28cSMatan Azrad * 3355382d28cSMatan Azrad * @param[in] sh 3365382d28cSMatan Azrad * Pointer to mlx5_ibv_shared object to free. 
3375382d28cSMatan Azrad */ 3385382d28cSMatan Azrad static void 3395382d28cSMatan Azrad mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh) 3405382d28cSMatan Azrad { 3415382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng *mng; 3425382d28cSMatan Azrad uint8_t i; 3435382d28cSMatan Azrad int j; 344f15db67dSMatan Azrad int retries = 1024; 3455382d28cSMatan Azrad 346f15db67dSMatan Azrad rte_errno = 0; 347f15db67dSMatan Azrad while (--retries) { 348f15db67dSMatan Azrad rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh); 349f15db67dSMatan Azrad if (rte_errno != EINPROGRESS) 350f15db67dSMatan Azrad break; 351f15db67dSMatan Azrad rte_pause(); 352f15db67dSMatan Azrad } 3535382d28cSMatan Azrad for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) { 3545382d28cSMatan Azrad struct mlx5_flow_counter_pool *pool; 3555382d28cSMatan Azrad uint32_t batch = !!(i % 2); 3565382d28cSMatan Azrad 3575382d28cSMatan Azrad if (!sh->cmng.ccont[i].pools) 3585382d28cSMatan Azrad continue; 3595382d28cSMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 3605382d28cSMatan Azrad while (pool) { 3615382d28cSMatan Azrad if (batch) { 3625382d28cSMatan Azrad if (pool->min_dcs) 3635382d28cSMatan Azrad claim_zero 3645382d28cSMatan Azrad (mlx5_devx_cmd_destroy(pool->min_dcs)); 3655382d28cSMatan Azrad } 3665382d28cSMatan Azrad for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { 3675382d28cSMatan Azrad if (pool->counters_raw[j].action) 3685382d28cSMatan Azrad claim_zero 3695382d28cSMatan Azrad (mlx5_glue->destroy_flow_action 3705382d28cSMatan Azrad (pool->counters_raw[j].action)); 3715382d28cSMatan Azrad if (!batch && pool->counters_raw[j].dcs) 3725382d28cSMatan Azrad claim_zero(mlx5_devx_cmd_destroy 3735382d28cSMatan Azrad (pool->counters_raw[j].dcs)); 3745382d28cSMatan Azrad } 3755382d28cSMatan Azrad TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, 3765382d28cSMatan Azrad next); 3775382d28cSMatan Azrad rte_free(pool); 3785382d28cSMatan Azrad pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); 3795382d28cSMatan Azrad } 3805382d28cSMatan Azrad rte_free(sh->cmng.ccont[i].pools); 3815382d28cSMatan Azrad } 3825382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 3835382d28cSMatan Azrad while (mng) { 3845382d28cSMatan Azrad mlx5_flow_destroy_counter_stat_mem_mng(mng); 3855382d28cSMatan Azrad mng = LIST_FIRST(&sh->cmng.mem_mngs); 3865382d28cSMatan Azrad } 3875382d28cSMatan Azrad memset(&sh->cmng, 0, sizeof(sh->cmng)); 3885382d28cSMatan Azrad } 3895382d28cSMatan Azrad 3905382d28cSMatan Azrad /** 391b9d86122SDekel Peled * Extract pdn of PD object using DV API. 392b9d86122SDekel Peled * 393b9d86122SDekel Peled * @param[in] pd 394b9d86122SDekel Peled * Pointer to the verbs PD object. 395b9d86122SDekel Peled * @param[out] pdn 396b9d86122SDekel Peled * Pointer to the PD object number variable. 397b9d86122SDekel Peled * 398b9d86122SDekel Peled * @return 399b9d86122SDekel Peled * 0 on success, error value otherwise. 
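 *
 * A minimal usage sketch, mirroring the call made from
 * mlx5_alloc_shared_ibctx() below (pd must be a valid Verbs protection
 * domain; anything not visible in this file is illustrative only):
 *
 *   uint32_t pdn = 0;
 *
 *   if (mlx5_get_pdn(pd, &pdn))
 *           return -1;     (DV query failed, pdn is not valid)
 *   sh->pdn = pdn;         (PD object number kept for later DevX use)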
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to get PD object info");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */

/**
 * Allocate the shared IB device context. If there is a multiport device,
 * the master and representors will share this context; if there is a
 * single-port dedicated IB device, the context will be used only by the
 * given port due to unification.
 *
 * The routine first searches for an existing context by the specified IB
 * device name; if one is found, the shared context is assumed and its
 * reference counter is incremented. If no context is found, a new one is
 * created and initialized with the specified IB device context and
 * parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
{
	struct mlx5_ibv_shared *sh;
	int err = 0;
	uint32_t i;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif

	assert(spawn);
	/* Secondary process should not create the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context.
*/ 46117e19bc4SViacheslav Ovsiienko assert(spawn->max_port); 46217e19bc4SViacheslav Ovsiienko sh = rte_zmalloc("ethdev shared ib context", 46317e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared) + 46417e19bc4SViacheslav Ovsiienko spawn->max_port * 46517e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared_port), 46617e19bc4SViacheslav Ovsiienko RTE_CACHE_LINE_SIZE); 46717e19bc4SViacheslav Ovsiienko if (!sh) { 46817e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "shared context allocation failure"); 46917e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 47017e19bc4SViacheslav Ovsiienko goto exit; 47117e19bc4SViacheslav Ovsiienko } 47217e19bc4SViacheslav Ovsiienko /* Try to open IB device with DV first, then usual Verbs. */ 47317e19bc4SViacheslav Ovsiienko errno = 0; 47417e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev); 47517e19bc4SViacheslav Ovsiienko if (sh->ctx) { 47617e19bc4SViacheslav Ovsiienko sh->devx = 1; 47717e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is supported"); 47817e19bc4SViacheslav Ovsiienko } else { 47917e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->open_device(spawn->ibv_dev); 48017e19bc4SViacheslav Ovsiienko if (!sh->ctx) { 48117e19bc4SViacheslav Ovsiienko err = errno ? errno : ENODEV; 48217e19bc4SViacheslav Ovsiienko goto error; 48317e19bc4SViacheslav Ovsiienko } 48417e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is NOT supported"); 48517e19bc4SViacheslav Ovsiienko } 48617e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr); 48717e19bc4SViacheslav Ovsiienko if (err) { 48817e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "ibv_query_device_ex() failed"); 48917e19bc4SViacheslav Ovsiienko goto error; 49017e19bc4SViacheslav Ovsiienko } 49117e19bc4SViacheslav Ovsiienko sh->refcnt = 1; 49217e19bc4SViacheslav Ovsiienko sh->max_port = spawn->max_port; 49317e19bc4SViacheslav Ovsiienko strncpy(sh->ibdev_name, sh->ctx->device->name, 49417e19bc4SViacheslav Ovsiienko sizeof(sh->ibdev_name)); 49517e19bc4SViacheslav Ovsiienko strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path, 49617e19bc4SViacheslav Ovsiienko sizeof(sh->ibdev_path)); 49753e5a82fSViacheslav Ovsiienko pthread_mutex_init(&sh->intr_mutex, NULL); 49853e5a82fSViacheslav Ovsiienko /* 49953e5a82fSViacheslav Ovsiienko * Setting port_id to max unallowed value means 50053e5a82fSViacheslav Ovsiienko * there is no interrupt subhandler installed for 50153e5a82fSViacheslav Ovsiienko * the given port index i. 
50253e5a82fSViacheslav Ovsiienko */ 50323242063SMatan Azrad for (i = 0; i < sh->max_port; i++) { 50453e5a82fSViacheslav Ovsiienko sh->port[i].ih_port_id = RTE_MAX_ETHPORTS; 50523242063SMatan Azrad sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS; 50623242063SMatan Azrad } 50717e19bc4SViacheslav Ovsiienko sh->pd = mlx5_glue->alloc_pd(sh->ctx); 50817e19bc4SViacheslav Ovsiienko if (sh->pd == NULL) { 50917e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "PD allocation failure"); 51017e19bc4SViacheslav Ovsiienko err = ENOMEM; 51117e19bc4SViacheslav Ovsiienko goto error; 51217e19bc4SViacheslav Ovsiienko } 513b9d86122SDekel Peled #ifdef HAVE_IBV_FLOW_DV_SUPPORT 514ae18a1aeSOri Kam if (sh->devx) { 515b9d86122SDekel Peled err = mlx5_get_pdn(sh->pd, &sh->pdn); 516b9d86122SDekel Peled if (err) { 517b9d86122SDekel Peled DRV_LOG(ERR, "Fail to extract pdn from PD"); 518b9d86122SDekel Peled goto error; 519b9d86122SDekel Peled } 520ae18a1aeSOri Kam sh->td = mlx5_devx_cmd_create_td(sh->ctx); 521ae18a1aeSOri Kam if (!sh->td) { 522ae18a1aeSOri Kam DRV_LOG(ERR, "TD allocation failure"); 523ae18a1aeSOri Kam err = ENOMEM; 524ae18a1aeSOri Kam goto error; 525ae18a1aeSOri Kam } 526ae18a1aeSOri Kam tis_attr.transport_domain = sh->td->id; 527ae18a1aeSOri Kam sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr); 528ae18a1aeSOri Kam if (!sh->tis) { 529ae18a1aeSOri Kam DRV_LOG(ERR, "TIS allocation failure"); 530ae18a1aeSOri Kam err = ENOMEM; 531ae18a1aeSOri Kam goto error; 532ae18a1aeSOri Kam } 533ae18a1aeSOri Kam } 534d85c7b5eSOri Kam sh->flow_id_pool = mlx5_flow_id_pool_alloc(); 535d85c7b5eSOri Kam if (!sh->flow_id_pool) { 536d85c7b5eSOri Kam DRV_LOG(ERR, "can't create flow id pool"); 537d85c7b5eSOri Kam err = ENOMEM; 538d85c7b5eSOri Kam goto error; 539d85c7b5eSOri Kam } 540b9d86122SDekel Peled #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 541ab3cffcfSViacheslav Ovsiienko /* 542ab3cffcfSViacheslav Ovsiienko * Once the device is added to the list of memory event 543ab3cffcfSViacheslav Ovsiienko * callback, its global MR cache table cannot be expanded 544ab3cffcfSViacheslav Ovsiienko * on the fly because of deadlock. If it overflows, lookup 545ab3cffcfSViacheslav Ovsiienko * should be done by searching MR list linearly, which is slow. 546ab3cffcfSViacheslav Ovsiienko * 547ab3cffcfSViacheslav Ovsiienko * At this point the device is not added to the memory 548ab3cffcfSViacheslav Ovsiienko * event list yet, context is just being created. 549ab3cffcfSViacheslav Ovsiienko */ 550ab3cffcfSViacheslav Ovsiienko err = mlx5_mr_btree_init(&sh->mr.cache, 551ab3cffcfSViacheslav Ovsiienko MLX5_MR_BTREE_CACHE_N * 2, 55246e10a4cSViacheslav Ovsiienko spawn->pci_dev->device.numa_node); 553ab3cffcfSViacheslav Ovsiienko if (err) { 554ab3cffcfSViacheslav Ovsiienko err = rte_errno; 555ab3cffcfSViacheslav Ovsiienko goto error; 556ab3cffcfSViacheslav Ovsiienko } 5575382d28cSMatan Azrad mlx5_flow_counters_mng_init(sh); 5580e3d0525SViacheslav Ovsiienko /* Add device to memory callback list. */ 5590e3d0525SViacheslav Ovsiienko rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 5600e3d0525SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, 5610e3d0525SViacheslav Ovsiienko sh, mem_event_cb); 5620e3d0525SViacheslav Ovsiienko rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 5630e3d0525SViacheslav Ovsiienko /* Add context to the global device list. 
*/ 56417e19bc4SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next); 56517e19bc4SViacheslav Ovsiienko exit: 56617e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 56717e19bc4SViacheslav Ovsiienko return sh; 56817e19bc4SViacheslav Ovsiienko error: 56917e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 57017e19bc4SViacheslav Ovsiienko assert(sh); 571ae18a1aeSOri Kam if (sh->tis) 572ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->tis)); 573ae18a1aeSOri Kam if (sh->td) 574ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->td)); 57517e19bc4SViacheslav Ovsiienko if (sh->pd) 57617e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 57717e19bc4SViacheslav Ovsiienko if (sh->ctx) 57817e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 579d85c7b5eSOri Kam if (sh->flow_id_pool) 580d85c7b5eSOri Kam mlx5_flow_id_pool_release(sh->flow_id_pool); 58117e19bc4SViacheslav Ovsiienko rte_free(sh); 58217e19bc4SViacheslav Ovsiienko assert(err > 0); 58317e19bc4SViacheslav Ovsiienko rte_errno = err; 58417e19bc4SViacheslav Ovsiienko return NULL; 58517e19bc4SViacheslav Ovsiienko } 58617e19bc4SViacheslav Ovsiienko 58717e19bc4SViacheslav Ovsiienko /** 58817e19bc4SViacheslav Ovsiienko * Free shared IB device context. Decrement counter and if zero free 58917e19bc4SViacheslav Ovsiienko * all allocated resources and close handles. 59017e19bc4SViacheslav Ovsiienko * 59117e19bc4SViacheslav Ovsiienko * @param[in] sh 59217e19bc4SViacheslav Ovsiienko * Pointer to mlx5_ibv_shared object to free 59317e19bc4SViacheslav Ovsiienko */ 59417e19bc4SViacheslav Ovsiienko static void 59517e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) 59617e19bc4SViacheslav Ovsiienko { 59717e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 59817e19bc4SViacheslav Ovsiienko #ifndef NDEBUG 59917e19bc4SViacheslav Ovsiienko /* Check the object presence in the list. */ 60017e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *lctx; 60117e19bc4SViacheslav Ovsiienko 60217e19bc4SViacheslav Ovsiienko LIST_FOREACH(lctx, &mlx5_ibv_list, next) 60317e19bc4SViacheslav Ovsiienko if (lctx == sh) 60417e19bc4SViacheslav Ovsiienko break; 60517e19bc4SViacheslav Ovsiienko assert(lctx); 60617e19bc4SViacheslav Ovsiienko if (lctx != sh) { 60717e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "Freeing non-existing shared IB context"); 60817e19bc4SViacheslav Ovsiienko goto exit; 60917e19bc4SViacheslav Ovsiienko } 61017e19bc4SViacheslav Ovsiienko #endif 61117e19bc4SViacheslav Ovsiienko assert(sh); 61217e19bc4SViacheslav Ovsiienko assert(sh->refcnt); 61317e19bc4SViacheslav Ovsiienko /* Secondary process should not free the shared context. */ 61417e19bc4SViacheslav Ovsiienko assert(rte_eal_process_type() == RTE_PROC_PRIMARY); 61517e19bc4SViacheslav Ovsiienko if (--sh->refcnt) 61617e19bc4SViacheslav Ovsiienko goto exit; 617ab3cffcfSViacheslav Ovsiienko /* Release created Memory Regions. */ 618ab3cffcfSViacheslav Ovsiienko mlx5_mr_release(sh); 6190e3d0525SViacheslav Ovsiienko /* Remove from memory callback device list. */ 6200e3d0525SViacheslav Ovsiienko rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 6210e3d0525SViacheslav Ovsiienko LIST_REMOVE(sh, mem_event_cb); 6220e3d0525SViacheslav Ovsiienko rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 6230e3d0525SViacheslav Ovsiienko /* Remove context from the global device list. 
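	 * The mlx5_ibv_list_mutex taken at the top of this function is
	 * still held here, so the removal cannot race with a concurrent
	 * mlx5_alloc_shared_ibctx() call.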
*/ 62417e19bc4SViacheslav Ovsiienko LIST_REMOVE(sh, next); 62553e5a82fSViacheslav Ovsiienko /* 62653e5a82fSViacheslav Ovsiienko * Ensure there is no async event handler installed. 62753e5a82fSViacheslav Ovsiienko * Only primary process handles async device events. 62853e5a82fSViacheslav Ovsiienko **/ 6295382d28cSMatan Azrad mlx5_flow_counters_mng_close(sh); 63053e5a82fSViacheslav Ovsiienko assert(!sh->intr_cnt); 63153e5a82fSViacheslav Ovsiienko if (sh->intr_cnt) 6325897ac13SViacheslav Ovsiienko mlx5_intr_callback_unregister 63353e5a82fSViacheslav Ovsiienko (&sh->intr_handle, mlx5_dev_interrupt_handler, sh); 63423242063SMatan Azrad #ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT 63523242063SMatan Azrad if (sh->devx_intr_cnt) { 63623242063SMatan Azrad if (sh->intr_handle_devx.fd) 63723242063SMatan Azrad rte_intr_callback_unregister(&sh->intr_handle_devx, 63823242063SMatan Azrad mlx5_dev_interrupt_handler_devx, sh); 63923242063SMatan Azrad if (sh->devx_comp) 64023242063SMatan Azrad mlx5dv_devx_destroy_cmd_comp(sh->devx_comp); 64123242063SMatan Azrad } 64223242063SMatan Azrad #endif 64353e5a82fSViacheslav Ovsiienko pthread_mutex_destroy(&sh->intr_mutex); 64417e19bc4SViacheslav Ovsiienko if (sh->pd) 64517e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 646ae18a1aeSOri Kam if (sh->tis) 647ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->tis)); 648ae18a1aeSOri Kam if (sh->td) 649ae18a1aeSOri Kam claim_zero(mlx5_devx_cmd_destroy(sh->td)); 65017e19bc4SViacheslav Ovsiienko if (sh->ctx) 65117e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 652d85c7b5eSOri Kam if (sh->flow_id_pool) 653d85c7b5eSOri Kam mlx5_flow_id_pool_release(sh->flow_id_pool); 65417e19bc4SViacheslav Ovsiienko rte_free(sh); 65517e19bc4SViacheslav Ovsiienko exit: 65617e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 65717e19bc4SViacheslav Ovsiienko } 65817e19bc4SViacheslav Ovsiienko 659771fa900SAdrien Mazarguil /** 660b2177648SViacheslav Ovsiienko * Initialize DR related data within private structure. 661b2177648SViacheslav Ovsiienko * Routine checks the reference counter and does actual 662ae4eb7dcSViacheslav Ovsiienko * resources creation/initialization only if counter is zero. 663b2177648SViacheslav Ovsiienko * 664b2177648SViacheslav Ovsiienko * @param[in] priv 665b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 666b2177648SViacheslav Ovsiienko * 667b2177648SViacheslav Ovsiienko * @return 668b2177648SViacheslav Ovsiienko * Zero on success, positive error code otherwise. 669b2177648SViacheslav Ovsiienko */ 670b2177648SViacheslav Ovsiienko static int 671b2177648SViacheslav Ovsiienko mlx5_alloc_shared_dr(struct mlx5_priv *priv) 672b2177648SViacheslav Ovsiienko { 673b2177648SViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR 674b2177648SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = priv->sh; 675b2177648SViacheslav Ovsiienko int err = 0; 676d1e64fbfSOri Kam void *domain; 677b2177648SViacheslav Ovsiienko 678b2177648SViacheslav Ovsiienko assert(sh); 679b2177648SViacheslav Ovsiienko if (sh->dv_refcnt) { 680b2177648SViacheslav Ovsiienko /* Shared DV/DR structures is already initialized. */ 681b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 682b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 683b2177648SViacheslav Ovsiienko return 0; 684b2177648SViacheslav Ovsiienko } 685b2177648SViacheslav Ovsiienko /* Reference counter is zero, we should initialize structures. 
*/ 686d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain(sh->ctx, 687d1e64fbfSOri Kam MLX5DV_DR_DOMAIN_TYPE_NIC_RX); 688d1e64fbfSOri Kam if (!domain) { 689d1e64fbfSOri Kam DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed"); 690b2177648SViacheslav Ovsiienko err = errno; 691b2177648SViacheslav Ovsiienko goto error; 692b2177648SViacheslav Ovsiienko } 693d1e64fbfSOri Kam sh->rx_domain = domain; 694d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain(sh->ctx, 695d1e64fbfSOri Kam MLX5DV_DR_DOMAIN_TYPE_NIC_TX); 696d1e64fbfSOri Kam if (!domain) { 697d1e64fbfSOri Kam DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed"); 698b2177648SViacheslav Ovsiienko err = errno; 699b2177648SViacheslav Ovsiienko goto error; 700b2177648SViacheslav Ovsiienko } 70179e35d0dSViacheslav Ovsiienko pthread_mutex_init(&sh->dv_mutex, NULL); 702d1e64fbfSOri Kam sh->tx_domain = domain; 703e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 704e2b4925eSOri Kam if (priv->config.dv_esw_en) { 705d1e64fbfSOri Kam domain = mlx5_glue->dr_create_domain 706d1e64fbfSOri Kam (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB); 707d1e64fbfSOri Kam if (!domain) { 708d1e64fbfSOri Kam DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed"); 709e2b4925eSOri Kam err = errno; 710e2b4925eSOri Kam goto error; 711e2b4925eSOri Kam } 712d1e64fbfSOri Kam sh->fdb_domain = domain; 71334fa7c02SOri Kam sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop(); 714e2b4925eSOri Kam } 715e2b4925eSOri Kam #endif 716b41e47daSMoti Haimovsky sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan(); 717b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 718b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 719b2177648SViacheslav Ovsiienko return 0; 720b2177648SViacheslav Ovsiienko 721b2177648SViacheslav Ovsiienko error: 722b2177648SViacheslav Ovsiienko /* Rollback the created objects. */ 723d1e64fbfSOri Kam if (sh->rx_domain) { 724d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->rx_domain); 725d1e64fbfSOri Kam sh->rx_domain = NULL; 726b2177648SViacheslav Ovsiienko } 727d1e64fbfSOri Kam if (sh->tx_domain) { 728d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->tx_domain); 729d1e64fbfSOri Kam sh->tx_domain = NULL; 730b2177648SViacheslav Ovsiienko } 731d1e64fbfSOri Kam if (sh->fdb_domain) { 732d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->fdb_domain); 733d1e64fbfSOri Kam sh->fdb_domain = NULL; 734e2b4925eSOri Kam } 73534fa7c02SOri Kam if (sh->esw_drop_action) { 73634fa7c02SOri Kam mlx5_glue->destroy_flow_action(sh->esw_drop_action); 73734fa7c02SOri Kam sh->esw_drop_action = NULL; 73834fa7c02SOri Kam } 739b41e47daSMoti Haimovsky if (sh->pop_vlan_action) { 740b41e47daSMoti Haimovsky mlx5_glue->destroy_flow_action(sh->pop_vlan_action); 741b41e47daSMoti Haimovsky sh->pop_vlan_action = NULL; 742b41e47daSMoti Haimovsky } 743b2177648SViacheslav Ovsiienko return err; 744b2177648SViacheslav Ovsiienko #else 745b2177648SViacheslav Ovsiienko (void)priv; 746b2177648SViacheslav Ovsiienko return 0; 747b2177648SViacheslav Ovsiienko #endif 748b2177648SViacheslav Ovsiienko } 749b2177648SViacheslav Ovsiienko 750b2177648SViacheslav Ovsiienko /** 751b2177648SViacheslav Ovsiienko * Destroy DR related data within private structure. 752b2177648SViacheslav Ovsiienko * 753b2177648SViacheslav Ovsiienko * @param[in] priv 754b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 
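 *
 * This is the reference-counted counterpart of mlx5_alloc_shared_dr():
 * the DR domains and default flow actions are destroyed only when the
 * last port sharing them drops its reference.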
755b2177648SViacheslav Ovsiienko */ 756b2177648SViacheslav Ovsiienko static void 757b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(struct mlx5_priv *priv) 758b2177648SViacheslav Ovsiienko { 759b2177648SViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR 760b2177648SViacheslav Ovsiienko struct mlx5_ibv_shared *sh; 761b2177648SViacheslav Ovsiienko 762b2177648SViacheslav Ovsiienko if (!priv->dr_shared) 763b2177648SViacheslav Ovsiienko return; 764b2177648SViacheslav Ovsiienko priv->dr_shared = 0; 765b2177648SViacheslav Ovsiienko sh = priv->sh; 766b2177648SViacheslav Ovsiienko assert(sh); 767b2177648SViacheslav Ovsiienko assert(sh->dv_refcnt); 768b2177648SViacheslav Ovsiienko if (sh->dv_refcnt && --sh->dv_refcnt) 769b2177648SViacheslav Ovsiienko return; 770d1e64fbfSOri Kam if (sh->rx_domain) { 771d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->rx_domain); 772d1e64fbfSOri Kam sh->rx_domain = NULL; 773b2177648SViacheslav Ovsiienko } 774d1e64fbfSOri Kam if (sh->tx_domain) { 775d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->tx_domain); 776d1e64fbfSOri Kam sh->tx_domain = NULL; 777b2177648SViacheslav Ovsiienko } 778e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 779d1e64fbfSOri Kam if (sh->fdb_domain) { 780d1e64fbfSOri Kam mlx5_glue->dr_destroy_domain(sh->fdb_domain); 781d1e64fbfSOri Kam sh->fdb_domain = NULL; 782e2b4925eSOri Kam } 78334fa7c02SOri Kam if (sh->esw_drop_action) { 78434fa7c02SOri Kam mlx5_glue->destroy_flow_action(sh->esw_drop_action); 78534fa7c02SOri Kam sh->esw_drop_action = NULL; 78634fa7c02SOri Kam } 787e2b4925eSOri Kam #endif 788b41e47daSMoti Haimovsky if (sh->pop_vlan_action) { 789b41e47daSMoti Haimovsky mlx5_glue->destroy_flow_action(sh->pop_vlan_action); 790b41e47daSMoti Haimovsky sh->pop_vlan_action = NULL; 791b41e47daSMoti Haimovsky } 79279e35d0dSViacheslav Ovsiienko pthread_mutex_destroy(&sh->dv_mutex); 793b2177648SViacheslav Ovsiienko #else 794b2177648SViacheslav Ovsiienko (void)priv; 795b2177648SViacheslav Ovsiienko #endif 796b2177648SViacheslav Ovsiienko } 797b2177648SViacheslav Ovsiienko 798b2177648SViacheslav Ovsiienko /** 7997be600c8SYongseok Koh * Initialize shared data between primary and secondary process. 8007be600c8SYongseok Koh * 8017be600c8SYongseok Koh * A memzone is reserved by primary process and secondary processes attach to 8027be600c8SYongseok Koh * the memzone. 8037be600c8SYongseok Koh * 8047be600c8SYongseok Koh * @return 8057be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 806974f1e7eSYongseok Koh */ 8077be600c8SYongseok Koh static int 8087be600c8SYongseok Koh mlx5_init_shared_data(void) 809974f1e7eSYongseok Koh { 810974f1e7eSYongseok Koh const struct rte_memzone *mz; 8117be600c8SYongseok Koh int ret = 0; 812974f1e7eSYongseok Koh 813974f1e7eSYongseok Koh rte_spinlock_lock(&mlx5_shared_data_lock); 814974f1e7eSYongseok Koh if (mlx5_shared_data == NULL) { 815974f1e7eSYongseok Koh if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 816974f1e7eSYongseok Koh /* Allocate shared memory. 
*/ 817974f1e7eSYongseok Koh mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 818974f1e7eSYongseok Koh sizeof(*mlx5_shared_data), 819974f1e7eSYongseok Koh SOCKET_ID_ANY, 0); 8207be600c8SYongseok Koh if (mz == NULL) { 8217be600c8SYongseok Koh DRV_LOG(ERR, 822*06fa6988SDekel Peled "Cannot allocate mlx5 shared data"); 8237be600c8SYongseok Koh ret = -rte_errno; 8247be600c8SYongseok Koh goto error; 8257be600c8SYongseok Koh } 8267be600c8SYongseok Koh mlx5_shared_data = mz->addr; 8277be600c8SYongseok Koh memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 8287be600c8SYongseok Koh rte_spinlock_init(&mlx5_shared_data->lock); 829974f1e7eSYongseok Koh } else { 830974f1e7eSYongseok Koh /* Lookup allocated shared memory. */ 831974f1e7eSYongseok Koh mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); 8327be600c8SYongseok Koh if (mz == NULL) { 8337be600c8SYongseok Koh DRV_LOG(ERR, 834*06fa6988SDekel Peled "Cannot attach mlx5 shared data"); 8357be600c8SYongseok Koh ret = -rte_errno; 8367be600c8SYongseok Koh goto error; 837974f1e7eSYongseok Koh } 838974f1e7eSYongseok Koh mlx5_shared_data = mz->addr; 8397be600c8SYongseok Koh memset(&mlx5_local_data, 0, sizeof(mlx5_local_data)); 8403ebe6580SYongseok Koh } 841974f1e7eSYongseok Koh } 8427be600c8SYongseok Koh error: 8437be600c8SYongseok Koh rte_spinlock_unlock(&mlx5_shared_data_lock); 8447be600c8SYongseok Koh return ret; 8457be600c8SYongseok Koh } 8467be600c8SYongseok Koh 8477be600c8SYongseok Koh /** 8484d803a72SOlga Shern * Retrieve integer value from environment variable. 8494d803a72SOlga Shern * 8504d803a72SOlga Shern * @param[in] name 8514d803a72SOlga Shern * Environment variable name. 8524d803a72SOlga Shern * 8534d803a72SOlga Shern * @return 8544d803a72SOlga Shern * Integer value, 0 if the variable is not set. 8554d803a72SOlga Shern */ 8564d803a72SOlga Shern int 8574d803a72SOlga Shern mlx5_getenv_int(const char *name) 8584d803a72SOlga Shern { 8594d803a72SOlga Shern const char *val = getenv(name); 8604d803a72SOlga Shern 8614d803a72SOlga Shern if (val == NULL) 8624d803a72SOlga Shern return 0; 8634d803a72SOlga Shern return atoi(val); 8644d803a72SOlga Shern } 8654d803a72SOlga Shern 8664d803a72SOlga Shern /** 8671e3a39f7SXueming Li * Verbs callback to allocate a memory. This function should allocate the space 8681e3a39f7SXueming Li * according to the size provided residing inside a huge page. 8691e3a39f7SXueming Li * Please note that all allocation must respect the alignment from libmlx5 8701e3a39f7SXueming Li * (i.e. currently sysconf(_SC_PAGESIZE)). 8711e3a39f7SXueming Li * 8721e3a39f7SXueming Li * @param[in] size 8731e3a39f7SXueming Li * The size in bytes of the memory to allocate. 8741e3a39f7SXueming Li * @param[in] data 8751e3a39f7SXueming Li * A pointer to the callback data. 8761e3a39f7SXueming Li * 8771e3a39f7SXueming Li * @return 878a6d83b6aSNélio Laranjeiro * Allocated buffer, NULL otherwise and rte_errno is set. 
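 *
 * Together with mlx5_free_verbs_buf() below, this callback is meant to
 * place Verbs queue resources on the NUMA socket of the Rx/Tx queue
 * currently being created (see the verbs_alloc_ctx checks in the body);
 * it falls back to SOCKET_ID_ANY for other allocations.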
8791e3a39f7SXueming Li */ 8801e3a39f7SXueming Li static void * 8811e3a39f7SXueming Li mlx5_alloc_verbs_buf(size_t size, void *data) 8821e3a39f7SXueming Li { 883dbeba4cfSThomas Monjalon struct mlx5_priv *priv = data; 8841e3a39f7SXueming Li void *ret; 8851e3a39f7SXueming Li size_t alignment = sysconf(_SC_PAGESIZE); 886d10b09dbSOlivier Matz unsigned int socket = SOCKET_ID_ANY; 8871e3a39f7SXueming Li 888d10b09dbSOlivier Matz if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) { 889d10b09dbSOlivier Matz const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 890d10b09dbSOlivier Matz 891d10b09dbSOlivier Matz socket = ctrl->socket; 892d10b09dbSOlivier Matz } else if (priv->verbs_alloc_ctx.type == 893d10b09dbSOlivier Matz MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) { 894d10b09dbSOlivier Matz const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 895d10b09dbSOlivier Matz 896d10b09dbSOlivier Matz socket = ctrl->socket; 897d10b09dbSOlivier Matz } 8981e3a39f7SXueming Li assert(data != NULL); 899d10b09dbSOlivier Matz ret = rte_malloc_socket(__func__, size, alignment, socket); 900a6d83b6aSNélio Laranjeiro if (!ret && size) 901a6d83b6aSNélio Laranjeiro rte_errno = ENOMEM; 9021e3a39f7SXueming Li return ret; 9031e3a39f7SXueming Li } 9041e3a39f7SXueming Li 9051e3a39f7SXueming Li /** 9061e3a39f7SXueming Li * Verbs callback to free a memory. 9071e3a39f7SXueming Li * 9081e3a39f7SXueming Li * @param[in] ptr 9091e3a39f7SXueming Li * A pointer to the memory to free. 9101e3a39f7SXueming Li * @param[in] data 9111e3a39f7SXueming Li * A pointer to the callback data. 9121e3a39f7SXueming Li */ 9131e3a39f7SXueming Li static void 9141e3a39f7SXueming Li mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) 9151e3a39f7SXueming Li { 9161e3a39f7SXueming Li assert(data != NULL); 9171e3a39f7SXueming Li rte_free(ptr); 9181e3a39f7SXueming Li } 9191e3a39f7SXueming Li 9201e3a39f7SXueming Li /** 921c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 922c9ba7523SRaslan Darawsheh * 923c9ba7523SRaslan Darawsheh * @param[in] dev 924c9ba7523SRaslan Darawsheh * A pointer to eth_dev 925c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 926c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 927c9ba7523SRaslan Darawsheh * 928c9ba7523SRaslan Darawsheh * @return 929c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 930c9ba7523SRaslan Darawsheh */ 931c9ba7523SRaslan Darawsheh int 932c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 933c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 934c9ba7523SRaslan Darawsheh { 935c9ba7523SRaslan Darawsheh assert(udp_tunnel != NULL); 936c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && 937c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 938c9ba7523SRaslan Darawsheh return 0; 939c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && 940c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 941c9ba7523SRaslan Darawsheh return 0; 942c9ba7523SRaslan Darawsheh return -ENOTSUP; 943c9ba7523SRaslan Darawsheh } 944c9ba7523SRaslan Darawsheh 945c9ba7523SRaslan Darawsheh /** 946120dc4a7SYongseok Koh * Initialize process private data structure. 947120dc4a7SYongseok Koh * 948120dc4a7SYongseok Koh * @param dev 949120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 950120dc4a7SYongseok Koh * 951120dc4a7SYongseok Koh * @return 952120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 
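 *
 * The per-process UAR table is sized from the current Tx queue count:
 * for example, a port configured with 8 Tx queues allocates
 * sizeof(struct mlx5_proc_priv) + 8 * sizeof(void *) bytes here.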
953120dc4a7SYongseok Koh */ 954120dc4a7SYongseok Koh int 955120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 956120dc4a7SYongseok Koh { 957120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 958120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 959120dc4a7SYongseok Koh size_t ppriv_size; 960120dc4a7SYongseok Koh 961120dc4a7SYongseok Koh /* 962120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 963120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 964120dc4a7SYongseok Koh */ 965120dc4a7SYongseok Koh ppriv_size = 966120dc4a7SYongseok Koh sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); 967120dc4a7SYongseok Koh ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size, 968120dc4a7SYongseok Koh RTE_CACHE_LINE_SIZE, dev->device->numa_node); 969120dc4a7SYongseok Koh if (!ppriv) { 970120dc4a7SYongseok Koh rte_errno = ENOMEM; 971120dc4a7SYongseok Koh return -rte_errno; 972120dc4a7SYongseok Koh } 973120dc4a7SYongseok Koh ppriv->uar_table_sz = ppriv_size; 974120dc4a7SYongseok Koh dev->process_private = ppriv; 975120dc4a7SYongseok Koh return 0; 976120dc4a7SYongseok Koh } 977120dc4a7SYongseok Koh 978120dc4a7SYongseok Koh /** 979120dc4a7SYongseok Koh * Un-initialize process private data structure. 980120dc4a7SYongseok Koh * 981120dc4a7SYongseok Koh * @param dev 982120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 983120dc4a7SYongseok Koh */ 984120dc4a7SYongseok Koh static void 985120dc4a7SYongseok Koh mlx5_proc_priv_uninit(struct rte_eth_dev *dev) 986120dc4a7SYongseok Koh { 987120dc4a7SYongseok Koh if (!dev->process_private) 988120dc4a7SYongseok Koh return; 989120dc4a7SYongseok Koh rte_free(dev->process_private); 990120dc4a7SYongseok Koh dev->process_private = NULL; 991120dc4a7SYongseok Koh } 992120dc4a7SYongseok Koh 993120dc4a7SYongseok Koh /** 994771fa900SAdrien Mazarguil * DPDK callback to close the device. 995771fa900SAdrien Mazarguil * 996771fa900SAdrien Mazarguil * Destroy all queues and objects, free memory. 997771fa900SAdrien Mazarguil * 998771fa900SAdrien Mazarguil * @param dev 999771fa900SAdrien Mazarguil * Pointer to Ethernet device structure. 1000771fa900SAdrien Mazarguil */ 1001771fa900SAdrien Mazarguil static void 1002771fa900SAdrien Mazarguil mlx5_dev_close(struct rte_eth_dev *dev) 1003771fa900SAdrien Mazarguil { 1004dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 10052e22920bSAdrien Mazarguil unsigned int i; 10066af6b973SNélio Laranjeiro int ret; 1007771fa900SAdrien Mazarguil 1008a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u closing device \"%s\"", 10090f99970bSNélio Laranjeiro dev->data->port_id, 1010f048f3d4SViacheslav Ovsiienko ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : "")); 1011ecc1c29dSAdrien Mazarguil /* In case mlx5_dev_stop() has not been called. */ 1012af4f09f2SNélio Laranjeiro mlx5_dev_interrupt_handler_uninstall(dev); 101323242063SMatan Azrad mlx5_dev_interrupt_handler_devx_uninstall(dev); 1014af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 1015af689f1fSNelio Laranjeiro mlx5_flow_flush(dev, NULL); 10162e22920bSAdrien Mazarguil /* Prevent crashes when queues are still in use. */ 10172e22920bSAdrien Mazarguil dev->rx_pkt_burst = removed_rx_burst; 10182e22920bSAdrien Mazarguil dev->tx_pkt_burst = removed_tx_burst; 10192aac5b5dSYongseok Koh rte_wmb(); 10202aac5b5dSYongseok Koh /* Disable datapath on secondary process. 
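	 * Request secondary processes to stop polling the queues before
	 * they are released below; the Rx/Tx burst callbacks of this
	 * process were already replaced with the removed_* stubs above.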
	 */
	mlx5_mp_req_stop_rxtx(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	mlx5_mprq_free_mp(dev);
	mlx5_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->vmwa_context)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
	if (priv->sh) {
		/*
		 * Free the shared context last, because the cleanup
		 * routines above may use some shared fields, like
		 * mlx5_nl_mac_addr_flush() which uses ibdev_path for
		 * retrieving the ifindex if Netlink fails.
1059942d13e6SViacheslav Ovsiienko */ 1060942d13e6SViacheslav Ovsiienko mlx5_free_shared_ibctx(priv->sh); 1061942d13e6SViacheslav Ovsiienko priv->sh = NULL; 1062942d13e6SViacheslav Ovsiienko } 106323820a79SDekel Peled ret = mlx5_hrxq_verify(dev); 1064f5479b68SNélio Laranjeiro if (ret) 1065a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some hash Rx queue still remain", 10660f99970bSNélio Laranjeiro dev->data->port_id); 106715c80a12SDekel Peled ret = mlx5_ind_table_obj_verify(dev); 10684c7a0f5fSNélio Laranjeiro if (ret) 1069a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some indirection table still remain", 10700f99970bSNélio Laranjeiro dev->data->port_id); 107193403560SDekel Peled ret = mlx5_rxq_obj_verify(dev); 107209cb5b58SNélio Laranjeiro if (ret) 107393403560SDekel Peled DRV_LOG(WARNING, "port %u some Rx queue objects still remain", 10740f99970bSNélio Laranjeiro dev->data->port_id); 1075af4f09f2SNélio Laranjeiro ret = mlx5_rxq_verify(dev); 1076a1366b1aSNélio Laranjeiro if (ret) 1077a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Rx queues still remain", 10780f99970bSNélio Laranjeiro dev->data->port_id); 1079894c4a8eSOri Kam ret = mlx5_txq_obj_verify(dev); 1080faf2667fSNélio Laranjeiro if (ret) 1081a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", 10820f99970bSNélio Laranjeiro dev->data->port_id); 1083af4f09f2SNélio Laranjeiro ret = mlx5_txq_verify(dev); 10846e78005aSNélio Laranjeiro if (ret) 1085a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Tx queues still remain", 10860f99970bSNélio Laranjeiro dev->data->port_id); 1087af4f09f2SNélio Laranjeiro ret = mlx5_flow_verify(dev); 10886af6b973SNélio Laranjeiro if (ret) 1089a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some flows still remain", 1090a170a30dSNélio Laranjeiro dev->data->port_id); 10912b730263SAdrien Mazarguil if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 10922b730263SAdrien Mazarguil unsigned int c = 0; 1093d874a4eeSThomas Monjalon uint16_t port_id; 10942b730263SAdrien Mazarguil 1095fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 1096dbeba4cfSThomas Monjalon struct mlx5_priv *opriv = 1097d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 10982b730263SAdrien Mazarguil 10992b730263SAdrien Mazarguil if (!opriv || 11002b730263SAdrien Mazarguil opriv->domain_id != priv->domain_id || 1101d874a4eeSThomas Monjalon &rte_eth_devices[port_id] == dev) 11022b730263SAdrien Mazarguil continue; 11032b730263SAdrien Mazarguil ++c; 1104f7e95215SViacheslav Ovsiienko break; 11052b730263SAdrien Mazarguil } 11062b730263SAdrien Mazarguil if (!c) 11072b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 11082b730263SAdrien Mazarguil } 1109771fa900SAdrien Mazarguil memset(priv, 0, sizeof(*priv)); 11102b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 111142603bbdSOphir Munk /* 111242603bbdSOphir Munk * Reset mac_addrs to NULL such that it is not freed as part of 111342603bbdSOphir Munk * rte_eth_dev_release_port(). mac_addrs is part of dev_private so 111442603bbdSOphir Munk * it is freed when dev_private is freed. 
111542603bbdSOphir Munk */ 111642603bbdSOphir Munk dev->data->mac_addrs = NULL; 1117771fa900SAdrien Mazarguil } 1118771fa900SAdrien Mazarguil 11190887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops = { 1120e60fbd5bSAdrien Mazarguil .dev_configure = mlx5_dev_configure, 1121e60fbd5bSAdrien Mazarguil .dev_start = mlx5_dev_start, 1122e60fbd5bSAdrien Mazarguil .dev_stop = mlx5_dev_stop, 112362072098SOr Ami .dev_set_link_down = mlx5_set_link_down, 112462072098SOr Ami .dev_set_link_up = mlx5_set_link_up, 1125771fa900SAdrien Mazarguil .dev_close = mlx5_dev_close, 11261bdbe1afSAdrien Mazarguil .promiscuous_enable = mlx5_promiscuous_enable, 11271bdbe1afSAdrien Mazarguil .promiscuous_disable = mlx5_promiscuous_disable, 11281bdbe1afSAdrien Mazarguil .allmulticast_enable = mlx5_allmulticast_enable, 11291bdbe1afSAdrien Mazarguil .allmulticast_disable = mlx5_allmulticast_disable, 1130cb8faed7SAdrien Mazarguil .link_update = mlx5_link_update, 113187011737SAdrien Mazarguil .stats_get = mlx5_stats_get, 113287011737SAdrien Mazarguil .stats_reset = mlx5_stats_reset, 1133a4193ae3SShahaf Shuler .xstats_get = mlx5_xstats_get, 1134a4193ae3SShahaf Shuler .xstats_reset = mlx5_xstats_reset, 1135a4193ae3SShahaf Shuler .xstats_get_names = mlx5_xstats_get_names, 1136714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 1137e60fbd5bSAdrien Mazarguil .dev_infos_get = mlx5_dev_infos_get, 1138e571ad55STom Barbette .read_clock = mlx5_read_clock, 113978a38edfSJianfeng Tan .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 1140e9086978SAdrien Mazarguil .vlan_filter_set = mlx5_vlan_filter_set, 11412e22920bSAdrien Mazarguil .rx_queue_setup = mlx5_rx_queue_setup, 1142e79c9be9SOri Kam .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 11432e22920bSAdrien Mazarguil .tx_queue_setup = mlx5_tx_queue_setup, 1144ae18a1aeSOri Kam .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 11452e22920bSAdrien Mazarguil .rx_queue_release = mlx5_rx_queue_release, 11462e22920bSAdrien Mazarguil .tx_queue_release = mlx5_tx_queue_release, 114702d75430SAdrien Mazarguil .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 114802d75430SAdrien Mazarguil .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 11493318aef7SAdrien Mazarguil .mac_addr_remove = mlx5_mac_addr_remove, 11503318aef7SAdrien Mazarguil .mac_addr_add = mlx5_mac_addr_add, 115186977fccSDavid Marchand .mac_addr_set = mlx5_mac_addr_set, 1152e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 1153cf37ca95SAdrien Mazarguil .mtu_set = mlx5_dev_set_mtu, 1154f3db9489SYaacov Hazan .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 1155f3db9489SYaacov Hazan .vlan_offload_set = mlx5_vlan_offload_set, 1156634efbc2SNelio Laranjeiro .reta_update = mlx5_dev_rss_reta_update, 1157634efbc2SNelio Laranjeiro .reta_query = mlx5_dev_rss_reta_query, 11582f97422eSNelio Laranjeiro .rss_hash_update = mlx5_rss_hash_update, 11592f97422eSNelio Laranjeiro .rss_hash_conf_get = mlx5_rss_hash_conf_get, 116076f5c99eSYaacov Hazan .filter_ctrl = mlx5_dev_filter_ctrl, 11618788fec1SOlivier Matz .rx_descriptor_status = mlx5_rx_descriptor_status, 11628788fec1SOlivier Matz .tx_descriptor_status = mlx5_tx_descriptor_status, 116326f04883STom Barbette .rx_queue_count = mlx5_rx_queue_count, 11643c7d44afSShahaf Shuler .rx_queue_intr_enable = mlx5_rx_intr_enable, 11653c7d44afSShahaf Shuler .rx_queue_intr_disable = mlx5_rx_intr_disable, 1166d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 1167c9ba7523SRaslan Darawsheh .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, 11688a6a09f8SDekel Peled 
.get_module_info = mlx5_get_module_info, 11698a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 1170b6b3bf86SOri Kam .hairpin_cap_get = mlx5_hairpin_cap_get, 1171771fa900SAdrien Mazarguil }; 1172771fa900SAdrien Mazarguil 1173714bf46eSThomas Monjalon /* Available operations from secondary process. */ 117487ec44ceSXueming Li static const struct eth_dev_ops mlx5_dev_sec_ops = { 117587ec44ceSXueming Li .stats_get = mlx5_stats_get, 117687ec44ceSXueming Li .stats_reset = mlx5_stats_reset, 117787ec44ceSXueming Li .xstats_get = mlx5_xstats_get, 117887ec44ceSXueming Li .xstats_reset = mlx5_xstats_reset, 117987ec44ceSXueming Li .xstats_get_names = mlx5_xstats_get_names, 1180714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 118187ec44ceSXueming Li .dev_infos_get = mlx5_dev_infos_get, 118287ec44ceSXueming Li .rx_descriptor_status = mlx5_rx_descriptor_status, 118387ec44ceSXueming Li .tx_descriptor_status = mlx5_tx_descriptor_status, 11848a6a09f8SDekel Peled .get_module_info = mlx5_get_module_info, 11858a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 118687ec44ceSXueming Li }; 118787ec44ceSXueming Li 1188714bf46eSThomas Monjalon /* Available operations in flow isolated mode. */ 11890887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops_isolate = { 11900887aa7fSNélio Laranjeiro .dev_configure = mlx5_dev_configure, 11910887aa7fSNélio Laranjeiro .dev_start = mlx5_dev_start, 11920887aa7fSNélio Laranjeiro .dev_stop = mlx5_dev_stop, 11930887aa7fSNélio Laranjeiro .dev_set_link_down = mlx5_set_link_down, 11940887aa7fSNélio Laranjeiro .dev_set_link_up = mlx5_set_link_up, 11950887aa7fSNélio Laranjeiro .dev_close = mlx5_dev_close, 119624b068adSYongseok Koh .promiscuous_enable = mlx5_promiscuous_enable, 119724b068adSYongseok Koh .promiscuous_disable = mlx5_promiscuous_disable, 11982547ee74SYongseok Koh .allmulticast_enable = mlx5_allmulticast_enable, 11992547ee74SYongseok Koh .allmulticast_disable = mlx5_allmulticast_disable, 12000887aa7fSNélio Laranjeiro .link_update = mlx5_link_update, 12010887aa7fSNélio Laranjeiro .stats_get = mlx5_stats_get, 12020887aa7fSNélio Laranjeiro .stats_reset = mlx5_stats_reset, 12030887aa7fSNélio Laranjeiro .xstats_get = mlx5_xstats_get, 12040887aa7fSNélio Laranjeiro .xstats_reset = mlx5_xstats_reset, 12050887aa7fSNélio Laranjeiro .xstats_get_names = mlx5_xstats_get_names, 1206714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 12070887aa7fSNélio Laranjeiro .dev_infos_get = mlx5_dev_infos_get, 12080887aa7fSNélio Laranjeiro .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 12090887aa7fSNélio Laranjeiro .vlan_filter_set = mlx5_vlan_filter_set, 12100887aa7fSNélio Laranjeiro .rx_queue_setup = mlx5_rx_queue_setup, 1211e79c9be9SOri Kam .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 12120887aa7fSNélio Laranjeiro .tx_queue_setup = mlx5_tx_queue_setup, 1213ae18a1aeSOri Kam .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 12140887aa7fSNélio Laranjeiro .rx_queue_release = mlx5_rx_queue_release, 12150887aa7fSNélio Laranjeiro .tx_queue_release = mlx5_tx_queue_release, 12160887aa7fSNélio Laranjeiro .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 12170887aa7fSNélio Laranjeiro .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 12180887aa7fSNélio Laranjeiro .mac_addr_remove = mlx5_mac_addr_remove, 12190887aa7fSNélio Laranjeiro .mac_addr_add = mlx5_mac_addr_add, 12200887aa7fSNélio Laranjeiro .mac_addr_set = mlx5_mac_addr_set, 1221e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 12220887aa7fSNélio Laranjeiro 
.mtu_set = mlx5_dev_set_mtu, 12230887aa7fSNélio Laranjeiro .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 12240887aa7fSNélio Laranjeiro .vlan_offload_set = mlx5_vlan_offload_set, 12250887aa7fSNélio Laranjeiro .filter_ctrl = mlx5_dev_filter_ctrl, 12260887aa7fSNélio Laranjeiro .rx_descriptor_status = mlx5_rx_descriptor_status, 12270887aa7fSNélio Laranjeiro .tx_descriptor_status = mlx5_tx_descriptor_status, 12280887aa7fSNélio Laranjeiro .rx_queue_intr_enable = mlx5_rx_intr_enable, 12290887aa7fSNélio Laranjeiro .rx_queue_intr_disable = mlx5_rx_intr_disable, 1230d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 12318a6a09f8SDekel Peled .get_module_info = mlx5_get_module_info, 12328a6a09f8SDekel Peled .get_module_eeprom = mlx5_get_module_eeprom, 1233b6b3bf86SOri Kam .hairpin_cap_get = mlx5_hairpin_cap_get, 12340887aa7fSNélio Laranjeiro }; 12350887aa7fSNélio Laranjeiro 1236e72dd09bSNélio Laranjeiro /** 1237e72dd09bSNélio Laranjeiro * Verify and store value for device argument. 1238e72dd09bSNélio Laranjeiro * 1239e72dd09bSNélio Laranjeiro * @param[in] key 1240e72dd09bSNélio Laranjeiro * Key argument to verify. 1241e72dd09bSNélio Laranjeiro * @param[in] val 1242e72dd09bSNélio Laranjeiro * Value associated with key. 1243e72dd09bSNélio Laranjeiro * @param opaque 1244e72dd09bSNélio Laranjeiro * User data. 1245e72dd09bSNélio Laranjeiro * 1246e72dd09bSNélio Laranjeiro * @return 1247a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1248e72dd09bSNélio Laranjeiro */ 1249e72dd09bSNélio Laranjeiro static int 1250e72dd09bSNélio Laranjeiro mlx5_args_check(const char *key, const char *val, void *opaque) 1251e72dd09bSNélio Laranjeiro { 12527fe24446SShahaf Shuler struct mlx5_dev_config *config = opaque; 125399c12dccSNélio Laranjeiro unsigned long tmp; 1254e72dd09bSNélio Laranjeiro 12556de569f5SAdrien Mazarguil /* No-op, port representors are processed in mlx5_dev_spawn(). 
*/ 12566de569f5SAdrien Mazarguil if (!strcmp(MLX5_REPRESENTOR, key)) 12576de569f5SAdrien Mazarguil return 0; 125899c12dccSNélio Laranjeiro errno = 0; 125999c12dccSNélio Laranjeiro tmp = strtoul(val, NULL, 0); 126099c12dccSNélio Laranjeiro if (errno) { 1261a6d83b6aSNélio Laranjeiro rte_errno = errno; 1262a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); 1263a6d83b6aSNélio Laranjeiro return -rte_errno; 126499c12dccSNélio Laranjeiro } 126599c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 12667fe24446SShahaf Shuler config->cqe_comp = !!tmp; 1267bc91e8dbSYongseok Koh } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { 1268bc91e8dbSYongseok Koh config->cqe_pad = !!tmp; 126978c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 127078c7a16dSYongseok Koh config->hw_padding = !!tmp; 12717d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 12727d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 12737d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 12747d6bf6b8SYongseok Koh config->mprq.stride_num_n = tmp; 12757d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 12767d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 12777d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 12787d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 12792a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 1280505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1281505f1fe4SViacheslav Ovsiienko " converted to txq_inline_max", key); 1282505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1283505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { 1284505f1fe4SViacheslav Ovsiienko config->txq_inline_max = tmp; 1285505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { 1286505f1fe4SViacheslav Ovsiienko config->txq_inline_min = tmp; 1287505f1fe4SViacheslav Ovsiienko } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { 1288505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 12892a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 12907fe24446SShahaf Shuler config->txqs_inline = tmp; 129109d8b416SYongseok Koh } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 1292a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 1293230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 1294f9de8718SShahaf Shuler config->mps = !!tmp; 12956ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 1296a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 12976ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 1298505f1fe4SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter," 1299505f1fe4SViacheslav Ovsiienko " converted to txq_inline_mpw", key); 1300505f1fe4SViacheslav Ovsiienko config->txq_inline_mpw = tmp; 13015644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 1302a6bd4911SViacheslav Ovsiienko DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); 13035644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { 13047fe24446SShahaf Shuler config->rx_vec_en = !!tmp; 130578a54648SXueming Li } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { 130678a54648SXueming Li config->l3_vxlan_en = !!tmp; 1307db209cc3SNélio Laranjeiro } else if 
(strcmp(MLX5_VF_NL_EN, key) == 0) { 1308db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 1309e2b4925eSOri Kam } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { 1310e2b4925eSOri Kam config->dv_esw_en = !!tmp; 131151e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 131251e72d38SOri Kam config->dv_flow_en = !!tmp; 1313dceb5029SYongseok Koh } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { 1314dceb5029SYongseok Koh config->mr_ext_memseg_en = !!tmp; 1315066cfecdSMatan Azrad } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { 1316066cfecdSMatan Azrad config->max_dump_files_num = tmp; 131721bb6c7eSDekel Peled } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { 131821bb6c7eSDekel Peled config->lro.timeout = tmp; 131999c12dccSNélio Laranjeiro } else { 1320a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: unknown parameter", key); 1321a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1322a6d83b6aSNélio Laranjeiro return -rte_errno; 1323e72dd09bSNélio Laranjeiro } 132499c12dccSNélio Laranjeiro return 0; 132599c12dccSNélio Laranjeiro } 1326e72dd09bSNélio Laranjeiro 1327e72dd09bSNélio Laranjeiro /** 1328e72dd09bSNélio Laranjeiro * Parse device parameters. 1329e72dd09bSNélio Laranjeiro * 13307fe24446SShahaf Shuler * @param config 13317fe24446SShahaf Shuler * Pointer to device configuration structure. 1332e72dd09bSNélio Laranjeiro * @param devargs 1333e72dd09bSNélio Laranjeiro * Device arguments structure. 1334e72dd09bSNélio Laranjeiro * 1335e72dd09bSNélio Laranjeiro * @return 1336a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1337e72dd09bSNélio Laranjeiro */ 1338e72dd09bSNélio Laranjeiro static int 13397fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1340e72dd09bSNélio Laranjeiro { 1341e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 134299c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1343bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 134478c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 13457d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 13467d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 13477d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 13487d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 13492a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1350505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1351505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1352505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 13532a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 135409d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1355230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 13566ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 13576ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 13585644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 13595644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 136078a54648SXueming Li MLX5_L3_VXLAN_EN, 1361db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1362e2b4925eSOri Kam MLX5_DV_ESW_EN, 136351e72d38SOri Kam MLX5_DV_FLOW_EN, 1364dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 13656de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1366066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 136721bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1368e72dd09bSNélio Laranjeiro NULL, 1369e72dd09bSNélio Laranjeiro }; 1370e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1371e72dd09bSNélio Laranjeiro int ret = 0; 1372e72dd09bSNélio Laranjeiro int i; 1373e72dd09bSNélio Laranjeiro 1374e72dd09bSNélio Laranjeiro if (devargs == NULL) 1375e72dd09bSNélio Laranjeiro return 0; 1376e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. 
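 * For illustration only (the values are hypothetical, the keys come from
 * the params[] list above), the devargs->args string consumed by the
 * rte_kvargs_parse() call below could look like:
 *   "rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=256"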
*/ 1377e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 137815b0ea00SMatan Azrad if (kvlist == NULL) { 137915b0ea00SMatan Azrad rte_errno = EINVAL; 138015b0ea00SMatan Azrad return -rte_errno; 138115b0ea00SMatan Azrad } 1382e72dd09bSNélio Laranjeiro /* Process parameters. */ 1383e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1384e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1385e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 13867fe24446SShahaf Shuler mlx5_args_check, config); 1387a6d83b6aSNélio Laranjeiro if (ret) { 1388a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1389a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1390a6d83b6aSNélio Laranjeiro return -rte_errno; 1391e72dd09bSNélio Laranjeiro } 1392e72dd09bSNélio Laranjeiro } 1393a67323e4SShahaf Shuler } 1394e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1395e72dd09bSNélio Laranjeiro return 0; 1396e72dd09bSNélio Laranjeiro } 1397e72dd09bSNélio Laranjeiro 1398fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver; 1399771fa900SAdrien Mazarguil 14007be600c8SYongseok Koh /** 14017be600c8SYongseok Koh * PMD global initialization. 14027be600c8SYongseok Koh * 14037be600c8SYongseok Koh * Independent from individual device, this function initializes global 14047be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 14057be600c8SYongseok Koh * Hence, each initialization is called once per a process. 14067be600c8SYongseok Koh * 14077be600c8SYongseok Koh * @return 14087be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 14097be600c8SYongseok Koh */ 14107be600c8SYongseok Koh static int 14117be600c8SYongseok Koh mlx5_init_once(void) 14127be600c8SYongseok Koh { 14137be600c8SYongseok Koh struct mlx5_shared_data *sd; 14147be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 1415edf73dd3SAnatoly Burakov int ret = 0; 14167be600c8SYongseok Koh 14177be600c8SYongseok Koh if (mlx5_init_shared_data()) 14187be600c8SYongseok Koh return -rte_errno; 14197be600c8SYongseok Koh sd = mlx5_shared_data; 14207be600c8SYongseok Koh assert(sd); 14217be600c8SYongseok Koh rte_spinlock_lock(&sd->lock); 14227be600c8SYongseok Koh switch (rte_eal_process_type()) { 14237be600c8SYongseok Koh case RTE_PROC_PRIMARY: 14247be600c8SYongseok Koh if (sd->init_done) 14257be600c8SYongseok Koh break; 14267be600c8SYongseok Koh LIST_INIT(&sd->mem_event_cb_list); 14277be600c8SYongseok Koh rte_rwlock_init(&sd->mem_event_rwlock); 14287be600c8SYongseok Koh rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", 14297be600c8SYongseok Koh mlx5_mr_mem_event_cb, NULL); 1430edf73dd3SAnatoly Burakov ret = mlx5_mp_init_primary(); 1431edf73dd3SAnatoly Burakov if (ret) 1432edf73dd3SAnatoly Burakov goto out; 14337be600c8SYongseok Koh sd->init_done = true; 14347be600c8SYongseok Koh break; 14357be600c8SYongseok Koh case RTE_PROC_SECONDARY: 14367be600c8SYongseok Koh if (ld->init_done) 14377be600c8SYongseok Koh break; 1438edf73dd3SAnatoly Burakov ret = mlx5_mp_init_secondary(); 1439edf73dd3SAnatoly Burakov if (ret) 1440edf73dd3SAnatoly Burakov goto out; 14417be600c8SYongseok Koh ++sd->secondary_cnt; 14427be600c8SYongseok Koh ld->init_done = true; 14437be600c8SYongseok Koh break; 14447be600c8SYongseok Koh default: 14457be600c8SYongseok Koh break; 14467be600c8SYongseok Koh } 1447edf73dd3SAnatoly Burakov out: 14487be600c8SYongseok Koh rte_spinlock_unlock(&sd->lock); 1449edf73dd3SAnatoly Burakov return ret; 
14507be600c8SYongseok Koh }
14517be600c8SYongseok Koh
14527be600c8SYongseok Koh /**
145338b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE
145438b4b397SViacheslav Ovsiienko * while sending packets.
145538b4b397SViacheslav Ovsiienko *
145638b4b397SViacheslav Ovsiienko * - the txq_inline_min has the highest priority, if this
145738b4b397SViacheslav Ovsiienko * key is specified in devargs
145838b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the
145938b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed).
146038b4b397SViacheslav Ovsiienko * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX
146138b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs.
146238b4b397SViacheslav Ovsiienko *
146338b4b397SViacheslav Ovsiienko * @param spawn
146438b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn.
146538b4b397SViacheslav Ovsiienko * @param config
146638b4b397SViacheslav Ovsiienko * Device configuration parameters.
146738b4b397SViacheslav Ovsiienko */
146838b4b397SViacheslav Ovsiienko static void
146938b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
147038b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config)
147138b4b397SViacheslav Ovsiienko {
147238b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) {
147338b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. */
147438b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) {
147538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
147638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
147738b4b397SViacheslav Ovsiienko if (config->txq_inline_min <
147838b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) {
147938b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG,
148038b4b397SViacheslav Ovsiienko "txq_inline_min aligned to minimal"
148138b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d",
148238b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2);
148338b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
148438b4b397SViacheslav Ovsiienko }
148538b4b397SViacheslav Ovsiienko break;
148638b4b397SViacheslav Ovsiienko }
148738b4b397SViacheslav Ovsiienko goto exit;
148838b4b397SViacheslav Ovsiienko }
148938b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) {
149038b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */
149138b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) {
149238b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2:
149338b4b397SViacheslav Ovsiienko /* Outer L2 header must be inlined. */
149438b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
149538b4b397SViacheslav Ovsiienko goto exit;
149638b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
149738b4b397SViacheslav Ovsiienko /* No inline data is required by the NIC.
*/ 149838b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 149938b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 150038b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 150138b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 150238b4b397SViacheslav Ovsiienko goto exit; 150338b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 150438b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 150538b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 150638b4b397SViacheslav Ovsiienko break; 150738b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 150838b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 150938b4b397SViacheslav Ovsiienko config->txq_inline_min = 151038b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 151138b4b397SViacheslav Ovsiienko goto exit; 151238b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 151338b4b397SViacheslav Ovsiienko config->txq_inline_min = 151438b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 151538b4b397SViacheslav Ovsiienko goto exit; 151638b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 151738b4b397SViacheslav Ovsiienko config->txq_inline_min = 151838b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 151938b4b397SViacheslav Ovsiienko goto exit; 152038b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 152138b4b397SViacheslav Ovsiienko config->txq_inline_min = 152238b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 152338b4b397SViacheslav Ovsiienko goto exit; 152438b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 152538b4b397SViacheslav Ovsiienko config->txq_inline_min = 152638b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 152738b4b397SViacheslav Ovsiienko goto exit; 152838b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 152938b4b397SViacheslav Ovsiienko config->txq_inline_min = 153038b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 153138b4b397SViacheslav Ovsiienko goto exit; 153238b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 153338b4b397SViacheslav Ovsiienko config->txq_inline_min = 153438b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 153538b4b397SViacheslav Ovsiienko goto exit; 153638b4b397SViacheslav Ovsiienko } 153738b4b397SViacheslav Ovsiienko } 153838b4b397SViacheslav Ovsiienko } 153938b4b397SViacheslav Ovsiienko /* 154038b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 154138b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 154238b4b397SViacheslav Ovsiienko * to determine old NICs. 
154338b4b397SViacheslav Ovsiienko */ 154438b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 154538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 154638b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 154738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 154838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1549614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 155038b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 155138b4b397SViacheslav Ovsiienko break; 155238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 155338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 155438b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 155538b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 155638b4b397SViacheslav Ovsiienko /* 155738b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 155838b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 155938b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 156038b4b397SViacheslav Ovsiienko */ 156138b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 156220215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 156338b4b397SViacheslav Ovsiienko break; 156438b4b397SViacheslav Ovsiienko default: 156538b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 156638b4b397SViacheslav Ovsiienko break; 156738b4b397SViacheslav Ovsiienko } 156838b4b397SViacheslav Ovsiienko exit: 156938b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 157038b4b397SViacheslav Ovsiienko } 157138b4b397SViacheslav Ovsiienko 157238b4b397SViacheslav Ovsiienko /** 157321cae858SDekel Peled * Allocate page of door-bells and register it using DevX API. 157421cae858SDekel Peled * 157521cae858SDekel Peled * @param [in] dev 157621cae858SDekel Peled * Pointer to Ethernet device. 157721cae858SDekel Peled * 157821cae858SDekel Peled * @return 157921cae858SDekel Peled * Pointer to new page on success, NULL otherwise. 158021cae858SDekel Peled */ 158121cae858SDekel Peled static struct mlx5_devx_dbr_page * 158221cae858SDekel Peled mlx5_alloc_dbr_page(struct rte_eth_dev *dev) 158321cae858SDekel Peled { 158421cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 158521cae858SDekel Peled struct mlx5_devx_dbr_page *page; 158621cae858SDekel Peled 158721cae858SDekel Peled /* Allocate space for door-bell page and management data. */ 158821cae858SDekel Peled page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page), 158921cae858SDekel Peled RTE_CACHE_LINE_SIZE, dev->device->numa_node); 159021cae858SDekel Peled if (!page) { 159121cae858SDekel Peled DRV_LOG(ERR, "port %u cannot allocate dbr page", 159221cae858SDekel Peled dev->data->port_id); 159321cae858SDekel Peled return NULL; 159421cae858SDekel Peled } 159521cae858SDekel Peled /* Register allocated memory. 
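 * The whole page is registered as a DevX umem below so that the device
 * can access the door-bell records allocated from it.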
*/ 159621cae858SDekel Peled page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs, 159721cae858SDekel Peled MLX5_DBR_PAGE_SIZE, 0); 159821cae858SDekel Peled if (!page->umem) { 159921cae858SDekel Peled DRV_LOG(ERR, "port %u cannot umem reg dbr page", 160021cae858SDekel Peled dev->data->port_id); 160121cae858SDekel Peled rte_free(page); 160221cae858SDekel Peled return NULL; 160321cae858SDekel Peled } 160421cae858SDekel Peled return page; 160521cae858SDekel Peled } 160621cae858SDekel Peled 160721cae858SDekel Peled /** 160821cae858SDekel Peled * Find the next available door-bell, allocate new page if needed. 160921cae858SDekel Peled * 161021cae858SDekel Peled * @param [in] dev 161121cae858SDekel Peled * Pointer to Ethernet device. 161221cae858SDekel Peled * @param [out] dbr_page 161321cae858SDekel Peled * Door-bell page containing the page data. 161421cae858SDekel Peled * 161521cae858SDekel Peled * @return 161621cae858SDekel Peled * Door-bell address offset on success, a negative error value otherwise. 161721cae858SDekel Peled */ 161821cae858SDekel Peled int64_t 161921cae858SDekel Peled mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) 162021cae858SDekel Peled { 162121cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 162221cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 162321cae858SDekel Peled uint32_t i, j; 162421cae858SDekel Peled 162521cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 162621cae858SDekel Peled if (page->dbr_count < MLX5_DBR_PER_PAGE) 162721cae858SDekel Peled break; 162821cae858SDekel Peled if (!page) { /* No page with free door-bell exists. */ 162921cae858SDekel Peled page = mlx5_alloc_dbr_page(dev); 163021cae858SDekel Peled if (!page) /* Failed to allocate new page. */ 163121cae858SDekel Peled return (-1); 163221cae858SDekel Peled LIST_INSERT_HEAD(&priv->dbrpgs, page, next); 163321cae858SDekel Peled } 163421cae858SDekel Peled /* Loop to find bitmap part with clear bit. */ 163521cae858SDekel Peled for (i = 0; 163621cae858SDekel Peled i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX; 163721cae858SDekel Peled i++) 163821cae858SDekel Peled ; /* Empty. */ 163921cae858SDekel Peled /* Find the first clear bit. */ 164021cae858SDekel Peled j = rte_bsf64(~page->dbr_bitmap[i]); 164121cae858SDekel Peled assert(i < (MLX5_DBR_PER_PAGE / 64)); 164221cae858SDekel Peled page->dbr_bitmap[i] |= (1 << j); 164321cae858SDekel Peled page->dbr_count++; 164421cae858SDekel Peled *dbr_page = page; 164521cae858SDekel Peled return (((i * 64) + j) * sizeof(uint64_t)); 164621cae858SDekel Peled } 164721cae858SDekel Peled 164821cae858SDekel Peled /** 164921cae858SDekel Peled * Release a door-bell record. 165021cae858SDekel Peled * 165121cae858SDekel Peled * @param [in] dev 165221cae858SDekel Peled * Pointer to Ethernet device. 165321cae858SDekel Peled * @param [in] umem_id 165421cae858SDekel Peled * UMEM ID of page containing the door-bell record to release. 165521cae858SDekel Peled * @param [in] offset 165621cae858SDekel Peled * Offset of door-bell record in page. 165721cae858SDekel Peled * 165821cae858SDekel Peled * @return 165921cae858SDekel Peled * 0 on success, a negative error value otherwise. 
166021cae858SDekel Peled */ 166121cae858SDekel Peled int32_t 166221cae858SDekel Peled mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) 166321cae858SDekel Peled { 166421cae858SDekel Peled struct mlx5_priv *priv = dev->data->dev_private; 166521cae858SDekel Peled struct mlx5_devx_dbr_page *page = NULL; 166621cae858SDekel Peled int ret = 0; 166721cae858SDekel Peled 166821cae858SDekel Peled LIST_FOREACH(page, &priv->dbrpgs, next) 166921cae858SDekel Peled /* Find the page this address belongs to. */ 167021cae858SDekel Peled if (page->umem->umem_id == umem_id) 167121cae858SDekel Peled break; 167221cae858SDekel Peled if (!page) 167321cae858SDekel Peled return -EINVAL; 167421cae858SDekel Peled page->dbr_count--; 167521cae858SDekel Peled if (!page->dbr_count) { 167621cae858SDekel Peled /* Page not used, free it and remove from list. */ 167721cae858SDekel Peled LIST_REMOVE(page, next); 167821cae858SDekel Peled if (page->umem) 167921cae858SDekel Peled ret = -mlx5_glue->devx_umem_dereg(page->umem); 168021cae858SDekel Peled rte_free(page); 168121cae858SDekel Peled } else { 168221cae858SDekel Peled /* Mark in bitmap that this door-bell is not in use. */ 1683a88209b0SDekel Peled offset /= MLX5_DBR_SIZE; 168421cae858SDekel Peled int i = offset / 64; 168521cae858SDekel Peled int j = offset % 64; 168621cae858SDekel Peled 168721cae858SDekel Peled page->dbr_bitmap[i] &= ~(1 << j); 168821cae858SDekel Peled } 168921cae858SDekel Peled return ret; 169021cae858SDekel Peled } 169121cae858SDekel Peled 169221cae858SDekel Peled /** 169392d5dd48SViacheslav Ovsiienko * Check sibling device configurations. 169492d5dd48SViacheslav Ovsiienko * 169592d5dd48SViacheslav Ovsiienko * Sibling devices sharing the Infiniband device context 169692d5dd48SViacheslav Ovsiienko * should have compatible configurations. This regards 169792d5dd48SViacheslav Ovsiienko * representors and bonding slaves. 169892d5dd48SViacheslav Ovsiienko * 169992d5dd48SViacheslav Ovsiienko * @param priv 170092d5dd48SViacheslav Ovsiienko * Private device descriptor. 170192d5dd48SViacheslav Ovsiienko * @param config 170292d5dd48SViacheslav Ovsiienko * Configuration of the device is going to be created. 170392d5dd48SViacheslav Ovsiienko * 170492d5dd48SViacheslav Ovsiienko * @return 170592d5dd48SViacheslav Ovsiienko * 0 on success, EINVAL otherwise 170692d5dd48SViacheslav Ovsiienko */ 170792d5dd48SViacheslav Ovsiienko static int 170892d5dd48SViacheslav Ovsiienko mlx5_dev_check_sibling_config(struct mlx5_priv *priv, 170992d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *config) 171092d5dd48SViacheslav Ovsiienko { 171192d5dd48SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = priv->sh; 171292d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *sh_conf = NULL; 171392d5dd48SViacheslav Ovsiienko uint16_t port_id; 171492d5dd48SViacheslav Ovsiienko 171592d5dd48SViacheslav Ovsiienko assert(sh); 171692d5dd48SViacheslav Ovsiienko /* Nothing to compare for the single/first device. */ 171792d5dd48SViacheslav Ovsiienko if (sh->refcnt == 1) 171892d5dd48SViacheslav Ovsiienko return 0; 171992d5dd48SViacheslav Ovsiienko /* Find the device with shared context. 
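 * Any sibling port already probed on the same shared context (opriv->sh
 * matching ours) provides the reference configuration to compare against.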
*/ 1720fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 172192d5dd48SViacheslav Ovsiienko struct mlx5_priv *opriv = 172292d5dd48SViacheslav Ovsiienko rte_eth_devices[port_id].data->dev_private; 172392d5dd48SViacheslav Ovsiienko 172492d5dd48SViacheslav Ovsiienko if (opriv && opriv != priv && opriv->sh == sh) { 172592d5dd48SViacheslav Ovsiienko sh_conf = &opriv->config; 172692d5dd48SViacheslav Ovsiienko break; 172792d5dd48SViacheslav Ovsiienko } 172892d5dd48SViacheslav Ovsiienko } 172992d5dd48SViacheslav Ovsiienko if (!sh_conf) 173092d5dd48SViacheslav Ovsiienko return 0; 173192d5dd48SViacheslav Ovsiienko if (sh_conf->dv_flow_en ^ config->dv_flow_en) { 173292d5dd48SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch" 173392d5dd48SViacheslav Ovsiienko " for shared %s context", sh->ibdev_name); 173492d5dd48SViacheslav Ovsiienko rte_errno = EINVAL; 173592d5dd48SViacheslav Ovsiienko return rte_errno; 173692d5dd48SViacheslav Ovsiienko } 173792d5dd48SViacheslav Ovsiienko return 0; 173892d5dd48SViacheslav Ovsiienko } 173992d5dd48SViacheslav Ovsiienko /** 1740f38c5457SAdrien Mazarguil * Spawn an Ethernet device from Verbs information. 1741771fa900SAdrien Mazarguil * 1742f38c5457SAdrien Mazarguil * @param dpdk_dev 1743f38c5457SAdrien Mazarguil * Backing DPDK device. 1744ad74bc61SViacheslav Ovsiienko * @param spawn 1745ad74bc61SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 1746f87bfa8eSYongseok Koh * @param config 1747f87bfa8eSYongseok Koh * Device configuration parameters. 1748771fa900SAdrien Mazarguil * 1749771fa900SAdrien Mazarguil * @return 1750f38c5457SAdrien Mazarguil * A valid Ethernet device object on success, NULL otherwise and rte_errno 1751206254b7SOphir Munk * is set. The following errors are defined: 17526de569f5SAdrien Mazarguil * 17536de569f5SAdrien Mazarguil * EBUSY: device is not supposed to be spawned. 
1754206254b7SOphir Munk * EEXIST: device is already spawned 1755771fa900SAdrien Mazarguil */ 1756f38c5457SAdrien Mazarguil static struct rte_eth_dev * 1757f38c5457SAdrien Mazarguil mlx5_dev_spawn(struct rte_device *dpdk_dev, 1758ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data *spawn, 1759ad74bc61SViacheslav Ovsiienko struct mlx5_dev_config config) 1760771fa900SAdrien Mazarguil { 1761ad74bc61SViacheslav Ovsiienko const struct mlx5_switch_info *switch_info = &spawn->info; 176217e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = NULL; 176368128934SAdrien Mazarguil struct ibv_port_attr port_attr; 17646057a10bSAdrien Mazarguil struct mlx5dv_context dv_attr = { .comp_mask = 0 }; 17659083982cSAdrien Mazarguil struct rte_eth_dev *eth_dev = NULL; 1766dbeba4cfSThomas Monjalon struct mlx5_priv *priv = NULL; 1767771fa900SAdrien Mazarguil int err = 0; 176878c7a16dSYongseok Koh unsigned int hw_padding = 0; 1769e192ef80SYaacov Hazan unsigned int mps; 1770523f5a74SYongseok Koh unsigned int cqe_comp; 1771bc91e8dbSYongseok Koh unsigned int cqe_pad = 0; 1772772d3435SXueming Li unsigned int tunnel_en = 0; 17731f106da2SMatan Azrad unsigned int mpls_en = 0; 17745f8ba81cSXueming Li unsigned int swp = 0; 17757d6bf6b8SYongseok Koh unsigned int mprq = 0; 17767d6bf6b8SYongseok Koh unsigned int mprq_min_stride_size_n = 0; 17777d6bf6b8SYongseok Koh unsigned int mprq_max_stride_size_n = 0; 17787d6bf6b8SYongseok Koh unsigned int mprq_min_stride_num_n = 0; 17797d6bf6b8SYongseok Koh unsigned int mprq_max_stride_num_n = 0; 17806d13ea8eSOlivier Matz struct rte_ether_addr mac; 178168128934SAdrien Mazarguil char name[RTE_ETH_NAME_MAX_LEN]; 17822b730263SAdrien Mazarguil int own_domain_id = 0; 1783206254b7SOphir Munk uint16_t port_id; 17842b730263SAdrien Mazarguil unsigned int i; 1785d5c06b1bSViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR_DEVX_PORT 1786d5c06b1bSViacheslav Ovsiienko struct mlx5dv_devx_port devx_port; 1787d5c06b1bSViacheslav Ovsiienko #endif 1788771fa900SAdrien Mazarguil 17896de569f5SAdrien Mazarguil /* Determine if this port representor is supposed to be spawned. */ 17906de569f5SAdrien Mazarguil if (switch_info->representor && dpdk_dev->devargs) { 17916de569f5SAdrien Mazarguil struct rte_eth_devargs eth_da; 17926de569f5SAdrien Mazarguil 17936de569f5SAdrien Mazarguil err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); 17946de569f5SAdrien Mazarguil if (err) { 17956de569f5SAdrien Mazarguil rte_errno = -err; 17966de569f5SAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 17976de569f5SAdrien Mazarguil strerror(rte_errno)); 17986de569f5SAdrien Mazarguil return NULL; 17996de569f5SAdrien Mazarguil } 18006de569f5SAdrien Mazarguil for (i = 0; i < eth_da.nb_representor_ports; ++i) 18016de569f5SAdrien Mazarguil if (eth_da.representor_ports[i] == 18026de569f5SAdrien Mazarguil (uint16_t)switch_info->port_name) 18036de569f5SAdrien Mazarguil break; 18046de569f5SAdrien Mazarguil if (i == eth_da.nb_representor_ports) { 18056de569f5SAdrien Mazarguil rte_errno = EBUSY; 18066de569f5SAdrien Mazarguil return NULL; 18076de569f5SAdrien Mazarguil } 18086de569f5SAdrien Mazarguil } 1809206254b7SOphir Munk /* Build device name. */ 181010dadfcbSViacheslav Ovsiienko if (spawn->pf_bond < 0) { 181110dadfcbSViacheslav Ovsiienko /* Single device. 
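 * The ethdev either keeps the DPDK device name as-is or gets a
 * "_representor_<port>" suffix, e.g. a hypothetical
 * "0000:03:00.0_representor_2".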
*/ 1812206254b7SOphir Munk if (!switch_info->representor) 181309c9c4d2SThomas Monjalon strlcpy(name, dpdk_dev->name, sizeof(name)); 1814206254b7SOphir Munk else 1815206254b7SOphir Munk snprintf(name, sizeof(name), "%s_representor_%u", 1816206254b7SOphir Munk dpdk_dev->name, switch_info->port_name); 181710dadfcbSViacheslav Ovsiienko } else { 181810dadfcbSViacheslav Ovsiienko /* Bonding device. */ 181910dadfcbSViacheslav Ovsiienko if (!switch_info->representor) 182010dadfcbSViacheslav Ovsiienko snprintf(name, sizeof(name), "%s_%s", 182110dadfcbSViacheslav Ovsiienko dpdk_dev->name, spawn->ibv_dev->name); 182210dadfcbSViacheslav Ovsiienko else 182310dadfcbSViacheslav Ovsiienko snprintf(name, sizeof(name), "%s_%s_representor_%u", 182410dadfcbSViacheslav Ovsiienko dpdk_dev->name, spawn->ibv_dev->name, 182510dadfcbSViacheslav Ovsiienko switch_info->port_name); 182610dadfcbSViacheslav Ovsiienko } 1827206254b7SOphir Munk /* check if the device is already spawned */ 1828206254b7SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 1829206254b7SOphir Munk rte_errno = EEXIST; 1830206254b7SOphir Munk return NULL; 1831206254b7SOphir Munk } 183217e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 183317e19bc4SViacheslav Ovsiienko if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 183417e19bc4SViacheslav Ovsiienko eth_dev = rte_eth_dev_attach_secondary(name); 183517e19bc4SViacheslav Ovsiienko if (eth_dev == NULL) { 183617e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "can not attach rte ethdev"); 183717e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 1838f38c5457SAdrien Mazarguil return NULL; 1839771fa900SAdrien Mazarguil } 184017e19bc4SViacheslav Ovsiienko eth_dev->device = dpdk_dev; 184117e19bc4SViacheslav Ovsiienko eth_dev->dev_ops = &mlx5_dev_sec_ops; 1842120dc4a7SYongseok Koh err = mlx5_proc_priv_init(eth_dev); 1843120dc4a7SYongseok Koh if (err) 1844120dc4a7SYongseok Koh return NULL; 184517e19bc4SViacheslav Ovsiienko /* Receive command fd from primary process */ 18469a8ab29bSYongseok Koh err = mlx5_mp_req_verbs_cmd_fd(eth_dev); 184717e19bc4SViacheslav Ovsiienko if (err < 0) 184817e19bc4SViacheslav Ovsiienko return NULL; 184917e19bc4SViacheslav Ovsiienko /* Remap UAR for Tx queues. */ 1850120dc4a7SYongseok Koh err = mlx5_tx_uar_init_secondary(eth_dev, err); 185117e19bc4SViacheslav Ovsiienko if (err) 185217e19bc4SViacheslav Ovsiienko return NULL; 185317e19bc4SViacheslav Ovsiienko /* 185417e19bc4SViacheslav Ovsiienko * Ethdev pointer is still required as input since 185517e19bc4SViacheslav Ovsiienko * the primary device is not accessible from the 185617e19bc4SViacheslav Ovsiienko * secondary process. 
185717e19bc4SViacheslav Ovsiienko */ 185817e19bc4SViacheslav Ovsiienko eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); 185917e19bc4SViacheslav Ovsiienko eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); 186017e19bc4SViacheslav Ovsiienko return eth_dev; 1861f5bf91deSMoti Haimovsky } 186217e19bc4SViacheslav Ovsiienko sh = mlx5_alloc_shared_ibctx(spawn); 186317e19bc4SViacheslav Ovsiienko if (!sh) 186417e19bc4SViacheslav Ovsiienko return NULL; 186517e19bc4SViacheslav Ovsiienko config.devx = sh->devx; 18663075bd23SDekel Peled #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR 18673075bd23SDekel Peled config.dest_tir = 1; 18683075bd23SDekel Peled #endif 18695f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 18706057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; 18715f8ba81cSXueming Li #endif 187243e9d979SShachar Beiser /* 187343e9d979SShachar Beiser * Multi-packet send is supported by ConnectX-4 Lx PF as well 187443e9d979SShachar Beiser * as all ConnectX-5 devices. 187543e9d979SShachar Beiser */ 1876038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 18776057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; 1878038e7251SShahaf Shuler #endif 18797d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 18806057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; 18817d6bf6b8SYongseok Koh #endif 188217e19bc4SViacheslav Ovsiienko mlx5_glue->dv_query_device(sh->ctx, &dv_attr); 18836057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { 18846057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { 1885a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "enhanced MPW is supported"); 188643e9d979SShachar Beiser mps = MLX5_MPW_ENHANCED; 188743e9d979SShachar Beiser } else { 1888a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW is supported"); 1889e589960cSYongseok Koh mps = MLX5_MPW; 1890e589960cSYongseok Koh } 1891e589960cSYongseok Koh } else { 1892a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW isn't supported"); 189343e9d979SShachar Beiser mps = MLX5_MPW_DISABLED; 189443e9d979SShachar Beiser } 18955f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 18966057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) 18976057a10bSAdrien Mazarguil swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; 18985f8ba81cSXueming Li DRV_LOG(DEBUG, "SWP support: %u", swp); 18995f8ba81cSXueming Li #endif 190068128934SAdrien Mazarguil config.swp = !!swp; 19017d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 19026057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { 19037d6bf6b8SYongseok Koh struct mlx5dv_striding_rq_caps mprq_caps = 19046057a10bSAdrien Mazarguil dv_attr.striding_rq_caps; 19057d6bf6b8SYongseok Koh 19067d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", 19077d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes); 19087d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", 19097d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes); 19107d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", 19117d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides); 19127d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", 19137d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides); 19147d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tsupported_qpts: %d", 19157d6bf6b8SYongseok Koh 
mprq_caps.supported_qpts); 19167d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); 19177d6bf6b8SYongseok Koh mprq = 1; 19187d6bf6b8SYongseok Koh mprq_min_stride_size_n = 19197d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes; 19207d6bf6b8SYongseok Koh mprq_max_stride_size_n = 19217d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes; 19227d6bf6b8SYongseok Koh mprq_min_stride_num_n = 19237d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides; 19247d6bf6b8SYongseok Koh mprq_max_stride_num_n = 19257d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides; 192668128934SAdrien Mazarguil config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 192768128934SAdrien Mazarguil mprq_min_stride_num_n); 19287d6bf6b8SYongseok Koh } 19297d6bf6b8SYongseok Koh #endif 1930523f5a74SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128 && 19316057a10bSAdrien Mazarguil !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) 1932523f5a74SYongseok Koh cqe_comp = 0; 1933523f5a74SYongseok Koh else 1934523f5a74SYongseok Koh cqe_comp = 1; 193568128934SAdrien Mazarguil config.cqe_comp = cqe_comp; 1936bc91e8dbSYongseok Koh #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1937bc91e8dbSYongseok Koh /* Whether device supports 128B Rx CQE padding. */ 1938bc91e8dbSYongseok Koh cqe_pad = RTE_CACHE_LINE_SIZE == 128 && 1939bc91e8dbSYongseok Koh (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); 1940bc91e8dbSYongseok Koh #endif 1941038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 19426057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { 19436057a10bSAdrien Mazarguil tunnel_en = ((dv_attr.tunnel_offloads_caps & 1944038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && 19456057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 1946038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); 1947038e7251SShahaf Shuler } 1948a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "tunnel offloading is %ssupported", 1949a170a30dSNélio Laranjeiro tunnel_en ? "" : "not "); 1950038e7251SShahaf Shuler #else 1951a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 1952a170a30dSNélio Laranjeiro "tunnel offloading disabled due to old OFED/rdma-core version"); 1953038e7251SShahaf Shuler #endif 195468128934SAdrien Mazarguil config.tunnel_en = tunnel_en; 19551f106da2SMatan Azrad #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 19566057a10bSAdrien Mazarguil mpls_en = ((dv_attr.tunnel_offloads_caps & 19571f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && 19586057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 19591f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); 19601f106da2SMatan Azrad DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", 19611f106da2SMatan Azrad mpls_en ? "" : "not "); 19621f106da2SMatan Azrad #else 19631f106da2SMatan Azrad DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" 19641f106da2SMatan Azrad " old OFED/rdma-core version or firmware configuration"); 19651f106da2SMatan Azrad #endif 196668128934SAdrien Mazarguil config.mpls_en = mpls_en; 1967771fa900SAdrien Mazarguil /* Check port status. 
*/ 196817e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); 1969771fa900SAdrien Mazarguil if (err) { 1970a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port query failed: %s", strerror(err)); 19719083982cSAdrien Mazarguil goto error; 1972771fa900SAdrien Mazarguil } 19731371f4dfSOr Ami if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 19749083982cSAdrien Mazarguil DRV_LOG(ERR, "port is not configured in Ethernet mode"); 1975e1c3e305SMatan Azrad err = EINVAL; 19769083982cSAdrien Mazarguil goto error; 19771371f4dfSOr Ami } 1978771fa900SAdrien Mazarguil if (port_attr.state != IBV_PORT_ACTIVE) 19799083982cSAdrien Mazarguil DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 1980a170a30dSNélio Laranjeiro mlx5_glue->port_state_str(port_attr.state), 1981771fa900SAdrien Mazarguil port_attr.state); 198217e19bc4SViacheslav Ovsiienko /* Allocate private eth device data. */ 1983771fa900SAdrien Mazarguil priv = rte_zmalloc("ethdev private structure", 1984771fa900SAdrien Mazarguil sizeof(*priv), 1985771fa900SAdrien Mazarguil RTE_CACHE_LINE_SIZE); 1986771fa900SAdrien Mazarguil if (priv == NULL) { 1987a170a30dSNélio Laranjeiro DRV_LOG(ERR, "priv allocation failure"); 1988771fa900SAdrien Mazarguil err = ENOMEM; 19899083982cSAdrien Mazarguil goto error; 1990771fa900SAdrien Mazarguil } 199117e19bc4SViacheslav Ovsiienko priv->sh = sh; 199217e19bc4SViacheslav Ovsiienko priv->ibv_port = spawn->ibv_port; 199346e10a4cSViacheslav Ovsiienko priv->pci_dev = spawn->pci_dev; 199435b2d13fSOlivier Matz priv->mtu = RTE_ETHER_MTU; 19956bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64 19966bf10ab6SMoti Haimovsky /* Initialize UAR access locks for 32bit implementations. */ 19976bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock_cq); 19986bf10ab6SMoti Haimovsky for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) 19996bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock[i]); 20006bf10ab6SMoti Haimovsky #endif 200126c08b97SAdrien Mazarguil /* Some internal functions rely on Netlink sockets, open them now. */ 20025366074bSNelio Laranjeiro priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 20035366074bSNelio Laranjeiro priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 200426c08b97SAdrien Mazarguil priv->nl_sn = 0; 20052b730263SAdrien Mazarguil priv->representor = !!switch_info->representor; 2006299d7dc2SViacheslav Ovsiienko priv->master = !!switch_info->master; 20072b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 2008d5c06b1bSViacheslav Ovsiienko priv->vport_meta_tag = 0; 2009d5c06b1bSViacheslav Ovsiienko priv->vport_meta_mask = 0; 2010bee57a0aSViacheslav Ovsiienko priv->pf_bond = spawn->pf_bond; 2011d5c06b1bSViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR_DEVX_PORT 2012299d7dc2SViacheslav Ovsiienko /* 2013d5c06b1bSViacheslav Ovsiienko * The DevX port query API is implemented. E-Switch may use 2014d5c06b1bSViacheslav Ovsiienko * either vport or reg_c[0] metadata register to match on 2015d5c06b1bSViacheslav Ovsiienko * vport index. The engaged part of metadata register is 2016d5c06b1bSViacheslav Ovsiienko * defined by mask. 
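 * If the devx_port_query() call below fails, comp_mask is cleared and the
 * driver falls back to deriving the vport index from the representor
 * port name.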
2017d5c06b1bSViacheslav Ovsiienko */ 2018d5c06b1bSViacheslav Ovsiienko devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT | 2019d5c06b1bSViacheslav Ovsiienko MLX5DV_DEVX_PORT_MATCH_REG_C_0; 2020cc8627bcSViacheslav Ovsiienko err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port, &devx_port); 2021d5c06b1bSViacheslav Ovsiienko if (err) { 2022*06fa6988SDekel Peled DRV_LOG(WARNING, "can't query devx port %d on device %s", 2023d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2024d5c06b1bSViacheslav Ovsiienko devx_port.comp_mask = 0; 2025d5c06b1bSViacheslav Ovsiienko } 2026d5c06b1bSViacheslav Ovsiienko if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) { 2027d5c06b1bSViacheslav Ovsiienko priv->vport_meta_tag = devx_port.reg_c_0.value; 2028d5c06b1bSViacheslav Ovsiienko priv->vport_meta_mask = devx_port.reg_c_0.mask; 2029d5c06b1bSViacheslav Ovsiienko if (!priv->vport_meta_mask) { 2030d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "vport zero mask for port %d" 2031*06fa6988SDekel Peled " on bonding device %s", 2032d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2033d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 2034d5c06b1bSViacheslav Ovsiienko goto error; 2035d5c06b1bSViacheslav Ovsiienko } 2036d5c06b1bSViacheslav Ovsiienko if (priv->vport_meta_tag & ~priv->vport_meta_mask) { 2037d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "invalid vport tag for port %d" 2038*06fa6988SDekel Peled " on bonding device %s", 2039d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2040d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 2041d5c06b1bSViacheslav Ovsiienko goto error; 2042d5c06b1bSViacheslav Ovsiienko } 2043d5c06b1bSViacheslav Ovsiienko } else if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) { 2044d5c06b1bSViacheslav Ovsiienko priv->vport_id = devx_port.vport_num; 2045d5c06b1bSViacheslav Ovsiienko } else if (spawn->pf_bond >= 0) { 2046d5c06b1bSViacheslav Ovsiienko DRV_LOG(ERR, "can't deduce vport index for port %d" 2047*06fa6988SDekel Peled " on bonding device %s", 2048d5c06b1bSViacheslav Ovsiienko spawn->ibv_port, spawn->ibv_dev->name); 2049d5c06b1bSViacheslav Ovsiienko err = ENOTSUP; 2050d5c06b1bSViacheslav Ovsiienko goto error; 2051d5c06b1bSViacheslav Ovsiienko } else { 2052d5c06b1bSViacheslav Ovsiienko /* Suppose vport index in compatible way. */ 2053d5c06b1bSViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 2054d5c06b1bSViacheslav Ovsiienko switch_info->port_name + 1 : -1; 2055d5c06b1bSViacheslav Ovsiienko } 2056d5c06b1bSViacheslav Ovsiienko #else 2057d5c06b1bSViacheslav Ovsiienko /* 2058d5c06b1bSViacheslav Ovsiienko * Kernel/rdma_core support single E-Switch per PF configurations 2059299d7dc2SViacheslav Ovsiienko * only and vport_id field contains the vport index for 2060299d7dc2SViacheslav Ovsiienko * associated VF, which is deduced from representor port name. 2061ae4eb7dcSViacheslav Ovsiienko * For example, let's have the IB device port 10, it has 2062299d7dc2SViacheslav Ovsiienko * attached network device eth0, which has port name attribute 2063299d7dc2SViacheslav Ovsiienko * pf0vf2, we can deduce the VF number as 2, and set vport index 2064299d7dc2SViacheslav Ovsiienko * as 3 (2+1). This assigning schema should be changed if the 2065299d7dc2SViacheslav Ovsiienko * multiple E-Switch instances per PF configurations or/and PCI 2066299d7dc2SViacheslav Ovsiienko * subfunctions are added. 2067299d7dc2SViacheslav Ovsiienko */ 2068299d7dc2SViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 
2069299d7dc2SViacheslav Ovsiienko switch_info->port_name + 1 : -1; 2070d5c06b1bSViacheslav Ovsiienko #endif 2071d5c06b1bSViacheslav Ovsiienko /* representor_id field keeps the unmodified VF index. */ 2072299d7dc2SViacheslav Ovsiienko priv->representor_id = switch_info->representor ? 2073299d7dc2SViacheslav Ovsiienko switch_info->port_name : -1; 20742b730263SAdrien Mazarguil /* 20752b730263SAdrien Mazarguil * Look for sibling devices in order to reuse their switch domain 20762b730263SAdrien Mazarguil * if any, otherwise allocate one. 20772b730263SAdrien Mazarguil */ 2078fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 2079dbeba4cfSThomas Monjalon const struct mlx5_priv *opriv = 2080d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 20812b730263SAdrien Mazarguil 20822b730263SAdrien Mazarguil if (!opriv || 2083f7e95215SViacheslav Ovsiienko opriv->sh != priv->sh || 20842b730263SAdrien Mazarguil opriv->domain_id == 20852b730263SAdrien Mazarguil RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 20862b730263SAdrien Mazarguil continue; 20872b730263SAdrien Mazarguil priv->domain_id = opriv->domain_id; 20882b730263SAdrien Mazarguil break; 20892b730263SAdrien Mazarguil } 20902b730263SAdrien Mazarguil if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 20912b730263SAdrien Mazarguil err = rte_eth_switch_domain_alloc(&priv->domain_id); 20922b730263SAdrien Mazarguil if (err) { 20932b730263SAdrien Mazarguil err = rte_errno; 20942b730263SAdrien Mazarguil DRV_LOG(ERR, "unable to allocate switch domain: %s", 20952b730263SAdrien Mazarguil strerror(rte_errno)); 20962b730263SAdrien Mazarguil goto error; 20972b730263SAdrien Mazarguil } 20982b730263SAdrien Mazarguil own_domain_id = 1; 20992b730263SAdrien Mazarguil } 2100f38c5457SAdrien Mazarguil err = mlx5_args(&config, dpdk_dev->devargs); 2101e72dd09bSNélio Laranjeiro if (err) { 2102012ad994SShahaf Shuler err = rte_errno; 210393068a9dSAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 210493068a9dSAdrien Mazarguil strerror(rte_errno)); 21059083982cSAdrien Mazarguil goto error; 2106e72dd09bSNélio Laranjeiro } 210792d5dd48SViacheslav Ovsiienko err = mlx5_dev_check_sibling_config(priv, &config); 210892d5dd48SViacheslav Ovsiienko if (err) 210992d5dd48SViacheslav Ovsiienko goto error; 211017e19bc4SViacheslav Ovsiienko config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & 211117e19bc4SViacheslav Ovsiienko IBV_DEVICE_RAW_IP_CSUM); 2112a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "checksum offloading is %ssupported", 21137fe24446SShahaf Shuler (config.hw_csum ? "" : "not ")); 21142dd8b721SViacheslav Ovsiienko #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 21152dd8b721SViacheslav Ovsiienko !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 21162dd8b721SViacheslav Ovsiienko DRV_LOG(DEBUG, "counters are not supported"); 21179a761de8SOri Kam #endif 211858b1312eSYongseok Koh #ifndef HAVE_IBV_FLOW_DV_SUPPORT 211958b1312eSYongseok Koh if (config.dv_flow_en) { 212058b1312eSYongseok Koh DRV_LOG(WARNING, "DV flow is not supported"); 212158b1312eSYongseok Koh config.dv_flow_en = 0; 212258b1312eSYongseok Koh } 212358b1312eSYongseok Koh #endif 21247fe24446SShahaf Shuler config.ind_table_max_size = 212517e19bc4SViacheslav Ovsiienko sh->device_attr.rss_caps.max_rwq_indirection_table_size; 212668128934SAdrien Mazarguil /* 212768128934SAdrien Mazarguil * Remove this check once DPDK supports larger/variable 212868128934SAdrien Mazarguil * indirection tables. 
212968128934SAdrien Mazarguil */ 213068128934SAdrien Mazarguil if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 21317fe24446SShahaf Shuler config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; 2132a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 21337fe24446SShahaf Shuler config.ind_table_max_size); 213417e19bc4SViacheslav Ovsiienko config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 213543e9d979SShachar Beiser IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 2136a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 21377fe24446SShahaf Shuler (config.hw_vlan_strip ? "" : "not ")); 213817e19bc4SViacheslav Ovsiienko config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 2139cd230a3eSShahaf Shuler IBV_RAW_PACKET_CAP_SCATTER_FCS); 2140a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 21417fe24446SShahaf Shuler (config.hw_fcs_strip ? "" : "not ")); 21422014a7fbSYongseok Koh #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 214317e19bc4SViacheslav Ovsiienko hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 21442014a7fbSYongseok Koh #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 214517e19bc4SViacheslav Ovsiienko hw_padding = !!(sh->device_attr.device_cap_flags_ex & 21462014a7fbSYongseok Koh IBV_DEVICE_PCI_WRITE_END_PADDING); 214743e9d979SShachar Beiser #endif 214878c7a16dSYongseok Koh if (config.hw_padding && !hw_padding) { 214978c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 215078c7a16dSYongseok Koh config.hw_padding = 0; 215178c7a16dSYongseok Koh } else if (config.hw_padding) { 215278c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 215378c7a16dSYongseok Koh } 215417e19bc4SViacheslav Ovsiienko config.tso = (sh->device_attr.tso_caps.max_tso > 0 && 215517e19bc4SViacheslav Ovsiienko (sh->device_attr.tso_caps.supported_qpts & 215643e9d979SShachar Beiser (1 << IBV_QPT_RAW_PACKET))); 21577fe24446SShahaf Shuler if (config.tso) 215817e19bc4SViacheslav Ovsiienko config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; 2159f9de8718SShahaf Shuler /* 2160f9de8718SShahaf Shuler * MPW is disabled by default, while the Enhanced MPW is enabled 2161f9de8718SShahaf Shuler * by default. 2162f9de8718SShahaf Shuler */ 2163f9de8718SShahaf Shuler if (config.mps == MLX5_ARG_UNSET) 2164f9de8718SShahaf Shuler config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 2165f9de8718SShahaf Shuler MLX5_MPW_DISABLED; 2166f9de8718SShahaf Shuler else 2167f9de8718SShahaf Shuler config.mps = config.mps ? mps : MLX5_MPW_DISABLED; 2168a170a30dSNélio Laranjeiro DRV_LOG(INFO, "%sMPS is %s", 21690f99970bSNélio Laranjeiro config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", 217068128934SAdrien Mazarguil config.mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); 21717fe24446SShahaf Shuler if (config.cqe_comp && !cqe_comp) { 2172a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 21737fe24446SShahaf Shuler config.cqe_comp = 0; 2174523f5a74SYongseok Koh } 2175bc91e8dbSYongseok Koh if (config.cqe_pad && !cqe_pad) { 2176bc91e8dbSYongseok Koh DRV_LOG(WARNING, "Rx CQE padding isn't supported"); 2177bc91e8dbSYongseok Koh config.cqe_pad = 0; 2178bc91e8dbSYongseok Koh } else if (config.cqe_pad) { 2179bc91e8dbSYongseok Koh DRV_LOG(INFO, "Rx CQE padding is enabled"); 2180bc91e8dbSYongseok Koh } 2181175f1c21SDekel Peled if (config.devx) { 2182175f1c21SDekel Peled priv->counter_fallback = 0; 2183175f1c21SDekel Peled err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr); 2184175f1c21SDekel Peled if (err) { 2185175f1c21SDekel Peled err = -err; 2186175f1c21SDekel Peled goto error; 2187175f1c21SDekel Peled } 2188175f1c21SDekel Peled if (!config.hca_attr.flow_counters_dump) 2189175f1c21SDekel Peled priv->counter_fallback = 1; 2190175f1c21SDekel Peled #ifndef HAVE_IBV_DEVX_ASYNC 2191175f1c21SDekel Peled priv->counter_fallback = 1; 2192175f1c21SDekel Peled #endif 2193175f1c21SDekel Peled if (priv->counter_fallback) 2194*06fa6988SDekel Peled DRV_LOG(INFO, "Use fall-back DV counter management"); 2195175f1c21SDekel Peled /* Check for LRO support. */ 21962eb5dce8SDekel Peled if (config.dest_tir && config.hca_attr.lro_cap && 21972eb5dce8SDekel Peled config.dv_flow_en) { 2198175f1c21SDekel Peled /* TBD check tunnel lro caps. */ 2199175f1c21SDekel Peled config.lro.supported = config.hca_attr.lro_cap; 2200175f1c21SDekel Peled DRV_LOG(DEBUG, "Device supports LRO"); 2201175f1c21SDekel Peled /* 2202175f1c21SDekel Peled * If LRO timeout is not configured by application, 2203175f1c21SDekel Peled * use the minimal supported value. 
2204175f1c21SDekel Peled */ 2205175f1c21SDekel Peled if (!config.lro.timeout) 2206175f1c21SDekel Peled config.lro.timeout = 2207175f1c21SDekel Peled config.hca_attr.lro_timer_supported_periods[0]; 2208175f1c21SDekel Peled DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 2209175f1c21SDekel Peled config.lro.timeout); 2210175f1c21SDekel Peled } 2211175f1c21SDekel Peled } 22125c0e2db6SYongseok Koh if (config.mprq.enabled && mprq) { 22137d6bf6b8SYongseok Koh if (config.mprq.stride_num_n > mprq_max_stride_num_n || 22147d6bf6b8SYongseok Koh config.mprq.stride_num_n < mprq_min_stride_num_n) { 22157d6bf6b8SYongseok Koh config.mprq.stride_num_n = 22167d6bf6b8SYongseok Koh RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 22177d6bf6b8SYongseok Koh mprq_min_stride_num_n); 22187d6bf6b8SYongseok Koh DRV_LOG(WARNING, 22197d6bf6b8SYongseok Koh "the number of strides" 22207d6bf6b8SYongseok Koh " for Multi-Packet RQ is out of range," 22217d6bf6b8SYongseok Koh " setting default value (%u)", 22227d6bf6b8SYongseok Koh 1 << config.mprq.stride_num_n); 22237d6bf6b8SYongseok Koh } 22247d6bf6b8SYongseok Koh config.mprq.min_stride_size_n = mprq_min_stride_size_n; 22257d6bf6b8SYongseok Koh config.mprq.max_stride_size_n = mprq_max_stride_size_n; 22265c0e2db6SYongseok Koh } else if (config.mprq.enabled && !mprq) { 22275c0e2db6SYongseok Koh DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 22285c0e2db6SYongseok Koh config.mprq.enabled = 0; 22297d6bf6b8SYongseok Koh } 2230066cfecdSMatan Azrad if (config.max_dump_files_num == 0) 2231066cfecdSMatan Azrad config.max_dump_files_num = 128; 2232af4f09f2SNélio Laranjeiro eth_dev = rte_eth_dev_allocate(name); 2233af4f09f2SNélio Laranjeiro if (eth_dev == NULL) { 2234a170a30dSNélio Laranjeiro DRV_LOG(ERR, "can not allocate rte ethdev"); 2235af4f09f2SNélio Laranjeiro err = ENOMEM; 22369083982cSAdrien Mazarguil goto error; 2237af4f09f2SNélio Laranjeiro } 223815febafdSThomas Monjalon /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ 223915febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 2240a7d3c627SThomas Monjalon if (priv->representor) { 22412b730263SAdrien Mazarguil eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 2242a7d3c627SThomas Monjalon eth_dev->data->representor_id = priv->representor_id; 2243a7d3c627SThomas Monjalon } 2244fa2e14d4SViacheslav Ovsiienko /* 2245fa2e14d4SViacheslav Ovsiienko * Store associated network device interface index. This index 2246fa2e14d4SViacheslav Ovsiienko * is permanent throughout the lifetime of device. So, we may store 2247fa2e14d4SViacheslav Ovsiienko * the ifindex here and use the cached value further. 2248fa2e14d4SViacheslav Ovsiienko */ 2249fa2e14d4SViacheslav Ovsiienko assert(spawn->ifindex); 2250fa2e14d4SViacheslav Ovsiienko priv->if_index = spawn->ifindex; 2251af4f09f2SNélio Laranjeiro eth_dev->data->dev_private = priv; 2252df428ceeSYongseok Koh priv->dev_data = eth_dev->data; 2253af4f09f2SNélio Laranjeiro eth_dev->data->mac_addrs = priv->mac; 2254f38c5457SAdrien Mazarguil eth_dev->device = dpdk_dev; 2255771fa900SAdrien Mazarguil /* Configure the first MAC address by default. */ 2256af4f09f2SNélio Laranjeiro if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 2257a170a30dSNélio Laranjeiro DRV_LOG(ERR, 2258a170a30dSNélio Laranjeiro "port %u cannot get MAC address, is mlx5_en" 2259a170a30dSNélio Laranjeiro " loaded? 
(errno: %s)", 22608c3c2372SAdrien Mazarguil eth_dev->data->port_id, strerror(rte_errno)); 2261e1c3e305SMatan Azrad err = ENODEV; 22629083982cSAdrien Mazarguil goto error; 2263771fa900SAdrien Mazarguil } 2264a170a30dSNélio Laranjeiro DRV_LOG(INFO, 2265a170a30dSNélio Laranjeiro "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 22660f99970bSNélio Laranjeiro eth_dev->data->port_id, 2267771fa900SAdrien Mazarguil mac.addr_bytes[0], mac.addr_bytes[1], 2268771fa900SAdrien Mazarguil mac.addr_bytes[2], mac.addr_bytes[3], 2269771fa900SAdrien Mazarguil mac.addr_bytes[4], mac.addr_bytes[5]); 2270771fa900SAdrien Mazarguil #ifndef NDEBUG 2271771fa900SAdrien Mazarguil { 2272771fa900SAdrien Mazarguil char ifname[IF_NAMESIZE]; 2273771fa900SAdrien Mazarguil 2274af4f09f2SNélio Laranjeiro if (mlx5_get_ifname(eth_dev, &ifname) == 0) 2275a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 22760f99970bSNélio Laranjeiro eth_dev->data->port_id, ifname); 2277771fa900SAdrien Mazarguil else 2278a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is unknown", 22790f99970bSNélio Laranjeiro eth_dev->data->port_id); 2280771fa900SAdrien Mazarguil } 2281771fa900SAdrien Mazarguil #endif 2282771fa900SAdrien Mazarguil /* Get actual MTU if possible. */ 2283a6d83b6aSNélio Laranjeiro err = mlx5_get_mtu(eth_dev, &priv->mtu); 2284012ad994SShahaf Shuler if (err) { 2285012ad994SShahaf Shuler err = rte_errno; 22869083982cSAdrien Mazarguil goto error; 2287012ad994SShahaf Shuler } 2288a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 2289a170a30dSNélio Laranjeiro priv->mtu); 229068128934SAdrien Mazarguil /* Initialize burst functions to prevent crashes before link-up. */ 2291e313ef4cSShahaf Shuler eth_dev->rx_pkt_burst = removed_rx_burst; 2292e313ef4cSShahaf Shuler eth_dev->tx_pkt_burst = removed_tx_burst; 2293771fa900SAdrien Mazarguil eth_dev->dev_ops = &mlx5_dev_ops; 2294272733b5SNélio Laranjeiro /* Register MAC address. */ 2295272733b5SNélio Laranjeiro claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 2296f87bfa8eSYongseok Koh if (config.vf && config.vf_nl_en) 2297ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_sync(eth_dev); 2298c8ffb8a9SNélio Laranjeiro TAILQ_INIT(&priv->flows); 22991b37f5d8SNélio Laranjeiro TAILQ_INIT(&priv->ctrl_flows); 23001e3a39f7SXueming Li /* Hint libmlx5 to use PMD allocator for data plane resources */ 23011e3a39f7SXueming Li struct mlx5dv_ctx_allocators alctr = { 23021e3a39f7SXueming Li .alloc = &mlx5_alloc_verbs_buf, 23031e3a39f7SXueming Li .free = &mlx5_free_verbs_buf, 23041e3a39f7SXueming Li .data = priv, 23051e3a39f7SXueming Li }; 230617e19bc4SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 230717e19bc4SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 23081e3a39f7SXueming Li (void *)((uintptr_t)&alctr)); 2309771fa900SAdrien Mazarguil /* Bring Ethernet device up. */ 2310a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 23110f99970bSNélio Laranjeiro eth_dev->data->port_id); 23127ba5320bSNélio Laranjeiro mlx5_set_link_up(eth_dev); 2313a85a606cSShahaf Shuler /* 2314a85a606cSShahaf Shuler * Even though the interrupt handler is not installed yet, 2315ae4eb7dcSViacheslav Ovsiienko * interrupts will still trigger on the async_fd from 2316a85a606cSShahaf Shuler * Verbs context returned by ibv_open_device(). 
2317a85a606cSShahaf Shuler */ 2318a85a606cSShahaf Shuler mlx5_link_update(eth_dev, 0); 2319e2b4925eSOri Kam #ifdef HAVE_MLX5DV_DR_ESWITCH 2320e2b4925eSOri Kam if (!(config.hca_attr.eswitch_manager && config.dv_flow_en && 2321e2b4925eSOri Kam (switch_info->representor || switch_info->master))) 2322e2b4925eSOri Kam config.dv_esw_en = 0; 2323e2b4925eSOri Kam #else 2324e2b4925eSOri Kam config.dv_esw_en = 0; 2325e2b4925eSOri Kam #endif 232638b4b397SViacheslav Ovsiienko /* Detect minimal data bytes to inline. */ 232738b4b397SViacheslav Ovsiienko mlx5_set_min_inline(spawn, &config); 23287fe24446SShahaf Shuler /* Store device configuration on private structure. */ 23297fe24446SShahaf Shuler priv->config = config; 2330dfedf3e3SViacheslav Ovsiienko /* Create context for virtual machine VLAN workaround. */ 2331dfedf3e3SViacheslav Ovsiienko priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 2332e2b4925eSOri Kam if (config.dv_flow_en) { 2333e2b4925eSOri Kam err = mlx5_alloc_shared_dr(priv); 2334e2b4925eSOri Kam if (err) 2335e2b4925eSOri Kam goto error; 2336e2b4925eSOri Kam } 233778be8852SNelio Laranjeiro /* Supported Verbs flow priority number detection. */ 23382815702bSNelio Laranjeiro err = mlx5_flow_discover_priorities(eth_dev); 23394fb27c1dSViacheslav Ovsiienko if (err < 0) { 23404fb27c1dSViacheslav Ovsiienko err = -err; 23419083982cSAdrien Mazarguil goto error; 23424fb27c1dSViacheslav Ovsiienko } 23432815702bSNelio Laranjeiro priv->config.flow_prio = err; 2344f38c5457SAdrien Mazarguil return eth_dev; 23459083982cSAdrien Mazarguil error: 234626c08b97SAdrien Mazarguil if (priv) { 2347b2177648SViacheslav Ovsiienko if (priv->sh) 2348b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 234926c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 235026c08b97SAdrien Mazarguil close(priv->nl_socket_route); 235126c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 235226c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 2353dfedf3e3SViacheslav Ovsiienko if (priv->vmwa_context) 2354dfedf3e3SViacheslav Ovsiienko mlx5_vlan_vmwa_exit(priv->vmwa_context); 23552b730263SAdrien Mazarguil if (own_domain_id) 23562b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 2357771fa900SAdrien Mazarguil rte_free(priv); 2358e16adf08SThomas Monjalon if (eth_dev != NULL) 2359e16adf08SThomas Monjalon eth_dev->data->dev_private = NULL; 236026c08b97SAdrien Mazarguil } 2361e16adf08SThomas Monjalon if (eth_dev != NULL) { 2362e16adf08SThomas Monjalon /* mac_addrs must not be freed alone because part of dev_private */ 2363e16adf08SThomas Monjalon eth_dev->data->mac_addrs = NULL; 2364690de285SRaslan Darawsheh rte_eth_dev_release_port(eth_dev); 2365e16adf08SThomas Monjalon } 236617e19bc4SViacheslav Ovsiienko if (sh) 236717e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(sh); 2368f38c5457SAdrien Mazarguil assert(err > 0); 2369a6d83b6aSNélio Laranjeiro rte_errno = err; 2370f38c5457SAdrien Mazarguil return NULL; 2371f38c5457SAdrien Mazarguil } 2372f38c5457SAdrien Mazarguil 2373116f90adSAdrien Mazarguil /** 2374116f90adSAdrien Mazarguil * Comparison callback to sort device data. 2375116f90adSAdrien Mazarguil * 2376116f90adSAdrien Mazarguil * This is meant to be used with qsort(). 2377116f90adSAdrien Mazarguil * 2378116f90adSAdrien Mazarguil * @param a[in] 2379116f90adSAdrien Mazarguil * Pointer to pointer to first data object. 2380116f90adSAdrien Mazarguil * @param b[in] 2381116f90adSAdrien Mazarguil * Pointer to pointer to second data object. 
2382116f90adSAdrien Mazarguil * 2383116f90adSAdrien Mazarguil * @return 2384116f90adSAdrien Mazarguil * 0 if both objects are equal, less than 0 if the first argument is less 2385116f90adSAdrien Mazarguil * than the second, greater than 0 otherwise. 2386116f90adSAdrien Mazarguil */ 2387116f90adSAdrien Mazarguil static int 2388116f90adSAdrien Mazarguil mlx5_dev_spawn_data_cmp(const void *a, const void *b) 2389116f90adSAdrien Mazarguil { 2390116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_a = 2391116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)a)->info; 2392116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_b = 2393116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)b)->info; 2394116f90adSAdrien Mazarguil int ret; 2395116f90adSAdrien Mazarguil 2396116f90adSAdrien Mazarguil /* Master device first. */ 2397116f90adSAdrien Mazarguil ret = si_b->master - si_a->master; 2398116f90adSAdrien Mazarguil if (ret) 2399116f90adSAdrien Mazarguil return ret; 2400116f90adSAdrien Mazarguil /* Then representor devices. */ 2401116f90adSAdrien Mazarguil ret = si_b->representor - si_a->representor; 2402116f90adSAdrien Mazarguil if (ret) 2403116f90adSAdrien Mazarguil return ret; 2404116f90adSAdrien Mazarguil /* Unidentified devices come last in no specific order. */ 2405116f90adSAdrien Mazarguil if (!si_a->representor) 2406116f90adSAdrien Mazarguil return 0; 2407116f90adSAdrien Mazarguil /* Order representors by name. */ 2408116f90adSAdrien Mazarguil return si_a->port_name - si_b->port_name; 2409116f90adSAdrien Mazarguil } 2410116f90adSAdrien Mazarguil 2411f38c5457SAdrien Mazarguil /** 24122e569a37SViacheslav Ovsiienko * Match PCI information for possible slaves of bonding device. 24132e569a37SViacheslav Ovsiienko * 24142e569a37SViacheslav Ovsiienko * @param[in] ibv_dev 24152e569a37SViacheslav Ovsiienko * Pointer to Infiniband device structure. 24162e569a37SViacheslav Ovsiienko * @param[in] pci_dev 24172e569a37SViacheslav Ovsiienko * Pointer to PCI device structure to match PCI address. 24182e569a37SViacheslav Ovsiienko * @param[in] nl_rdma 24192e569a37SViacheslav Ovsiienko * Netlink RDMA group socket handle. 24202e569a37SViacheslav Ovsiienko * 24212e569a37SViacheslav Ovsiienko * @return 24222e569a37SViacheslav Ovsiienko * negative value if no bonding device found, otherwise 24232e569a37SViacheslav Ovsiienko * positive index of slave PF in bonding. 24242e569a37SViacheslav Ovsiienko */ 24252e569a37SViacheslav Ovsiienko static int 24262e569a37SViacheslav Ovsiienko mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev, 24272e569a37SViacheslav Ovsiienko const struct rte_pci_device *pci_dev, 24282e569a37SViacheslav Ovsiienko int nl_rdma) 24292e569a37SViacheslav Ovsiienko { 24302e569a37SViacheslav Ovsiienko char ifname[IF_NAMESIZE + 1]; 24312e569a37SViacheslav Ovsiienko unsigned int ifindex; 24322e569a37SViacheslav Ovsiienko unsigned int np, i; 24332e569a37SViacheslav Ovsiienko FILE *file = NULL; 24342e569a37SViacheslav Ovsiienko int pf = -1; 24352e569a37SViacheslav Ovsiienko 24362e569a37SViacheslav Ovsiienko /* 24372e569a37SViacheslav Ovsiienko * Try to get master device name. If something goes 24382e569a37SViacheslav Ovsiienko * wrong suppose the lack of kernel support and no 24392e569a37SViacheslav Ovsiienko * bonding devices. 
24402e569a37SViacheslav Ovsiienko 	 */
24412e569a37SViacheslav Ovsiienko 	if (nl_rdma < 0)
24422e569a37SViacheslav Ovsiienko 		return -1;
24432e569a37SViacheslav Ovsiienko 	if (!strstr(ibv_dev->name, "bond"))
24442e569a37SViacheslav Ovsiienko 		return -1;
24452e569a37SViacheslav Ovsiienko 	np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
24462e569a37SViacheslav Ovsiienko 	if (!np)
24472e569a37SViacheslav Ovsiienko 		return -1;
24482e569a37SViacheslav Ovsiienko 	/*
24492e569a37SViacheslav Ovsiienko 	 * The master device might not be on the predefined
24502e569a37SViacheslav Ovsiienko 	 * port (port index 1 is not guaranteed), so we have
24512e569a37SViacheslav Ovsiienko 	 * to scan all Infiniband device ports and find the
24522e569a37SViacheslav Ovsiienko 	 * master.
24532e569a37SViacheslav Ovsiienko 	 */
24542e569a37SViacheslav Ovsiienko 	for (i = 1; i <= np; ++i) {
24552e569a37SViacheslav Ovsiienko 		/* Check whether Infiniband port is populated. */
24562e569a37SViacheslav Ovsiienko 		ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
24572e569a37SViacheslav Ovsiienko 		if (!ifindex)
24582e569a37SViacheslav Ovsiienko 			continue;
24592e569a37SViacheslav Ovsiienko 		if (!if_indextoname(ifindex, ifname))
24602e569a37SViacheslav Ovsiienko 			continue;
24612e569a37SViacheslav Ovsiienko 		/* Try to read bonding slave names from sysfs. */
24622e569a37SViacheslav Ovsiienko 		MKSTR(slaves,
24632e569a37SViacheslav Ovsiienko 		      "/sys/class/net/%s/master/bonding/slaves", ifname);
24642e569a37SViacheslav Ovsiienko 		file = fopen(slaves, "r");
24652e569a37SViacheslav Ovsiienko 		if (file)
24662e569a37SViacheslav Ovsiienko 			break;
24672e569a37SViacheslav Ovsiienko 	}
24682e569a37SViacheslav Ovsiienko 	if (!file)
24692e569a37SViacheslav Ovsiienko 		return -1;
24702e569a37SViacheslav Ovsiienko 	/* Use safe format to check maximal buffer length. */
24712e569a37SViacheslav Ovsiienko 	assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
24722e569a37SViacheslav Ovsiienko 	while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
24732e569a37SViacheslav Ovsiienko 		char tmp_str[IF_NAMESIZE + 32];
24742e569a37SViacheslav Ovsiienko 		struct rte_pci_addr pci_addr;
24752e569a37SViacheslav Ovsiienko 		struct mlx5_switch_info info;
24762e569a37SViacheslav Ovsiienko 
24772e569a37SViacheslav Ovsiienko 		/* Process slave interface names in the loop. */
24782e569a37SViacheslav Ovsiienko 		snprintf(tmp_str, sizeof(tmp_str),
24792e569a37SViacheslav Ovsiienko 			 "/sys/class/net/%s", ifname);
24802e569a37SViacheslav Ovsiienko 		if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
24812e569a37SViacheslav Ovsiienko 			DRV_LOG(WARNING, "can not get PCI address"
24822e569a37SViacheslav Ovsiienko 				" for netdev \"%s\"", ifname);
24832e569a37SViacheslav Ovsiienko 			continue;
24842e569a37SViacheslav Ovsiienko 		}
24852e569a37SViacheslav Ovsiienko 		if (pci_dev->addr.domain != pci_addr.domain ||
24862e569a37SViacheslav Ovsiienko 		    pci_dev->addr.bus != pci_addr.bus ||
24872e569a37SViacheslav Ovsiienko 		    pci_dev->addr.devid != pci_addr.devid ||
24882e569a37SViacheslav Ovsiienko 		    pci_dev->addr.function != pci_addr.function)
24892e569a37SViacheslav Ovsiienko 			continue;
24902e569a37SViacheslav Ovsiienko 		/* Slave interface PCI address match found.
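		 * Read its phys_port_name below to learn which PF index this
		 * slave corresponds to (uplink or legacy port naming).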
*/ 24912e569a37SViacheslav Ovsiienko fclose(file); 24922e569a37SViacheslav Ovsiienko snprintf(tmp_str, sizeof(tmp_str), 24932e569a37SViacheslav Ovsiienko "/sys/class/net/%s/phys_port_name", ifname); 24942e569a37SViacheslav Ovsiienko file = fopen(tmp_str, "rb"); 24952e569a37SViacheslav Ovsiienko if (!file) 24962e569a37SViacheslav Ovsiienko break; 24972e569a37SViacheslav Ovsiienko info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; 24982e569a37SViacheslav Ovsiienko if (fscanf(file, "%32s", tmp_str) == 1) 24992e569a37SViacheslav Ovsiienko mlx5_translate_port_name(tmp_str, &info); 25002e569a37SViacheslav Ovsiienko if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY || 25012e569a37SViacheslav Ovsiienko info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) 25022e569a37SViacheslav Ovsiienko pf = info.port_name; 25032e569a37SViacheslav Ovsiienko break; 25042e569a37SViacheslav Ovsiienko } 25052e569a37SViacheslav Ovsiienko if (file) 25062e569a37SViacheslav Ovsiienko fclose(file); 25072e569a37SViacheslav Ovsiienko return pf; 25082e569a37SViacheslav Ovsiienko } 25092e569a37SViacheslav Ovsiienko 25102e569a37SViacheslav Ovsiienko /** 2511f38c5457SAdrien Mazarguil * DPDK callback to register a PCI device. 2512f38c5457SAdrien Mazarguil * 25132b730263SAdrien Mazarguil * This function spawns Ethernet devices out of a given PCI device. 2514f38c5457SAdrien Mazarguil * 2515f38c5457SAdrien Mazarguil * @param[in] pci_drv 2516f38c5457SAdrien Mazarguil * PCI driver structure (mlx5_driver). 2517f38c5457SAdrien Mazarguil * @param[in] pci_dev 2518f38c5457SAdrien Mazarguil * PCI device information. 2519f38c5457SAdrien Mazarguil * 2520f38c5457SAdrien Mazarguil * @return 2521f38c5457SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 2522f38c5457SAdrien Mazarguil */ 2523f38c5457SAdrien Mazarguil static int 2524f38c5457SAdrien Mazarguil mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2525f38c5457SAdrien Mazarguil struct rte_pci_device *pci_dev) 2526f38c5457SAdrien Mazarguil { 2527f38c5457SAdrien Mazarguil struct ibv_device **ibv_list; 2528ad74bc61SViacheslav Ovsiienko /* 2529ad74bc61SViacheslav Ovsiienko * Number of found IB Devices matching with requested PCI BDF. 2530ad74bc61SViacheslav Ovsiienko * nd != 1 means there are multiple IB devices over the same 2531ad74bc61SViacheslav Ovsiienko * PCI device and we have representors and master. 2532ad74bc61SViacheslav Ovsiienko */ 2533ad74bc61SViacheslav Ovsiienko unsigned int nd = 0; 2534ad74bc61SViacheslav Ovsiienko /* 2535ad74bc61SViacheslav Ovsiienko * Number of found IB device Ports. nd = 1 and np = 1..n means 2536ad74bc61SViacheslav Ovsiienko * we have the single multiport IB device, and there may be 2537ad74bc61SViacheslav Ovsiienko * representors attached to some of found ports. 2538ad74bc61SViacheslav Ovsiienko */ 2539ad74bc61SViacheslav Ovsiienko unsigned int np = 0; 2540ad74bc61SViacheslav Ovsiienko /* 2541ad74bc61SViacheslav Ovsiienko * Number of DPDK ethernet devices to Spawn - either over 2542ad74bc61SViacheslav Ovsiienko * multiple IB devices or multiple ports of single IB device. 2543ad74bc61SViacheslav Ovsiienko * Actually this is the number of iterations to spawn. 
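	 * The spawn data array below is sized accordingly
	 * (np entries for a multiport IB device, nd entries otherwise).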
2544ad74bc61SViacheslav Ovsiienko */ 2545ad74bc61SViacheslav Ovsiienko unsigned int ns = 0; 25462e569a37SViacheslav Ovsiienko /* 25472e569a37SViacheslav Ovsiienko * Bonding device 25482e569a37SViacheslav Ovsiienko * < 0 - no bonding device (single one) 25492e569a37SViacheslav Ovsiienko * >= 0 - bonding device (value is slave PF index) 25502e569a37SViacheslav Ovsiienko */ 25512e569a37SViacheslav Ovsiienko int bd = -1; 2552a62ec991SViacheslav Ovsiienko struct mlx5_dev_spawn_data *list = NULL; 2553f87bfa8eSYongseok Koh struct mlx5_dev_config dev_config; 2554f38c5457SAdrien Mazarguil int ret; 2555f38c5457SAdrien Mazarguil 25567be600c8SYongseok Koh ret = mlx5_init_once(); 25577be600c8SYongseok Koh if (ret) { 25587be600c8SYongseok Koh DRV_LOG(ERR, "unable to init PMD global data: %s", 25597be600c8SYongseok Koh strerror(rte_errno)); 25607be600c8SYongseok Koh return -rte_errno; 25617be600c8SYongseok Koh } 2562f38c5457SAdrien Mazarguil assert(pci_drv == &mlx5_driver); 2563f38c5457SAdrien Mazarguil errno = 0; 2564f38c5457SAdrien Mazarguil ibv_list = mlx5_glue->get_device_list(&ret); 2565f38c5457SAdrien Mazarguil if (!ibv_list) { 2566f38c5457SAdrien Mazarguil rte_errno = errno ? errno : ENOSYS; 2567f38c5457SAdrien Mazarguil DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 2568a6d83b6aSNélio Laranjeiro return -rte_errno; 2569a6d83b6aSNélio Laranjeiro } 2570ad74bc61SViacheslav Ovsiienko /* 2571ad74bc61SViacheslav Ovsiienko * First scan the list of all Infiniband devices to find 2572ad74bc61SViacheslav Ovsiienko * matching ones, gathering into the list. 2573ad74bc61SViacheslav Ovsiienko */ 257426c08b97SAdrien Mazarguil struct ibv_device *ibv_match[ret + 1]; 2575a62ec991SViacheslav Ovsiienko int nl_route = mlx5_nl_init(NETLINK_ROUTE); 2576a62ec991SViacheslav Ovsiienko int nl_rdma = mlx5_nl_init(NETLINK_RDMA); 2577ad74bc61SViacheslav Ovsiienko unsigned int i; 257826c08b97SAdrien Mazarguil 2579f38c5457SAdrien Mazarguil while (ret-- > 0) { 2580f38c5457SAdrien Mazarguil struct rte_pci_addr pci_addr; 2581f38c5457SAdrien Mazarguil 2582f38c5457SAdrien Mazarguil DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 25832e569a37SViacheslav Ovsiienko bd = mlx5_device_bond_pci_match 25842e569a37SViacheslav Ovsiienko (ibv_list[ret], pci_dev, nl_rdma); 25852e569a37SViacheslav Ovsiienko if (bd >= 0) { 25862e569a37SViacheslav Ovsiienko /* 25872e569a37SViacheslav Ovsiienko * Bonding device detected. Only one match is allowed, 25882e569a37SViacheslav Ovsiienko * the bonding is supported over multi-port IB device, 25892e569a37SViacheslav Ovsiienko * there should be no matches on representor PCI 25902e569a37SViacheslav Ovsiienko * functions or non VF LAG bonding devices with 25912e569a37SViacheslav Ovsiienko * specified address. 
25922e569a37SViacheslav Ovsiienko */ 25932e569a37SViacheslav Ovsiienko if (nd) { 25942e569a37SViacheslav Ovsiienko DRV_LOG(ERR, 25952e569a37SViacheslav Ovsiienko "multiple PCI match on bonding device" 25962e569a37SViacheslav Ovsiienko "\"%s\" found", ibv_list[ret]->name); 25972e569a37SViacheslav Ovsiienko rte_errno = ENOENT; 25982e569a37SViacheslav Ovsiienko ret = -rte_errno; 25992e569a37SViacheslav Ovsiienko goto exit; 26002e569a37SViacheslav Ovsiienko } 26012e569a37SViacheslav Ovsiienko DRV_LOG(INFO, "PCI information matches for" 26022e569a37SViacheslav Ovsiienko " slave %d bonding device \"%s\"", 26032e569a37SViacheslav Ovsiienko bd, ibv_list[ret]->name); 26042e569a37SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 26052e569a37SViacheslav Ovsiienko break; 26062e569a37SViacheslav Ovsiienko } 26075cf5f710SViacheslav Ovsiienko if (mlx5_dev_to_pci_addr 26085cf5f710SViacheslav Ovsiienko (ibv_list[ret]->ibdev_path, &pci_addr)) 2609f38c5457SAdrien Mazarguil continue; 2610f38c5457SAdrien Mazarguil if (pci_dev->addr.domain != pci_addr.domain || 2611f38c5457SAdrien Mazarguil pci_dev->addr.bus != pci_addr.bus || 2612f38c5457SAdrien Mazarguil pci_dev->addr.devid != pci_addr.devid || 2613f38c5457SAdrien Mazarguil pci_dev->addr.function != pci_addr.function) 2614f38c5457SAdrien Mazarguil continue; 261526c08b97SAdrien Mazarguil DRV_LOG(INFO, "PCI information matches for device \"%s\"", 2616f38c5457SAdrien Mazarguil ibv_list[ret]->name); 2617ad74bc61SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 261826c08b97SAdrien Mazarguil } 2619ad74bc61SViacheslav Ovsiienko ibv_match[nd] = NULL; 2620ad74bc61SViacheslav Ovsiienko if (!nd) { 2621ae4eb7dcSViacheslav Ovsiienko /* No device matches, just complain and bail out. */ 2622ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, 2623ad74bc61SViacheslav Ovsiienko "no Verbs device matches PCI device " PCI_PRI_FMT "," 2624ad74bc61SViacheslav Ovsiienko " are kernel drivers loaded?", 2625ad74bc61SViacheslav Ovsiienko pci_dev->addr.domain, pci_dev->addr.bus, 2626ad74bc61SViacheslav Ovsiienko pci_dev->addr.devid, pci_dev->addr.function); 2627ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2628ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2629a62ec991SViacheslav Ovsiienko goto exit; 2630ad74bc61SViacheslav Ovsiienko } 2631ad74bc61SViacheslav Ovsiienko if (nd == 1) { 263226c08b97SAdrien Mazarguil /* 2633ad74bc61SViacheslav Ovsiienko * Found single matching device may have multiple ports. 2634ad74bc61SViacheslav Ovsiienko * Each port may be representor, we have to check the port 2635ad74bc61SViacheslav Ovsiienko * number and check the representors existence. 
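		 * The identification below tries Netlink first and falls
		 * back to sysfs when Netlink cannot tell.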
263626c08b97SAdrien Mazarguil 		 */
2637ad74bc61SViacheslav Ovsiienko 		if (nl_rdma >= 0)
2638ad74bc61SViacheslav Ovsiienko 			np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2639ad74bc61SViacheslav Ovsiienko 		if (!np)
2640ad74bc61SViacheslav Ovsiienko 			DRV_LOG(WARNING, "can not get IB device \"%s\""
2641ad74bc61SViacheslav Ovsiienko 				" ports number", ibv_match[0]->name);
26422e569a37SViacheslav Ovsiienko 		if (bd >= 0 && !np) {
26432e569a37SViacheslav Ovsiienko 			DRV_LOG(ERR, "can not get ports"
26442e569a37SViacheslav Ovsiienko 				" for bonding device");
26452e569a37SViacheslav Ovsiienko 			rte_errno = ENOENT;
26462e569a37SViacheslav Ovsiienko 			ret = -rte_errno;
26472e569a37SViacheslav Ovsiienko 			goto exit;
26482e569a37SViacheslav Ovsiienko 		}
2649ad74bc61SViacheslav Ovsiienko 	}
2650790164ceSViacheslav Ovsiienko #ifndef HAVE_MLX5DV_DR_DEVX_PORT
2651790164ceSViacheslav Ovsiienko 	if (bd >= 0) {
2652790164ceSViacheslav Ovsiienko 		/*
2653790164ceSViacheslav Ovsiienko 		 * This may happen if there is VF LAG kernel support and
2654790164ceSViacheslav Ovsiienko 		 * the application is compiled with an older rdma_core library.
2655790164ceSViacheslav Ovsiienko 		 */
2656790164ceSViacheslav Ovsiienko 		DRV_LOG(ERR,
2657790164ceSViacheslav Ovsiienko 			"No kernel/verbs support for VF LAG bonding found.");
2658790164ceSViacheslav Ovsiienko 		rte_errno = ENOTSUP;
2659790164ceSViacheslav Ovsiienko 		ret = -rte_errno;
2660790164ceSViacheslav Ovsiienko 		goto exit;
2661790164ceSViacheslav Ovsiienko 	}
2662790164ceSViacheslav Ovsiienko #endif
2663ad74bc61SViacheslav Ovsiienko 	/*
2664ad74bc61SViacheslav Ovsiienko 	 * Now we can determine the maximal
2665ad74bc61SViacheslav Ovsiienko 	 * number of devices to be spawned.
2666ad74bc61SViacheslav Ovsiienko 	 */
2667a62ec991SViacheslav Ovsiienko 	list = rte_zmalloc("device spawn data",
2668a62ec991SViacheslav Ovsiienko 			   sizeof(struct mlx5_dev_spawn_data) *
2669a62ec991SViacheslav Ovsiienko 			   (np ? np : nd),
2670a62ec991SViacheslav Ovsiienko 			   RTE_CACHE_LINE_SIZE);
2671a62ec991SViacheslav Ovsiienko 	if (!list) {
2672a62ec991SViacheslav Ovsiienko 		DRV_LOG(ERR, "spawn data array allocation failure");
2673a62ec991SViacheslav Ovsiienko 		rte_errno = ENOMEM;
2674a62ec991SViacheslav Ovsiienko 		ret = -rte_errno;
2675a62ec991SViacheslav Ovsiienko 		goto exit;
2676a62ec991SViacheslav Ovsiienko 	}
26772e569a37SViacheslav Ovsiienko 	if (bd >= 0 || np > 1) {
2678ad74bc61SViacheslav Ovsiienko 		/*
2679ae4eb7dcSViacheslav Ovsiienko 		 * A single IB device with multiple ports was found, it may
2680ad74bc61SViacheslav Ovsiienko 		 * be the E-Switch master device along with representors.
2681ad74bc61SViacheslav Ovsiienko 		 * We have to perform identification through the ports.
2682ad74bc61SViacheslav Ovsiienko */ 2683ad74bc61SViacheslav Ovsiienko assert(nl_rdma >= 0); 2684ad74bc61SViacheslav Ovsiienko assert(ns == 0); 2685ad74bc61SViacheslav Ovsiienko assert(nd == 1); 26862e569a37SViacheslav Ovsiienko assert(np); 2687ad74bc61SViacheslav Ovsiienko for (i = 1; i <= np; ++i) { 2688ad74bc61SViacheslav Ovsiienko list[ns].max_port = np; 2689ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = i; 2690ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[0]; 2691ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 2692ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev; 26932e569a37SViacheslav Ovsiienko list[ns].pf_bond = bd; 2694ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 2695ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, i); 2696ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 2697ad74bc61SViacheslav Ovsiienko /* 2698ad74bc61SViacheslav Ovsiienko * No network interface index found for the 2699ad74bc61SViacheslav Ovsiienko * specified port, it means there is no 2700ad74bc61SViacheslav Ovsiienko * representor on this port. It's OK, 2701ad74bc61SViacheslav Ovsiienko * there can be disabled ports, for example 2702ad74bc61SViacheslav Ovsiienko * if sriov_numvfs < sriov_totalvfs. 2703ad74bc61SViacheslav Ovsiienko */ 270426c08b97SAdrien Mazarguil continue; 270526c08b97SAdrien Mazarguil } 2706ad74bc61SViacheslav Ovsiienko ret = -1; 270726c08b97SAdrien Mazarguil if (nl_route >= 0) 2708ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 2709ad74bc61SViacheslav Ovsiienko (nl_route, 2710ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 2711ad74bc61SViacheslav Ovsiienko &list[ns].info); 2712ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 2713ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 2714ad74bc61SViacheslav Ovsiienko /* 2715ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 2716ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 2717ad74bc61SViacheslav Ovsiienko * with sysfs. 
2718ad74bc61SViacheslav Ovsiienko */ 2719ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 2720ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 2721ad74bc61SViacheslav Ovsiienko &list[ns].info); 2722ad74bc61SViacheslav Ovsiienko } 27232e569a37SViacheslav Ovsiienko if (!ret && bd >= 0) { 27242e569a37SViacheslav Ovsiienko switch (list[ns].info.name_type) { 27252e569a37SViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: 27262e569a37SViacheslav Ovsiienko if (list[ns].info.port_name == bd) 27272e569a37SViacheslav Ovsiienko ns++; 27282e569a37SViacheslav Ovsiienko break; 27292e569a37SViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_PFVF: 27302e569a37SViacheslav Ovsiienko if (list[ns].info.pf_num == bd) 27312e569a37SViacheslav Ovsiienko ns++; 27322e569a37SViacheslav Ovsiienko break; 27332e569a37SViacheslav Ovsiienko default: 27342e569a37SViacheslav Ovsiienko break; 27352e569a37SViacheslav Ovsiienko } 27362e569a37SViacheslav Ovsiienko continue; 27372e569a37SViacheslav Ovsiienko } 2738ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 2739ad74bc61SViacheslav Ovsiienko list[ns].info.master)) 2740ad74bc61SViacheslav Ovsiienko ns++; 2741ad74bc61SViacheslav Ovsiienko } 2742ad74bc61SViacheslav Ovsiienko if (!ns) { 274326c08b97SAdrien Mazarguil DRV_LOG(ERR, 2744ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 2745ad74bc61SViacheslav Ovsiienko " on the IB device with multiple ports"); 2746ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2747ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2748ad74bc61SViacheslav Ovsiienko goto exit; 2749ad74bc61SViacheslav Ovsiienko } 2750ad74bc61SViacheslav Ovsiienko } else { 2751ad74bc61SViacheslav Ovsiienko /* 2752ad74bc61SViacheslav Ovsiienko * The existence of several matching entries (nd > 1) means 2753ad74bc61SViacheslav Ovsiienko * port representors have been instantiated. No existing Verbs 2754ad74bc61SViacheslav Ovsiienko * call nor sysfs entries can tell them apart, this can only 2755ad74bc61SViacheslav Ovsiienko * be done through Netlink calls assuming kernel drivers are 2756ad74bc61SViacheslav Ovsiienko * recent enough to support them. 2757ad74bc61SViacheslav Ovsiienko * 2758ad74bc61SViacheslav Ovsiienko * In the event of identification failure through Netlink, 2759ad74bc61SViacheslav Ovsiienko * try again through sysfs, then: 2760ad74bc61SViacheslav Ovsiienko * 2761ad74bc61SViacheslav Ovsiienko * 1. A single IB device matches (nd == 1) with single 2762ad74bc61SViacheslav Ovsiienko * port (np=0/1) and is not a representor, assume 2763ad74bc61SViacheslav Ovsiienko * no switch support. 2764ad74bc61SViacheslav Ovsiienko * 2765ad74bc61SViacheslav Ovsiienko * 2. Otherwise no safe assumptions can be made; 2766ad74bc61SViacheslav Ovsiienko * complain louder and bail out. 
2767ad74bc61SViacheslav Ovsiienko */ 2768ad74bc61SViacheslav Ovsiienko np = 1; 2769ad74bc61SViacheslav Ovsiienko for (i = 0; i != nd; ++i) { 2770ad74bc61SViacheslav Ovsiienko memset(&list[ns].info, 0, sizeof(list[ns].info)); 2771ad74bc61SViacheslav Ovsiienko list[ns].max_port = 1; 2772ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = 1; 2773ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[i]; 2774ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 2775ab3cffcfSViacheslav Ovsiienko list[ns].pci_dev = pci_dev; 27762e569a37SViacheslav Ovsiienko list[ns].pf_bond = -1; 2777ad74bc61SViacheslav Ovsiienko list[ns].ifindex = 0; 2778ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 2779ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 2780ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, 1); 2781ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 27829c2bbd04SViacheslav Ovsiienko char ifname[IF_NAMESIZE]; 27839c2bbd04SViacheslav Ovsiienko 2784ad74bc61SViacheslav Ovsiienko /* 27859c2bbd04SViacheslav Ovsiienko * Netlink failed, it may happen with old 27869c2bbd04SViacheslav Ovsiienko * ib_core kernel driver (before 4.16). 27879c2bbd04SViacheslav Ovsiienko * We can assume there is old driver because 27889c2bbd04SViacheslav Ovsiienko * here we are processing single ports IB 27899c2bbd04SViacheslav Ovsiienko * devices. Let's try sysfs to retrieve 27909c2bbd04SViacheslav Ovsiienko * the ifindex. The method works for 27919c2bbd04SViacheslav Ovsiienko * master device only. 27929c2bbd04SViacheslav Ovsiienko */ 27939c2bbd04SViacheslav Ovsiienko if (nd > 1) { 27949c2bbd04SViacheslav Ovsiienko /* 27959c2bbd04SViacheslav Ovsiienko * Multiple devices found, assume 27969c2bbd04SViacheslav Ovsiienko * representors, can not distinguish 27979c2bbd04SViacheslav Ovsiienko * master/representor and retrieve 27989c2bbd04SViacheslav Ovsiienko * ifindex via sysfs. 2799ad74bc61SViacheslav Ovsiienko */ 2800ad74bc61SViacheslav Ovsiienko continue; 2801ad74bc61SViacheslav Ovsiienko } 28029c2bbd04SViacheslav Ovsiienko ret = mlx5_get_master_ifname 28039c2bbd04SViacheslav Ovsiienko (ibv_match[i]->ibdev_path, &ifname); 28049c2bbd04SViacheslav Ovsiienko if (!ret) 28059c2bbd04SViacheslav Ovsiienko list[ns].ifindex = 28069c2bbd04SViacheslav Ovsiienko if_nametoindex(ifname); 28079c2bbd04SViacheslav Ovsiienko if (!list[ns].ifindex) { 28089c2bbd04SViacheslav Ovsiienko /* 28099c2bbd04SViacheslav Ovsiienko * No network interface index found 28109c2bbd04SViacheslav Ovsiienko * for the specified device, it means 28119c2bbd04SViacheslav Ovsiienko * there it is neither representor 28129c2bbd04SViacheslav Ovsiienko * nor master. 28139c2bbd04SViacheslav Ovsiienko */ 28149c2bbd04SViacheslav Ovsiienko continue; 28159c2bbd04SViacheslav Ovsiienko } 28169c2bbd04SViacheslav Ovsiienko } 2817ad74bc61SViacheslav Ovsiienko ret = -1; 2818ad74bc61SViacheslav Ovsiienko if (nl_route >= 0) 2819ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 2820ad74bc61SViacheslav Ovsiienko (nl_route, 2821ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 2822ad74bc61SViacheslav Ovsiienko &list[ns].info); 2823ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 2824ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 2825ad74bc61SViacheslav Ovsiienko /* 2826ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 2827ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 2828ad74bc61SViacheslav Ovsiienko * with sysfs. 
2829ad74bc61SViacheslav Ovsiienko */ 2830ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 2831ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 2832ad74bc61SViacheslav Ovsiienko &list[ns].info); 2833ad74bc61SViacheslav Ovsiienko } 2834ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 2835ad74bc61SViacheslav Ovsiienko list[ns].info.master)) { 2836ad74bc61SViacheslav Ovsiienko ns++; 2837ad74bc61SViacheslav Ovsiienko } else if ((nd == 1) && 2838ad74bc61SViacheslav Ovsiienko !list[ns].info.representor && 2839ad74bc61SViacheslav Ovsiienko !list[ns].info.master) { 2840ad74bc61SViacheslav Ovsiienko /* 2841ad74bc61SViacheslav Ovsiienko * Single IB device with 2842ad74bc61SViacheslav Ovsiienko * one physical port and 2843ad74bc61SViacheslav Ovsiienko * attached network device. 2844ad74bc61SViacheslav Ovsiienko * May be SRIOV is not enabled 2845ad74bc61SViacheslav Ovsiienko * or there is no representors. 2846ad74bc61SViacheslav Ovsiienko */ 2847ad74bc61SViacheslav Ovsiienko DRV_LOG(INFO, "no E-Switch support detected"); 2848ad74bc61SViacheslav Ovsiienko ns++; 2849ad74bc61SViacheslav Ovsiienko break; 285026c08b97SAdrien Mazarguil } 2851f38c5457SAdrien Mazarguil } 2852ad74bc61SViacheslav Ovsiienko if (!ns) { 2853ad74bc61SViacheslav Ovsiienko DRV_LOG(ERR, 2854ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 2855ad74bc61SViacheslav Ovsiienko " on the multiple IB devices"); 2856ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 2857ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 2858ad74bc61SViacheslav Ovsiienko goto exit; 2859ad74bc61SViacheslav Ovsiienko } 2860ad74bc61SViacheslav Ovsiienko } 2861ad74bc61SViacheslav Ovsiienko assert(ns); 2862116f90adSAdrien Mazarguil /* 2863116f90adSAdrien Mazarguil * Sort list to probe devices in natural order for users convenience 2864116f90adSAdrien Mazarguil * (i.e. master first, then representors from lowest to highest ID). 2865116f90adSAdrien Mazarguil */ 2866ad74bc61SViacheslav Ovsiienko qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp); 2867f87bfa8eSYongseok Koh /* Default configuration. */ 2868f87bfa8eSYongseok Koh dev_config = (struct mlx5_dev_config){ 286978c7a16dSYongseok Koh .hw_padding = 0, 2870f87bfa8eSYongseok Koh .mps = MLX5_ARG_UNSET, 2871f87bfa8eSYongseok Koh .rx_vec_en = 1, 2872505f1fe4SViacheslav Ovsiienko .txq_inline_max = MLX5_ARG_UNSET, 2873505f1fe4SViacheslav Ovsiienko .txq_inline_min = MLX5_ARG_UNSET, 2874505f1fe4SViacheslav Ovsiienko .txq_inline_mpw = MLX5_ARG_UNSET, 2875f87bfa8eSYongseok Koh .txqs_inline = MLX5_ARG_UNSET, 2876f87bfa8eSYongseok Koh .vf_nl_en = 1, 2877dceb5029SYongseok Koh .mr_ext_memseg_en = 1, 2878f87bfa8eSYongseok Koh .mprq = { 2879f87bfa8eSYongseok Koh .enabled = 0, /* Disabled by default. */ 2880f87bfa8eSYongseok Koh .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N, 2881f87bfa8eSYongseok Koh .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, 2882f87bfa8eSYongseok Koh .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, 2883f87bfa8eSYongseok Koh }, 2884e2b4925eSOri Kam .dv_esw_en = 1, 2885f87bfa8eSYongseok Koh }; 2886ad74bc61SViacheslav Ovsiienko /* Device specific configuration. 
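	 * The switch below sets dev_config.vf = 1 for VF PCI device IDs.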
*/ 2887f38c5457SAdrien Mazarguil switch (pci_dev->id.device_id) { 2888f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 2889f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 2890f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 2891f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 2892a40b734bSViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 2893c930f02cSViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 2894f87bfa8eSYongseok Koh dev_config.vf = 1; 2895f38c5457SAdrien Mazarguil break; 2896f38c5457SAdrien Mazarguil default: 2897f87bfa8eSYongseok Koh break; 2898f38c5457SAdrien Mazarguil } 2899ad74bc61SViacheslav Ovsiienko for (i = 0; i != ns; ++i) { 29002b730263SAdrien Mazarguil uint32_t restore; 29012b730263SAdrien Mazarguil 2902f87bfa8eSYongseok Koh list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 2903ad74bc61SViacheslav Ovsiienko &list[i], 2904ad74bc61SViacheslav Ovsiienko dev_config); 29056de569f5SAdrien Mazarguil if (!list[i].eth_dev) { 2906206254b7SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 29072b730263SAdrien Mazarguil break; 2908206254b7SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 29096de569f5SAdrien Mazarguil continue; 29106de569f5SAdrien Mazarguil } 2911116f90adSAdrien Mazarguil restore = list[i].eth_dev->data->dev_flags; 2912116f90adSAdrien Mazarguil rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 29132b730263SAdrien Mazarguil /* Restore non-PCI flags cleared by the above call. */ 2914116f90adSAdrien Mazarguil list[i].eth_dev->data->dev_flags |= restore; 291523242063SMatan Azrad mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev); 2916116f90adSAdrien Mazarguil rte_eth_dev_probing_finish(list[i].eth_dev); 29172b730263SAdrien Mazarguil } 2918ad74bc61SViacheslav Ovsiienko if (i != ns) { 2919f38c5457SAdrien Mazarguil DRV_LOG(ERR, 2920f38c5457SAdrien Mazarguil "probe of PCI device " PCI_PRI_FMT " aborted after" 2921f38c5457SAdrien Mazarguil " encountering an error: %s", 2922f38c5457SAdrien Mazarguil pci_dev->addr.domain, pci_dev->addr.bus, 2923f38c5457SAdrien Mazarguil pci_dev->addr.devid, pci_dev->addr.function, 2924f38c5457SAdrien Mazarguil strerror(rte_errno)); 2925f38c5457SAdrien Mazarguil ret = -rte_errno; 29262b730263SAdrien Mazarguil /* Roll back. */ 29272b730263SAdrien Mazarguil while (i--) { 29286de569f5SAdrien Mazarguil if (!list[i].eth_dev) 29296de569f5SAdrien Mazarguil continue; 2930116f90adSAdrien Mazarguil mlx5_dev_close(list[i].eth_dev); 2931e16adf08SThomas Monjalon /* mac_addrs must not be freed because in dev_private */ 2932e16adf08SThomas Monjalon list[i].eth_dev->data->mac_addrs = NULL; 2933116f90adSAdrien Mazarguil claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 29342b730263SAdrien Mazarguil } 29352b730263SAdrien Mazarguil /* Restore original error. 
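		 * (the rollback calls above may have overwritten rte_errno)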
		 */
29362b730263SAdrien Mazarguil 		rte_errno = -ret;
2937f38c5457SAdrien Mazarguil 	} else {
2938f38c5457SAdrien Mazarguil 		ret = 0;
2939f38c5457SAdrien Mazarguil 	}
2940ad74bc61SViacheslav Ovsiienko exit:
2941ad74bc61SViacheslav Ovsiienko 	/*
2942ad74bc61SViacheslav Ovsiienko 	 * Do the routine cleanup:
2943ad74bc61SViacheslav Ovsiienko 	 * - close opened Netlink sockets
2944a62ec991SViacheslav Ovsiienko 	 * - free allocated spawn data array
2945ad74bc61SViacheslav Ovsiienko 	 * - free the Infiniband device list
2946ad74bc61SViacheslav Ovsiienko 	 */
2947ad74bc61SViacheslav Ovsiienko 	if (nl_rdma >= 0)
2948ad74bc61SViacheslav Ovsiienko 		close(nl_rdma);
2949ad74bc61SViacheslav Ovsiienko 	if (nl_route >= 0)
2950ad74bc61SViacheslav Ovsiienko 		close(nl_route);
2951a62ec991SViacheslav Ovsiienko 	if (list)
2952a62ec991SViacheslav Ovsiienko 		rte_free(list);
2953ad74bc61SViacheslav Ovsiienko 	assert(ibv_list);
2954ad74bc61SViacheslav Ovsiienko 	mlx5_glue->free_device_list(ibv_list);
2955f38c5457SAdrien Mazarguil 	return ret;
2956771fa900SAdrien Mazarguil }
2957771fa900SAdrien Mazarguil 
2958fbc83412SViacheslav Ovsiienko /**
2959fbc83412SViacheslav Ovsiienko  * Look for the Ethernet device belonging to the mlx5 driver.
2960fbc83412SViacheslav Ovsiienko  *
2961fbc83412SViacheslav Ovsiienko  * @param[in] port_id
2962fbc83412SViacheslav Ovsiienko  *   port_id to start looking for the device.
2963fbc83412SViacheslav Ovsiienko  * @param[in] pci_dev
2964fbc83412SViacheslav Ovsiienko  *   Pointer to the hint PCI device. When the device is being probed,
2965fbc83412SViacheslav Ovsiienko  *   its siblings (master and preceding representors) might not have an
2966fbc83412SViacheslav Ovsiienko  *   assigned driver yet (because mlx5_pci_probe() is not completed yet),
2967fbc83412SViacheslav Ovsiienko  *   so in this case a match on the hint PCI device may be used to
2968fbc83412SViacheslav Ovsiienko  *   detect the sibling device.
2969fbc83412SViacheslav Ovsiienko  *
2970fbc83412SViacheslav Ovsiienko  * @return
2971fbc83412SViacheslav Ovsiienko  *   port_id of found device, RTE_MAX_ETHPORTS if not found.
2972fbc83412SViacheslav Ovsiienko  */
2973f7e95215SViacheslav Ovsiienko uint16_t
2974fbc83412SViacheslav Ovsiienko mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
2975f7e95215SViacheslav Ovsiienko {
2976f7e95215SViacheslav Ovsiienko 	while (port_id < RTE_MAX_ETHPORTS) {
2977f7e95215SViacheslav Ovsiienko 		struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2978f7e95215SViacheslav Ovsiienko 
2979f7e95215SViacheslav Ovsiienko 		if (dev->state != RTE_ETH_DEV_UNUSED &&
2980f7e95215SViacheslav Ovsiienko 		    dev->device &&
2981fbc83412SViacheslav Ovsiienko 		    (dev->device == &pci_dev->device ||
2982fbc83412SViacheslav Ovsiienko 		     (dev->device->driver &&
2983f7e95215SViacheslav Ovsiienko 		      dev->device->driver->name &&
2984fbc83412SViacheslav Ovsiienko 		      !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
2985f7e95215SViacheslav Ovsiienko 			break;
2986f7e95215SViacheslav Ovsiienko 		port_id++;
2987f7e95215SViacheslav Ovsiienko 	}
2988f7e95215SViacheslav Ovsiienko 	if (port_id >= RTE_MAX_ETHPORTS)
2989f7e95215SViacheslav Ovsiienko 		return RTE_MAX_ETHPORTS;
2990f7e95215SViacheslav Ovsiienko 	return port_id;
2991f7e95215SViacheslav Ovsiienko }
2992f7e95215SViacheslav Ovsiienko 
29933a820742SOphir Munk /**
29943a820742SOphir Munk  * DPDK callback to remove a PCI device.
29953a820742SOphir Munk  *
29963a820742SOphir Munk  * This function removes all Ethernet devices belonging to a given PCI device.
29973a820742SOphir Munk * 29983a820742SOphir Munk * @param[in] pci_dev 29993a820742SOphir Munk * Pointer to the PCI device. 30003a820742SOphir Munk * 30013a820742SOphir Munk * @return 30023a820742SOphir Munk * 0 on success, the function cannot fail. 30033a820742SOphir Munk */ 30043a820742SOphir Munk static int 30053a820742SOphir Munk mlx5_pci_remove(struct rte_pci_device *pci_dev) 30063a820742SOphir Munk { 30073a820742SOphir Munk uint16_t port_id; 30083a820742SOphir Munk 30095294b800SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 30103a820742SOphir Munk rte_eth_dev_close(port_id); 30113a820742SOphir Munk return 0; 30123a820742SOphir Munk } 30133a820742SOphir Munk 3014771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = { 3015771fa900SAdrien Mazarguil { 30161d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 30171d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4) 3018771fa900SAdrien Mazarguil }, 3019771fa900SAdrien Mazarguil { 30201d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 30211d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) 3022771fa900SAdrien Mazarguil }, 3023771fa900SAdrien Mazarguil { 30241d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 30251d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) 3026771fa900SAdrien Mazarguil }, 3027771fa900SAdrien Mazarguil { 30281d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 30291d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) 3030771fa900SAdrien Mazarguil }, 3031771fa900SAdrien Mazarguil { 3032528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3033528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5) 3034528a9fbeSYongseok Koh }, 3035528a9fbeSYongseok Koh { 3036528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3037528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) 3038528a9fbeSYongseok Koh }, 3039528a9fbeSYongseok Koh { 3040528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3041528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) 3042528a9fbeSYongseok Koh }, 3043528a9fbeSYongseok Koh { 3044528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3045528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) 3046528a9fbeSYongseok Koh }, 3047528a9fbeSYongseok Koh { 3048dd3331c6SShahaf Shuler RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3049dd3331c6SShahaf Shuler PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) 3050dd3331c6SShahaf Shuler }, 3051dd3331c6SShahaf Shuler { 3052c322c0e5SOri Kam RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3053c322c0e5SOri Kam PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) 3054c322c0e5SOri Kam }, 3055c322c0e5SOri Kam { 3056f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3057f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6) 3058f0354d84SWisam Jaddo }, 3059f0354d84SWisam Jaddo { 3060f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 3061f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) 3062f0354d84SWisam Jaddo }, 3063f0354d84SWisam Jaddo { 3064771fa900SAdrien Mazarguil .vendor_id = 0 3065771fa900SAdrien Mazarguil } 3066771fa900SAdrien Mazarguil }; 3067771fa900SAdrien Mazarguil 3068fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver = { 30692f3193cfSJan Viktorin .driver = { 30702f3193cfSJan Viktorin .name = MLX5_DRIVER_NAME 30712f3193cfSJan Viktorin }, 3072771fa900SAdrien Mazarguil .id_table = mlx5_pci_id_map, 3073af424af8SShreyansh Jain .probe = mlx5_pci_probe, 30743a820742SOphir Munk .remove = mlx5_pci_remove, 
3075989e999dSShahaf Shuler .dma_map = mlx5_dma_map, 3076989e999dSShahaf Shuler .dma_unmap = mlx5_dma_unmap, 307769c06d0eSYongseok Koh .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV | 3078b76fafb1SDavid Marchand RTE_PCI_DRV_PROBE_AGAIN, 3079771fa900SAdrien Mazarguil }; 3080771fa900SAdrien Mazarguil 308172b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN 308259b91becSAdrien Mazarguil 308359b91becSAdrien Mazarguil /** 308408c028d0SAdrien Mazarguil * Suffix RTE_EAL_PMD_PATH with "-glue". 308508c028d0SAdrien Mazarguil * 308608c028d0SAdrien Mazarguil * This function performs a sanity check on RTE_EAL_PMD_PATH before 308708c028d0SAdrien Mazarguil * suffixing its last component. 308808c028d0SAdrien Mazarguil * 308908c028d0SAdrien Mazarguil * @param buf[out] 309008c028d0SAdrien Mazarguil * Output buffer, should be large enough otherwise NULL is returned. 309108c028d0SAdrien Mazarguil * @param size 309208c028d0SAdrien Mazarguil * Size of @p out. 309308c028d0SAdrien Mazarguil * 309408c028d0SAdrien Mazarguil * @return 309508c028d0SAdrien Mazarguil * Pointer to @p buf or @p NULL in case suffix cannot be appended. 309608c028d0SAdrien Mazarguil */ 309708c028d0SAdrien Mazarguil static char * 309808c028d0SAdrien Mazarguil mlx5_glue_path(char *buf, size_t size) 309908c028d0SAdrien Mazarguil { 310008c028d0SAdrien Mazarguil static const char *const bad[] = { "/", ".", "..", NULL }; 310108c028d0SAdrien Mazarguil const char *path = RTE_EAL_PMD_PATH; 310208c028d0SAdrien Mazarguil size_t len = strlen(path); 310308c028d0SAdrien Mazarguil size_t off; 310408c028d0SAdrien Mazarguil int i; 310508c028d0SAdrien Mazarguil 310608c028d0SAdrien Mazarguil while (len && path[len - 1] == '/') 310708c028d0SAdrien Mazarguil --len; 310808c028d0SAdrien Mazarguil for (off = len; off && path[off - 1] != '/'; --off) 310908c028d0SAdrien Mazarguil ; 311008c028d0SAdrien Mazarguil for (i = 0; bad[i]; ++i) 311108c028d0SAdrien Mazarguil if (!strncmp(path + off, bad[i], (int)(len - off))) 311208c028d0SAdrien Mazarguil goto error; 311308c028d0SAdrien Mazarguil i = snprintf(buf, size, "%.*s-glue", (int)len, path); 311408c028d0SAdrien Mazarguil if (i == -1 || (size_t)i >= size) 311508c028d0SAdrien Mazarguil goto error; 311608c028d0SAdrien Mazarguil return buf; 311708c028d0SAdrien Mazarguil error: 3118a170a30dSNélio Laranjeiro DRV_LOG(ERR, 3119a170a30dSNélio Laranjeiro "unable to append \"-glue\" to last component of" 312008c028d0SAdrien Mazarguil " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," 312108c028d0SAdrien Mazarguil " please re-configure DPDK"); 312208c028d0SAdrien Mazarguil return NULL; 312308c028d0SAdrien Mazarguil } 312408c028d0SAdrien Mazarguil 312508c028d0SAdrien Mazarguil /** 312659b91becSAdrien Mazarguil * Initialization routine for run-time dependency on rdma-core. 312759b91becSAdrien Mazarguil */ 312859b91becSAdrien Mazarguil static int 312959b91becSAdrien Mazarguil mlx5_glue_init(void) 313059b91becSAdrien Mazarguil { 313108c028d0SAdrien Mazarguil char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; 3132f6242d06SAdrien Mazarguil const char *path[] = { 3133f6242d06SAdrien Mazarguil /* 3134f6242d06SAdrien Mazarguil * A basic security check is necessary before trusting 3135f6242d06SAdrien Mazarguil * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH. 3136f6242d06SAdrien Mazarguil */ 3137f6242d06SAdrien Mazarguil (geteuid() == getuid() && getegid() == getgid() ? 
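		/*
		 * That is, MLX5_GLUE_PATH is honoured only when the real and
		 * effective user/group IDs match (not a setuid/setgid run).
		 */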
3138f6242d06SAdrien Mazarguil getenv("MLX5_GLUE_PATH") : NULL), 313908c028d0SAdrien Mazarguil /* 314008c028d0SAdrien Mazarguil * When RTE_EAL_PMD_PATH is set, use its glue-suffixed 314108c028d0SAdrien Mazarguil * variant, otherwise let dlopen() look up libraries on its 314208c028d0SAdrien Mazarguil * own. 314308c028d0SAdrien Mazarguil */ 314408c028d0SAdrien Mazarguil (*RTE_EAL_PMD_PATH ? 314508c028d0SAdrien Mazarguil mlx5_glue_path(glue_path, sizeof(glue_path)) : ""), 3146f6242d06SAdrien Mazarguil }; 3147f6242d06SAdrien Mazarguil unsigned int i = 0; 314859b91becSAdrien Mazarguil void *handle = NULL; 314959b91becSAdrien Mazarguil void **sym; 315059b91becSAdrien Mazarguil const char *dlmsg; 315159b91becSAdrien Mazarguil 3152f6242d06SAdrien Mazarguil while (!handle && i != RTE_DIM(path)) { 3153f6242d06SAdrien Mazarguil const char *end; 3154f6242d06SAdrien Mazarguil size_t len; 3155f6242d06SAdrien Mazarguil int ret; 3156f6242d06SAdrien Mazarguil 3157f6242d06SAdrien Mazarguil if (!path[i]) { 3158f6242d06SAdrien Mazarguil ++i; 3159f6242d06SAdrien Mazarguil continue; 3160f6242d06SAdrien Mazarguil } 3161f6242d06SAdrien Mazarguil end = strpbrk(path[i], ":;"); 3162f6242d06SAdrien Mazarguil if (!end) 3163f6242d06SAdrien Mazarguil end = path[i] + strlen(path[i]); 3164f6242d06SAdrien Mazarguil len = end - path[i]; 3165f6242d06SAdrien Mazarguil ret = 0; 3166f6242d06SAdrien Mazarguil do { 3167f6242d06SAdrien Mazarguil char name[ret + 1]; 3168f6242d06SAdrien Mazarguil 3169f6242d06SAdrien Mazarguil ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE, 3170f6242d06SAdrien Mazarguil (int)len, path[i], 3171f6242d06SAdrien Mazarguil (!len || *(end - 1) == '/') ? "" : "/"); 3172f6242d06SAdrien Mazarguil if (ret == -1) 3173f6242d06SAdrien Mazarguil break; 3174f6242d06SAdrien Mazarguil if (sizeof(name) != (size_t)ret + 1) 3175f6242d06SAdrien Mazarguil continue; 3176a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"", 3177a170a30dSNélio Laranjeiro name); 3178f6242d06SAdrien Mazarguil handle = dlopen(name, RTLD_LAZY); 3179f6242d06SAdrien Mazarguil break; 3180f6242d06SAdrien Mazarguil } while (1); 3181f6242d06SAdrien Mazarguil path[i] = end + 1; 3182f6242d06SAdrien Mazarguil if (!*end) 3183f6242d06SAdrien Mazarguil ++i; 3184f6242d06SAdrien Mazarguil } 318559b91becSAdrien Mazarguil if (!handle) { 318659b91becSAdrien Mazarguil rte_errno = EINVAL; 318759b91becSAdrien Mazarguil dlmsg = dlerror(); 318859b91becSAdrien Mazarguil if (dlmsg) 3189a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg); 319059b91becSAdrien Mazarguil goto glue_error; 319159b91becSAdrien Mazarguil } 319259b91becSAdrien Mazarguil sym = dlsym(handle, "mlx5_glue"); 319359b91becSAdrien Mazarguil if (!sym || !*sym) { 319459b91becSAdrien Mazarguil rte_errno = EINVAL; 319559b91becSAdrien Mazarguil dlmsg = dlerror(); 319659b91becSAdrien Mazarguil if (dlmsg) 3197a170a30dSNélio Laranjeiro DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg); 319859b91becSAdrien Mazarguil goto glue_error; 319959b91becSAdrien Mazarguil } 320059b91becSAdrien Mazarguil mlx5_glue = *sym; 320159b91becSAdrien Mazarguil return 0; 320259b91becSAdrien Mazarguil glue_error: 320359b91becSAdrien Mazarguil if (handle) 320459b91becSAdrien Mazarguil dlclose(handle); 3205a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 3206a170a30dSNélio Laranjeiro "cannot initialize PMD due to missing run-time dependency on" 3207a170a30dSNélio Laranjeiro " rdma-core libraries (libibverbs, libmlx5)"); 320859b91becSAdrien Mazarguil return -rte_errno; 
320959b91becSAdrien Mazarguil } 321059b91becSAdrien Mazarguil 321159b91becSAdrien Mazarguil #endif 321259b91becSAdrien Mazarguil 3213771fa900SAdrien Mazarguil /** 3214771fa900SAdrien Mazarguil * Driver initialization routine. 3215771fa900SAdrien Mazarguil */ 3216f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init) 3217771fa900SAdrien Mazarguil { 32183d96644aSStephen Hemminger /* Initialize driver log type. */ 32193d96644aSStephen Hemminger mlx5_logtype = rte_log_register("pmd.net.mlx5"); 32203d96644aSStephen Hemminger if (mlx5_logtype >= 0) 32213d96644aSStephen Hemminger rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); 32223d96644aSStephen Hemminger 32235f8ba81cSXueming Li /* Build the static tables for Verbs conversion. */ 3224ea16068cSYongseok Koh mlx5_set_ptype_table(); 32255f8ba81cSXueming Li mlx5_set_cksum_table(); 32265f8ba81cSXueming Li mlx5_set_swp_types_table(); 3227771fa900SAdrien Mazarguil /* 3228771fa900SAdrien Mazarguil * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use 3229771fa900SAdrien Mazarguil * huge pages. Calling ibv_fork_init() during init allows 3230771fa900SAdrien Mazarguil * applications to use fork() safely for purposes other than 3231771fa900SAdrien Mazarguil * using this PMD, which is not supported in forked processes. 3232771fa900SAdrien Mazarguil */ 3233771fa900SAdrien Mazarguil setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); 3234161b93e5SYongseok Koh /* Match the size of Rx completion entry to the size of a cacheline. */ 3235161b93e5SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128) 3236161b93e5SYongseok Koh setenv("MLX5_CQE_SIZE", "128", 0); 32371ff30d18SMatan Azrad /* 32381ff30d18SMatan Azrad * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to 32391ff30d18SMatan Azrad * cleanup all the Verbs resources even when the device was removed. 32401ff30d18SMatan Azrad */ 32411ff30d18SMatan Azrad setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1); 324272b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN 324359b91becSAdrien Mazarguil if (mlx5_glue_init()) 324459b91becSAdrien Mazarguil return; 324559b91becSAdrien Mazarguil assert(mlx5_glue); 324659b91becSAdrien Mazarguil #endif 32472a3b0097SAdrien Mazarguil #ifndef NDEBUG 32482a3b0097SAdrien Mazarguil /* Glue structure must not contain any NULL pointers. */ 32492a3b0097SAdrien Mazarguil { 32502a3b0097SAdrien Mazarguil unsigned int i; 32512a3b0097SAdrien Mazarguil 32522a3b0097SAdrien Mazarguil for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i) 32532a3b0097SAdrien Mazarguil assert(((const void *const *)mlx5_glue)[i]); 32542a3b0097SAdrien Mazarguil } 32552a3b0097SAdrien Mazarguil #endif 32566d5df2eaSAdrien Mazarguil if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) { 3257a170a30dSNélio Laranjeiro DRV_LOG(ERR, 3258a170a30dSNélio Laranjeiro "rdma-core glue \"%s\" mismatch: \"%s\" is required", 32596d5df2eaSAdrien Mazarguil mlx5_glue->version, MLX5_GLUE_VERSION); 32606d5df2eaSAdrien Mazarguil return; 32616d5df2eaSAdrien Mazarguil } 32620e83b8e5SNelio Laranjeiro mlx5_glue->fork_init(); 32633dcfe039SThomas Monjalon rte_pci_register(&mlx5_driver); 3264771fa900SAdrien Mazarguil } 3265771fa900SAdrien Mazarguil 326601f19227SShreyansh Jain RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); 326701f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); 32680880c401SOlivier Matz RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); 3269
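/*
 * Run-time note on the dlopen()'ed rdma-core glue (RTE_IBVERBS_LINK_DLOPEN):
 * mlx5_glue_init() tries, in order, each directory listed in MLX5_GLUE_PATH
 * (':'- or ';'-separated, honoured only for non-set-uid/set-gid binaries),
 * then RTE_EAL_PMD_PATH suffixed with "-glue" when that macro is non-empty,
 * otherwise the default dlopen() search path. A hypothetical override could
 * look like:
 *
 *   MLX5_GLUE_PATH=/opt/dpdk/glue:/usr/local/lib ./dpdk-application
 *
 * (both directory names and the application name are examples only).
 */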