18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <assert.h> 1059b91becSAdrien Mazarguil #include <dlfcn.h> 11771fa900SAdrien Mazarguil #include <stdint.h> 12771fa900SAdrien Mazarguil #include <stdlib.h> 13e72dd09bSNélio Laranjeiro #include <errno.h> 14771fa900SAdrien Mazarguil #include <net/if.h> 154a984153SXueming Li #include <sys/mman.h> 16ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 17771fa900SAdrien Mazarguil 18771fa900SAdrien Mazarguil /* Verbs header. */ 19771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 20771fa900SAdrien Mazarguil #ifdef PEDANTIC 21fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic" 22771fa900SAdrien Mazarguil #endif 23771fa900SAdrien Mazarguil #include <infiniband/verbs.h> 24771fa900SAdrien Mazarguil #ifdef PEDANTIC 25fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic" 26771fa900SAdrien Mazarguil #endif 27771fa900SAdrien Mazarguil 28771fa900SAdrien Mazarguil #include <rte_malloc.h> 29ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 30fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 31771fa900SAdrien Mazarguil #include <rte_pci.h> 32c752998bSGaetan Rivet #include <rte_bus_pci.h> 33771fa900SAdrien Mazarguil #include <rte_common.h> 3459b91becSAdrien Mazarguil #include <rte_config.h> 354a984153SXueming Li #include <rte_eal_memconfig.h> 36e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 37e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 38e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 39f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 40771fa900SAdrien Mazarguil 41771fa900SAdrien Mazarguil #include "mlx5.h" 42771fa900SAdrien Mazarguil #include "mlx5_utils.h" 432e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 44771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 4513d57bd5SAdrien Mazarguil #include "mlx5_defs.h" 460e83b8e5SNelio Laranjeiro #include "mlx5_glue.h" 47974f1e7eSYongseok Koh #include "mlx5_mr.h" 4884c406e7SOri Kam #include "mlx5_flow.h" 49771fa900SAdrien Mazarguil 5099c12dccSNélio Laranjeiro /* Device parameter to enable RX completion queue compression. */ 5199c12dccSNélio Laranjeiro #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" 5299c12dccSNélio Laranjeiro 53bc91e8dbSYongseok Koh /* Device parameter to enable RX completion entry padding to 128B. */ 54bc91e8dbSYongseok Koh #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" 55bc91e8dbSYongseok Koh 5678c7a16dSYongseok Koh /* Device parameter to enable padding Rx packet to cacheline size. */ 5778c7a16dSYongseok Koh #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" 5878c7a16dSYongseok Koh 597d6bf6b8SYongseok Koh /* Device parameter to enable Multi-Packet Rx queue. */ 607d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_EN "mprq_en" 617d6bf6b8SYongseok Koh 627d6bf6b8SYongseok Koh /* Device parameter to configure log 2 of the number of strides for MPRQ. */ 637d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" 647d6bf6b8SYongseok Koh 657d6bf6b8SYongseok Koh /* Device parameter to limit the size of memcpy'd packet for MPRQ. 
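Packets no longer than this threshold are copied into a regular mbuf rather than attached as an external buffer (descriptive note based on the MPRQ Rx path behaviour, not stated in this file).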
*/ 667d6bf6b8SYongseok Koh #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" 677d6bf6b8SYongseok Koh 687d6bf6b8SYongseok Koh /* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ 697d6bf6b8SYongseok Koh #define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" 707d6bf6b8SYongseok Koh 712a66cf37SYaacov Hazan /* Device parameter to configure inline send. */ 722a66cf37SYaacov Hazan #define MLX5_TXQ_INLINE "txq_inline" 732a66cf37SYaacov Hazan 742a66cf37SYaacov Hazan /* 752a66cf37SYaacov Hazan * Device parameter to configure the number of TX queues threshold for 762a66cf37SYaacov Hazan * enabling inline send. 772a66cf37SYaacov Hazan */ 782a66cf37SYaacov Hazan #define MLX5_TXQS_MIN_INLINE "txqs_min_inline" 792a66cf37SYaacov Hazan 8009d8b416SYongseok Koh /* 8109d8b416SYongseok Koh * Device parameter to configure the number of TX queues threshold for 8209d8b416SYongseok Koh * enabling vectorized Tx. 8309d8b416SYongseok Koh */ 8409d8b416SYongseok Koh #define MLX5_TXQS_MAX_VEC "txqs_max_vec" 8509d8b416SYongseok Koh 86230189d9SNélio Laranjeiro /* Device parameter to enable multi-packet send WQEs. */ 87230189d9SNélio Laranjeiro #define MLX5_TXQ_MPW_EN "txq_mpw_en" 88230189d9SNélio Laranjeiro 896ce84bd8SYongseok Koh /* Device parameter to include 2 dsegs in the title WQEBB. */ 906ce84bd8SYongseok Koh #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" 916ce84bd8SYongseok Koh 926ce84bd8SYongseok Koh /* Device parameter to limit the size of inlining packet. */ 936ce84bd8SYongseok Koh #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" 946ce84bd8SYongseok Koh 955644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Tx vector. */ 965644d5b9SNelio Laranjeiro #define MLX5_TX_VEC_EN "tx_vec_en" 975644d5b9SNelio Laranjeiro 985644d5b9SNelio Laranjeiro /* Device parameter to enable hardware Rx vector. */ 995644d5b9SNelio Laranjeiro #define MLX5_RX_VEC_EN "rx_vec_en" 1005644d5b9SNelio Laranjeiro 10178a54648SXueming Li /* Allow L3 VXLAN flow creation. */ 10278a54648SXueming Li #define MLX5_L3_VXLAN_EN "l3_vxlan_en" 10378a54648SXueming Li 10451e72d38SOri Kam /* Activate DV flow steering. */ 10551e72d38SOri Kam #define MLX5_DV_FLOW_EN "dv_flow_en" 10651e72d38SOri Kam 107db209cc3SNélio Laranjeiro /* Activate Netlink support in VF mode. */ 108db209cc3SNélio Laranjeiro #define MLX5_VF_NL_EN "vf_nl_en" 109db209cc3SNélio Laranjeiro 110dceb5029SYongseok Koh /* Enable extending memsegs when creating a MR. */ 111dceb5029SYongseok Koh #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" 112dceb5029SYongseok Koh 1136de569f5SAdrien Mazarguil /* Select port representors to instantiate. */ 1146de569f5SAdrien Mazarguil #define MLX5_REPRESENTOR "representor" 1156de569f5SAdrien Mazarguil 11643e9d979SShachar Beiser #ifndef HAVE_IBV_MLX5_MOD_MPW 11743e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) 11843e9d979SShachar Beiser #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) 11943e9d979SShachar Beiser #endif 12043e9d979SShachar Beiser 121523f5a74SYongseok Koh #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP 122523f5a74SYongseok Koh #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) 123523f5a74SYongseok Koh #endif 124523f5a74SYongseok Koh 125974f1e7eSYongseok Koh static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 126974f1e7eSYongseok Koh 127974f1e7eSYongseok Koh /* Shared memory between primary and secondary processes. 
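Reserved as a memzone by the primary process and looked up by secondary processes in mlx5_init_shared_data() below.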
*/ 128974f1e7eSYongseok Koh struct mlx5_shared_data *mlx5_shared_data; 129974f1e7eSYongseok Koh 130974f1e7eSYongseok Koh /* Spinlock for mlx5_shared_data allocation. */ 131974f1e7eSYongseok Koh static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 132974f1e7eSYongseok Koh 1337be600c8SYongseok Koh /* Process local data for secondary processes. */ 1347be600c8SYongseok Koh static struct mlx5_local_data mlx5_local_data; 1357be600c8SYongseok Koh 136a170a30dSNélio Laranjeiro /** Driver-specific log messages type. */ 137a170a30dSNélio Laranjeiro int mlx5_logtype; 138a170a30dSNélio Laranjeiro 139ad74bc61SViacheslav Ovsiienko /** Data associated with devices to spawn. */ 140ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data { 141ad74bc61SViacheslav Ovsiienko uint32_t ifindex; /**< Network interface index. */ 142ad74bc61SViacheslav Ovsiienko uint32_t max_port; /**< IB device maximal port index. */ 143ad74bc61SViacheslav Ovsiienko uint32_t ibv_port; /**< IB device physical port index. */ 144ad74bc61SViacheslav Ovsiienko struct mlx5_switch_info info; /**< Switch information. */ 145ad74bc61SViacheslav Ovsiienko struct ibv_device *ibv_dev; /**< Associated IB device. */ 146ad74bc61SViacheslav Ovsiienko struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */ 147ad74bc61SViacheslav Ovsiienko }; 148ad74bc61SViacheslav Ovsiienko 14917e19bc4SViacheslav Ovsiienko static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER(); 15017e19bc4SViacheslav Ovsiienko static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER; 15117e19bc4SViacheslav Ovsiienko 15217e19bc4SViacheslav Ovsiienko /** 15317e19bc4SViacheslav Ovsiienko * Allocate shared IB device context. If there is a multiport device, the 15417e19bc4SViacheslav Ovsiienko * master and representors will share this context; if there is a single-port 15517e19bc4SViacheslav Ovsiienko * dedicated IB device, the context will be used by only the given 15617e19bc4SViacheslav Ovsiienko * port due to unification. 15717e19bc4SViacheslav Ovsiienko * 15817e19bc4SViacheslav Ovsiienko * The routine first searches the context list for the specified IB device name; 15917e19bc4SViacheslav Ovsiienko * if found, the shared context is assumed and the reference counter is incremented. 16017e19bc4SViacheslav Ovsiienko * If no context is found, a new one is created and initialized with the specified 16117e19bc4SViacheslav Ovsiienko * IB device context and parameters. 16217e19bc4SViacheslav Ovsiienko * 16317e19bc4SViacheslav Ovsiienko * @param[in] spawn 16417e19bc4SViacheslav Ovsiienko * Pointer to the IB device attributes (name, port, etc). 16517e19bc4SViacheslav Ovsiienko * 16617e19bc4SViacheslav Ovsiienko * @return 16717e19bc4SViacheslav Ovsiienko * Pointer to mlx5_ibv_shared object on success, 16817e19bc4SViacheslav Ovsiienko * otherwise NULL and rte_errno is set. 16917e19bc4SViacheslav Ovsiienko */ 17017e19bc4SViacheslav Ovsiienko static struct mlx5_ibv_shared * 17117e19bc4SViacheslav Ovsiienko mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn) 17217e19bc4SViacheslav Ovsiienko { 17317e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh; 17417e19bc4SViacheslav Ovsiienko int err = 0; 17553e5a82fSViacheslav Ovsiienko uint32_t i; 17617e19bc4SViacheslav Ovsiienko 17717e19bc4SViacheslav Ovsiienko assert(spawn); 17817e19bc4SViacheslav Ovsiienko /* Secondary process should not create the shared context. 
*/ 17917e19bc4SViacheslav Ovsiienko assert(rte_eal_process_type() == RTE_PROC_PRIMARY); 18017e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 18117e19bc4SViacheslav Ovsiienko /* Search for IB context by device name. */ 18217e19bc4SViacheslav Ovsiienko LIST_FOREACH(sh, &mlx5_ibv_list, next) { 18317e19bc4SViacheslav Ovsiienko if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) { 18417e19bc4SViacheslav Ovsiienko sh->refcnt++; 18517e19bc4SViacheslav Ovsiienko goto exit; 18617e19bc4SViacheslav Ovsiienko } 18717e19bc4SViacheslav Ovsiienko } 18817e19bc4SViacheslav Ovsiienko /* No device found, we have to create a new shared context. */ 18917e19bc4SViacheslav Ovsiienko assert(spawn->max_port); 19017e19bc4SViacheslav Ovsiienko sh = rte_zmalloc("ethdev shared ib context", 19117e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared) + 19217e19bc4SViacheslav Ovsiienko spawn->max_port * 19317e19bc4SViacheslav Ovsiienko sizeof(struct mlx5_ibv_shared_port), 19417e19bc4SViacheslav Ovsiienko RTE_CACHE_LINE_SIZE); 19517e19bc4SViacheslav Ovsiienko if (!sh) { 19617e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "shared context allocation failure"); 19717e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 19817e19bc4SViacheslav Ovsiienko goto exit; 19917e19bc4SViacheslav Ovsiienko } 20017e19bc4SViacheslav Ovsiienko /* Try to open IB device with DV first, then usual Verbs. */ 20117e19bc4SViacheslav Ovsiienko errno = 0; 20217e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev); 20317e19bc4SViacheslav Ovsiienko if (sh->ctx) { 20417e19bc4SViacheslav Ovsiienko sh->devx = 1; 20517e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is supported"); 20617e19bc4SViacheslav Ovsiienko } else { 20717e19bc4SViacheslav Ovsiienko sh->ctx = mlx5_glue->open_device(spawn->ibv_dev); 20817e19bc4SViacheslav Ovsiienko if (!sh->ctx) { 20917e19bc4SViacheslav Ovsiienko err = errno ? errno : ENODEV; 21017e19bc4SViacheslav Ovsiienko goto error; 21117e19bc4SViacheslav Ovsiienko } 21217e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "DevX is NOT supported"); 21317e19bc4SViacheslav Ovsiienko } 21417e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr); 21517e19bc4SViacheslav Ovsiienko if (err) { 21617e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "ibv_query_device_ex() failed"); 21717e19bc4SViacheslav Ovsiienko goto error; 21817e19bc4SViacheslav Ovsiienko } 21917e19bc4SViacheslav Ovsiienko sh->refcnt = 1; 22017e19bc4SViacheslav Ovsiienko sh->max_port = spawn->max_port; 22117e19bc4SViacheslav Ovsiienko strncpy(sh->ibdev_name, sh->ctx->device->name, 22217e19bc4SViacheslav Ovsiienko sizeof(sh->ibdev_name)); 22317e19bc4SViacheslav Ovsiienko strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path, 22417e19bc4SViacheslav Ovsiienko sizeof(sh->ibdev_path)); 22553e5a82fSViacheslav Ovsiienko pthread_mutex_init(&sh->intr_mutex, NULL); 22653e5a82fSViacheslav Ovsiienko /* 22753e5a82fSViacheslav Ovsiienko * Setting port_id to an out-of-range value means 22853e5a82fSViacheslav Ovsiienko * there is no interrupt subhandler installed for 22953e5a82fSViacheslav Ovsiienko * the given port index i. 
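 * (RTE_MAX_ETHPORTS is never a valid port id, so it can safely mark an unused slot.)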
23053e5a82fSViacheslav Ovsiienko */ 23153e5a82fSViacheslav Ovsiienko for (i = 0; i < sh->max_port; i++) 23253e5a82fSViacheslav Ovsiienko sh->port[i].ih_port_id = RTE_MAX_ETHPORTS; 23317e19bc4SViacheslav Ovsiienko sh->pd = mlx5_glue->alloc_pd(sh->ctx); 23417e19bc4SViacheslav Ovsiienko if (sh->pd == NULL) { 23517e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "PD allocation failure"); 23617e19bc4SViacheslav Ovsiienko err = ENOMEM; 23717e19bc4SViacheslav Ovsiienko goto error; 23817e19bc4SViacheslav Ovsiienko } 23917e19bc4SViacheslav Ovsiienko LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next); 24017e19bc4SViacheslav Ovsiienko exit: 24117e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 24217e19bc4SViacheslav Ovsiienko return sh; 24317e19bc4SViacheslav Ovsiienko error: 24417e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 24517e19bc4SViacheslav Ovsiienko assert(sh); 24617e19bc4SViacheslav Ovsiienko if (sh->pd) 24717e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 24817e19bc4SViacheslav Ovsiienko if (sh->ctx) 24917e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 25017e19bc4SViacheslav Ovsiienko rte_free(sh); 25117e19bc4SViacheslav Ovsiienko assert(err > 0); 25217e19bc4SViacheslav Ovsiienko rte_errno = err; 25317e19bc4SViacheslav Ovsiienko return NULL; 25417e19bc4SViacheslav Ovsiienko } 25517e19bc4SViacheslav Ovsiienko 25617e19bc4SViacheslav Ovsiienko /** 25717e19bc4SViacheslav Ovsiienko * Free shared IB device context. Decrement counter and if zero free 25817e19bc4SViacheslav Ovsiienko * all allocated resources and close handles. 25917e19bc4SViacheslav Ovsiienko * 26017e19bc4SViacheslav Ovsiienko * @param[in] sh 26117e19bc4SViacheslav Ovsiienko * Pointer to mlx5_ibv_shared object to free 26217e19bc4SViacheslav Ovsiienko */ 26317e19bc4SViacheslav Ovsiienko static void 26417e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) 26517e19bc4SViacheslav Ovsiienko { 26617e19bc4SViacheslav Ovsiienko pthread_mutex_lock(&mlx5_ibv_list_mutex); 26717e19bc4SViacheslav Ovsiienko #ifndef NDEBUG 26817e19bc4SViacheslav Ovsiienko /* Check the object presence in the list. */ 26917e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *lctx; 27017e19bc4SViacheslav Ovsiienko 27117e19bc4SViacheslav Ovsiienko LIST_FOREACH(lctx, &mlx5_ibv_list, next) 27217e19bc4SViacheslav Ovsiienko if (lctx == sh) 27317e19bc4SViacheslav Ovsiienko break; 27417e19bc4SViacheslav Ovsiienko assert(lctx); 27517e19bc4SViacheslav Ovsiienko if (lctx != sh) { 27617e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "Freeing non-existing shared IB context"); 27717e19bc4SViacheslav Ovsiienko goto exit; 27817e19bc4SViacheslav Ovsiienko } 27917e19bc4SViacheslav Ovsiienko #endif 28017e19bc4SViacheslav Ovsiienko assert(sh); 28117e19bc4SViacheslav Ovsiienko assert(sh->refcnt); 28217e19bc4SViacheslav Ovsiienko /* Secondary process should not free the shared context. */ 28317e19bc4SViacheslav Ovsiienko assert(rte_eal_process_type() == RTE_PROC_PRIMARY); 28417e19bc4SViacheslav Ovsiienko if (--sh->refcnt) 28517e19bc4SViacheslav Ovsiienko goto exit; 28617e19bc4SViacheslav Ovsiienko LIST_REMOVE(sh, next); 28753e5a82fSViacheslav Ovsiienko /* 28853e5a82fSViacheslav Ovsiienko * Ensure there is no async event handler installed. 28953e5a82fSViacheslav Ovsiienko * Only primary process handles async device events. 
29053e5a82fSViacheslav Ovsiienko **/ 29153e5a82fSViacheslav Ovsiienko assert(!sh->intr_cnt); 29253e5a82fSViacheslav Ovsiienko if (sh->intr_cnt) 29353e5a82fSViacheslav Ovsiienko rte_intr_callback_unregister 29453e5a82fSViacheslav Ovsiienko (&sh->intr_handle, mlx5_dev_interrupt_handler, sh); 29553e5a82fSViacheslav Ovsiienko pthread_mutex_destroy(&sh->intr_mutex); 29617e19bc4SViacheslav Ovsiienko if (sh->pd) 29717e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->dealloc_pd(sh->pd)); 29817e19bc4SViacheslav Ovsiienko if (sh->ctx) 29917e19bc4SViacheslav Ovsiienko claim_zero(mlx5_glue->close_device(sh->ctx)); 30017e19bc4SViacheslav Ovsiienko rte_free(sh); 30117e19bc4SViacheslav Ovsiienko exit: 30217e19bc4SViacheslav Ovsiienko pthread_mutex_unlock(&mlx5_ibv_list_mutex); 30317e19bc4SViacheslav Ovsiienko } 30417e19bc4SViacheslav Ovsiienko 305771fa900SAdrien Mazarguil /** 306b2177648SViacheslav Ovsiienko * Initialize DR related data within private structure. 307b2177648SViacheslav Ovsiienko * The routine checks the reference counter and does the actual 308b2177648SViacheslav Ovsiienko * resource creation/initialization only if the counter is zero. 309b2177648SViacheslav Ovsiienko * 310b2177648SViacheslav Ovsiienko * @param[in] priv 311b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 312b2177648SViacheslav Ovsiienko * 313b2177648SViacheslav Ovsiienko * @return 314b2177648SViacheslav Ovsiienko * Zero on success, positive error code otherwise. 315b2177648SViacheslav Ovsiienko */ 316b2177648SViacheslav Ovsiienko static int 317b2177648SViacheslav Ovsiienko mlx5_alloc_shared_dr(struct mlx5_priv *priv) 318b2177648SViacheslav Ovsiienko { 319b2177648SViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR 320b2177648SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = priv->sh; 321b2177648SViacheslav Ovsiienko int err = 0; 322b2177648SViacheslav Ovsiienko void *ns; 323b2177648SViacheslav Ovsiienko 324b2177648SViacheslav Ovsiienko assert(sh); 325b2177648SViacheslav Ovsiienko if (sh->dv_refcnt) { 326b2177648SViacheslav Ovsiienko /* Shared DV/DR structures are already initialized. */ 327b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 328b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 329b2177648SViacheslav Ovsiienko return 0; 330b2177648SViacheslav Ovsiienko } 331b2177648SViacheslav Ovsiienko /* Reference counter is zero, we should initialize structures. 
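Both the Rx (ingress) and Tx (egress) DR namespaces are created below; a failure on either path rolls back whatever was already created.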
*/ 332b2177648SViacheslav Ovsiienko ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_INGRESS_BYPASS); 333b2177648SViacheslav Ovsiienko if (!ns) { 334b2177648SViacheslav Ovsiienko DRV_LOG(ERR, "ingress mlx5dv_dr_create_ns failed"); 335b2177648SViacheslav Ovsiienko err = errno; 336b2177648SViacheslav Ovsiienko goto error; 337b2177648SViacheslav Ovsiienko } 338*79e35d0dSViacheslav Ovsiienko sh->rx_ns = ns; 339b2177648SViacheslav Ovsiienko ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS); 340b2177648SViacheslav Ovsiienko if (!ns) { 341b2177648SViacheslav Ovsiienko DRV_LOG(ERR, "egress mlx5dv_dr_create_ns failed"); 342b2177648SViacheslav Ovsiienko err = errno; 343b2177648SViacheslav Ovsiienko goto error; 344b2177648SViacheslav Ovsiienko } 345*79e35d0dSViacheslav Ovsiienko pthread_mutex_init(&sh->dv_mutex, NULL); 346*79e35d0dSViacheslav Ovsiienko sh->tx_ns = ns; 347b2177648SViacheslav Ovsiienko sh->dv_refcnt++; 348b2177648SViacheslav Ovsiienko priv->dr_shared = 1; 349b2177648SViacheslav Ovsiienko return 0; 350b2177648SViacheslav Ovsiienko 351b2177648SViacheslav Ovsiienko error: 352b2177648SViacheslav Ovsiienko /* Rollback the created objects. */ 353*79e35d0dSViacheslav Ovsiienko if (sh->rx_ns) { 354*79e35d0dSViacheslav Ovsiienko mlx5dv_dr_destroy_ns(sh->rx_ns); 355*79e35d0dSViacheslav Ovsiienko sh->rx_ns = NULL; 356b2177648SViacheslav Ovsiienko } 357*79e35d0dSViacheslav Ovsiienko if (sh->tx_ns) { 358*79e35d0dSViacheslav Ovsiienko mlx5dv_dr_destroy_ns(sh->tx_ns); 359*79e35d0dSViacheslav Ovsiienko sh->tx_ns = NULL; 360b2177648SViacheslav Ovsiienko } 361b2177648SViacheslav Ovsiienko return err; 362b2177648SViacheslav Ovsiienko #else 363b2177648SViacheslav Ovsiienko (void)priv; 364b2177648SViacheslav Ovsiienko return 0; 365b2177648SViacheslav Ovsiienko #endif 366b2177648SViacheslav Ovsiienko } 367b2177648SViacheslav Ovsiienko 368b2177648SViacheslav Ovsiienko /** 369b2177648SViacheslav Ovsiienko * Destroy DR related data within private structure. 370b2177648SViacheslav Ovsiienko * 371b2177648SViacheslav Ovsiienko * @param[in] priv 372b2177648SViacheslav Ovsiienko * Pointer to the private device data structure. 
373b2177648SViacheslav Ovsiienko */ 374b2177648SViacheslav Ovsiienko static void 375b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(struct mlx5_priv *priv) 376b2177648SViacheslav Ovsiienko { 377b2177648SViacheslav Ovsiienko #ifdef HAVE_MLX5DV_DR 378b2177648SViacheslav Ovsiienko struct mlx5_ibv_shared *sh; 379b2177648SViacheslav Ovsiienko 380b2177648SViacheslav Ovsiienko if (!priv->dr_shared) 381b2177648SViacheslav Ovsiienko return; 382b2177648SViacheslav Ovsiienko priv->dr_shared = 0; 383b2177648SViacheslav Ovsiienko sh = priv->sh; 384b2177648SViacheslav Ovsiienko assert(sh); 385b2177648SViacheslav Ovsiienko assert(sh->dv_refcnt); 386b2177648SViacheslav Ovsiienko if (sh->dv_refcnt && --sh->dv_refcnt) 387b2177648SViacheslav Ovsiienko return; 388*79e35d0dSViacheslav Ovsiienko if (sh->rx_ns) { 389*79e35d0dSViacheslav Ovsiienko mlx5dv_dr_destroy_ns(sh->rx_ns); 390*79e35d0dSViacheslav Ovsiienko sh->rx_ns = NULL; 391b2177648SViacheslav Ovsiienko } 392*79e35d0dSViacheslav Ovsiienko if (sh->tx_ns) { 393*79e35d0dSViacheslav Ovsiienko mlx5dv_dr_destroy_ns(sh->tx_ns); 394*79e35d0dSViacheslav Ovsiienko sh->tx_ns = NULL; 395b2177648SViacheslav Ovsiienko } 396*79e35d0dSViacheslav Ovsiienko pthread_mutex_destroy(&sh->dv_mutex); 397b2177648SViacheslav Ovsiienko #else 398b2177648SViacheslav Ovsiienko (void)priv; 399b2177648SViacheslav Ovsiienko #endif 400b2177648SViacheslav Ovsiienko } 401b2177648SViacheslav Ovsiienko 402b2177648SViacheslav Ovsiienko /** 4037be600c8SYongseok Koh * Initialize shared data between primary and secondary process. 4047be600c8SYongseok Koh * 4057be600c8SYongseok Koh * A memzone is reserved by primary process and secondary processes attach to 4067be600c8SYongseok Koh * the memzone. 4077be600c8SYongseok Koh * 4087be600c8SYongseok Koh * @return 4097be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 410974f1e7eSYongseok Koh */ 4117be600c8SYongseok Koh static int 4127be600c8SYongseok Koh mlx5_init_shared_data(void) 413974f1e7eSYongseok Koh { 414974f1e7eSYongseok Koh const struct rte_memzone *mz; 4157be600c8SYongseok Koh int ret = 0; 416974f1e7eSYongseok Koh 417974f1e7eSYongseok Koh rte_spinlock_lock(&mlx5_shared_data_lock); 418974f1e7eSYongseok Koh if (mlx5_shared_data == NULL) { 419974f1e7eSYongseok Koh if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 420974f1e7eSYongseok Koh /* Allocate shared memory. */ 421974f1e7eSYongseok Koh mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 422974f1e7eSYongseok Koh sizeof(*mlx5_shared_data), 423974f1e7eSYongseok Koh SOCKET_ID_ANY, 0); 4247be600c8SYongseok Koh if (mz == NULL) { 4257be600c8SYongseok Koh DRV_LOG(ERR, 4267be600c8SYongseok Koh "Cannot allocate mlx5 shared data\n"); 4277be600c8SYongseok Koh ret = -rte_errno; 4287be600c8SYongseok Koh goto error; 4297be600c8SYongseok Koh } 4307be600c8SYongseok Koh mlx5_shared_data = mz->addr; 4317be600c8SYongseok Koh memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 4327be600c8SYongseok Koh rte_spinlock_init(&mlx5_shared_data->lock); 433974f1e7eSYongseok Koh } else { 434974f1e7eSYongseok Koh /* Lookup allocated shared memory. 
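The memzone must already have been reserved by the primary process; a failed lookup means the primary has not completed its probe yet.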
*/ 435974f1e7eSYongseok Koh mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); 4367be600c8SYongseok Koh if (mz == NULL) { 4377be600c8SYongseok Koh DRV_LOG(ERR, 4387be600c8SYongseok Koh "Cannot attach mlx5 shared data\n"); 4397be600c8SYongseok Koh ret = -rte_errno; 4407be600c8SYongseok Koh goto error; 441974f1e7eSYongseok Koh } 442974f1e7eSYongseok Koh mlx5_shared_data = mz->addr; 4437be600c8SYongseok Koh memset(&mlx5_local_data, 0, sizeof(mlx5_local_data)); 4443ebe6580SYongseok Koh } 445974f1e7eSYongseok Koh } 4467be600c8SYongseok Koh error: 4477be600c8SYongseok Koh rte_spinlock_unlock(&mlx5_shared_data_lock); 4487be600c8SYongseok Koh return ret; 4497be600c8SYongseok Koh } 4507be600c8SYongseok Koh 4517be600c8SYongseok Koh /** 4527be600c8SYongseok Koh * Uninitialize shared data between primary and secondary process. 4537be600c8SYongseok Koh * 4547be600c8SYongseok Koh * The pointer of secondary process is dereferenced and primary process frees 4557be600c8SYongseok Koh * the memzone. 4567be600c8SYongseok Koh */ 4577be600c8SYongseok Koh static void 4587be600c8SYongseok Koh mlx5_uninit_shared_data(void) 4597be600c8SYongseok Koh { 4607be600c8SYongseok Koh const struct rte_memzone *mz; 4617be600c8SYongseok Koh 4627be600c8SYongseok Koh rte_spinlock_lock(&mlx5_shared_data_lock); 4637be600c8SYongseok Koh if (mlx5_shared_data) { 4647be600c8SYongseok Koh if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4657be600c8SYongseok Koh mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); 4667be600c8SYongseok Koh rte_memzone_free(mz); 4677be600c8SYongseok Koh } else { 4687be600c8SYongseok Koh memset(&mlx5_local_data, 0, sizeof(mlx5_local_data)); 4697be600c8SYongseok Koh } 4707be600c8SYongseok Koh mlx5_shared_data = NULL; 4717be600c8SYongseok Koh } 472974f1e7eSYongseok Koh rte_spinlock_unlock(&mlx5_shared_data_lock); 473974f1e7eSYongseok Koh } 474974f1e7eSYongseok Koh 475974f1e7eSYongseok Koh /** 4764d803a72SOlga Shern * Retrieve integer value from environment variable. 4774d803a72SOlga Shern * 4784d803a72SOlga Shern * @param[in] name 4794d803a72SOlga Shern * Environment variable name. 4804d803a72SOlga Shern * 4814d803a72SOlga Shern * @return 4824d803a72SOlga Shern * Integer value, 0 if the variable is not set. 4834d803a72SOlga Shern */ 4844d803a72SOlga Shern int 4854d803a72SOlga Shern mlx5_getenv_int(const char *name) 4864d803a72SOlga Shern { 4874d803a72SOlga Shern const char *val = getenv(name); 4884d803a72SOlga Shern 4894d803a72SOlga Shern if (val == NULL) 4904d803a72SOlga Shern return 0; 4914d803a72SOlga Shern return atoi(val); 4924d803a72SOlga Shern } 4934d803a72SOlga Shern 4944d803a72SOlga Shern /** 4951e3a39f7SXueming Li * Verbs callback to allocate a memory. This function should allocate the space 4961e3a39f7SXueming Li * according to the size provided residing inside a huge page. 4971e3a39f7SXueming Li * Please note that all allocation must respect the alignment from libmlx5 4981e3a39f7SXueming Li * (i.e. currently sysconf(_SC_PAGESIZE)). 4991e3a39f7SXueming Li * 5001e3a39f7SXueming Li * @param[in] size 5011e3a39f7SXueming Li * The size in bytes of the memory to allocate. 5021e3a39f7SXueming Li * @param[in] data 5031e3a39f7SXueming Li * A pointer to the callback data. 5041e3a39f7SXueming Li * 5051e3a39f7SXueming Li * @return 506a6d83b6aSNélio Laranjeiro * Allocated buffer, NULL otherwise and rte_errno is set. 
5071e3a39f7SXueming Li */ 5081e3a39f7SXueming Li static void * 5091e3a39f7SXueming Li mlx5_alloc_verbs_buf(size_t size, void *data) 5101e3a39f7SXueming Li { 511dbeba4cfSThomas Monjalon struct mlx5_priv *priv = data; 5121e3a39f7SXueming Li void *ret; 5131e3a39f7SXueming Li size_t alignment = sysconf(_SC_PAGESIZE); 514d10b09dbSOlivier Matz unsigned int socket = SOCKET_ID_ANY; 5151e3a39f7SXueming Li 516d10b09dbSOlivier Matz if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) { 517d10b09dbSOlivier Matz const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 518d10b09dbSOlivier Matz 519d10b09dbSOlivier Matz socket = ctrl->socket; 520d10b09dbSOlivier Matz } else if (priv->verbs_alloc_ctx.type == 521d10b09dbSOlivier Matz MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) { 522d10b09dbSOlivier Matz const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; 523d10b09dbSOlivier Matz 524d10b09dbSOlivier Matz socket = ctrl->socket; 525d10b09dbSOlivier Matz } 5261e3a39f7SXueming Li assert(data != NULL); 527d10b09dbSOlivier Matz ret = rte_malloc_socket(__func__, size, alignment, socket); 528a6d83b6aSNélio Laranjeiro if (!ret && size) 529a6d83b6aSNélio Laranjeiro rte_errno = ENOMEM; 5301e3a39f7SXueming Li return ret; 5311e3a39f7SXueming Li } 5321e3a39f7SXueming Li 5331e3a39f7SXueming Li /** 5341e3a39f7SXueming Li * Verbs callback to free a memory. 5351e3a39f7SXueming Li * 5361e3a39f7SXueming Li * @param[in] ptr 5371e3a39f7SXueming Li * A pointer to the memory to free. 5381e3a39f7SXueming Li * @param[in] data 5391e3a39f7SXueming Li * A pointer to the callback data. 5401e3a39f7SXueming Li */ 5411e3a39f7SXueming Li static void 5421e3a39f7SXueming Li mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) 5431e3a39f7SXueming Li { 5441e3a39f7SXueming Li assert(data != NULL); 5451e3a39f7SXueming Li rte_free(ptr); 5461e3a39f7SXueming Li } 5471e3a39f7SXueming Li 5481e3a39f7SXueming Li /** 549771fa900SAdrien Mazarguil * DPDK callback to close the device. 550771fa900SAdrien Mazarguil * 551771fa900SAdrien Mazarguil * Destroy all queues and objects, free memory. 552771fa900SAdrien Mazarguil * 553771fa900SAdrien Mazarguil * @param dev 554771fa900SAdrien Mazarguil * Pointer to Ethernet device structure. 555771fa900SAdrien Mazarguil */ 556771fa900SAdrien Mazarguil static void 557771fa900SAdrien Mazarguil mlx5_dev_close(struct rte_eth_dev *dev) 558771fa900SAdrien Mazarguil { 559dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 5602e22920bSAdrien Mazarguil unsigned int i; 5616af6b973SNélio Laranjeiro int ret; 562771fa900SAdrien Mazarguil 563a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u closing device \"%s\"", 5640f99970bSNélio Laranjeiro dev->data->port_id, 565f048f3d4SViacheslav Ovsiienko ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : "")); 566ecc1c29dSAdrien Mazarguil /* In case mlx5_dev_stop() has not been called. */ 567af4f09f2SNélio Laranjeiro mlx5_dev_interrupt_handler_uninstall(dev); 568af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 569af689f1fSNelio Laranjeiro mlx5_flow_flush(dev, NULL); 5702e22920bSAdrien Mazarguil /* Prevent crashes when queues are still in use. */ 5712e22920bSAdrien Mazarguil dev->rx_pkt_burst = removed_rx_burst; 5722e22920bSAdrien Mazarguil dev->tx_pkt_burst = removed_tx_burst; 5732aac5b5dSYongseok Koh rte_wmb(); 5742aac5b5dSYongseok Koh /* Disable datapath on secondary process. 
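A stop request is broadcast over the multi-process channel so that secondary processes stop using the Rx/Tx rings before the queues are released below.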
*/ 5752aac5b5dSYongseok Koh mlx5_mp_req_stop_rxtx(dev); 5762e22920bSAdrien Mazarguil if (priv->rxqs != NULL) { 5772e22920bSAdrien Mazarguil /* XXX race condition if mlx5_rx_burst() is still running. */ 5782e22920bSAdrien Mazarguil usleep(1000); 579a1366b1aSNélio Laranjeiro for (i = 0; (i != priv->rxqs_n); ++i) 580af4f09f2SNélio Laranjeiro mlx5_rxq_release(dev, i); 5812e22920bSAdrien Mazarguil priv->rxqs_n = 0; 5822e22920bSAdrien Mazarguil priv->rxqs = NULL; 5832e22920bSAdrien Mazarguil } 5842e22920bSAdrien Mazarguil if (priv->txqs != NULL) { 5852e22920bSAdrien Mazarguil /* XXX race condition if mlx5_tx_burst() is still running. */ 5862e22920bSAdrien Mazarguil usleep(1000); 5876e78005aSNélio Laranjeiro for (i = 0; (i != priv->txqs_n); ++i) 588af4f09f2SNélio Laranjeiro mlx5_txq_release(dev, i); 5892e22920bSAdrien Mazarguil priv->txqs_n = 0; 5902e22920bSAdrien Mazarguil priv->txqs = NULL; 5912e22920bSAdrien Mazarguil } 5927d6bf6b8SYongseok Koh mlx5_mprq_free_mp(dev); 593974f1e7eSYongseok Koh mlx5_mr_release(dev); 59417e19bc4SViacheslav Ovsiienko assert(priv->sh); 595b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 59617e19bc4SViacheslav Ovsiienko if (priv->sh) 59717e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(priv->sh); 59817e19bc4SViacheslav Ovsiienko priv->sh = NULL; 59929c1d8bbSNélio Laranjeiro if (priv->rss_conf.rss_key != NULL) 60029c1d8bbSNélio Laranjeiro rte_free(priv->rss_conf.rss_key); 601634efbc2SNelio Laranjeiro if (priv->reta_idx != NULL) 602634efbc2SNelio Laranjeiro rte_free(priv->reta_idx); 603ccdcba53SNélio Laranjeiro if (priv->config.vf) 604ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_flush(dev); 60526c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 60626c08b97SAdrien Mazarguil close(priv->nl_socket_route); 60726c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 60826c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 609d53180afSMoti Haimovsky if (priv->tcf_context) 610d53180afSMoti Haimovsky mlx5_flow_tcf_context_destroy(priv->tcf_context); 611af4f09f2SNélio Laranjeiro ret = mlx5_hrxq_ibv_verify(dev); 612f5479b68SNélio Laranjeiro if (ret) 613a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some hash Rx queue still remain", 6140f99970bSNélio Laranjeiro dev->data->port_id); 615af4f09f2SNélio Laranjeiro ret = mlx5_ind_table_ibv_verify(dev); 6164c7a0f5fSNélio Laranjeiro if (ret) 617a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some indirection table still remain", 6180f99970bSNélio Laranjeiro dev->data->port_id); 619af4f09f2SNélio Laranjeiro ret = mlx5_rxq_ibv_verify(dev); 62009cb5b58SNélio Laranjeiro if (ret) 621a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain", 6220f99970bSNélio Laranjeiro dev->data->port_id); 623af4f09f2SNélio Laranjeiro ret = mlx5_rxq_verify(dev); 624a1366b1aSNélio Laranjeiro if (ret) 625a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Rx queues still remain", 6260f99970bSNélio Laranjeiro dev->data->port_id); 627af4f09f2SNélio Laranjeiro ret = mlx5_txq_ibv_verify(dev); 628faf2667fSNélio Laranjeiro if (ret) 629a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", 6300f99970bSNélio Laranjeiro dev->data->port_id); 631af4f09f2SNélio Laranjeiro ret = mlx5_txq_verify(dev); 6326e78005aSNélio Laranjeiro if (ret) 633a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "port %u some Tx queues still remain", 6340f99970bSNélio Laranjeiro dev->data->port_id); 635af4f09f2SNélio Laranjeiro ret = mlx5_flow_verify(dev); 6366af6b973SNélio Laranjeiro if (ret) 637a170a30dSNélio 
Laranjeiro DRV_LOG(WARNING, "port %u some flows still remain", 638a170a30dSNélio Laranjeiro dev->data->port_id); 6392b730263SAdrien Mazarguil if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 6402b730263SAdrien Mazarguil unsigned int c = 0; 641d874a4eeSThomas Monjalon uint16_t port_id; 6422b730263SAdrien Mazarguil 643d874a4eeSThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) { 644dbeba4cfSThomas Monjalon struct mlx5_priv *opriv = 645d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 6462b730263SAdrien Mazarguil 6472b730263SAdrien Mazarguil if (!opriv || 6482b730263SAdrien Mazarguil opriv->domain_id != priv->domain_id || 649d874a4eeSThomas Monjalon &rte_eth_devices[port_id] == dev) 6502b730263SAdrien Mazarguil continue; 6512b730263SAdrien Mazarguil ++c; 6522b730263SAdrien Mazarguil } 6532b730263SAdrien Mazarguil if (!c) 6542b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 6552b730263SAdrien Mazarguil } 656771fa900SAdrien Mazarguil memset(priv, 0, sizeof(*priv)); 6572b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 65842603bbdSOphir Munk /* 65942603bbdSOphir Munk * Reset mac_addrs to NULL such that it is not freed as part of 66042603bbdSOphir Munk * rte_eth_dev_release_port(). mac_addrs is part of dev_private so 66142603bbdSOphir Munk * it is freed when dev_private is freed. 66242603bbdSOphir Munk */ 66342603bbdSOphir Munk dev->data->mac_addrs = NULL; 664771fa900SAdrien Mazarguil } 665771fa900SAdrien Mazarguil 6660887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops = { 667e60fbd5bSAdrien Mazarguil .dev_configure = mlx5_dev_configure, 668e60fbd5bSAdrien Mazarguil .dev_start = mlx5_dev_start, 669e60fbd5bSAdrien Mazarguil .dev_stop = mlx5_dev_stop, 67062072098SOr Ami .dev_set_link_down = mlx5_set_link_down, 67162072098SOr Ami .dev_set_link_up = mlx5_set_link_up, 672771fa900SAdrien Mazarguil .dev_close = mlx5_dev_close, 6731bdbe1afSAdrien Mazarguil .promiscuous_enable = mlx5_promiscuous_enable, 6741bdbe1afSAdrien Mazarguil .promiscuous_disable = mlx5_promiscuous_disable, 6751bdbe1afSAdrien Mazarguil .allmulticast_enable = mlx5_allmulticast_enable, 6761bdbe1afSAdrien Mazarguil .allmulticast_disable = mlx5_allmulticast_disable, 677cb8faed7SAdrien Mazarguil .link_update = mlx5_link_update, 67887011737SAdrien Mazarguil .stats_get = mlx5_stats_get, 67987011737SAdrien Mazarguil .stats_reset = mlx5_stats_reset, 680a4193ae3SShahaf Shuler .xstats_get = mlx5_xstats_get, 681a4193ae3SShahaf Shuler .xstats_reset = mlx5_xstats_reset, 682a4193ae3SShahaf Shuler .xstats_get_names = mlx5_xstats_get_names, 683714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 684e60fbd5bSAdrien Mazarguil .dev_infos_get = mlx5_dev_infos_get, 68578a38edfSJianfeng Tan .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 686e9086978SAdrien Mazarguil .vlan_filter_set = mlx5_vlan_filter_set, 6872e22920bSAdrien Mazarguil .rx_queue_setup = mlx5_rx_queue_setup, 6882e22920bSAdrien Mazarguil .tx_queue_setup = mlx5_tx_queue_setup, 6892e22920bSAdrien Mazarguil .rx_queue_release = mlx5_rx_queue_release, 6902e22920bSAdrien Mazarguil .tx_queue_release = mlx5_tx_queue_release, 69102d75430SAdrien Mazarguil .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 69202d75430SAdrien Mazarguil .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 6933318aef7SAdrien Mazarguil .mac_addr_remove = mlx5_mac_addr_remove, 6943318aef7SAdrien Mazarguil .mac_addr_add = mlx5_mac_addr_add, 69586977fccSDavid Marchand .mac_addr_set = mlx5_mac_addr_set, 
696e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 697cf37ca95SAdrien Mazarguil .mtu_set = mlx5_dev_set_mtu, 698f3db9489SYaacov Hazan .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 699f3db9489SYaacov Hazan .vlan_offload_set = mlx5_vlan_offload_set, 700634efbc2SNelio Laranjeiro .reta_update = mlx5_dev_rss_reta_update, 701634efbc2SNelio Laranjeiro .reta_query = mlx5_dev_rss_reta_query, 7022f97422eSNelio Laranjeiro .rss_hash_update = mlx5_rss_hash_update, 7032f97422eSNelio Laranjeiro .rss_hash_conf_get = mlx5_rss_hash_conf_get, 70476f5c99eSYaacov Hazan .filter_ctrl = mlx5_dev_filter_ctrl, 7058788fec1SOlivier Matz .rx_descriptor_status = mlx5_rx_descriptor_status, 7068788fec1SOlivier Matz .tx_descriptor_status = mlx5_tx_descriptor_status, 70726f04883STom Barbette .rx_queue_count = mlx5_rx_queue_count, 7083c7d44afSShahaf Shuler .rx_queue_intr_enable = mlx5_rx_intr_enable, 7093c7d44afSShahaf Shuler .rx_queue_intr_disable = mlx5_rx_intr_disable, 710d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 711771fa900SAdrien Mazarguil }; 712771fa900SAdrien Mazarguil 713714bf46eSThomas Monjalon /* Available operations from secondary process. */ 71487ec44ceSXueming Li static const struct eth_dev_ops mlx5_dev_sec_ops = { 71587ec44ceSXueming Li .stats_get = mlx5_stats_get, 71687ec44ceSXueming Li .stats_reset = mlx5_stats_reset, 71787ec44ceSXueming Li .xstats_get = mlx5_xstats_get, 71887ec44ceSXueming Li .xstats_reset = mlx5_xstats_reset, 71987ec44ceSXueming Li .xstats_get_names = mlx5_xstats_get_names, 720714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 72187ec44ceSXueming Li .dev_infos_get = mlx5_dev_infos_get, 72287ec44ceSXueming Li .rx_descriptor_status = mlx5_rx_descriptor_status, 72387ec44ceSXueming Li .tx_descriptor_status = mlx5_tx_descriptor_status, 72487ec44ceSXueming Li }; 72587ec44ceSXueming Li 726714bf46eSThomas Monjalon /* Available operations in flow isolated mode. 
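Compared to mlx5_dev_ops above, the RSS/RETA callbacks and rx_queue_count are not exposed in this mode.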
*/ 7270887aa7fSNélio Laranjeiro const struct eth_dev_ops mlx5_dev_ops_isolate = { 7280887aa7fSNélio Laranjeiro .dev_configure = mlx5_dev_configure, 7290887aa7fSNélio Laranjeiro .dev_start = mlx5_dev_start, 7300887aa7fSNélio Laranjeiro .dev_stop = mlx5_dev_stop, 7310887aa7fSNélio Laranjeiro .dev_set_link_down = mlx5_set_link_down, 7320887aa7fSNélio Laranjeiro .dev_set_link_up = mlx5_set_link_up, 7330887aa7fSNélio Laranjeiro .dev_close = mlx5_dev_close, 73424b068adSYongseok Koh .promiscuous_enable = mlx5_promiscuous_enable, 73524b068adSYongseok Koh .promiscuous_disable = mlx5_promiscuous_disable, 7362547ee74SYongseok Koh .allmulticast_enable = mlx5_allmulticast_enable, 7372547ee74SYongseok Koh .allmulticast_disable = mlx5_allmulticast_disable, 7380887aa7fSNélio Laranjeiro .link_update = mlx5_link_update, 7390887aa7fSNélio Laranjeiro .stats_get = mlx5_stats_get, 7400887aa7fSNélio Laranjeiro .stats_reset = mlx5_stats_reset, 7410887aa7fSNélio Laranjeiro .xstats_get = mlx5_xstats_get, 7420887aa7fSNélio Laranjeiro .xstats_reset = mlx5_xstats_reset, 7430887aa7fSNélio Laranjeiro .xstats_get_names = mlx5_xstats_get_names, 744714bf46eSThomas Monjalon .fw_version_get = mlx5_fw_version_get, 7450887aa7fSNélio Laranjeiro .dev_infos_get = mlx5_dev_infos_get, 7460887aa7fSNélio Laranjeiro .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 7470887aa7fSNélio Laranjeiro .vlan_filter_set = mlx5_vlan_filter_set, 7480887aa7fSNélio Laranjeiro .rx_queue_setup = mlx5_rx_queue_setup, 7490887aa7fSNélio Laranjeiro .tx_queue_setup = mlx5_tx_queue_setup, 7500887aa7fSNélio Laranjeiro .rx_queue_release = mlx5_rx_queue_release, 7510887aa7fSNélio Laranjeiro .tx_queue_release = mlx5_tx_queue_release, 7520887aa7fSNélio Laranjeiro .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 7530887aa7fSNélio Laranjeiro .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 7540887aa7fSNélio Laranjeiro .mac_addr_remove = mlx5_mac_addr_remove, 7550887aa7fSNélio Laranjeiro .mac_addr_add = mlx5_mac_addr_add, 7560887aa7fSNélio Laranjeiro .mac_addr_set = mlx5_mac_addr_set, 757e0586a8dSNélio Laranjeiro .set_mc_addr_list = mlx5_set_mc_addr_list, 7580887aa7fSNélio Laranjeiro .mtu_set = mlx5_dev_set_mtu, 7590887aa7fSNélio Laranjeiro .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 7600887aa7fSNélio Laranjeiro .vlan_offload_set = mlx5_vlan_offload_set, 7610887aa7fSNélio Laranjeiro .filter_ctrl = mlx5_dev_filter_ctrl, 7620887aa7fSNélio Laranjeiro .rx_descriptor_status = mlx5_rx_descriptor_status, 7630887aa7fSNélio Laranjeiro .tx_descriptor_status = mlx5_tx_descriptor_status, 7640887aa7fSNélio Laranjeiro .rx_queue_intr_enable = mlx5_rx_intr_enable, 7650887aa7fSNélio Laranjeiro .rx_queue_intr_disable = mlx5_rx_intr_disable, 766d3e0f392SMatan Azrad .is_removed = mlx5_is_removed, 7670887aa7fSNélio Laranjeiro }; 7680887aa7fSNélio Laranjeiro 769e72dd09bSNélio Laranjeiro /** 770e72dd09bSNélio Laranjeiro * Verify and store value for device argument. 771e72dd09bSNélio Laranjeiro * 772e72dd09bSNélio Laranjeiro * @param[in] key 773e72dd09bSNélio Laranjeiro * Key argument to verify. 774e72dd09bSNélio Laranjeiro * @param[in] val 775e72dd09bSNélio Laranjeiro * Value associated with key. 776e72dd09bSNélio Laranjeiro * @param opaque 777e72dd09bSNélio Laranjeiro * User data. 778e72dd09bSNélio Laranjeiro * 779e72dd09bSNélio Laranjeiro * @return 780a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 
781e72dd09bSNélio Laranjeiro */ 782e72dd09bSNélio Laranjeiro static int 783e72dd09bSNélio Laranjeiro mlx5_args_check(const char *key, const char *val, void *opaque) 784e72dd09bSNélio Laranjeiro { 7857fe24446SShahaf Shuler struct mlx5_dev_config *config = opaque; 78699c12dccSNélio Laranjeiro unsigned long tmp; 787e72dd09bSNélio Laranjeiro 7886de569f5SAdrien Mazarguil /* No-op, port representors are processed in mlx5_dev_spawn(). */ 7896de569f5SAdrien Mazarguil if (!strcmp(MLX5_REPRESENTOR, key)) 7906de569f5SAdrien Mazarguil return 0; 79199c12dccSNélio Laranjeiro errno = 0; 79299c12dccSNélio Laranjeiro tmp = strtoul(val, NULL, 0); 79399c12dccSNélio Laranjeiro if (errno) { 794a6d83b6aSNélio Laranjeiro rte_errno = errno; 795a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); 796a6d83b6aSNélio Laranjeiro return -rte_errno; 79799c12dccSNélio Laranjeiro } 79899c12dccSNélio Laranjeiro if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { 7997fe24446SShahaf Shuler config->cqe_comp = !!tmp; 800bc91e8dbSYongseok Koh } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { 801bc91e8dbSYongseok Koh config->cqe_pad = !!tmp; 80278c7a16dSYongseok Koh } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { 80378c7a16dSYongseok Koh config->hw_padding = !!tmp; 8047d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { 8057d6bf6b8SYongseok Koh config->mprq.enabled = !!tmp; 8067d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { 8077d6bf6b8SYongseok Koh config->mprq.stride_num_n = tmp; 8087d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { 8097d6bf6b8SYongseok Koh config->mprq.max_memcpy_len = tmp; 8107d6bf6b8SYongseok Koh } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { 8117d6bf6b8SYongseok Koh config->mprq.min_rxqs_num = tmp; 8122a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { 8137fe24446SShahaf Shuler config->txq_inline = tmp; 8142a66cf37SYaacov Hazan } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { 8157fe24446SShahaf Shuler config->txqs_inline = tmp; 81609d8b416SYongseok Koh } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { 81709d8b416SYongseok Koh config->txqs_vec = tmp; 818230189d9SNélio Laranjeiro } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { 819f9de8718SShahaf Shuler config->mps = !!tmp; 8206ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { 8217fe24446SShahaf Shuler config->mpw_hdr_dseg = !!tmp; 8226ce84bd8SYongseok Koh } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { 8237fe24446SShahaf Shuler config->inline_max_packet_sz = tmp; 8245644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { 8257fe24446SShahaf Shuler config->tx_vec_en = !!tmp; 8265644d5b9SNelio Laranjeiro } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { 8277fe24446SShahaf Shuler config->rx_vec_en = !!tmp; 82878a54648SXueming Li } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { 82978a54648SXueming Li config->l3_vxlan_en = !!tmp; 830db209cc3SNélio Laranjeiro } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { 831db209cc3SNélio Laranjeiro config->vf_nl_en = !!tmp; 83251e72d38SOri Kam } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { 83351e72d38SOri Kam config->dv_flow_en = !!tmp; 834dceb5029SYongseok Koh } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { 835dceb5029SYongseok Koh config->mr_ext_memseg_en = !!tmp; 83699c12dccSNélio Laranjeiro } else { 837a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "%s: unknown parameter", key); 838a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 
839a6d83b6aSNélio Laranjeiro return -rte_errno; 840e72dd09bSNélio Laranjeiro } 84199c12dccSNélio Laranjeiro return 0; 84299c12dccSNélio Laranjeiro } 843e72dd09bSNélio Laranjeiro 844e72dd09bSNélio Laranjeiro /** 845e72dd09bSNélio Laranjeiro * Parse device parameters. 846e72dd09bSNélio Laranjeiro * 8477fe24446SShahaf Shuler * @param config 8487fe24446SShahaf Shuler * Pointer to device configuration structure. 849e72dd09bSNélio Laranjeiro * @param devargs 850e72dd09bSNélio Laranjeiro * Device arguments structure. 851e72dd09bSNélio Laranjeiro * 852e72dd09bSNélio Laranjeiro * @return 853a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 854e72dd09bSNélio Laranjeiro */ 855e72dd09bSNélio Laranjeiro static int 8567fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 857e72dd09bSNélio Laranjeiro { 858e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 85999c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 860bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 86178c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 8627d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 8637d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 8647d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 8657d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 8662a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 8672a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 86809d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 869230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 8706ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 8716ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 8725644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 8735644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 87478a54648SXueming Li MLX5_L3_VXLAN_EN, 875db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 87651e72d38SOri Kam MLX5_DV_FLOW_EN, 877dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 8786de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 879e72dd09bSNélio Laranjeiro NULL, 880e72dd09bSNélio Laranjeiro }; 881e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 882e72dd09bSNélio Laranjeiro int ret = 0; 883e72dd09bSNélio Laranjeiro int i; 884e72dd09bSNélio Laranjeiro 885e72dd09bSNélio Laranjeiro if (devargs == NULL) 886e72dd09bSNélio Laranjeiro return 0; 887e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. */ 888e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 889e72dd09bSNélio Laranjeiro if (kvlist == NULL) 890e72dd09bSNélio Laranjeiro return 0; 891e72dd09bSNélio Laranjeiro /* Process parameters. 
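Every key from params[] found in the devargs string (e.g. a hypothetical "rxq_cqe_comp_en=0,txqs_min_inline=4" with illustrative values) is forwarded to mlx5_args_check().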
*/ 892e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 893e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 894e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 8957fe24446SShahaf Shuler mlx5_args_check, config); 896a6d83b6aSNélio Laranjeiro if (ret) { 897a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 898a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 899a6d83b6aSNélio Laranjeiro return -rte_errno; 900e72dd09bSNélio Laranjeiro } 901e72dd09bSNélio Laranjeiro } 902a67323e4SShahaf Shuler } 903e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 904e72dd09bSNélio Laranjeiro return 0; 905e72dd09bSNélio Laranjeiro } 906e72dd09bSNélio Laranjeiro 907fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver; 908771fa900SAdrien Mazarguil 9098594a202SAnatoly Burakov static int 9105282bb1cSAnatoly Burakov find_lower_va_bound(const struct rte_memseg_list *msl, 91166cc45e2SAnatoly Burakov const struct rte_memseg *ms, void *arg) 9128594a202SAnatoly Burakov { 9138594a202SAnatoly Burakov void **addr = arg; 9148594a202SAnatoly Burakov 9155282bb1cSAnatoly Burakov if (msl->external) 9165282bb1cSAnatoly Burakov return 0; 9178594a202SAnatoly Burakov if (*addr == NULL) 9188594a202SAnatoly Burakov *addr = ms->addr; 9198594a202SAnatoly Burakov else 9208594a202SAnatoly Burakov *addr = RTE_MIN(*addr, ms->addr); 9218594a202SAnatoly Burakov 9228594a202SAnatoly Burakov return 0; 9238594a202SAnatoly Burakov } 9248594a202SAnatoly Burakov 9254a984153SXueming Li /** 9264a984153SXueming Li * Reserve UAR address space for primary process. 9274a984153SXueming Li * 9287be600c8SYongseok Koh * Process local resource is used by both primary and secondary to avoid 9297be600c8SYongseok Koh * duplicate reservation. The space has to be available on both primary and 9307be600c8SYongseok Koh * secondary process, TXQ UAR maps to this area using fixed mmap w/o double 9317be600c8SYongseok Koh * check. 9324a984153SXueming Li * 9334a984153SXueming Li * @return 934a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 9354a984153SXueming Li */ 9364a984153SXueming Li static int 9377be600c8SYongseok Koh mlx5_uar_init_primary(void) 9384a984153SXueming Li { 9397be600c8SYongseok Koh struct mlx5_shared_data *sd = mlx5_shared_data; 9404a984153SXueming Li void *addr = (void *)0; 9414a984153SXueming Li 9427be600c8SYongseok Koh if (sd->uar_base) 9434a984153SXueming Li return 0; 9444a984153SXueming Li /* find out lower bound of hugepage segments */ 9458594a202SAnatoly Burakov rte_memseg_walk(find_lower_va_bound, &addr); 9464a984153SXueming Li /* keep distance to hugepages to minimize potential conflicts. */ 9476bf10ab6SMoti Haimovsky addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE)); 9484a984153SXueming Li /* anonymous mmap, no real memory consumption. */ 9494a984153SXueming Li addr = mmap(addr, MLX5_UAR_SIZE, 9504a984153SXueming Li PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 9514a984153SXueming Li if (addr == MAP_FAILED) { 952a170a30dSNélio Laranjeiro DRV_LOG(ERR, 9537be600c8SYongseok Koh "Failed to reserve UAR address space, please" 9547be600c8SYongseok Koh " adjust MLX5_UAR_SIZE or try --base-virtaddr"); 955a6d83b6aSNélio Laranjeiro rte_errno = ENOMEM; 956a6d83b6aSNélio Laranjeiro return -rte_errno; 9574a984153SXueming Li } 9584a984153SXueming Li /* Accept either same addr or a new addr returned from mmap if target 9594a984153SXueming Li * range occupied. 
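 * Either way the reservation stays a PROT_NONE placeholder with no backing memory; Tx queue UARs are later mapped into it at fixed addresses.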
9604a984153SXueming Li */ 9617be600c8SYongseok Koh DRV_LOG(INFO, "Reserved UAR address space: %p", addr); 9627be600c8SYongseok Koh sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */ 9634a984153SXueming Li return 0; 9644a984153SXueming Li } 9654a984153SXueming Li 9664a984153SXueming Li /** 9677be600c8SYongseok Koh * Unmap UAR address space reserved for primary process. 9687be600c8SYongseok Koh */ 9697be600c8SYongseok Koh static void 9707be600c8SYongseok Koh mlx5_uar_uninit_primary(void) 9717be600c8SYongseok Koh { 9727be600c8SYongseok Koh struct mlx5_shared_data *sd = mlx5_shared_data; 9737be600c8SYongseok Koh 9747be600c8SYongseok Koh if (!sd->uar_base) 9757be600c8SYongseok Koh return; 9767be600c8SYongseok Koh munmap(sd->uar_base, MLX5_UAR_SIZE); 9777be600c8SYongseok Koh sd->uar_base = NULL; 9787be600c8SYongseok Koh } 9797be600c8SYongseok Koh 9807be600c8SYongseok Koh /** 9817be600c8SYongseok Koh * Reserve UAR address space for secondary process, align with primary process. 9824a984153SXueming Li * 9834a984153SXueming Li * @return 984a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 9854a984153SXueming Li */ 9864a984153SXueming Li static int 9877be600c8SYongseok Koh mlx5_uar_init_secondary(void) 9884a984153SXueming Li { 9897be600c8SYongseok Koh struct mlx5_shared_data *sd = mlx5_shared_data; 9907be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 9914a984153SXueming Li void *addr; 9924a984153SXueming Li 9937be600c8SYongseok Koh if (ld->uar_base) { /* Already reserved. */ 9947be600c8SYongseok Koh assert(sd->uar_base == ld->uar_base); 9954a984153SXueming Li return 0; 9964a984153SXueming Li } 9977be600c8SYongseok Koh assert(sd->uar_base); 9984a984153SXueming Li /* anonymous mmap, no real memory consumption. */ 9997be600c8SYongseok Koh addr = mmap(sd->uar_base, MLX5_UAR_SIZE, 10004a984153SXueming Li PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 10014a984153SXueming Li if (addr == MAP_FAILED) { 10027be600c8SYongseok Koh DRV_LOG(ERR, "UAR mmap failed: %p size: %llu", 10037be600c8SYongseok Koh sd->uar_base, MLX5_UAR_SIZE); 1004a6d83b6aSNélio Laranjeiro rte_errno = ENXIO; 1005a6d83b6aSNélio Laranjeiro return -rte_errno; 10064a984153SXueming Li } 10077be600c8SYongseok Koh if (sd->uar_base != addr) { 1008a170a30dSNélio Laranjeiro DRV_LOG(ERR, 10097be600c8SYongseok Koh "UAR address %p size %llu occupied, please" 1010a170a30dSNélio Laranjeiro " adjust MLX5_UAR_OFFSET or try EAL parameter" 1011a170a30dSNélio Laranjeiro " --base-virtaddr", 10127be600c8SYongseok Koh sd->uar_base, MLX5_UAR_SIZE); 1013a6d83b6aSNélio Laranjeiro rte_errno = ENXIO; 1014a6d83b6aSNélio Laranjeiro return -rte_errno; 10154a984153SXueming Li } 10167be600c8SYongseok Koh ld->uar_base = addr; 10177be600c8SYongseok Koh DRV_LOG(INFO, "Reserved UAR address space: %p", addr); 10184a984153SXueming Li return 0; 10194a984153SXueming Li } 10204a984153SXueming Li 1021771fa900SAdrien Mazarguil /** 10227be600c8SYongseok Koh * Unmap UAR address space reserved for secondary process. 
10237be600c8SYongseok Koh */ 10247be600c8SYongseok Koh static void 10257be600c8SYongseok Koh mlx5_uar_uninit_secondary(void) 10267be600c8SYongseok Koh { 10277be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 10287be600c8SYongseok Koh 10297be600c8SYongseok Koh if (!ld->uar_base) 10307be600c8SYongseok Koh return; 10317be600c8SYongseok Koh munmap(ld->uar_base, MLX5_UAR_SIZE); 10327be600c8SYongseok Koh ld->uar_base = NULL; 10337be600c8SYongseok Koh } 10347be600c8SYongseok Koh 10357be600c8SYongseok Koh /** 10367be600c8SYongseok Koh * PMD global initialization. 10377be600c8SYongseok Koh * 10387be600c8SYongseok Koh * Independent from individual device, this function initializes global 10397be600c8SYongseok Koh * per-PMD data structures distinguishing primary and secondary processes. 10407be600c8SYongseok Koh * Hence, each initialization is called once per a process. 10417be600c8SYongseok Koh * 10427be600c8SYongseok Koh * @return 10437be600c8SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 10447be600c8SYongseok Koh */ 10457be600c8SYongseok Koh static int 10467be600c8SYongseok Koh mlx5_init_once(void) 10477be600c8SYongseok Koh { 10487be600c8SYongseok Koh struct mlx5_shared_data *sd; 10497be600c8SYongseok Koh struct mlx5_local_data *ld = &mlx5_local_data; 10507be600c8SYongseok Koh int ret; 10517be600c8SYongseok Koh 10527be600c8SYongseok Koh if (mlx5_init_shared_data()) 10537be600c8SYongseok Koh return -rte_errno; 10547be600c8SYongseok Koh sd = mlx5_shared_data; 10557be600c8SYongseok Koh assert(sd); 10567be600c8SYongseok Koh rte_spinlock_lock(&sd->lock); 10577be600c8SYongseok Koh switch (rte_eal_process_type()) { 10587be600c8SYongseok Koh case RTE_PROC_PRIMARY: 10597be600c8SYongseok Koh if (sd->init_done) 10607be600c8SYongseok Koh break; 10617be600c8SYongseok Koh LIST_INIT(&sd->mem_event_cb_list); 10627be600c8SYongseok Koh rte_rwlock_init(&sd->mem_event_rwlock); 10637be600c8SYongseok Koh rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", 10647be600c8SYongseok Koh mlx5_mr_mem_event_cb, NULL); 10657be600c8SYongseok Koh mlx5_mp_init_primary(); 10667be600c8SYongseok Koh ret = mlx5_uar_init_primary(); 10677be600c8SYongseok Koh if (ret) 10687be600c8SYongseok Koh goto error; 10697be600c8SYongseok Koh sd->init_done = true; 10707be600c8SYongseok Koh break; 10717be600c8SYongseok Koh case RTE_PROC_SECONDARY: 10727be600c8SYongseok Koh if (ld->init_done) 10737be600c8SYongseok Koh break; 10742aac5b5dSYongseok Koh mlx5_mp_init_secondary(); 10757be600c8SYongseok Koh ret = mlx5_uar_init_secondary(); 10767be600c8SYongseok Koh if (ret) 10777be600c8SYongseok Koh goto error; 10787be600c8SYongseok Koh ++sd->secondary_cnt; 10797be600c8SYongseok Koh ld->init_done = true; 10807be600c8SYongseok Koh break; 10817be600c8SYongseok Koh default: 10827be600c8SYongseok Koh break; 10837be600c8SYongseok Koh } 10847be600c8SYongseok Koh rte_spinlock_unlock(&sd->lock); 10857be600c8SYongseok Koh return 0; 10867be600c8SYongseok Koh error: 10877be600c8SYongseok Koh switch (rte_eal_process_type()) { 10887be600c8SYongseok Koh case RTE_PROC_PRIMARY: 10897be600c8SYongseok Koh mlx5_uar_uninit_primary(); 10907be600c8SYongseok Koh mlx5_mp_uninit_primary(); 10917be600c8SYongseok Koh rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", NULL); 10927be600c8SYongseok Koh break; 10937be600c8SYongseok Koh case RTE_PROC_SECONDARY: 10947be600c8SYongseok Koh mlx5_uar_uninit_secondary(); 10952aac5b5dSYongseok Koh mlx5_mp_uninit_secondary(); 10967be600c8SYongseok Koh break; 10977be600c8SYongseok Koh 
default: 10987be600c8SYongseok Koh break; 10997be600c8SYongseok Koh } 11007be600c8SYongseok Koh rte_spinlock_unlock(&sd->lock); 11017be600c8SYongseok Koh mlx5_uninit_shared_data(); 11027be600c8SYongseok Koh return -rte_errno; 11037be600c8SYongseok Koh } 11047be600c8SYongseok Koh 11057be600c8SYongseok Koh /** 1106f38c5457SAdrien Mazarguil * Spawn an Ethernet device from Verbs information. 1107771fa900SAdrien Mazarguil * 1108f38c5457SAdrien Mazarguil * @param dpdk_dev 1109f38c5457SAdrien Mazarguil * Backing DPDK device. 1110ad74bc61SViacheslav Ovsiienko * @param spawn 1111ad74bc61SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 1112f87bfa8eSYongseok Koh * @param config 1113f87bfa8eSYongseok Koh * Device configuration parameters. 1114771fa900SAdrien Mazarguil * 1115771fa900SAdrien Mazarguil * @return 1116f38c5457SAdrien Mazarguil * A valid Ethernet device object on success, NULL otherwise and rte_errno 1117206254b7SOphir Munk * is set. The following errors are defined: 11186de569f5SAdrien Mazarguil * 11196de569f5SAdrien Mazarguil * EBUSY: device is not supposed to be spawned. 1120206254b7SOphir Munk * EEXIST: device is already spawned 1121771fa900SAdrien Mazarguil */ 1122f38c5457SAdrien Mazarguil static struct rte_eth_dev * 1123f38c5457SAdrien Mazarguil mlx5_dev_spawn(struct rte_device *dpdk_dev, 1124ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data *spawn, 1125ad74bc61SViacheslav Ovsiienko struct mlx5_dev_config config) 1126771fa900SAdrien Mazarguil { 1127ad74bc61SViacheslav Ovsiienko const struct mlx5_switch_info *switch_info = &spawn->info; 112817e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared *sh = NULL; 112968128934SAdrien Mazarguil struct ibv_port_attr port_attr; 11306057a10bSAdrien Mazarguil struct mlx5dv_context dv_attr = { .comp_mask = 0 }; 11319083982cSAdrien Mazarguil struct rte_eth_dev *eth_dev = NULL; 1132dbeba4cfSThomas Monjalon struct mlx5_priv *priv = NULL; 1133771fa900SAdrien Mazarguil int err = 0; 113478c7a16dSYongseok Koh unsigned int hw_padding = 0; 1135e192ef80SYaacov Hazan unsigned int mps; 1136523f5a74SYongseok Koh unsigned int cqe_comp; 1137bc91e8dbSYongseok Koh unsigned int cqe_pad = 0; 1138772d3435SXueming Li unsigned int tunnel_en = 0; 11391f106da2SMatan Azrad unsigned int mpls_en = 0; 11405f8ba81cSXueming Li unsigned int swp = 0; 11417d6bf6b8SYongseok Koh unsigned int mprq = 0; 11427d6bf6b8SYongseok Koh unsigned int mprq_min_stride_size_n = 0; 11437d6bf6b8SYongseok Koh unsigned int mprq_max_stride_size_n = 0; 11447d6bf6b8SYongseok Koh unsigned int mprq_min_stride_num_n = 0; 11457d6bf6b8SYongseok Koh unsigned int mprq_max_stride_num_n = 0; 114668128934SAdrien Mazarguil struct ether_addr mac; 114768128934SAdrien Mazarguil char name[RTE_ETH_NAME_MAX_LEN]; 11482b730263SAdrien Mazarguil int own_domain_id = 0; 1149206254b7SOphir Munk uint16_t port_id; 11502b730263SAdrien Mazarguil unsigned int i; 1151771fa900SAdrien Mazarguil 11526de569f5SAdrien Mazarguil /* Determine if this port representor is supposed to be spawned. 
*/ 11536de569f5SAdrien Mazarguil if (switch_info->representor && dpdk_dev->devargs) { 11546de569f5SAdrien Mazarguil struct rte_eth_devargs eth_da; 11556de569f5SAdrien Mazarguil 11566de569f5SAdrien Mazarguil err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); 11576de569f5SAdrien Mazarguil if (err) { 11586de569f5SAdrien Mazarguil rte_errno = -err; 11596de569f5SAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 11606de569f5SAdrien Mazarguil strerror(rte_errno)); 11616de569f5SAdrien Mazarguil return NULL; 11626de569f5SAdrien Mazarguil } 11636de569f5SAdrien Mazarguil for (i = 0; i < eth_da.nb_representor_ports; ++i) 11646de569f5SAdrien Mazarguil if (eth_da.representor_ports[i] == 11656de569f5SAdrien Mazarguil (uint16_t)switch_info->port_name) 11666de569f5SAdrien Mazarguil break; 11676de569f5SAdrien Mazarguil if (i == eth_da.nb_representor_ports) { 11686de569f5SAdrien Mazarguil rte_errno = EBUSY; 11696de569f5SAdrien Mazarguil return NULL; 11706de569f5SAdrien Mazarguil } 11716de569f5SAdrien Mazarguil } 1172206254b7SOphir Munk /* Build device name. */ 1173206254b7SOphir Munk if (!switch_info->representor) 117409c9c4d2SThomas Monjalon strlcpy(name, dpdk_dev->name, sizeof(name)); 1175206254b7SOphir Munk else 1176206254b7SOphir Munk snprintf(name, sizeof(name), "%s_representor_%u", 1177206254b7SOphir Munk dpdk_dev->name, switch_info->port_name); 1178206254b7SOphir Munk /* check if the device is already spawned */ 1179206254b7SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 1180206254b7SOphir Munk rte_errno = EEXIST; 1181206254b7SOphir Munk return NULL; 1182206254b7SOphir Munk } 118317e19bc4SViacheslav Ovsiienko DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 118417e19bc4SViacheslav Ovsiienko if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 118517e19bc4SViacheslav Ovsiienko eth_dev = rte_eth_dev_attach_secondary(name); 118617e19bc4SViacheslav Ovsiienko if (eth_dev == NULL) { 118717e19bc4SViacheslav Ovsiienko DRV_LOG(ERR, "can not attach rte ethdev"); 118817e19bc4SViacheslav Ovsiienko rte_errno = ENOMEM; 1189f38c5457SAdrien Mazarguil return NULL; 1190771fa900SAdrien Mazarguil } 119117e19bc4SViacheslav Ovsiienko eth_dev->device = dpdk_dev; 119217e19bc4SViacheslav Ovsiienko eth_dev->dev_ops = &mlx5_dev_sec_ops; 119317e19bc4SViacheslav Ovsiienko /* Receive command fd from primary process */ 11949a8ab29bSYongseok Koh err = mlx5_mp_req_verbs_cmd_fd(eth_dev); 119517e19bc4SViacheslav Ovsiienko if (err < 0) 119617e19bc4SViacheslav Ovsiienko return NULL; 119717e19bc4SViacheslav Ovsiienko /* Remap UAR for Tx queues. */ 119817e19bc4SViacheslav Ovsiienko err = mlx5_tx_uar_remap(eth_dev, err); 119917e19bc4SViacheslav Ovsiienko if (err) 120017e19bc4SViacheslav Ovsiienko return NULL; 120117e19bc4SViacheslav Ovsiienko /* 120217e19bc4SViacheslav Ovsiienko * Ethdev pointer is still required as input since 120317e19bc4SViacheslav Ovsiienko * the primary device is not accessible from the 120417e19bc4SViacheslav Ovsiienko * secondary process. 
120517e19bc4SViacheslav Ovsiienko */ 120617e19bc4SViacheslav Ovsiienko eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); 120717e19bc4SViacheslav Ovsiienko eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); 120817e19bc4SViacheslav Ovsiienko return eth_dev; 1209f5bf91deSMoti Haimovsky } 121017e19bc4SViacheslav Ovsiienko sh = mlx5_alloc_shared_ibctx(spawn); 121117e19bc4SViacheslav Ovsiienko if (!sh) 121217e19bc4SViacheslav Ovsiienko return NULL; 121317e19bc4SViacheslav Ovsiienko config.devx = sh->devx; 12145f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 12156057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; 12165f8ba81cSXueming Li #endif 121743e9d979SShachar Beiser /* 121843e9d979SShachar Beiser * Multi-packet send is supported by ConnectX-4 Lx PF as well 121943e9d979SShachar Beiser * as all ConnectX-5 devices. 122043e9d979SShachar Beiser */ 1221038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 12226057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; 1223038e7251SShahaf Shuler #endif 12247d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 12256057a10bSAdrien Mazarguil dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; 12267d6bf6b8SYongseok Koh #endif 122717e19bc4SViacheslav Ovsiienko mlx5_glue->dv_query_device(sh->ctx, &dv_attr); 12286057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { 12296057a10bSAdrien Mazarguil if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { 1230a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "enhanced MPW is supported"); 123143e9d979SShachar Beiser mps = MLX5_MPW_ENHANCED; 123243e9d979SShachar Beiser } else { 1233a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW is supported"); 1234e589960cSYongseok Koh mps = MLX5_MPW; 1235e589960cSYongseok Koh } 1236e589960cSYongseok Koh } else { 1237a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "MPW isn't supported"); 123843e9d979SShachar Beiser mps = MLX5_MPW_DISABLED; 123943e9d979SShachar Beiser } 12405f8ba81cSXueming Li #ifdef HAVE_IBV_MLX5_MOD_SWP 12416057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) 12426057a10bSAdrien Mazarguil swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; 12435f8ba81cSXueming Li DRV_LOG(DEBUG, "SWP support: %u", swp); 12445f8ba81cSXueming Li #endif 124568128934SAdrien Mazarguil config.swp = !!swp; 12467d6bf6b8SYongseok Koh #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT 12476057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { 12487d6bf6b8SYongseok Koh struct mlx5dv_striding_rq_caps mprq_caps = 12496057a10bSAdrien Mazarguil dv_attr.striding_rq_caps; 12507d6bf6b8SYongseok Koh 12517d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", 12527d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes); 12537d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", 12547d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes); 12557d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", 12567d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides); 12577d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", 12587d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides); 12597d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "\tsupported_qpts: %d", 12607d6bf6b8SYongseok Koh mprq_caps.supported_qpts); 12617d6bf6b8SYongseok Koh DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); 12627d6bf6b8SYongseok Koh mprq = 1; 12637d6bf6b8SYongseok Koh 
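/*
 * The capability fields copied below are log2 values reported by the device;
 * the stride count requested by the application is validated against these
 * bounds later, when config.mprq is finalized further down in this function.
 * A minimal sketch of that kind of range check, with hypothetical names,
 * assuming the fallback default must still respect the device minimum:
 *
 *   static unsigned int
 *   clamp_log2(unsigned int req, unsigned int lo, unsigned int hi,
 *              unsigned int dflt)
 *   {
 *       if (req < lo || req > hi)
 *           return dflt > lo ? dflt : lo;  // i.e. the larger of the two
 *       return req;
 *   }
 */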
mprq_min_stride_size_n = 12647d6bf6b8SYongseok Koh mprq_caps.min_single_stride_log_num_of_bytes; 12657d6bf6b8SYongseok Koh mprq_max_stride_size_n = 12667d6bf6b8SYongseok Koh mprq_caps.max_single_stride_log_num_of_bytes; 12677d6bf6b8SYongseok Koh mprq_min_stride_num_n = 12687d6bf6b8SYongseok Koh mprq_caps.min_single_wqe_log_num_of_strides; 12697d6bf6b8SYongseok Koh mprq_max_stride_num_n = 12707d6bf6b8SYongseok Koh mprq_caps.max_single_wqe_log_num_of_strides; 127168128934SAdrien Mazarguil config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 127268128934SAdrien Mazarguil mprq_min_stride_num_n); 12737d6bf6b8SYongseok Koh } 12747d6bf6b8SYongseok Koh #endif 1275523f5a74SYongseok Koh if (RTE_CACHE_LINE_SIZE == 128 && 12766057a10bSAdrien Mazarguil !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) 1277523f5a74SYongseok Koh cqe_comp = 0; 1278523f5a74SYongseok Koh else 1279523f5a74SYongseok Koh cqe_comp = 1; 128068128934SAdrien Mazarguil config.cqe_comp = cqe_comp; 1281bc91e8dbSYongseok Koh #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1282bc91e8dbSYongseok Koh /* Whether device supports 128B Rx CQE padding. */ 1283bc91e8dbSYongseok Koh cqe_pad = RTE_CACHE_LINE_SIZE == 128 && 1284bc91e8dbSYongseok Koh (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); 1285bc91e8dbSYongseok Koh #endif 1286038e7251SShahaf Shuler #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 12876057a10bSAdrien Mazarguil if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { 12886057a10bSAdrien Mazarguil tunnel_en = ((dv_attr.tunnel_offloads_caps & 1289038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && 12906057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 1291038e7251SShahaf Shuler MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); 1292038e7251SShahaf Shuler } 1293a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "tunnel offloading is %ssupported", 1294a170a30dSNélio Laranjeiro tunnel_en ? "" : "not "); 1295038e7251SShahaf Shuler #else 1296a170a30dSNélio Laranjeiro DRV_LOG(WARNING, 1297a170a30dSNélio Laranjeiro "tunnel offloading disabled due to old OFED/rdma-core version"); 1298038e7251SShahaf Shuler #endif 129968128934SAdrien Mazarguil config.tunnel_en = tunnel_en; 13001f106da2SMatan Azrad #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 13016057a10bSAdrien Mazarguil mpls_en = ((dv_attr.tunnel_offloads_caps & 13021f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && 13036057a10bSAdrien Mazarguil (dv_attr.tunnel_offloads_caps & 13041f106da2SMatan Azrad MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); 13051f106da2SMatan Azrad DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", 13061f106da2SMatan Azrad mpls_en ? "" : "not "); 13071f106da2SMatan Azrad #else 13081f106da2SMatan Azrad DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" 13091f106da2SMatan Azrad " old OFED/rdma-core version or firmware configuration"); 13101f106da2SMatan Azrad #endif 131168128934SAdrien Mazarguil config.mpls_en = mpls_en; 1312771fa900SAdrien Mazarguil /* Check port status. 
*/ 131317e19bc4SViacheslav Ovsiienko err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); 1314771fa900SAdrien Mazarguil if (err) { 1315a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port query failed: %s", strerror(err)); 13169083982cSAdrien Mazarguil goto error; 1317771fa900SAdrien Mazarguil } 13181371f4dfSOr Ami if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 13199083982cSAdrien Mazarguil DRV_LOG(ERR, "port is not configured in Ethernet mode"); 1320e1c3e305SMatan Azrad err = EINVAL; 13219083982cSAdrien Mazarguil goto error; 13221371f4dfSOr Ami } 1323771fa900SAdrien Mazarguil if (port_attr.state != IBV_PORT_ACTIVE) 13249083982cSAdrien Mazarguil DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 1325a170a30dSNélio Laranjeiro mlx5_glue->port_state_str(port_attr.state), 1326771fa900SAdrien Mazarguil port_attr.state); 132717e19bc4SViacheslav Ovsiienko /* Allocate private eth device data. */ 1328771fa900SAdrien Mazarguil priv = rte_zmalloc("ethdev private structure", 1329771fa900SAdrien Mazarguil sizeof(*priv), 1330771fa900SAdrien Mazarguil RTE_CACHE_LINE_SIZE); 1331771fa900SAdrien Mazarguil if (priv == NULL) { 1332a170a30dSNélio Laranjeiro DRV_LOG(ERR, "priv allocation failure"); 1333771fa900SAdrien Mazarguil err = ENOMEM; 13349083982cSAdrien Mazarguil goto error; 1335771fa900SAdrien Mazarguil } 133617e19bc4SViacheslav Ovsiienko priv->sh = sh; 133717e19bc4SViacheslav Ovsiienko priv->ibv_port = spawn->ibv_port; 1338771fa900SAdrien Mazarguil priv->mtu = ETHER_MTU; 13396bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64 13406bf10ab6SMoti Haimovsky /* Initialize UAR access locks for 32bit implementations. */ 13416bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock_cq); 13426bf10ab6SMoti Haimovsky for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) 13436bf10ab6SMoti Haimovsky rte_spinlock_init(&priv->uar_lock[i]); 13446bf10ab6SMoti Haimovsky #endif 134526c08b97SAdrien Mazarguil /* Some internal functions rely on Netlink sockets, open them now. */ 13465366074bSNelio Laranjeiro priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 13475366074bSNelio Laranjeiro priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 134826c08b97SAdrien Mazarguil priv->nl_sn = 0; 13492b730263SAdrien Mazarguil priv->representor = !!switch_info->representor; 1350299d7dc2SViacheslav Ovsiienko priv->master = !!switch_info->master; 13512b730263SAdrien Mazarguil priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 1352299d7dc2SViacheslav Ovsiienko /* 1353299d7dc2SViacheslav Ovsiienko * Currently we support single E-Switch per PF configurations 1354299d7dc2SViacheslav Ovsiienko * only and the vport_id field contains the vport index for the 1355299d7dc2SViacheslav Ovsiienko * associated VF, which is deduced from the representor port name. 1356299d7dc2SViacheslav Ovsiienko * For example, let's have the IB device port 10 with the 1357299d7dc2SViacheslav Ovsiienko * attached network device eth0, which has port name attribute 1358299d7dc2SViacheslav Ovsiienko * pf0vf2; we can deduce the VF number as 2 and set the vport index 1359299d7dc2SViacheslav Ovsiienko * as 3 (2+1). This assignment scheme should be changed if 1360299d7dc2SViacheslav Ovsiienko * multiple E-Switch instances per PF configurations or/and PCI 1361299d7dc2SViacheslav Ovsiienko * subfunctions are added. 1362299d7dc2SViacheslav Ovsiienko */ 1363299d7dc2SViacheslav Ovsiienko priv->vport_id = switch_info->representor ? 1364299d7dc2SViacheslav Ovsiienko switch_info->port_name + 1 : -1; 1365299d7dc2SViacheslav Ovsiienko /* representor_id field keeps the unmodified port/VF index. 
*/ 1366299d7dc2SViacheslav Ovsiienko priv->representor_id = switch_info->representor ? 1367299d7dc2SViacheslav Ovsiienko switch_info->port_name : -1; 13682b730263SAdrien Mazarguil /* 13692b730263SAdrien Mazarguil * Look for sibling devices in order to reuse their switch domain 13702b730263SAdrien Mazarguil * if any, otherwise allocate one. 13712b730263SAdrien Mazarguil */ 1372d874a4eeSThomas Monjalon RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) { 1373dbeba4cfSThomas Monjalon const struct mlx5_priv *opriv = 1374d874a4eeSThomas Monjalon rte_eth_devices[port_id].data->dev_private; 13752b730263SAdrien Mazarguil 13762b730263SAdrien Mazarguil if (!opriv || 13772b730263SAdrien Mazarguil opriv->domain_id == 13782b730263SAdrien Mazarguil RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 13792b730263SAdrien Mazarguil continue; 13802b730263SAdrien Mazarguil priv->domain_id = opriv->domain_id; 13812b730263SAdrien Mazarguil break; 13822b730263SAdrien Mazarguil } 13832b730263SAdrien Mazarguil if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 13842b730263SAdrien Mazarguil err = rte_eth_switch_domain_alloc(&priv->domain_id); 13852b730263SAdrien Mazarguil if (err) { 13862b730263SAdrien Mazarguil err = rte_errno; 13872b730263SAdrien Mazarguil DRV_LOG(ERR, "unable to allocate switch domain: %s", 13882b730263SAdrien Mazarguil strerror(rte_errno)); 13892b730263SAdrien Mazarguil goto error; 13902b730263SAdrien Mazarguil } 13912b730263SAdrien Mazarguil own_domain_id = 1; 13922b730263SAdrien Mazarguil } 1393f38c5457SAdrien Mazarguil err = mlx5_args(&config, dpdk_dev->devargs); 1394e72dd09bSNélio Laranjeiro if (err) { 1395012ad994SShahaf Shuler err = rte_errno; 139693068a9dSAdrien Mazarguil DRV_LOG(ERR, "failed to process device arguments: %s", 139793068a9dSAdrien Mazarguil strerror(rte_errno)); 13989083982cSAdrien Mazarguil goto error; 1399e72dd09bSNélio Laranjeiro } 140017e19bc4SViacheslav Ovsiienko config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & 140117e19bc4SViacheslav Ovsiienko IBV_DEVICE_RAW_IP_CSUM); 1402a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "checksum offloading is %ssupported", 14037fe24446SShahaf Shuler (config.hw_csum ? "" : "not ")); 14042dd8b721SViacheslav Ovsiienko #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 14052dd8b721SViacheslav Ovsiienko !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 14062dd8b721SViacheslav Ovsiienko DRV_LOG(DEBUG, "counters are not supported"); 14079a761de8SOri Kam #endif 140858b1312eSYongseok Koh #ifndef HAVE_IBV_FLOW_DV_SUPPORT 140958b1312eSYongseok Koh if (config.dv_flow_en) { 141058b1312eSYongseok Koh DRV_LOG(WARNING, "DV flow is not supported"); 141158b1312eSYongseok Koh config.dv_flow_en = 0; 141258b1312eSYongseok Koh } 141358b1312eSYongseok Koh #endif 14147fe24446SShahaf Shuler config.ind_table_max_size = 141517e19bc4SViacheslav Ovsiienko sh->device_attr.rss_caps.max_rwq_indirection_table_size; 141668128934SAdrien Mazarguil /* 141768128934SAdrien Mazarguil * Remove this check once DPDK supports larger/variable 141868128934SAdrien Mazarguil * indirection tables. 
141968128934SAdrien Mazarguil */ 142068128934SAdrien Mazarguil if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 14217fe24446SShahaf Shuler config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; 1422a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 14237fe24446SShahaf Shuler config.ind_table_max_size); 142417e19bc4SViacheslav Ovsiienko config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 142543e9d979SShachar Beiser IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 1426a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 14277fe24446SShahaf Shuler (config.hw_vlan_strip ? "" : "not ")); 142817e19bc4SViacheslav Ovsiienko config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 1429cd230a3eSShahaf Shuler IBV_RAW_PACKET_CAP_SCATTER_FCS); 1430a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 14317fe24446SShahaf Shuler (config.hw_fcs_strip ? "" : "not ")); 14322014a7fbSYongseok Koh #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 143317e19bc4SViacheslav Ovsiienko hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 14342014a7fbSYongseok Koh #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 143517e19bc4SViacheslav Ovsiienko hw_padding = !!(sh->device_attr.device_cap_flags_ex & 14362014a7fbSYongseok Koh IBV_DEVICE_PCI_WRITE_END_PADDING); 143743e9d979SShachar Beiser #endif 143878c7a16dSYongseok Koh if (config.hw_padding && !hw_padding) { 143978c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 144078c7a16dSYongseok Koh config.hw_padding = 0; 144178c7a16dSYongseok Koh } else if (config.hw_padding) { 144278c7a16dSYongseok Koh DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 144378c7a16dSYongseok Koh } 144417e19bc4SViacheslav Ovsiienko config.tso = (sh->device_attr.tso_caps.max_tso > 0 && 144517e19bc4SViacheslav Ovsiienko (sh->device_attr.tso_caps.supported_qpts & 144643e9d979SShachar Beiser (1 << IBV_QPT_RAW_PACKET))); 14477fe24446SShahaf Shuler if (config.tso) 144817e19bc4SViacheslav Ovsiienko config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; 1449f9de8718SShahaf Shuler /* 1450f9de8718SShahaf Shuler * MPW is disabled by default, while the Enhanced MPW is enabled 1451f9de8718SShahaf Shuler * by default. 1452f9de8718SShahaf Shuler */ 1453f9de8718SShahaf Shuler if (config.mps == MLX5_ARG_UNSET) 1454f9de8718SShahaf Shuler config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 1455f9de8718SShahaf Shuler MLX5_MPW_DISABLED; 1456f9de8718SShahaf Shuler else 1457f9de8718SShahaf Shuler config.mps = config.mps ? mps : MLX5_MPW_DISABLED; 1458a170a30dSNélio Laranjeiro DRV_LOG(INFO, "%sMPS is %s", 14590f99970bSNélio Laranjeiro config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", 146068128934SAdrien Mazarguil config.mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); 14617fe24446SShahaf Shuler if (config.cqe_comp && !cqe_comp) { 1462a170a30dSNélio Laranjeiro DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 14637fe24446SShahaf Shuler config.cqe_comp = 0; 1464523f5a74SYongseok Koh } 1465bc91e8dbSYongseok Koh if (config.cqe_pad && !cqe_pad) { 1466bc91e8dbSYongseok Koh DRV_LOG(WARNING, "Rx CQE padding isn't supported"); 1467bc91e8dbSYongseok Koh config.cqe_pad = 0; 1468bc91e8dbSYongseok Koh } else if (config.cqe_pad) { 1469bc91e8dbSYongseok Koh DRV_LOG(INFO, "Rx CQE padding is enabled"); 1470bc91e8dbSYongseok Koh } 14715c0e2db6SYongseok Koh if (config.mprq.enabled && mprq) { 14727d6bf6b8SYongseok Koh if (config.mprq.stride_num_n > mprq_max_stride_num_n || 14737d6bf6b8SYongseok Koh config.mprq.stride_num_n < mprq_min_stride_num_n) { 14747d6bf6b8SYongseok Koh config.mprq.stride_num_n = 14757d6bf6b8SYongseok Koh RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 14767d6bf6b8SYongseok Koh mprq_min_stride_num_n); 14777d6bf6b8SYongseok Koh DRV_LOG(WARNING, 14787d6bf6b8SYongseok Koh "the number of strides" 14797d6bf6b8SYongseok Koh " for Multi-Packet RQ is out of range," 14807d6bf6b8SYongseok Koh " setting default value (%u)", 14817d6bf6b8SYongseok Koh 1 << config.mprq.stride_num_n); 14827d6bf6b8SYongseok Koh } 14837d6bf6b8SYongseok Koh config.mprq.min_stride_size_n = mprq_min_stride_size_n; 14847d6bf6b8SYongseok Koh config.mprq.max_stride_size_n = mprq_max_stride_size_n; 14855c0e2db6SYongseok Koh } else if (config.mprq.enabled && !mprq) { 14865c0e2db6SYongseok Koh DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 14875c0e2db6SYongseok Koh config.mprq.enabled = 0; 14887d6bf6b8SYongseok Koh } 1489af4f09f2SNélio Laranjeiro eth_dev = rte_eth_dev_allocate(name); 1490af4f09f2SNélio Laranjeiro if (eth_dev == NULL) { 1491a170a30dSNélio Laranjeiro DRV_LOG(ERR, "can not allocate rte ethdev"); 1492af4f09f2SNélio Laranjeiro err = ENOMEM; 14939083982cSAdrien Mazarguil goto error; 1494af4f09f2SNélio Laranjeiro } 149515febafdSThomas Monjalon /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ 149615febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1497a7d3c627SThomas Monjalon if (priv->representor) { 14982b730263SAdrien Mazarguil eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 1499a7d3c627SThomas Monjalon eth_dev->data->representor_id = priv->representor_id; 1500a7d3c627SThomas Monjalon } 1501af4f09f2SNélio Laranjeiro eth_dev->data->dev_private = priv; 1502df428ceeSYongseok Koh priv->dev_data = eth_dev->data; 1503af4f09f2SNélio Laranjeiro eth_dev->data->mac_addrs = priv->mac; 1504f38c5457SAdrien Mazarguil eth_dev->device = dpdk_dev; 1505771fa900SAdrien Mazarguil /* Configure the first MAC address by default. */ 1506af4f09f2SNélio Laranjeiro if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 1507a170a30dSNélio Laranjeiro DRV_LOG(ERR, 1508a170a30dSNélio Laranjeiro "port %u cannot get MAC address, is mlx5_en" 1509a170a30dSNélio Laranjeiro " loaded? 
(errno: %s)", 15108c3c2372SAdrien Mazarguil eth_dev->data->port_id, strerror(rte_errno)); 1511e1c3e305SMatan Azrad err = ENODEV; 15129083982cSAdrien Mazarguil goto error; 1513771fa900SAdrien Mazarguil } 1514a170a30dSNélio Laranjeiro DRV_LOG(INFO, 1515a170a30dSNélio Laranjeiro "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 15160f99970bSNélio Laranjeiro eth_dev->data->port_id, 1517771fa900SAdrien Mazarguil mac.addr_bytes[0], mac.addr_bytes[1], 1518771fa900SAdrien Mazarguil mac.addr_bytes[2], mac.addr_bytes[3], 1519771fa900SAdrien Mazarguil mac.addr_bytes[4], mac.addr_bytes[5]); 1520771fa900SAdrien Mazarguil #ifndef NDEBUG 1521771fa900SAdrien Mazarguil { 1522771fa900SAdrien Mazarguil char ifname[IF_NAMESIZE]; 1523771fa900SAdrien Mazarguil 1524af4f09f2SNélio Laranjeiro if (mlx5_get_ifname(eth_dev, &ifname) == 0) 1525a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 15260f99970bSNélio Laranjeiro eth_dev->data->port_id, ifname); 1527771fa900SAdrien Mazarguil else 1528a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u ifname is unknown", 15290f99970bSNélio Laranjeiro eth_dev->data->port_id); 1530771fa900SAdrien Mazarguil } 1531771fa900SAdrien Mazarguil #endif 1532771fa900SAdrien Mazarguil /* Get actual MTU if possible. */ 1533a6d83b6aSNélio Laranjeiro err = mlx5_get_mtu(eth_dev, &priv->mtu); 1534012ad994SShahaf Shuler if (err) { 1535012ad994SShahaf Shuler err = rte_errno; 15369083982cSAdrien Mazarguil goto error; 1537012ad994SShahaf Shuler } 1538a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 1539a170a30dSNélio Laranjeiro priv->mtu); 154068128934SAdrien Mazarguil /* Initialize burst functions to prevent crashes before link-up. */ 1541e313ef4cSShahaf Shuler eth_dev->rx_pkt_burst = removed_rx_burst; 1542e313ef4cSShahaf Shuler eth_dev->tx_pkt_burst = removed_tx_burst; 1543771fa900SAdrien Mazarguil eth_dev->dev_ops = &mlx5_dev_ops; 1544272733b5SNélio Laranjeiro /* Register MAC address. 
*/ 1545272733b5SNélio Laranjeiro claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 1546f87bfa8eSYongseok Koh if (config.vf && config.vf_nl_en) 1547ccdcba53SNélio Laranjeiro mlx5_nl_mac_addr_sync(eth_dev); 1548d53180afSMoti Haimovsky priv->tcf_context = mlx5_flow_tcf_context_create(); 1549d53180afSMoti Haimovsky if (!priv->tcf_context) { 155057123c00SYongseok Koh err = -rte_errno; 155157123c00SYongseok Koh DRV_LOG(WARNING, 155257123c00SYongseok Koh "flow rules relying on switch offloads will not be" 155357123c00SYongseok Koh " supported: cannot open libmnl socket: %s", 155457123c00SYongseok Koh strerror(rte_errno)); 155557123c00SYongseok Koh } else { 155657123c00SYongseok Koh struct rte_flow_error error; 155757123c00SYongseok Koh unsigned int ifindex = mlx5_ifindex(eth_dev); 155857123c00SYongseok Koh 155957123c00SYongseok Koh if (!ifindex) { 156057123c00SYongseok Koh err = -rte_errno; 156157123c00SYongseok Koh error.message = 156257123c00SYongseok Koh "cannot retrieve network interface index"; 156357123c00SYongseok Koh } else { 1564d53180afSMoti Haimovsky err = mlx5_flow_tcf_init(priv->tcf_context, 1565d53180afSMoti Haimovsky ifindex, &error); 156657123c00SYongseok Koh } 156757123c00SYongseok Koh if (err) { 156857123c00SYongseok Koh DRV_LOG(WARNING, 156957123c00SYongseok Koh "flow rules relying on switch offloads will" 157057123c00SYongseok Koh " not be supported: %s: %s", 157157123c00SYongseok Koh error.message, strerror(rte_errno)); 1572d53180afSMoti Haimovsky mlx5_flow_tcf_context_destroy(priv->tcf_context); 1573d53180afSMoti Haimovsky priv->tcf_context = NULL; 157457123c00SYongseok Koh } 157557123c00SYongseok Koh } 1576b2177648SViacheslav Ovsiienko if (config.dv_flow_en) { 1577b2177648SViacheslav Ovsiienko err = mlx5_alloc_shared_dr(priv); 1578b2177648SViacheslav Ovsiienko if (err) 15794f84a197SOri Kam goto error; 15804f84a197SOri Kam } 1581c8ffb8a9SNélio Laranjeiro TAILQ_INIT(&priv->flows); 15821b37f5d8SNélio Laranjeiro TAILQ_INIT(&priv->ctrl_flows); 15831e3a39f7SXueming Li /* Hint libmlx5 to use PMD allocator for data plane resources */ 15841e3a39f7SXueming Li struct mlx5dv_ctx_allocators alctr = { 15851e3a39f7SXueming Li .alloc = &mlx5_alloc_verbs_buf, 15861e3a39f7SXueming Li .free = &mlx5_free_verbs_buf, 15871e3a39f7SXueming Li .data = priv, 15881e3a39f7SXueming Li }; 158917e19bc4SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 159017e19bc4SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 15911e3a39f7SXueming Li (void *)((uintptr_t)&alctr)); 1592771fa900SAdrien Mazarguil /* Bring Ethernet device up. */ 1593a170a30dSNélio Laranjeiro DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 15940f99970bSNélio Laranjeiro eth_dev->data->port_id); 15957ba5320bSNélio Laranjeiro mlx5_set_link_up(eth_dev); 1596a85a606cSShahaf Shuler /* 1597a85a606cSShahaf Shuler * Even though the interrupt handler is not installed yet, 1598a85a606cSShahaf Shuler * interrupts will still trigger on the asyn_fd from 1599a85a606cSShahaf Shuler * Verbs context returned by ibv_open_device(). 1600a85a606cSShahaf Shuler */ 1601a85a606cSShahaf Shuler mlx5_link_update(eth_dev, 0); 16027fe24446SShahaf Shuler /* Store device configuration on private structure. */ 16037fe24446SShahaf Shuler priv->config = config; 160478be8852SNelio Laranjeiro /* Supported Verbs flow priority number detection. 
*/ 16052815702bSNelio Laranjeiro err = mlx5_flow_discover_priorities(eth_dev); 16064fb27c1dSViacheslav Ovsiienko if (err < 0) { 16074fb27c1dSViacheslav Ovsiienko err = -err; 16089083982cSAdrien Mazarguil goto error; 16094fb27c1dSViacheslav Ovsiienko } 16102815702bSNelio Laranjeiro priv->config.flow_prio = err; 16110ace586dSXueming Li /* 16120ace586dSXueming Li * Once the device is added to the list of memory event 16130ace586dSXueming Li * callback, its global MR cache table cannot be expanded 16140ace586dSXueming Li * on the fly because of deadlock. If it overflows, lookup 16150ace586dSXueming Li * should be done by searching MR list linearly, which is slow. 16160ace586dSXueming Li */ 16170ace586dSXueming Li err = mlx5_mr_btree_init(&priv->mr.cache, 16180ace586dSXueming Li MLX5_MR_BTREE_CACHE_N * 2, 16190ace586dSXueming Li eth_dev->device->numa_node); 16200ace586dSXueming Li if (err) { 16210ace586dSXueming Li err = rte_errno; 16229083982cSAdrien Mazarguil goto error; 16230ace586dSXueming Li } 1624e89c15b6SAdrien Mazarguil /* Add device to memory callback list. */ 1625e89c15b6SAdrien Mazarguil rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); 1626e89c15b6SAdrien Mazarguil LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, 1627e89c15b6SAdrien Mazarguil priv, mem_event_cb); 1628e89c15b6SAdrien Mazarguil rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); 1629f38c5457SAdrien Mazarguil return eth_dev; 16309083982cSAdrien Mazarguil error: 163126c08b97SAdrien Mazarguil if (priv) { 1632b2177648SViacheslav Ovsiienko if (priv->sh) 1633b2177648SViacheslav Ovsiienko mlx5_free_shared_dr(priv); 163426c08b97SAdrien Mazarguil if (priv->nl_socket_route >= 0) 163526c08b97SAdrien Mazarguil close(priv->nl_socket_route); 163626c08b97SAdrien Mazarguil if (priv->nl_socket_rdma >= 0) 163726c08b97SAdrien Mazarguil close(priv->nl_socket_rdma); 1638d53180afSMoti Haimovsky if (priv->tcf_context) 1639d53180afSMoti Haimovsky mlx5_flow_tcf_context_destroy(priv->tcf_context); 16402b730263SAdrien Mazarguil if (own_domain_id) 16412b730263SAdrien Mazarguil claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 1642771fa900SAdrien Mazarguil rte_free(priv); 1643e16adf08SThomas Monjalon if (eth_dev != NULL) 1644e16adf08SThomas Monjalon eth_dev->data->dev_private = NULL; 164526c08b97SAdrien Mazarguil } 1646e16adf08SThomas Monjalon if (eth_dev != NULL) { 1647e16adf08SThomas Monjalon /* mac_addrs must not be freed alone because part of dev_private */ 1648e16adf08SThomas Monjalon eth_dev->data->mac_addrs = NULL; 1649690de285SRaslan Darawsheh rte_eth_dev_release_port(eth_dev); 1650e16adf08SThomas Monjalon } 165117e19bc4SViacheslav Ovsiienko if (sh) 165217e19bc4SViacheslav Ovsiienko mlx5_free_shared_ibctx(sh); 1653f38c5457SAdrien Mazarguil assert(err > 0); 1654a6d83b6aSNélio Laranjeiro rte_errno = err; 1655f38c5457SAdrien Mazarguil return NULL; 1656f38c5457SAdrien Mazarguil } 1657f38c5457SAdrien Mazarguil 1658116f90adSAdrien Mazarguil /** 1659116f90adSAdrien Mazarguil * Comparison callback to sort device data. 1660116f90adSAdrien Mazarguil * 1661116f90adSAdrien Mazarguil * This is meant to be used with qsort(). 1662116f90adSAdrien Mazarguil * 1663116f90adSAdrien Mazarguil * @param a[in] 1664116f90adSAdrien Mazarguil * Pointer to pointer to first data object. 1665116f90adSAdrien Mazarguil * @param b[in] 1666116f90adSAdrien Mazarguil * Pointer to pointer to second data object. 
1667116f90adSAdrien Mazarguil * 1668116f90adSAdrien Mazarguil * @return 1669116f90adSAdrien Mazarguil * 0 if both objects are equal, less than 0 if the first argument is less 1670116f90adSAdrien Mazarguil * than the second, greater than 0 otherwise. 1671116f90adSAdrien Mazarguil */ 1672116f90adSAdrien Mazarguil static int 1673116f90adSAdrien Mazarguil mlx5_dev_spawn_data_cmp(const void *a, const void *b) 1674116f90adSAdrien Mazarguil { 1675116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_a = 1676116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)a)->info; 1677116f90adSAdrien Mazarguil const struct mlx5_switch_info *si_b = 1678116f90adSAdrien Mazarguil &((const struct mlx5_dev_spawn_data *)b)->info; 1679116f90adSAdrien Mazarguil int ret; 1680116f90adSAdrien Mazarguil 1681116f90adSAdrien Mazarguil /* Master device first. */ 1682116f90adSAdrien Mazarguil ret = si_b->master - si_a->master; 1683116f90adSAdrien Mazarguil if (ret) 1684116f90adSAdrien Mazarguil return ret; 1685116f90adSAdrien Mazarguil /* Then representor devices. */ 1686116f90adSAdrien Mazarguil ret = si_b->representor - si_a->representor; 1687116f90adSAdrien Mazarguil if (ret) 1688116f90adSAdrien Mazarguil return ret; 1689116f90adSAdrien Mazarguil /* Unidentified devices come last in no specific order. */ 1690116f90adSAdrien Mazarguil if (!si_a->representor) 1691116f90adSAdrien Mazarguil return 0; 1692116f90adSAdrien Mazarguil /* Order representors by name. */ 1693116f90adSAdrien Mazarguil return si_a->port_name - si_b->port_name; 1694116f90adSAdrien Mazarguil } 1695116f90adSAdrien Mazarguil 1696f38c5457SAdrien Mazarguil /** 1697f38c5457SAdrien Mazarguil * DPDK callback to register a PCI device. 1698f38c5457SAdrien Mazarguil * 16992b730263SAdrien Mazarguil * This function spawns Ethernet devices out of a given PCI device. 1700f38c5457SAdrien Mazarguil * 1701f38c5457SAdrien Mazarguil * @param[in] pci_drv 1702f38c5457SAdrien Mazarguil * PCI driver structure (mlx5_driver). 1703f38c5457SAdrien Mazarguil * @param[in] pci_dev 1704f38c5457SAdrien Mazarguil * PCI device information. 1705f38c5457SAdrien Mazarguil * 1706f38c5457SAdrien Mazarguil * @return 1707f38c5457SAdrien Mazarguil * 0 on success, a negative errno value otherwise and rte_errno is set. 1708f38c5457SAdrien Mazarguil */ 1709f38c5457SAdrien Mazarguil static int 1710f38c5457SAdrien Mazarguil mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1711f38c5457SAdrien Mazarguil struct rte_pci_device *pci_dev) 1712f38c5457SAdrien Mazarguil { 1713f38c5457SAdrien Mazarguil struct ibv_device **ibv_list; 1714ad74bc61SViacheslav Ovsiienko /* 1715ad74bc61SViacheslav Ovsiienko * Number of found IB Devices matching with requested PCI BDF. 1716ad74bc61SViacheslav Ovsiienko * nd != 1 means there are multiple IB devices over the same 1717ad74bc61SViacheslav Ovsiienko * PCI device and we have representors and master. 1718ad74bc61SViacheslav Ovsiienko */ 1719ad74bc61SViacheslav Ovsiienko unsigned int nd = 0; 1720ad74bc61SViacheslav Ovsiienko /* 1721ad74bc61SViacheslav Ovsiienko * Number of found IB device Ports. nd = 1 and np = 1..n means 1722ad74bc61SViacheslav Ovsiienko * we have the single multiport IB device, and there may be 1723ad74bc61SViacheslav Ovsiienko * representors attached to some of found ports. 
1724ad74bc61SViacheslav Ovsiienko */ 1725ad74bc61SViacheslav Ovsiienko unsigned int np = 0; 1726ad74bc61SViacheslav Ovsiienko /* 1727ad74bc61SViacheslav Ovsiienko * Number of DPDK Ethernet devices to spawn - either over 1728ad74bc61SViacheslav Ovsiienko * multiple IB devices or multiple ports of single IB device. 1729ad74bc61SViacheslav Ovsiienko * Actually this is the number of iterations to spawn. 1730ad74bc61SViacheslav Ovsiienko */ 1731ad74bc61SViacheslav Ovsiienko unsigned int ns = 0; 1732f87bfa8eSYongseok Koh struct mlx5_dev_config dev_config; 1733f38c5457SAdrien Mazarguil int ret; 1734f38c5457SAdrien Mazarguil 17357be600c8SYongseok Koh ret = mlx5_init_once(); 17367be600c8SYongseok Koh if (ret) { 17377be600c8SYongseok Koh DRV_LOG(ERR, "unable to init PMD global data: %s", 17387be600c8SYongseok Koh strerror(rte_errno)); 17397be600c8SYongseok Koh return -rte_errno; 17407be600c8SYongseok Koh } 1741f38c5457SAdrien Mazarguil assert(pci_drv == &mlx5_driver); 1742f38c5457SAdrien Mazarguil errno = 0; 1743f38c5457SAdrien Mazarguil ibv_list = mlx5_glue->get_device_list(&ret); 1744f38c5457SAdrien Mazarguil if (!ibv_list) { 1745f38c5457SAdrien Mazarguil rte_errno = errno ? errno : ENOSYS; 1746f38c5457SAdrien Mazarguil DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 1747a6d83b6aSNélio Laranjeiro return -rte_errno; 1748a6d83b6aSNélio Laranjeiro } 1749ad74bc61SViacheslav Ovsiienko /* 1750ad74bc61SViacheslav Ovsiienko * First scan the list of all Infiniband devices to find 1751ad74bc61SViacheslav Ovsiienko * matching ones, gathering into the list. 1752ad74bc61SViacheslav Ovsiienko */ 175326c08b97SAdrien Mazarguil struct ibv_device *ibv_match[ret + 1]; 1754ad74bc61SViacheslav Ovsiienko int nl_route = -1; 1755ad74bc61SViacheslav Ovsiienko int nl_rdma = -1; 1756ad74bc61SViacheslav Ovsiienko unsigned int i; 175726c08b97SAdrien Mazarguil 1758f38c5457SAdrien Mazarguil while (ret-- > 0) { 1759f38c5457SAdrien Mazarguil struct rte_pci_addr pci_addr; 1760f38c5457SAdrien Mazarguil 1761f38c5457SAdrien Mazarguil DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 1762f38c5457SAdrien Mazarguil if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr)) 1763f38c5457SAdrien Mazarguil continue; 1764f38c5457SAdrien Mazarguil if (pci_dev->addr.domain != pci_addr.domain || 1765f38c5457SAdrien Mazarguil pci_dev->addr.bus != pci_addr.bus || 1766f38c5457SAdrien Mazarguil pci_dev->addr.devid != pci_addr.devid || 1767f38c5457SAdrien Mazarguil pci_dev->addr.function != pci_addr.function) 1768f38c5457SAdrien Mazarguil continue; 176926c08b97SAdrien Mazarguil DRV_LOG(INFO, "PCI information matches for device \"%s\"", 1770f38c5457SAdrien Mazarguil ibv_list[ret]->name); 1771ad74bc61SViacheslav Ovsiienko ibv_match[nd++] = ibv_list[ret]; 177226c08b97SAdrien Mazarguil } 1773ad74bc61SViacheslav Ovsiienko ibv_match[nd] = NULL; 1774ad74bc61SViacheslav Ovsiienko if (!nd) { 1775ad74bc61SViacheslav Ovsiienko /* No device matches, just complain and bail out. 
*/ 1776ad74bc61SViacheslav Ovsiienko mlx5_glue->free_device_list(ibv_list); 1777ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, 1778ad74bc61SViacheslav Ovsiienko "no Verbs device matches PCI device " PCI_PRI_FMT "," 1779ad74bc61SViacheslav Ovsiienko " are kernel drivers loaded?", 1780ad74bc61SViacheslav Ovsiienko pci_dev->addr.domain, pci_dev->addr.bus, 1781ad74bc61SViacheslav Ovsiienko pci_dev->addr.devid, pci_dev->addr.function); 1782ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 1783ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 1784ad74bc61SViacheslav Ovsiienko return ret; 1785ad74bc61SViacheslav Ovsiienko } 1786ad74bc61SViacheslav Ovsiienko nl_route = mlx5_nl_init(NETLINK_ROUTE); 1787ad74bc61SViacheslav Ovsiienko nl_rdma = mlx5_nl_init(NETLINK_RDMA); 1788ad74bc61SViacheslav Ovsiienko if (nd == 1) { 178926c08b97SAdrien Mazarguil /* 1790ad74bc61SViacheslav Ovsiienko * The single matching device found may have multiple ports. 1791ad74bc61SViacheslav Ovsiienko * Each port may be a representor, so we have to check the port 1792ad74bc61SViacheslav Ovsiienko * number and check whether representors exist. 179326c08b97SAdrien Mazarguil */ 1794ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 1795ad74bc61SViacheslav Ovsiienko np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name); 1796ad74bc61SViacheslav Ovsiienko if (!np) 1797ad74bc61SViacheslav Ovsiienko DRV_LOG(WARNING, "can not get IB device \"%s\"" 1798ad74bc61SViacheslav Ovsiienko " ports number", ibv_match[0]->name); 1799ad74bc61SViacheslav Ovsiienko } 1800ad74bc61SViacheslav Ovsiienko /* 1801ad74bc61SViacheslav Ovsiienko * Now we can determine the maximal 1802ad74bc61SViacheslav Ovsiienko * number of devices to be spawned. 1803ad74bc61SViacheslav Ovsiienko */ 1804ad74bc61SViacheslav Ovsiienko struct mlx5_dev_spawn_data list[np ? np : nd]; 1805ad74bc61SViacheslav Ovsiienko 1806ad74bc61SViacheslav Ovsiienko if (np > 1) { 1807ad74bc61SViacheslav Ovsiienko /* 1808ad74bc61SViacheslav Ovsiienko * Single IB device with multiple ports found, 1809ad74bc61SViacheslav Ovsiienko * it may be E-Switch master device and representors. 1810ad74bc61SViacheslav Ovsiienko * We have to perform identification through the ports. 1811ad74bc61SViacheslav Ovsiienko */ 1812ad74bc61SViacheslav Ovsiienko assert(nl_rdma >= 0); 1813ad74bc61SViacheslav Ovsiienko assert(ns == 0); 1814ad74bc61SViacheslav Ovsiienko assert(nd == 1); 1815ad74bc61SViacheslav Ovsiienko for (i = 1; i <= np; ++i) { 1816ad74bc61SViacheslav Ovsiienko list[ns].max_port = np; 1817ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = i; 1818ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[0]; 1819ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 1820ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 1821ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, i); 1822ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 1823ad74bc61SViacheslav Ovsiienko /* 1824ad74bc61SViacheslav Ovsiienko * No network interface index found for the 1825ad74bc61SViacheslav Ovsiienko * specified port, it means there is no 1826ad74bc61SViacheslav Ovsiienko * representor on this port. It's OK, 1827ad74bc61SViacheslav Ovsiienko * there can be disabled ports, for example 1828ad74bc61SViacheslav Ovsiienko * if sriov_numvfs < sriov_totalvfs. 
1829ad74bc61SViacheslav Ovsiienko */ 183026c08b97SAdrien Mazarguil continue; 183126c08b97SAdrien Mazarguil } 1832ad74bc61SViacheslav Ovsiienko ret = -1; 183326c08b97SAdrien Mazarguil if (nl_route >= 0) 1834ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 1835ad74bc61SViacheslav Ovsiienko (nl_route, 1836ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 1837ad74bc61SViacheslav Ovsiienko &list[ns].info); 1838ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 1839ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 1840ad74bc61SViacheslav Ovsiienko /* 1841ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 1842ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 1843ad74bc61SViacheslav Ovsiienko * with sysfs. 1844ad74bc61SViacheslav Ovsiienko */ 1845ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 1846ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 1847ad74bc61SViacheslav Ovsiienko &list[ns].info); 1848ad74bc61SViacheslav Ovsiienko } 1849ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 1850ad74bc61SViacheslav Ovsiienko list[ns].info.master)) 1851ad74bc61SViacheslav Ovsiienko ns++; 1852ad74bc61SViacheslav Ovsiienko } 1853ad74bc61SViacheslav Ovsiienko if (!ns) { 185426c08b97SAdrien Mazarguil DRV_LOG(ERR, 1855ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 1856ad74bc61SViacheslav Ovsiienko " on the IB device with multiple ports"); 1857ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 1858ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 1859ad74bc61SViacheslav Ovsiienko goto exit; 1860ad74bc61SViacheslav Ovsiienko } 1861ad74bc61SViacheslav Ovsiienko } else { 1862ad74bc61SViacheslav Ovsiienko /* 1863ad74bc61SViacheslav Ovsiienko * The existence of several matching entries (nd > 1) means 1864ad74bc61SViacheslav Ovsiienko * port representors have been instantiated. No existing Verbs 1865ad74bc61SViacheslav Ovsiienko * call nor sysfs entries can tell them apart, this can only 1866ad74bc61SViacheslav Ovsiienko * be done through Netlink calls assuming kernel drivers are 1867ad74bc61SViacheslav Ovsiienko * recent enough to support them. 1868ad74bc61SViacheslav Ovsiienko * 1869ad74bc61SViacheslav Ovsiienko * In the event of identification failure through Netlink, 1870ad74bc61SViacheslav Ovsiienko * try again through sysfs, then: 1871ad74bc61SViacheslav Ovsiienko * 1872ad74bc61SViacheslav Ovsiienko * 1. A single IB device matches (nd == 1) with single 1873ad74bc61SViacheslav Ovsiienko * port (np=0/1) and is not a representor, assume 1874ad74bc61SViacheslav Ovsiienko * no switch support. 1875ad74bc61SViacheslav Ovsiienko * 1876ad74bc61SViacheslav Ovsiienko * 2. Otherwise no safe assumptions can be made; 1877ad74bc61SViacheslav Ovsiienko * complain louder and bail out. 
1878ad74bc61SViacheslav Ovsiienko */ 1879ad74bc61SViacheslav Ovsiienko np = 1; 1880ad74bc61SViacheslav Ovsiienko for (i = 0; i != nd; ++i) { 1881ad74bc61SViacheslav Ovsiienko memset(&list[ns].info, 0, sizeof(list[ns].info)); 1882ad74bc61SViacheslav Ovsiienko list[ns].max_port = 1; 1883ad74bc61SViacheslav Ovsiienko list[ns].ibv_port = 1; 1884ad74bc61SViacheslav Ovsiienko list[ns].ibv_dev = ibv_match[i]; 1885ad74bc61SViacheslav Ovsiienko list[ns].eth_dev = NULL; 1886ad74bc61SViacheslav Ovsiienko list[ns].ifindex = 0; 1887ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 1888ad74bc61SViacheslav Ovsiienko list[ns].ifindex = mlx5_nl_ifindex 1889ad74bc61SViacheslav Ovsiienko (nl_rdma, list[ns].ibv_dev->name, 1); 1890ad74bc61SViacheslav Ovsiienko if (!list[ns].ifindex) { 1891ad74bc61SViacheslav Ovsiienko /* 1892ad74bc61SViacheslav Ovsiienko * No network interface index found for the 1893ad74bc61SViacheslav Ovsiienko * specified device, it means it is not 1894ad74bc61SViacheslav Ovsiienko * a representor/master. 1895ad74bc61SViacheslav Ovsiienko */ 1896ad74bc61SViacheslav Ovsiienko continue; 1897ad74bc61SViacheslav Ovsiienko } 1898ad74bc61SViacheslav Ovsiienko ret = -1; 1899ad74bc61SViacheslav Ovsiienko if (nl_route >= 0) 1900ad74bc61SViacheslav Ovsiienko ret = mlx5_nl_switch_info 1901ad74bc61SViacheslav Ovsiienko (nl_route, 1902ad74bc61SViacheslav Ovsiienko list[ns].ifindex, 1903ad74bc61SViacheslav Ovsiienko &list[ns].info); 1904ad74bc61SViacheslav Ovsiienko if (ret || (!list[ns].info.representor && 1905ad74bc61SViacheslav Ovsiienko !list[ns].info.master)) { 1906ad74bc61SViacheslav Ovsiienko /* 1907ad74bc61SViacheslav Ovsiienko * We failed to recognize representors with 1908ad74bc61SViacheslav Ovsiienko * Netlink, let's try to perform the task 1909ad74bc61SViacheslav Ovsiienko * with sysfs. 1910ad74bc61SViacheslav Ovsiienko */ 1911ad74bc61SViacheslav Ovsiienko ret = mlx5_sysfs_switch_info 1912ad74bc61SViacheslav Ovsiienko (list[ns].ifindex, 1913ad74bc61SViacheslav Ovsiienko &list[ns].info); 1914ad74bc61SViacheslav Ovsiienko } 1915ad74bc61SViacheslav Ovsiienko if (!ret && (list[ns].info.representor ^ 1916ad74bc61SViacheslav Ovsiienko list[ns].info.master)) { 1917ad74bc61SViacheslav Ovsiienko ns++; 1918ad74bc61SViacheslav Ovsiienko } else if ((nd == 1) && 1919ad74bc61SViacheslav Ovsiienko !list[ns].info.representor && 1920ad74bc61SViacheslav Ovsiienko !list[ns].info.master) { 1921ad74bc61SViacheslav Ovsiienko /* 1922ad74bc61SViacheslav Ovsiienko * Single IB device with 1923ad74bc61SViacheslav Ovsiienko * one physical port and 1924ad74bc61SViacheslav Ovsiienko * attached network device. 1925ad74bc61SViacheslav Ovsiienko * Maybe SR-IOV is not enabled 1926ad74bc61SViacheslav Ovsiienko * or there are no representors. 
1927ad74bc61SViacheslav Ovsiienko */ 1928ad74bc61SViacheslav Ovsiienko DRV_LOG(INFO, "no E-Switch support detected"); 1929ad74bc61SViacheslav Ovsiienko ns++; 1930ad74bc61SViacheslav Ovsiienko break; 193126c08b97SAdrien Mazarguil } 1932f38c5457SAdrien Mazarguil } 1933ad74bc61SViacheslav Ovsiienko if (!ns) { 1934ad74bc61SViacheslav Ovsiienko DRV_LOG(ERR, 1935ad74bc61SViacheslav Ovsiienko "unable to recognize master/representors" 1936ad74bc61SViacheslav Ovsiienko " on the multiple IB devices"); 1937ad74bc61SViacheslav Ovsiienko rte_errno = ENOENT; 1938ad74bc61SViacheslav Ovsiienko ret = -rte_errno; 1939ad74bc61SViacheslav Ovsiienko goto exit; 1940ad74bc61SViacheslav Ovsiienko } 1941ad74bc61SViacheslav Ovsiienko } 1942ad74bc61SViacheslav Ovsiienko assert(ns); 1943116f90adSAdrien Mazarguil /* 1944116f90adSAdrien Mazarguil * Sort list to probe devices in natural order for users convenience 1945116f90adSAdrien Mazarguil * (i.e. master first, then representors from lowest to highest ID). 1946116f90adSAdrien Mazarguil */ 1947ad74bc61SViacheslav Ovsiienko qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp); 1948f87bfa8eSYongseok Koh /* Default configuration. */ 1949f87bfa8eSYongseok Koh dev_config = (struct mlx5_dev_config){ 195078c7a16dSYongseok Koh .hw_padding = 0, 1951f87bfa8eSYongseok Koh .mps = MLX5_ARG_UNSET, 1952f87bfa8eSYongseok Koh .tx_vec_en = 1, 1953f87bfa8eSYongseok Koh .rx_vec_en = 1, 1954f87bfa8eSYongseok Koh .txq_inline = MLX5_ARG_UNSET, 1955f87bfa8eSYongseok Koh .txqs_inline = MLX5_ARG_UNSET, 195609d8b416SYongseok Koh .txqs_vec = MLX5_ARG_UNSET, 1957f87bfa8eSYongseok Koh .inline_max_packet_sz = MLX5_ARG_UNSET, 1958f87bfa8eSYongseok Koh .vf_nl_en = 1, 1959dceb5029SYongseok Koh .mr_ext_memseg_en = 1, 1960f87bfa8eSYongseok Koh .mprq = { 1961f87bfa8eSYongseok Koh .enabled = 0, /* Disabled by default. */ 1962f87bfa8eSYongseok Koh .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N, 1963f87bfa8eSYongseok Koh .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, 1964f87bfa8eSYongseok Koh .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, 1965f87bfa8eSYongseok Koh }, 1966f87bfa8eSYongseok Koh }; 1967ad74bc61SViacheslav Ovsiienko /* Device specific configuration. */ 1968f38c5457SAdrien Mazarguil switch (pci_dev->id.device_id) { 196909d8b416SYongseok Koh case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF: 197009d8b416SYongseok Koh dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD; 197109d8b416SYongseok Koh break; 1972f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 1973f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1974f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 1975f38c5457SAdrien Mazarguil case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 1976f87bfa8eSYongseok Koh dev_config.vf = 1; 1977f38c5457SAdrien Mazarguil break; 1978f38c5457SAdrien Mazarguil default: 1979f87bfa8eSYongseok Koh break; 1980f38c5457SAdrien Mazarguil } 198109d8b416SYongseok Koh /* Set architecture-dependent default value if unset. 
*/ 198209d8b416SYongseok Koh if (dev_config.txqs_vec == MLX5_ARG_UNSET) 198309d8b416SYongseok Koh dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS; 1984ad74bc61SViacheslav Ovsiienko for (i = 0; i != ns; ++i) { 19852b730263SAdrien Mazarguil uint32_t restore; 19862b730263SAdrien Mazarguil 1987f87bfa8eSYongseok Koh list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 1988ad74bc61SViacheslav Ovsiienko &list[i], 1989ad74bc61SViacheslav Ovsiienko dev_config); 19906de569f5SAdrien Mazarguil if (!list[i].eth_dev) { 1991206254b7SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 19922b730263SAdrien Mazarguil break; 1993206254b7SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 19946de569f5SAdrien Mazarguil continue; 19956de569f5SAdrien Mazarguil } 1996116f90adSAdrien Mazarguil restore = list[i].eth_dev->data->dev_flags; 1997116f90adSAdrien Mazarguil rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 19982b730263SAdrien Mazarguil /* Restore non-PCI flags cleared by the above call. */ 1999116f90adSAdrien Mazarguil list[i].eth_dev->data->dev_flags |= restore; 2000116f90adSAdrien Mazarguil rte_eth_dev_probing_finish(list[i].eth_dev); 20012b730263SAdrien Mazarguil } 2002ad74bc61SViacheslav Ovsiienko if (i != ns) { 2003f38c5457SAdrien Mazarguil DRV_LOG(ERR, 2004f38c5457SAdrien Mazarguil "probe of PCI device " PCI_PRI_FMT " aborted after" 2005f38c5457SAdrien Mazarguil " encountering an error: %s", 2006f38c5457SAdrien Mazarguil pci_dev->addr.domain, pci_dev->addr.bus, 2007f38c5457SAdrien Mazarguil pci_dev->addr.devid, pci_dev->addr.function, 2008f38c5457SAdrien Mazarguil strerror(rte_errno)); 2009f38c5457SAdrien Mazarguil ret = -rte_errno; 20102b730263SAdrien Mazarguil /* Roll back. */ 20112b730263SAdrien Mazarguil while (i--) { 20126de569f5SAdrien Mazarguil if (!list[i].eth_dev) 20136de569f5SAdrien Mazarguil continue; 2014116f90adSAdrien Mazarguil mlx5_dev_close(list[i].eth_dev); 2015e16adf08SThomas Monjalon /* mac_addrs must not be freed because in dev_private */ 2016e16adf08SThomas Monjalon list[i].eth_dev->data->mac_addrs = NULL; 2017116f90adSAdrien Mazarguil claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 20182b730263SAdrien Mazarguil } 20192b730263SAdrien Mazarguil /* Restore original error. */ 20202b730263SAdrien Mazarguil rte_errno = -ret; 2021f38c5457SAdrien Mazarguil } else { 2022f38c5457SAdrien Mazarguil ret = 0; 2023f38c5457SAdrien Mazarguil } 2024ad74bc61SViacheslav Ovsiienko exit: 2025ad74bc61SViacheslav Ovsiienko /* 2026ad74bc61SViacheslav Ovsiienko * Do the routine cleanup: 2027ad74bc61SViacheslav Ovsiienko * - close opened Netlink sockets 2028ad74bc61SViacheslav Ovsiienko * - free the Infiniband device list 2029ad74bc61SViacheslav Ovsiienko */ 2030ad74bc61SViacheslav Ovsiienko if (nl_rdma >= 0) 2031ad74bc61SViacheslav Ovsiienko close(nl_rdma); 2032ad74bc61SViacheslav Ovsiienko if (nl_route >= 0) 2033ad74bc61SViacheslav Ovsiienko close(nl_route); 2034ad74bc61SViacheslav Ovsiienko assert(ibv_list); 2035ad74bc61SViacheslav Ovsiienko mlx5_glue->free_device_list(ibv_list); 2036f38c5457SAdrien Mazarguil return ret; 2037771fa900SAdrien Mazarguil } 2038771fa900SAdrien Mazarguil 20393a820742SOphir Munk /** 20403a820742SOphir Munk * DPDK callback to remove a PCI device. 20413a820742SOphir Munk * 20423a820742SOphir Munk * This function removes all Ethernet devices belong to a given PCI device. 20433a820742SOphir Munk * 20443a820742SOphir Munk * @param[in] pci_dev 20453a820742SOphir Munk * Pointer to the PCI device. 
20463a820742SOphir Munk * 20473a820742SOphir Munk * @return 20483a820742SOphir Munk * 0 on success, the function cannot fail. 20493a820742SOphir Munk */ 20503a820742SOphir Munk static int 20513a820742SOphir Munk mlx5_pci_remove(struct rte_pci_device *pci_dev) 20523a820742SOphir Munk { 20533a820742SOphir Munk uint16_t port_id; 20543a820742SOphir Munk struct rte_eth_dev *port; 20553a820742SOphir Munk 20563a820742SOphir Munk for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) { 20573a820742SOphir Munk port = &rte_eth_devices[port_id]; 20583a820742SOphir Munk if (port->state != RTE_ETH_DEV_UNUSED && 20593a820742SOphir Munk port->device == &pci_dev->device) 20603a820742SOphir Munk rte_eth_dev_close(port_id); 20613a820742SOphir Munk } 20623a820742SOphir Munk return 0; 20633a820742SOphir Munk } 20643a820742SOphir Munk 2065771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = { 2066771fa900SAdrien Mazarguil { 20671d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 20681d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4) 2069771fa900SAdrien Mazarguil }, 2070771fa900SAdrien Mazarguil { 20711d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 20721d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) 2073771fa900SAdrien Mazarguil }, 2074771fa900SAdrien Mazarguil { 20751d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 20761d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) 2077771fa900SAdrien Mazarguil }, 2078771fa900SAdrien Mazarguil { 20791d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 20801d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) 2081771fa900SAdrien Mazarguil }, 2082771fa900SAdrien Mazarguil { 2083528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2084528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5) 2085528a9fbeSYongseok Koh }, 2086528a9fbeSYongseok Koh { 2087528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2088528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) 2089528a9fbeSYongseok Koh }, 2090528a9fbeSYongseok Koh { 2091528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2092528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) 2093528a9fbeSYongseok Koh }, 2094528a9fbeSYongseok Koh { 2095528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2096528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) 2097528a9fbeSYongseok Koh }, 2098528a9fbeSYongseok Koh { 2099dd3331c6SShahaf Shuler RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2100dd3331c6SShahaf Shuler PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) 2101dd3331c6SShahaf Shuler }, 2102dd3331c6SShahaf Shuler { 2103c322c0e5SOri Kam RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2104c322c0e5SOri Kam PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) 2105c322c0e5SOri Kam }, 2106c322c0e5SOri Kam { 2107f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2108f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6) 2109f0354d84SWisam Jaddo }, 2110f0354d84SWisam Jaddo { 2111f0354d84SWisam Jaddo RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 2112f0354d84SWisam Jaddo PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) 2113f0354d84SWisam Jaddo }, 2114f0354d84SWisam Jaddo { 2115771fa900SAdrien Mazarguil .vendor_id = 0 2116771fa900SAdrien Mazarguil } 2117771fa900SAdrien Mazarguil }; 2118771fa900SAdrien Mazarguil 2119fdf91e0fSJan Blunck static struct rte_pci_driver mlx5_driver = { 21202f3193cfSJan Viktorin .driver = { 21212f3193cfSJan Viktorin .name = MLX5_DRIVER_NAME 21222f3193cfSJan Viktorin }, 2123771fa900SAdrien Mazarguil 
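/*
 * mlx5_pci_id_map above is a sentinel-terminated array: the PCI bus code
 * walks it until an entry with .vendor_id == 0 is reached. A minimal sketch
 * of such a matching loop (hypothetical helper; class and subsystem IDs are
 * ignored for brevity):
 *
 *   #include <stdint.h>
 *   #include <rte_pci.h>
 *
 *   static int
 *   id_table_match(const struct rte_pci_id *tbl,
 *                  uint16_t vendor, uint16_t device)
 *   {
 *       for (; tbl->vendor_id != 0; ++tbl)
 *           if (tbl->vendor_id == vendor && tbl->device_id == device)
 *               return 1;
 *       return 0;
 *   }
 */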
2124af424af8SShreyansh Jain 	.probe = mlx5_pci_probe,
21253a820742SOphir Munk 	.remove = mlx5_pci_remove,
2126989e999dSShahaf Shuler 	.dma_map = mlx5_dma_map,
2127989e999dSShahaf Shuler 	.dma_unmap = mlx5_dma_unmap,
2128206254b7SOphir Munk 	.drv_flags = (RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
2129206254b7SOphir Munk 		      RTE_PCI_DRV_PROBE_AGAIN),
2130771fa900SAdrien Mazarguil };
2131771fa900SAdrien Mazarguil 
213272b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN
213359b91becSAdrien Mazarguil 
213459b91becSAdrien Mazarguil /**
213508c028d0SAdrien Mazarguil  * Suffix RTE_EAL_PMD_PATH with "-glue".
213608c028d0SAdrien Mazarguil  *
213708c028d0SAdrien Mazarguil  * This function performs a sanity check on RTE_EAL_PMD_PATH before
213808c028d0SAdrien Mazarguil  * suffixing its last component.
213908c028d0SAdrien Mazarguil  *
214008c028d0SAdrien Mazarguil  * @param buf[out]
214108c028d0SAdrien Mazarguil  *   Output buffer, should be large enough otherwise NULL is returned.
214208c028d0SAdrien Mazarguil  * @param size
214308c028d0SAdrien Mazarguil  *   Size of @p out.
214408c028d0SAdrien Mazarguil  *
214508c028d0SAdrien Mazarguil  * @return
214608c028d0SAdrien Mazarguil  *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
214708c028d0SAdrien Mazarguil  */
214808c028d0SAdrien Mazarguil static char *
214908c028d0SAdrien Mazarguil mlx5_glue_path(char *buf, size_t size)
215008c028d0SAdrien Mazarguil {
215108c028d0SAdrien Mazarguil 	static const char *const bad[] = { "/", ".", "..", NULL };
215208c028d0SAdrien Mazarguil 	const char *path = RTE_EAL_PMD_PATH;
215308c028d0SAdrien Mazarguil 	size_t len = strlen(path);
215408c028d0SAdrien Mazarguil 	size_t off;
215508c028d0SAdrien Mazarguil 	int i;
215608c028d0SAdrien Mazarguil 
215708c028d0SAdrien Mazarguil 	while (len && path[len - 1] == '/')
215808c028d0SAdrien Mazarguil 		--len;
215908c028d0SAdrien Mazarguil 	for (off = len; off && path[off - 1] != '/'; --off)
216008c028d0SAdrien Mazarguil 		;
216108c028d0SAdrien Mazarguil 	for (i = 0; bad[i]; ++i)
216208c028d0SAdrien Mazarguil 		if (!strncmp(path + off, bad[i], (int)(len - off)))
216308c028d0SAdrien Mazarguil 			goto error;
216408c028d0SAdrien Mazarguil 	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
216508c028d0SAdrien Mazarguil 	if (i == -1 || (size_t)i >= size)
216608c028d0SAdrien Mazarguil 		goto error;
216708c028d0SAdrien Mazarguil 	return buf;
216808c028d0SAdrien Mazarguil error:
2169a170a30dSNélio Laranjeiro 	DRV_LOG(ERR,
2170a170a30dSNélio Laranjeiro 		"unable to append \"-glue\" to last component of"
217108c028d0SAdrien Mazarguil 		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
217208c028d0SAdrien Mazarguil 		" please re-configure DPDK");
217308c028d0SAdrien Mazarguil 	return NULL;
217408c028d0SAdrien Mazarguil }
217508c028d0SAdrien Mazarguil 
217608c028d0SAdrien Mazarguil /**
217759b91becSAdrien Mazarguil  * Initialization routine for run-time dependency on rdma-core.
217859b91becSAdrien Mazarguil  */
217959b91becSAdrien Mazarguil static int
218059b91becSAdrien Mazarguil mlx5_glue_init(void)
218159b91becSAdrien Mazarguil {
218208c028d0SAdrien Mazarguil 	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
2183f6242d06SAdrien Mazarguil 	const char *path[] = {
2184f6242d06SAdrien Mazarguil 		/*
2185f6242d06SAdrien Mazarguil 		 * A basic security check is necessary before trusting
2186f6242d06SAdrien Mazarguil 		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
2187f6242d06SAdrien Mazarguil 		 */
2188f6242d06SAdrien Mazarguil 		(geteuid() == getuid() && getegid() == getgid() ?
2189f6242d06SAdrien Mazarguil 		 getenv("MLX5_GLUE_PATH") : NULL),
219008c028d0SAdrien Mazarguil 		/*
219108c028d0SAdrien Mazarguil 		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
219208c028d0SAdrien Mazarguil 		 * variant, otherwise let dlopen() look up libraries on its
219308c028d0SAdrien Mazarguil 		 * own.
219408c028d0SAdrien Mazarguil 		 */
219508c028d0SAdrien Mazarguil 		(*RTE_EAL_PMD_PATH ?
219608c028d0SAdrien Mazarguil 		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
2197f6242d06SAdrien Mazarguil 	};
2198f6242d06SAdrien Mazarguil 	unsigned int i = 0;
219959b91becSAdrien Mazarguil 	void *handle = NULL;
220059b91becSAdrien Mazarguil 	void **sym;
220159b91becSAdrien Mazarguil 	const char *dlmsg;
220259b91becSAdrien Mazarguil 
2203f6242d06SAdrien Mazarguil 	while (!handle && i != RTE_DIM(path)) {
2204f6242d06SAdrien Mazarguil 		const char *end;
2205f6242d06SAdrien Mazarguil 		size_t len;
2206f6242d06SAdrien Mazarguil 		int ret;
2207f6242d06SAdrien Mazarguil 
2208f6242d06SAdrien Mazarguil 		if (!path[i]) {
2209f6242d06SAdrien Mazarguil 			++i;
2210f6242d06SAdrien Mazarguil 			continue;
2211f6242d06SAdrien Mazarguil 		}
2212f6242d06SAdrien Mazarguil 		end = strpbrk(path[i], ":;");
2213f6242d06SAdrien Mazarguil 		if (!end)
2214f6242d06SAdrien Mazarguil 			end = path[i] + strlen(path[i]);
2215f6242d06SAdrien Mazarguil 		len = end - path[i];
2216f6242d06SAdrien Mazarguil 		ret = 0;
2217f6242d06SAdrien Mazarguil 		do {
2218f6242d06SAdrien Mazarguil 			char name[ret + 1];
2219f6242d06SAdrien Mazarguil 
2220f6242d06SAdrien Mazarguil 			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
2221f6242d06SAdrien Mazarguil 				       (int)len, path[i],
2222f6242d06SAdrien Mazarguil 				       (!len || *(end - 1) == '/') ? "" : "/");
2223f6242d06SAdrien Mazarguil 			if (ret == -1)
2224f6242d06SAdrien Mazarguil 				break;
2225f6242d06SAdrien Mazarguil 			if (sizeof(name) != (size_t)ret + 1)
2226f6242d06SAdrien Mazarguil 				continue;
2227a170a30dSNélio Laranjeiro 			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
2228a170a30dSNélio Laranjeiro 				name);
2229f6242d06SAdrien Mazarguil 			handle = dlopen(name, RTLD_LAZY);
2230f6242d06SAdrien Mazarguil 			break;
2231f6242d06SAdrien Mazarguil 		} while (1);
2232f6242d06SAdrien Mazarguil 		path[i] = end + 1;
2233f6242d06SAdrien Mazarguil 		if (!*end)
2234f6242d06SAdrien Mazarguil 			++i;
2235f6242d06SAdrien Mazarguil 	}
223659b91becSAdrien Mazarguil 	if (!handle) {
223759b91becSAdrien Mazarguil 		rte_errno = EINVAL;
223859b91becSAdrien Mazarguil 		dlmsg = dlerror();
223959b91becSAdrien Mazarguil 		if (dlmsg)
2240a170a30dSNélio Laranjeiro 			DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
224159b91becSAdrien Mazarguil 		goto glue_error;
224259b91becSAdrien Mazarguil 	}
224359b91becSAdrien Mazarguil 	sym = dlsym(handle, "mlx5_glue");
224459b91becSAdrien Mazarguil 	if (!sym || !*sym) {
224559b91becSAdrien Mazarguil 		rte_errno = EINVAL;
224659b91becSAdrien Mazarguil 		dlmsg = dlerror();
224759b91becSAdrien Mazarguil 		if (dlmsg)
2248a170a30dSNélio Laranjeiro 			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
224959b91becSAdrien Mazarguil 		goto glue_error;
225059b91becSAdrien Mazarguil 	}
225159b91becSAdrien Mazarguil 	mlx5_glue = *sym;
225259b91becSAdrien Mazarguil 	return 0;
225359b91becSAdrien Mazarguil glue_error:
225459b91becSAdrien Mazarguil 	if (handle)
225559b91becSAdrien Mazarguil 		dlclose(handle);
2256a170a30dSNélio Laranjeiro 	DRV_LOG(WARNING,
2257a170a30dSNélio Laranjeiro 		"cannot initialize PMD due to missing run-time dependency on"
2258a170a30dSNélio Laranjeiro 		" rdma-core libraries (libibverbs, libmlx5)");
225959b91becSAdrien Mazarguil 	return -rte_errno;
226059b91becSAdrien Mazarguil }
226159b91becSAdrien Mazarguil 
226259b91becSAdrien Mazarguil #endif
226359b91becSAdrien Mazarguil 
2264771fa900SAdrien Mazarguil /**
2265771fa900SAdrien Mazarguil  * Driver initialization routine.
2266771fa900SAdrien Mazarguil  */
2267f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init)
2268771fa900SAdrien Mazarguil {
22693d96644aSStephen Hemminger 	/* Initialize driver log type. */
22703d96644aSStephen Hemminger 	mlx5_logtype = rte_log_register("pmd.net.mlx5");
22713d96644aSStephen Hemminger 	if (mlx5_logtype >= 0)
22723d96644aSStephen Hemminger 		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
22733d96644aSStephen Hemminger 
22745f8ba81cSXueming Li 	/* Build the static tables for Verbs conversion. */
2275ea16068cSYongseok Koh 	mlx5_set_ptype_table();
22765f8ba81cSXueming Li 	mlx5_set_cksum_table();
22775f8ba81cSXueming Li 	mlx5_set_swp_types_table();
2278771fa900SAdrien Mazarguil 	/*
2279771fa900SAdrien Mazarguil 	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
2280771fa900SAdrien Mazarguil 	 * huge pages. Calling ibv_fork_init() during init allows
2281771fa900SAdrien Mazarguil 	 * applications to use fork() safely for purposes other than
2282771fa900SAdrien Mazarguil 	 * using this PMD, which is not supported in forked processes.
2283771fa900SAdrien Mazarguil 	 */
2284771fa900SAdrien Mazarguil 	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
2285161b93e5SYongseok Koh 	/* Match the size of Rx completion entry to the size of a cacheline. */
2286161b93e5SYongseok Koh 	if (RTE_CACHE_LINE_SIZE == 128)
2287161b93e5SYongseok Koh 		setenv("MLX5_CQE_SIZE", "128", 0);
22881ff30d18SMatan Azrad 	/*
22891ff30d18SMatan Azrad 	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
22901ff30d18SMatan Azrad 	 * clean up all the Verbs resources even when the device was removed.
22911ff30d18SMatan Azrad 	 */
22921ff30d18SMatan Azrad 	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
229372b934adSThomas Monjalon #ifdef RTE_IBVERBS_LINK_DLOPEN
229459b91becSAdrien Mazarguil 	if (mlx5_glue_init())
229559b91becSAdrien Mazarguil 		return;
229659b91becSAdrien Mazarguil 	assert(mlx5_glue);
229759b91becSAdrien Mazarguil #endif
22982a3b0097SAdrien Mazarguil #ifndef NDEBUG
22992a3b0097SAdrien Mazarguil 	/* Glue structure must not contain any NULL pointers. */
23002a3b0097SAdrien Mazarguil 	{
23012a3b0097SAdrien Mazarguil 		unsigned int i;
23022a3b0097SAdrien Mazarguil 
23032a3b0097SAdrien Mazarguil 		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
23042a3b0097SAdrien Mazarguil 			assert(((const void *const *)mlx5_glue)[i]);
23052a3b0097SAdrien Mazarguil 	}
23062a3b0097SAdrien Mazarguil #endif
23076d5df2eaSAdrien Mazarguil 	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
2308a170a30dSNélio Laranjeiro 		DRV_LOG(ERR,
2309a170a30dSNélio Laranjeiro 			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
23106d5df2eaSAdrien Mazarguil 			mlx5_glue->version, MLX5_GLUE_VERSION);
23116d5df2eaSAdrien Mazarguil 		return;
23126d5df2eaSAdrien Mazarguil 	}
23130e83b8e5SNelio Laranjeiro 	mlx5_glue->fork_init();
23143dcfe039SThomas Monjalon 	rte_pci_register(&mlx5_driver);
2315771fa900SAdrien Mazarguil }
2316771fa900SAdrien Mazarguil 
231701f19227SShreyansh Jain RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
231801f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
23190880c401SOlivier Matz RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
2320