11137eceeSOphir Munk /* SPDX-License-Identifier: BSD-3-Clause 21137eceeSOphir Munk * Copyright 2020 Mellanox Technologies, Ltd 31137eceeSOphir Munk */ 41137eceeSOphir Munk 51137eceeSOphir Munk #include <errno.h> 61137eceeSOphir Munk #include <stdalign.h> 71137eceeSOphir Munk #include <stddef.h> 81137eceeSOphir Munk #include <stdint.h> 91137eceeSOphir Munk #include <stdlib.h> 101137eceeSOphir Munk 111137eceeSOphir Munk #include <rte_windows.h> 12df96fd0dSBruce Richardson #include <ethdev_pci.h> 131137eceeSOphir Munk 141137eceeSOphir Munk #include <mlx5_glue.h> 151137eceeSOphir Munk #include <mlx5_devx_cmds.h> 161137eceeSOphir Munk #include <mlx5_common.h> 1759f10207SOphir Munk #include <mlx5_common_mp.h> 1859f10207SOphir Munk #include <mlx5_common_mr.h> 1959f10207SOphir Munk #include <mlx5_malloc.h> 201137eceeSOphir Munk 211137eceeSOphir Munk #include "mlx5_defs.h" 221137eceeSOphir Munk #include "mlx5.h" 2359f10207SOphir Munk #include "mlx5_common_os.h" 2459f10207SOphir Munk #include "mlx5_utils.h" 2559f10207SOphir Munk #include "mlx5_rxtx.h" 26151cbe3aSMichael Baum #include "mlx5_rx.h" 27377b69fbSMichael Baum #include "mlx5_tx.h" 281137eceeSOphir Munk #include "mlx5_autoconf.h" 2959f10207SOphir Munk #include "mlx5_flow.h" 3093f4ece9SOphir Munk #include "mlx5_devx.h" 3193f4ece9SOphir Munk 32980826dcSTal Shnaiderman static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 33980826dcSTal Shnaiderman 34980826dcSTal Shnaiderman /* Spinlock for mlx5_shared_data allocation. */ 35980826dcSTal Shnaiderman static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 36980826dcSTal Shnaiderman 3789a4bcb1SSuanming Mou /* rte flow indexed pool configuration. */ 3889a4bcb1SSuanming Mou static struct mlx5_indexed_pool_config icfg[] = { 3989a4bcb1SSuanming Mou { 4089a4bcb1SSuanming Mou .size = sizeof(struct rte_flow), 4189a4bcb1SSuanming Mou .trunk_size = 64, 4289a4bcb1SSuanming Mou .need_lock = 1, 4389a4bcb1SSuanming Mou .release_mem_en = 0, 4489a4bcb1SSuanming Mou .malloc = mlx5_malloc, 4589a4bcb1SSuanming Mou .free = mlx5_free, 4689a4bcb1SSuanming Mou .per_core_cache = 0, 4789a4bcb1SSuanming Mou .type = "ctl_flow_ipool", 4889a4bcb1SSuanming Mou }, 4989a4bcb1SSuanming Mou { 5089a4bcb1SSuanming Mou .size = sizeof(struct rte_flow), 5189a4bcb1SSuanming Mou .trunk_size = 64, 5289a4bcb1SSuanming Mou .grow_trunk = 3, 5389a4bcb1SSuanming Mou .grow_shift = 2, 5489a4bcb1SSuanming Mou .need_lock = 1, 5589a4bcb1SSuanming Mou .release_mem_en = 0, 5689a4bcb1SSuanming Mou .malloc = mlx5_malloc, 5789a4bcb1SSuanming Mou .free = mlx5_free, 5889a4bcb1SSuanming Mou .per_core_cache = 1 << 14, 5989a4bcb1SSuanming Mou .type = "rte_flow_ipool", 6089a4bcb1SSuanming Mou }, 6189a4bcb1SSuanming Mou { 6289a4bcb1SSuanming Mou .size = sizeof(struct rte_flow), 6389a4bcb1SSuanming Mou .trunk_size = 64, 6489a4bcb1SSuanming Mou .grow_trunk = 3, 6589a4bcb1SSuanming Mou .grow_shift = 2, 6689a4bcb1SSuanming Mou .need_lock = 1, 6789a4bcb1SSuanming Mou .release_mem_en = 0, 6889a4bcb1SSuanming Mou .malloc = mlx5_malloc, 6989a4bcb1SSuanming Mou .free = mlx5_free, 7089a4bcb1SSuanming Mou .per_core_cache = 0, 7189a4bcb1SSuanming Mou .type = "mcp_flow_ipool", 7289a4bcb1SSuanming Mou }, 7389a4bcb1SSuanming Mou }; 7489a4bcb1SSuanming Mou 75e50fe91aSTal Shnaiderman static void 76e50fe91aSTal Shnaiderman mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev) 77e50fe91aSTal Shnaiderman { 78e50fe91aSTal Shnaiderman struct mlx5_priv *priv = dev->data->dev_private; 79e50fe91aSTal Shnaiderman void *ctx = priv->sh->cdev->ctx; 80e50fe91aSTal Shnaiderman 81e50fe91aSTal Shnaiderman priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx); 82e50fe91aSTal Shnaiderman if (!priv->q_counters) { 83e50fe91aSTal Shnaiderman DRV_LOG(ERR, "Port %d queue counter object cannot be created " 84e50fe91aSTal Shnaiderman "by DevX - imissed counter will be unavailable", 85e50fe91aSTal Shnaiderman dev->data->port_id); 86e50fe91aSTal Shnaiderman return; 87e50fe91aSTal Shnaiderman } 88e50fe91aSTal Shnaiderman priv->counter_set_id = priv->q_counters->id; 89e50fe91aSTal Shnaiderman } 90e50fe91aSTal Shnaiderman 91980826dcSTal Shnaiderman /** 92980826dcSTal Shnaiderman * Initialize shared data between primary and secondary process. 93980826dcSTal Shnaiderman * 94980826dcSTal Shnaiderman * A memzone is reserved by primary process and secondary processes attach to 95980826dcSTal Shnaiderman * the memzone. 96980826dcSTal Shnaiderman * 97980826dcSTal Shnaiderman * @return 98980826dcSTal Shnaiderman * 0 on success, a negative errno value otherwise and rte_errno is set. 99980826dcSTal Shnaiderman */ 100980826dcSTal Shnaiderman static int 101980826dcSTal Shnaiderman mlx5_init_shared_data(void) 102980826dcSTal Shnaiderman { 103980826dcSTal Shnaiderman const struct rte_memzone *mz; 104980826dcSTal Shnaiderman int ret = 0; 105980826dcSTal Shnaiderman 106980826dcSTal Shnaiderman rte_spinlock_lock(&mlx5_shared_data_lock); 107980826dcSTal Shnaiderman if (mlx5_shared_data == NULL) { 108980826dcSTal Shnaiderman /* Allocate shared memory. */ 109980826dcSTal Shnaiderman mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 110980826dcSTal Shnaiderman sizeof(*mlx5_shared_data), 111980826dcSTal Shnaiderman SOCKET_ID_ANY, 0); 112980826dcSTal Shnaiderman if (mz == NULL) { 113980826dcSTal Shnaiderman DRV_LOG(ERR, 114980826dcSTal Shnaiderman "Cannot allocate mlx5 shared data"); 115980826dcSTal Shnaiderman ret = -rte_errno; 116980826dcSTal Shnaiderman goto error; 117980826dcSTal Shnaiderman } 118980826dcSTal Shnaiderman mlx5_shared_data = mz->addr; 119980826dcSTal Shnaiderman memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 120980826dcSTal Shnaiderman rte_spinlock_init(&mlx5_shared_data->lock); 121980826dcSTal Shnaiderman } 122980826dcSTal Shnaiderman error: 123980826dcSTal Shnaiderman rte_spinlock_unlock(&mlx5_shared_data_lock); 124980826dcSTal Shnaiderman return ret; 125980826dcSTal Shnaiderman } 126980826dcSTal Shnaiderman 127980826dcSTal Shnaiderman /** 128980826dcSTal Shnaiderman * PMD global initialization. 129980826dcSTal Shnaiderman * 130980826dcSTal Shnaiderman * Independent from individual device, this function initializes global 131980826dcSTal Shnaiderman * per-PMD data structures distinguishing primary and secondary processes. 132980826dcSTal Shnaiderman * Hence, each initialization is called once per a process. 133980826dcSTal Shnaiderman * 134980826dcSTal Shnaiderman * @return 135980826dcSTal Shnaiderman * 0 on success, a negative errno value otherwise and rte_errno is set. 136980826dcSTal Shnaiderman */ 137980826dcSTal Shnaiderman static int 138980826dcSTal Shnaiderman mlx5_init_once(void) 139980826dcSTal Shnaiderman { 140980826dcSTal Shnaiderman if (mlx5_init_shared_data()) 141980826dcSTal Shnaiderman return -rte_errno; 142980826dcSTal Shnaiderman return 0; 143980826dcSTal Shnaiderman } 144980826dcSTal Shnaiderman 1451137eceeSOphir Munk /** 1461137eceeSOphir Munk * Get mlx5 device attributes. 1471137eceeSOphir Munk * 148fe46b20cSMichael Baum * @param cdev 149fe46b20cSMichael Baum * Pointer to mlx5 device. 1501137eceeSOphir Munk * 1511137eceeSOphir Munk * @param device_attr 1521137eceeSOphir Munk * Pointer to mlx5 device attributes. 1531137eceeSOphir Munk * 1541137eceeSOphir Munk * @return 155fe46b20cSMichael Baum * 0 on success, non zero error number otherwise. 1561137eceeSOphir Munk */ 1571137eceeSOphir Munk int 158fe46b20cSMichael Baum mlx5_os_get_dev_attr(struct mlx5_common_device *cdev, 159fe46b20cSMichael Baum struct mlx5_dev_attr *device_attr) 1601137eceeSOphir Munk { 1611137eceeSOphir Munk struct mlx5_context *mlx5_ctx; 1621137eceeSOphir Munk void *pv_iseg = NULL; 1631137eceeSOphir Munk u32 cb_iseg = 0; 1641137eceeSOphir Munk int err = 0; 1651137eceeSOphir Munk 166fe46b20cSMichael Baum if (!cdev || !cdev->ctx) 1671137eceeSOphir Munk return -EINVAL; 168fe46b20cSMichael Baum mlx5_ctx = (struct mlx5_context *)cdev->ctx; 1691137eceeSOphir Munk memset(device_attr, 0, sizeof(*device_attr)); 170fe46b20cSMichael Baum device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq; 171fe46b20cSMichael Baum device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp; 172fe46b20cSMichael Baum device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz; 173fe46b20cSMichael Baum device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz; 174fe46b20cSMichael Baum device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz; 175fe46b20cSMichael Baum device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd; 176fe46b20cSMichael Baum device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq; 177fe46b20cSMichael Baum device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz; 178fe46b20cSMichael Baum device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap; 179fe46b20cSMichael Baum if (cdev->config.hca_attr.rss_ind_tbl_cap) { 1801137eceeSOphir Munk device_attr->max_rwq_indirection_table_size = 181fe46b20cSMichael Baum 1 << cdev->config.hca_attr.rss_ind_tbl_cap; 1821137eceeSOphir Munk } 183d47fe9daSTal Shnaiderman device_attr->sw_parsing_offloads = 184fe46b20cSMichael Baum mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr); 1856a86ee2eSTal Shnaiderman device_attr->tunnel_offloads_caps = 186fe46b20cSMichael Baum mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr); 1871137eceeSOphir Munk pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg); 1881137eceeSOphir Munk if (pv_iseg == NULL) { 1891137eceeSOphir Munk DRV_LOG(ERR, "Failed to get device hca_iseg"); 1901137eceeSOphir Munk return errno; 1911137eceeSOphir Munk } 1921137eceeSOphir Munk if (!err) { 1931137eceeSOphir Munk snprintf(device_attr->fw_ver, 64, "%x.%x.%04x", 1941137eceeSOphir Munk MLX5_GET(initial_seg, pv_iseg, fw_rev_major), 1951137eceeSOphir Munk MLX5_GET(initial_seg, pv_iseg, fw_rev_minor), 1961137eceeSOphir Munk MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor)); 1971137eceeSOphir Munk } 1981137eceeSOphir Munk return err; 1991137eceeSOphir Munk } 20059f10207SOphir Munk 20159f10207SOphir Munk /** 20293f4ece9SOphir Munk * Initialize DR related data within private structure. 20393f4ece9SOphir Munk * Routine checks the reference counter and does actual 20493f4ece9SOphir Munk * resources creation/initialization only if counter is zero. 20593f4ece9SOphir Munk * 20693f4ece9SOphir Munk * @param[in] priv 20793f4ece9SOphir Munk * Pointer to the private device data structure. 20893f4ece9SOphir Munk * 20993f4ece9SOphir Munk * @return 21093f4ece9SOphir Munk * Zero on success, positive error code otherwise. 21193f4ece9SOphir Munk */ 21293f4ece9SOphir Munk static int 21393f4ece9SOphir Munk mlx5_alloc_shared_dr(struct mlx5_priv *priv) 21493f4ece9SOphir Munk { 21593f4ece9SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 21693f4ece9SOphir Munk int err = 0; 21793f4ece9SOphir Munk 21893f4ece9SOphir Munk if (!sh->flow_tbls) 21993f4ece9SOphir Munk err = mlx5_alloc_table_hash_list(priv); 22093f4ece9SOphir Munk else 2211b9e9826SThomas Monjalon DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse", 22293f4ece9SOphir Munk (void *)sh->flow_tbls); 22393f4ece9SOphir Munk return err; 22493f4ece9SOphir Munk } 22593f4ece9SOphir Munk /** 22693f4ece9SOphir Munk * Destroy DR related data within private structure. 22793f4ece9SOphir Munk * 22893f4ece9SOphir Munk * @param[in] priv 22993f4ece9SOphir Munk * Pointer to the private device data structure. 23093f4ece9SOphir Munk */ 23193f4ece9SOphir Munk void 23293f4ece9SOphir Munk mlx5_os_free_shared_dr(struct mlx5_priv *priv) 23393f4ece9SOphir Munk { 23493f4ece9SOphir Munk mlx5_free_table_hash_list(priv); 23593f4ece9SOphir Munk } 23693f4ece9SOphir Munk 23793f4ece9SOphir Munk /** 23859f10207SOphir Munk * Set the completion channel file descriptor interrupt as non-blocking. 23959f10207SOphir Munk * Currently it has no support under Windows. 24059f10207SOphir Munk * 24159f10207SOphir Munk * @param[in] rxq_obj 24259f10207SOphir Munk * Pointer to RQ channel object, which includes the channel fd 24359f10207SOphir Munk * 24459f10207SOphir Munk * @param[out] fd 2457be78d02SJosh Soref * The file descriptor (representing the interrupt) used in this channel. 24659f10207SOphir Munk * 24759f10207SOphir Munk * @return 24859f10207SOphir Munk * 0 on successfully setting the fd to non-blocking, non-zero otherwise. 24959f10207SOphir Munk */ 25059f10207SOphir Munk int 25159f10207SOphir Munk mlx5_os_set_nonblock_channel_fd(int fd) 25259f10207SOphir Munk { 25359f10207SOphir Munk (void)fd; 25459f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 25559f10207SOphir Munk return -ENOTSUP; 25659f10207SOphir Munk } 25759f10207SOphir Munk 25859f10207SOphir Munk /** 25993f4ece9SOphir Munk * DV flow counter mode detect and config. 26093f4ece9SOphir Munk * 26193f4ece9SOphir Munk * @param dev 26293f4ece9SOphir Munk * Pointer to rte_eth_dev structure. 26393f4ece9SOphir Munk * 26493f4ece9SOphir Munk */ 26593f4ece9SOphir Munk static void 26693f4ece9SOphir Munk mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused) 26793f4ece9SOphir Munk { 26893f4ece9SOphir Munk #ifdef HAVE_IBV_FLOW_DV_SUPPORT 26993f4ece9SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 27093f4ece9SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 27193f4ece9SOphir Munk bool fallback; 27293f4ece9SOphir Munk 27393f4ece9SOphir Munk #ifndef HAVE_IBV_DEVX_ASYNC 27493f4ece9SOphir Munk fallback = true; 27593f4ece9SOphir Munk #else 27693f4ece9SOphir Munk fallback = false; 2775bc38358SMichael Baum if (!sh->devx || !priv->config.dv_flow_en || 27893f4ece9SOphir Munk !priv->config.hca_attr.flow_counters_dump || 27993f4ece9SOphir Munk !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) || 28093f4ece9SOphir Munk (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP)) 28193f4ece9SOphir Munk fallback = true; 28293f4ece9SOphir Munk #endif 28393f4ece9SOphir Munk if (fallback) 28493f4ece9SOphir Munk DRV_LOG(INFO, "Use fall-back DV counter management. Flow " 28593f4ece9SOphir Munk "counter dump:%d, bulk_alloc_bitmap:0x%hhx.", 28693f4ece9SOphir Munk priv->config.hca_attr.flow_counters_dump, 28793f4ece9SOphir Munk priv->config.hca_attr.flow_counter_bulk_alloc_bitmap); 28893f4ece9SOphir Munk /* Initialize fallback mode only on the port initializes sh. */ 28993f4ece9SOphir Munk if (sh->refcnt == 1) 29093f4ece9SOphir Munk sh->cmng.counter_fallback = fallback; 29193f4ece9SOphir Munk else if (fallback != sh->cmng.counter_fallback) 29293f4ece9SOphir Munk DRV_LOG(WARNING, "Port %d in sh has different fallback mode " 29393f4ece9SOphir Munk "with others:%d.", PORT_ID(priv), fallback); 29493f4ece9SOphir Munk #endif 29593f4ece9SOphir Munk } 29693f4ece9SOphir Munk 29793f4ece9SOphir Munk /** 2987af08c8fSMichael Baum * Spawn an Ethernet device from DevX information. 299980826dcSTal Shnaiderman * 300980826dcSTal Shnaiderman * @param dpdk_dev 301980826dcSTal Shnaiderman * Backing DPDK device. 302980826dcSTal Shnaiderman * @param spawn 303980826dcSTal Shnaiderman * Verbs device parameters (name, port, switch_info) to spawn. 304980826dcSTal Shnaiderman * @param config 305980826dcSTal Shnaiderman * Device configuration parameters. 306980826dcSTal Shnaiderman * 307980826dcSTal Shnaiderman * @return 30893f4ece9SOphir Munk * A valid Ethernet device object on success, NULL otherwise and rte_errno 30993f4ece9SOphir Munk * is set. The following errors are defined: 31093f4ece9SOphir Munk * 31193f4ece9SOphir Munk * EEXIST: device is already spawned 312980826dcSTal Shnaiderman */ 313980826dcSTal Shnaiderman static struct rte_eth_dev * 314980826dcSTal Shnaiderman mlx5_dev_spawn(struct rte_device *dpdk_dev, 315980826dcSTal Shnaiderman struct mlx5_dev_spawn_data *spawn, 316980826dcSTal Shnaiderman struct mlx5_dev_config *config) 317980826dcSTal Shnaiderman { 31893f4ece9SOphir Munk const struct mlx5_switch_info *switch_info = &spawn->info; 31993f4ece9SOphir Munk struct mlx5_dev_ctx_shared *sh = NULL; 32093f4ece9SOphir Munk struct mlx5_dev_attr device_attr; 32193f4ece9SOphir Munk struct rte_eth_dev *eth_dev = NULL; 32293f4ece9SOphir Munk struct mlx5_priv *priv = NULL; 32393f4ece9SOphir Munk int err = 0; 32493f4ece9SOphir Munk unsigned int cqe_comp; 32593f4ece9SOphir Munk struct rte_ether_addr mac; 32693f4ece9SOphir Munk char name[RTE_ETH_NAME_MAX_LEN]; 32793f4ece9SOphir Munk int own_domain_id = 0; 32893f4ece9SOphir Munk uint16_t port_id; 32989a4bcb1SSuanming Mou int i; 33093f4ece9SOphir Munk 33193f4ece9SOphir Munk /* Build device name. */ 33293f4ece9SOphir Munk strlcpy(name, dpdk_dev->name, sizeof(name)); 33393f4ece9SOphir Munk /* check if the device is already spawned */ 33493f4ece9SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 33593f4ece9SOphir Munk rte_errno = EEXIST; 33693f4ece9SOphir Munk return NULL; 33793f4ece9SOphir Munk } 33893f4ece9SOphir Munk DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 33993f4ece9SOphir Munk /* 34093f4ece9SOphir Munk * Some parameters are needed in advance to create device context. We 34193f4ece9SOphir Munk * process the devargs here to get ones, and later process devargs 34293f4ece9SOphir Munk * again to override some hardware settings. 34393f4ece9SOphir Munk */ 34493f4ece9SOphir Munk err = mlx5_args(config, dpdk_dev->devargs); 34593f4ece9SOphir Munk if (err) { 34693f4ece9SOphir Munk err = rte_errno; 34793f4ece9SOphir Munk DRV_LOG(ERR, "failed to process device arguments: %s", 34893f4ece9SOphir Munk strerror(rte_errno)); 34993f4ece9SOphir Munk goto error; 35093f4ece9SOphir Munk } 35193f4ece9SOphir Munk sh = mlx5_alloc_shared_dev_ctx(spawn, config); 35293f4ece9SOphir Munk if (!sh) 35393f4ece9SOphir Munk return NULL; 35493f4ece9SOphir Munk /* Initialize the shutdown event in mlx5_dev_spawn to 35593f4ece9SOphir Munk * support mlx5_is_removed for Windows. 35693f4ece9SOphir Munk */ 357ca1418ceSMichael Baum err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx); 35893f4ece9SOphir Munk if (err) { 35993f4ece9SOphir Munk DRV_LOG(ERR, "failed to init showdown event: %s", 36093f4ece9SOphir Munk strerror(errno)); 36193f4ece9SOphir Munk goto error; 36293f4ece9SOphir Munk } 36393f4ece9SOphir Munk DRV_LOG(DEBUG, "MPW isn't supported"); 364fe46b20cSMichael Baum mlx5_os_get_dev_attr(sh->cdev, &device_attr); 365d47fe9daSTal Shnaiderman config->swp = device_attr.sw_parsing_offloads & 366d47fe9daSTal Shnaiderman (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP | 367d47fe9daSTal Shnaiderman MLX5_SW_PARSING_TSO_CAP); 36893f4ece9SOphir Munk config->ind_table_max_size = 36993f4ece9SOphir Munk sh->device_attr.max_rwq_indirection_table_size; 37093f4ece9SOphir Munk cqe_comp = 0; 37193f4ece9SOphir Munk config->cqe_comp = cqe_comp; 3726a86ee2eSTal Shnaiderman config->tunnel_en = device_attr.tunnel_offloads_caps & 3736a86ee2eSTal Shnaiderman (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP | 3746a86ee2eSTal Shnaiderman MLX5_TUNNELED_OFFLOADS_GRE_CAP | 3756a86ee2eSTal Shnaiderman MLX5_TUNNELED_OFFLOADS_GENEVE_CAP); 3766a86ee2eSTal Shnaiderman if (config->tunnel_en) { 3776a86ee2eSTal Shnaiderman DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s", 3786a86ee2eSTal Shnaiderman config->tunnel_en & 3796a86ee2eSTal Shnaiderman MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "", 3806a86ee2eSTal Shnaiderman config->tunnel_en & 3816a86ee2eSTal Shnaiderman MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "", 3826a86ee2eSTal Shnaiderman config->tunnel_en & 3836a86ee2eSTal Shnaiderman MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "" 3846a86ee2eSTal Shnaiderman ); 3856a86ee2eSTal Shnaiderman } else { 38693f4ece9SOphir Munk DRV_LOG(DEBUG, "tunnel offloading is not supported"); 3876a86ee2eSTal Shnaiderman } 38893f4ece9SOphir Munk DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported"); 38993f4ece9SOphir Munk config->mpls_en = 0; 39093f4ece9SOphir Munk /* Allocate private eth device data. */ 39193f4ece9SOphir Munk priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, 39293f4ece9SOphir Munk sizeof(*priv), 39393f4ece9SOphir Munk RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 39493f4ece9SOphir Munk if (priv == NULL) { 39593f4ece9SOphir Munk DRV_LOG(ERR, "priv allocation failure"); 39693f4ece9SOphir Munk err = ENOMEM; 39793f4ece9SOphir Munk goto error; 39893f4ece9SOphir Munk } 39993f4ece9SOphir Munk priv->sh = sh; 40093f4ece9SOphir Munk priv->dev_port = spawn->phys_port; 40193f4ece9SOphir Munk priv->pci_dev = spawn->pci_dev; 40293f4ece9SOphir Munk priv->mtu = RTE_ETHER_MTU; 40393f4ece9SOphir Munk priv->mp_id.port_id = port_id; 40493f4ece9SOphir Munk strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 40593f4ece9SOphir Munk priv->representor = !!switch_info->representor; 40693f4ece9SOphir Munk priv->master = !!switch_info->master; 40793f4ece9SOphir Munk priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 40893f4ece9SOphir Munk priv->vport_meta_tag = 0; 40993f4ece9SOphir Munk priv->vport_meta_mask = 0; 41093f4ece9SOphir Munk priv->pf_bond = spawn->pf_bond; 41193f4ece9SOphir Munk priv->vport_id = -1; 41293f4ece9SOphir Munk /* representor_id field keeps the unmodified VF index. */ 41393f4ece9SOphir Munk priv->representor_id = -1; 41493f4ece9SOphir Munk /* 41593f4ece9SOphir Munk * Look for sibling devices in order to reuse their switch domain 41693f4ece9SOphir Munk * if any, otherwise allocate one. 41793f4ece9SOphir Munk */ 418e9d420dfSGregory Etelson MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { 41993f4ece9SOphir Munk const struct mlx5_priv *opriv = 42093f4ece9SOphir Munk rte_eth_devices[port_id].data->dev_private; 42193f4ece9SOphir Munk 42293f4ece9SOphir Munk if (!opriv || 42393f4ece9SOphir Munk opriv->sh != priv->sh || 42493f4ece9SOphir Munk opriv->domain_id == 42593f4ece9SOphir Munk RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 42693f4ece9SOphir Munk continue; 42793f4ece9SOphir Munk priv->domain_id = opriv->domain_id; 42893f4ece9SOphir Munk break; 42993f4ece9SOphir Munk } 43093f4ece9SOphir Munk if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 43193f4ece9SOphir Munk err = rte_eth_switch_domain_alloc(&priv->domain_id); 43293f4ece9SOphir Munk if (err) { 43393f4ece9SOphir Munk err = rte_errno; 43493f4ece9SOphir Munk DRV_LOG(ERR, "unable to allocate switch domain: %s", 43593f4ece9SOphir Munk strerror(rte_errno)); 43693f4ece9SOphir Munk goto error; 43793f4ece9SOphir Munk } 43893f4ece9SOphir Munk own_domain_id = 1; 43993f4ece9SOphir Munk } 44093f4ece9SOphir Munk /* Override some values set by hardware configuration. */ 44193f4ece9SOphir Munk mlx5_args(config, dpdk_dev->devargs); 4428f6c921bSGregory Etelson err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev); 44393f4ece9SOphir Munk if (err) 44493f4ece9SOphir Munk goto error; 44593f4ece9SOphir Munk DRV_LOG(DEBUG, "counters are not supported"); 44693f4ece9SOphir Munk config->ind_table_max_size = 44793f4ece9SOphir Munk sh->device_attr.max_rwq_indirection_table_size; 44893f4ece9SOphir Munk /* 44993f4ece9SOphir Munk * Remove this check once DPDK supports larger/variable 45093f4ece9SOphir Munk * indirection tables. 45193f4ece9SOphir Munk */ 452295968d1SFerruh Yigit if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512) 453295968d1SFerruh Yigit config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512; 45493f4ece9SOphir Munk DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 45593f4ece9SOphir Munk config->ind_table_max_size); 45693f4ece9SOphir Munk if (config->hw_padding) { 45793f4ece9SOphir Munk DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 45893f4ece9SOphir Munk config->hw_padding = 0; 45993f4ece9SOphir Munk } 460738da9a8STal Shnaiderman config->tso = (sh->device_attr.max_tso > 0); 46193f4ece9SOphir Munk if (config->tso) 46293f4ece9SOphir Munk config->tso_max_payload_sz = sh->device_attr.max_tso; 46393f4ece9SOphir Munk DRV_LOG(DEBUG, "%sMPS is %s.", 46493f4ece9SOphir Munk config->mps == MLX5_MPW_ENHANCED ? "enhanced " : 46593f4ece9SOphir Munk config->mps == MLX5_MPW ? "legacy " : "", 46693f4ece9SOphir Munk config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 46793f4ece9SOphir Munk if (config->cqe_comp && !cqe_comp) { 46893f4ece9SOphir Munk DRV_LOG(WARNING, "Rx CQE compression isn't supported."); 46993f4ece9SOphir Munk config->cqe_comp = 0; 47093f4ece9SOphir Munk } 4715bc38358SMichael Baum if (sh->devx) { 472fe46b20cSMichael Baum config->hca_attr = sh->cdev->config.hca_attr; 473db16bbfbSTal Shnaiderman config->hw_csum = config->hca_attr.csum_cap; 474db16bbfbSTal Shnaiderman DRV_LOG(DEBUG, "checksum offloading is %ssupported", 475db16bbfbSTal Shnaiderman (config->hw_csum ? "" : "not ")); 4766061cc41STal Shnaiderman config->hw_vlan_strip = config->hca_attr.vlan_cap; 4776061cc41STal Shnaiderman DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 4786061cc41STal Shnaiderman (config->hw_vlan_strip ? "" : "not ")); 479c8834a36STal Shnaiderman config->hw_fcs_strip = config->hca_attr.scatter_fcs; 48093f4ece9SOphir Munk } 4815bc38358SMichael Baum if (sh->devx) { 48293f4ece9SOphir Munk uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)]; 48393f4ece9SOphir Munk 48493f4ece9SOphir Munk err = config->hca_attr.access_register_user ? 48593f4ece9SOphir Munk mlx5_devx_cmd_register_read 486ca1418ceSMichael Baum (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0, 48793f4ece9SOphir Munk reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP; 48893f4ece9SOphir Munk if (!err) { 48993f4ece9SOphir Munk uint32_t ts_mode; 49093f4ece9SOphir Munk 49193f4ece9SOphir Munk /* MTUTC register is read successfully. */ 49293f4ece9SOphir Munk ts_mode = MLX5_GET(register_mtutc, reg, 49393f4ece9SOphir Munk time_stamp_mode); 49493f4ece9SOphir Munk if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) 49593f4ece9SOphir Munk config->rt_timestamp = 1; 49693f4ece9SOphir Munk } else { 49793f4ece9SOphir Munk /* Kernel does not support register reading. */ 49893f4ece9SOphir Munk if (config->hca_attr.dev_freq_khz == 49993f4ece9SOphir Munk (NS_PER_S / MS_PER_S)) 50093f4ece9SOphir Munk config->rt_timestamp = 1; 50193f4ece9SOphir Munk } 50293f4ece9SOphir Munk } 50393f4ece9SOphir Munk if (config->mprq.enabled) { 50493f4ece9SOphir Munk DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 50593f4ece9SOphir Munk config->mprq.enabled = 0; 50693f4ece9SOphir Munk } 50793f4ece9SOphir Munk if (config->max_dump_files_num == 0) 50893f4ece9SOphir Munk config->max_dump_files_num = 128; 50993f4ece9SOphir Munk eth_dev = rte_eth_dev_allocate(name); 51093f4ece9SOphir Munk if (eth_dev == NULL) { 51193f4ece9SOphir Munk DRV_LOG(ERR, "can not allocate rte ethdev"); 51293f4ece9SOphir Munk err = ENOMEM; 51393f4ece9SOphir Munk goto error; 51493f4ece9SOphir Munk } 51593f4ece9SOphir Munk if (priv->representor) { 51693f4ece9SOphir Munk eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 51793f4ece9SOphir Munk eth_dev->data->representor_id = priv->representor_id; 518ff4e52efSViacheslav Galaktionov MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { 519ff4e52efSViacheslav Galaktionov struct mlx5_priv *opriv = 520ff4e52efSViacheslav Galaktionov rte_eth_devices[port_id].data->dev_private; 521ff4e52efSViacheslav Galaktionov if (opriv && 522ff4e52efSViacheslav Galaktionov opriv->master && 523ff4e52efSViacheslav Galaktionov opriv->domain_id == priv->domain_id && 524ff4e52efSViacheslav Galaktionov opriv->sh == priv->sh) { 525ff4e52efSViacheslav Galaktionov eth_dev->data->backer_port_id = port_id; 526ff4e52efSViacheslav Galaktionov break; 527ff4e52efSViacheslav Galaktionov } 528ff4e52efSViacheslav Galaktionov } 529ff4e52efSViacheslav Galaktionov if (port_id >= RTE_MAX_ETHPORTS) 530ff4e52efSViacheslav Galaktionov eth_dev->data->backer_port_id = eth_dev->data->port_id; 53193f4ece9SOphir Munk } 53293f4ece9SOphir Munk /* 53393f4ece9SOphir Munk * Store associated network device interface index. This index 53493f4ece9SOphir Munk * is permanent throughout the lifetime of device. So, we may store 53593f4ece9SOphir Munk * the ifindex here and use the cached value further. 53693f4ece9SOphir Munk */ 53793f4ece9SOphir Munk MLX5_ASSERT(spawn->ifindex); 53893f4ece9SOphir Munk priv->if_index = spawn->ifindex; 53993f4ece9SOphir Munk eth_dev->data->dev_private = priv; 54093f4ece9SOphir Munk priv->dev_data = eth_dev->data; 54193f4ece9SOphir Munk eth_dev->data->mac_addrs = priv->mac; 54293f4ece9SOphir Munk eth_dev->device = dpdk_dev; 54393f4ece9SOphir Munk eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 54493f4ece9SOphir Munk /* Configure the first MAC address by default. */ 54593f4ece9SOphir Munk if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 54693f4ece9SOphir Munk DRV_LOG(ERR, 54793f4ece9SOphir Munk "port %u cannot get MAC address, is mlx5_en" 54893f4ece9SOphir Munk " loaded? (errno: %s).", 54993f4ece9SOphir Munk eth_dev->data->port_id, strerror(rte_errno)); 55093f4ece9SOphir Munk err = ENODEV; 55193f4ece9SOphir Munk goto error; 55293f4ece9SOphir Munk } 55393f4ece9SOphir Munk DRV_LOG(INFO, 554c2c4f87bSAman Deep Singh "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT, 555a7db3afcSAman Deep Singh eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac)); 55693f4ece9SOphir Munk #ifdef RTE_LIBRTE_MLX5_DEBUG 55793f4ece9SOphir Munk { 55828743807STal Shnaiderman char ifname[MLX5_NAMESIZE]; 55993f4ece9SOphir Munk 56093f4ece9SOphir Munk if (mlx5_get_ifname(eth_dev, &ifname) == 0) 56193f4ece9SOphir Munk DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 56293f4ece9SOphir Munk eth_dev->data->port_id, ifname); 56393f4ece9SOphir Munk else 56493f4ece9SOphir Munk DRV_LOG(DEBUG, "port %u ifname is unknown.", 56593f4ece9SOphir Munk eth_dev->data->port_id); 56693f4ece9SOphir Munk } 56793f4ece9SOphir Munk #endif 56893f4ece9SOphir Munk /* Get actual MTU if possible. */ 56993f4ece9SOphir Munk err = mlx5_get_mtu(eth_dev, &priv->mtu); 57093f4ece9SOphir Munk if (err) { 57193f4ece9SOphir Munk err = rte_errno; 57293f4ece9SOphir Munk goto error; 57393f4ece9SOphir Munk } 57493f4ece9SOphir Munk DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id, 57593f4ece9SOphir Munk priv->mtu); 57693f4ece9SOphir Munk /* Initialize burst functions to prevent crashes before link-up. */ 577*a41f593fSFerruh Yigit eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 578*a41f593fSFerruh Yigit eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 579b012b4ceSOphir Munk eth_dev->dev_ops = &mlx5_dev_ops; 58093f4ece9SOphir Munk eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 58193f4ece9SOphir Munk eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 58293f4ece9SOphir Munk eth_dev->rx_queue_count = mlx5_rx_queue_count; 58393f4ece9SOphir Munk /* Register MAC address. */ 58493f4ece9SOphir Munk claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 58593f4ece9SOphir Munk priv->ctrl_flows = 0; 58693f4ece9SOphir Munk TAILQ_INIT(&priv->flow_meters); 587a295c69aSShun Hao priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); 588a295c69aSShun Hao if (!priv->mtr_profile_tbl) 589a295c69aSShun Hao goto error; 59093f4ece9SOphir Munk /* Bring Ethernet device up. */ 59193f4ece9SOphir Munk DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.", 59293f4ece9SOphir Munk eth_dev->data->port_id); 59393f4ece9SOphir Munk /* nl calls are unsupported - set to -1 not to fail on release */ 59493f4ece9SOphir Munk priv->nl_socket_rdma = -1; 59593f4ece9SOphir Munk priv->nl_socket_route = -1; 59693f4ece9SOphir Munk mlx5_set_link_up(eth_dev); 59793f4ece9SOphir Munk /* 59893f4ece9SOphir Munk * Even though the interrupt handler is not installed yet, 59993f4ece9SOphir Munk * interrupts will still trigger on the async_fd from 60093f4ece9SOphir Munk * Verbs context returned by ibv_open_device(). 60193f4ece9SOphir Munk */ 60293f4ece9SOphir Munk mlx5_link_update(eth_dev, 0); 60393f4ece9SOphir Munk config->dv_esw_en = 0; 60493f4ece9SOphir Munk /* Detect minimal data bytes to inline. */ 60593f4ece9SOphir Munk mlx5_set_min_inline(spawn, config); 60693f4ece9SOphir Munk /* Store device configuration on private structure. */ 60793f4ece9SOphir Munk priv->config = *config; 60889a4bcb1SSuanming Mou for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { 60989a4bcb1SSuanming Mou icfg[i].release_mem_en = !!config->reclaim_mode; 61089a4bcb1SSuanming Mou if (config->reclaim_mode) 61189a4bcb1SSuanming Mou icfg[i].per_core_cache = 0; 61289a4bcb1SSuanming Mou priv->flows[i] = mlx5_ipool_create(&icfg[i]); 61389a4bcb1SSuanming Mou if (!priv->flows[i]) 61489a4bcb1SSuanming Mou goto error; 61589a4bcb1SSuanming Mou } 61693f4ece9SOphir Munk /* Create context for virtual machine VLAN workaround. */ 61793f4ece9SOphir Munk priv->vmwa_context = NULL; 61893f4ece9SOphir Munk if (config->dv_flow_en) { 61993f4ece9SOphir Munk err = mlx5_alloc_shared_dr(priv); 62093f4ece9SOphir Munk if (err) 62193f4ece9SOphir Munk goto error; 62293f4ece9SOphir Munk } 62393f4ece9SOphir Munk /* No supported flow priority number detection. */ 6243c4338a4SJiawei Wang priv->sh->flow_max_priority = -1; 62593f4ece9SOphir Munk if (!priv->config.dv_esw_en && 62693f4ece9SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 62793f4ece9SOphir Munk DRV_LOG(WARNING, "metadata mode %u is not supported " 62893f4ece9SOphir Munk "(no E-Switch)", priv->config.dv_xmeta_en); 62993f4ece9SOphir Munk priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; 63093f4ece9SOphir Munk } 63193f4ece9SOphir Munk mlx5_set_metadata_mask(eth_dev); 63293f4ece9SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 63393f4ece9SOphir Munk !priv->sh->dv_regc0_mask) { 63493f4ece9SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 63593f4ece9SOphir Munk "(no metadata reg_c[0] is available).", 63693f4ece9SOphir Munk priv->config.dv_xmeta_en); 63793f4ece9SOphir Munk err = ENOTSUP; 63893f4ece9SOphir Munk goto error; 63993f4ece9SOphir Munk } 640d03b7860SSuanming Mou priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true, 641e78e5408SMatan Azrad mlx5_hrxq_create_cb, mlx5_hrxq_match_cb, 642491b7137SMatan Azrad mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb, 643491b7137SMatan Azrad mlx5_hrxq_clone_free_cb); 64493f4ece9SOphir Munk /* Query availability of metadata reg_c's. */ 6453c4338a4SJiawei Wang if (!priv->sh->metadata_regc_check_flag) { 64693f4ece9SOphir Munk err = mlx5_flow_discover_mreg_c(eth_dev); 64793f4ece9SOphir Munk if (err < 0) { 64893f4ece9SOphir Munk err = -err; 64993f4ece9SOphir Munk goto error; 65093f4ece9SOphir Munk } 6513c4338a4SJiawei Wang } 65293f4ece9SOphir Munk if (!mlx5_flow_ext_mreg_supported(eth_dev)) { 65393f4ece9SOphir Munk DRV_LOG(DEBUG, 65493f4ece9SOphir Munk "port %u extensive metadata register is not supported.", 65593f4ece9SOphir Munk eth_dev->data->port_id); 65693f4ece9SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 65793f4ece9SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 65893f4ece9SOphir Munk "(no metadata registers available).", 65993f4ece9SOphir Munk priv->config.dv_xmeta_en); 66093f4ece9SOphir Munk err = ENOTSUP; 66193f4ece9SOphir Munk goto error; 66293f4ece9SOphir Munk } 66393f4ece9SOphir Munk } 6645bc38358SMichael Baum if (sh->devx && config->dv_flow_en) { 66593f4ece9SOphir Munk priv->obj_ops = devx_obj_ops; 66693f4ece9SOphir Munk } else { 66793f4ece9SOphir Munk DRV_LOG(ERR, "Flow mode %u is not supported " 66893f4ece9SOphir Munk "(Windows flow must be DevX with DV flow enabled).", 66993f4ece9SOphir Munk priv->config.dv_flow_en); 67093f4ece9SOphir Munk err = ENOTSUP; 67193f4ece9SOphir Munk goto error; 67293f4ece9SOphir Munk } 67393f4ece9SOphir Munk mlx5_flow_counter_mode_config(eth_dev); 674e50fe91aSTal Shnaiderman mlx5_queue_counter_id_prepare(eth_dev); 67593f4ece9SOphir Munk return eth_dev; 67693f4ece9SOphir Munk error: 67793f4ece9SOphir Munk if (priv) { 678a295c69aSShun Hao if (priv->mtr_profile_tbl) 679a295c69aSShun Hao mlx5_l3t_destroy(priv->mtr_profile_tbl); 68093f4ece9SOphir Munk if (own_domain_id) 68193f4ece9SOphir Munk claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 68293f4ece9SOphir Munk mlx5_free(priv); 68393f4ece9SOphir Munk if (eth_dev != NULL) 68493f4ece9SOphir Munk eth_dev->data->dev_private = NULL; 68593f4ece9SOphir Munk } 68693f4ece9SOphir Munk if (eth_dev != NULL) { 68793f4ece9SOphir Munk /* mac_addrs must not be freed alone because part of 68893f4ece9SOphir Munk * dev_private 68993f4ece9SOphir Munk **/ 69093f4ece9SOphir Munk eth_dev->data->mac_addrs = NULL; 69193f4ece9SOphir Munk rte_eth_dev_release_port(eth_dev); 69293f4ece9SOphir Munk } 69393f4ece9SOphir Munk if (sh) 69493f4ece9SOphir Munk mlx5_free_shared_dev_ctx(sh); 69593f4ece9SOphir Munk MLX5_ASSERT(err > 0); 69693f4ece9SOphir Munk rte_errno = err; 697980826dcSTal Shnaiderman return NULL; 698980826dcSTal Shnaiderman } 699980826dcSTal Shnaiderman 700980826dcSTal Shnaiderman /** 70159f10207SOphir Munk * This function should share events between multiple ports of single IB 70259f10207SOphir Munk * device. Currently it has no support under Windows. 70359f10207SOphir Munk * 70459f10207SOphir Munk * @param sh 70559f10207SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 70659f10207SOphir Munk */ 70759f10207SOphir Munk void 70859f10207SOphir Munk mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 70959f10207SOphir Munk { 71059f10207SOphir Munk (void)sh; 71159f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 71259f10207SOphir Munk } 71359f10207SOphir Munk 71459f10207SOphir Munk /** 71559f10207SOphir Munk * This function should share events between multiple ports of single IB 71659f10207SOphir Munk * device. Currently it has no support under Windows. 71759f10207SOphir Munk * 71859f10207SOphir Munk * @param dev 71959f10207SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 72059f10207SOphir Munk */ 72159f10207SOphir Munk void 72259f10207SOphir Munk mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) 72359f10207SOphir Munk { 72459f10207SOphir Munk (void)sh; 72559f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 72659f10207SOphir Munk } 72759f10207SOphir Munk 72859f10207SOphir Munk /** 72959f10207SOphir Munk * Read statistics by a named counter. 73059f10207SOphir Munk * 73159f10207SOphir Munk * @param[in] priv 73259f10207SOphir Munk * Pointer to the private device data structure. 73359f10207SOphir Munk * @param[in] ctr_name 73459f10207SOphir Munk * Pointer to the name of the statistic counter to read 73559f10207SOphir Munk * @param[out] stat 73659f10207SOphir Munk * Pointer to read statistic value. 73759f10207SOphir Munk * @return 738e50fe91aSTal Shnaiderman * 0 on success and stat is valid, non-zero if failed to read the value 739e50fe91aSTal Shnaiderman * or counter is not supported. 74059f10207SOphir Munk * rte_errno is set. 74159f10207SOphir Munk * 74259f10207SOphir Munk */ 74359f10207SOphir Munk int 74459f10207SOphir Munk mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, 74559f10207SOphir Munk uint64_t *stat) 74659f10207SOphir Munk { 747e50fe91aSTal Shnaiderman if (priv->q_counters != NULL && strcmp(ctr_name, "out_of_buffer") == 0) 748e50fe91aSTal Shnaiderman return mlx5_devx_cmd_queue_counter_query 749e50fe91aSTal Shnaiderman (priv->q_counters, 0, (uint32_t *)stat); 750e50fe91aSTal Shnaiderman DRV_LOG(WARNING, "%s: is not supported for the %s counter", 751e50fe91aSTal Shnaiderman __func__, ctr_name); 75259f10207SOphir Munk return -ENOTSUP; 75359f10207SOphir Munk } 75459f10207SOphir Munk 75559f10207SOphir Munk /** 75659f10207SOphir Munk * Flush device MAC addresses 75759f10207SOphir Munk * Currently it has no support under Windows. 75859f10207SOphir Munk * 75959f10207SOphir Munk * @param dev 76059f10207SOphir Munk * Pointer to Ethernet device structure. 76159f10207SOphir Munk * 76259f10207SOphir Munk */ 76359f10207SOphir Munk void 76459f10207SOphir Munk mlx5_os_mac_addr_flush(struct rte_eth_dev *dev) 76559f10207SOphir Munk { 76659f10207SOphir Munk (void)dev; 76759f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 76859f10207SOphir Munk } 76959f10207SOphir Munk 77059f10207SOphir Munk /** 77159f10207SOphir Munk * Remove a MAC address from device 77259f10207SOphir Munk * Currently it has no support under Windows. 77359f10207SOphir Munk * 77459f10207SOphir Munk * @param dev 77559f10207SOphir Munk * Pointer to Ethernet device structure. 77659f10207SOphir Munk * @param index 77759f10207SOphir Munk * MAC address index. 77859f10207SOphir Munk */ 77959f10207SOphir Munk void 78059f10207SOphir Munk mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 78159f10207SOphir Munk { 78259f10207SOphir Munk (void)dev; 78359f10207SOphir Munk (void)(index); 78459f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 78559f10207SOphir Munk } 78659f10207SOphir Munk 78759f10207SOphir Munk /** 788d36bb662STal Shnaiderman * Adds a MAC address to the device 789d36bb662STal Shnaiderman * Currently it has no support under Windows. 790d36bb662STal Shnaiderman * 791d36bb662STal Shnaiderman * @param dev 792d36bb662STal Shnaiderman * Pointer to Ethernet device structure. 793d36bb662STal Shnaiderman * @param mac_addr 794d36bb662STal Shnaiderman * MAC address to register. 795d36bb662STal Shnaiderman * @param index 796d36bb662STal Shnaiderman * MAC address index. 797d36bb662STal Shnaiderman * 798d36bb662STal Shnaiderman * @return 799d36bb662STal Shnaiderman * 0 on success, a negative errno value otherwise 800d36bb662STal Shnaiderman */ 801d36bb662STal Shnaiderman int 802d36bb662STal Shnaiderman mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, 803d36bb662STal Shnaiderman uint32_t index) 804d36bb662STal Shnaiderman { 805d36bb662STal Shnaiderman (void)index; 806d36bb662STal Shnaiderman struct rte_ether_addr lmac; 807d36bb662STal Shnaiderman 808d36bb662STal Shnaiderman if (mlx5_get_mac(dev, &lmac.addr_bytes)) { 809d36bb662STal Shnaiderman DRV_LOG(ERR, 810d36bb662STal Shnaiderman "port %u cannot get MAC address, is mlx5_en" 811d36bb662STal Shnaiderman " loaded? (errno: %s)", 812d36bb662STal Shnaiderman dev->data->port_id, strerror(rte_errno)); 813d36bb662STal Shnaiderman return rte_errno; 814d36bb662STal Shnaiderman } 815d36bb662STal Shnaiderman if (!rte_is_same_ether_addr(&lmac, mac)) { 816d36bb662STal Shnaiderman DRV_LOG(ERR, 817d36bb662STal Shnaiderman "adding new mac address to device is unsupported"); 818d36bb662STal Shnaiderman return -ENOTSUP; 819d36bb662STal Shnaiderman } 820d36bb662STal Shnaiderman return 0; 821d36bb662STal Shnaiderman } 822d36bb662STal Shnaiderman 823d36bb662STal Shnaiderman /** 82459f10207SOphir Munk * Modify a VF MAC address 82559f10207SOphir Munk * Currently it has no support under Windows. 82659f10207SOphir Munk * 82759f10207SOphir Munk * @param priv 82859f10207SOphir Munk * Pointer to device private data. 82959f10207SOphir Munk * @param mac_addr 83059f10207SOphir Munk * MAC address to modify into. 83159f10207SOphir Munk * @param iface_idx 83259f10207SOphir Munk * Net device interface index 83359f10207SOphir Munk * @param vf_index 83459f10207SOphir Munk * VF index 83559f10207SOphir Munk * 83659f10207SOphir Munk * @return 83759f10207SOphir Munk * 0 on success, a negative errno value otherwise 83859f10207SOphir Munk */ 83959f10207SOphir Munk int 84059f10207SOphir Munk mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, 84159f10207SOphir Munk unsigned int iface_idx, 84259f10207SOphir Munk struct rte_ether_addr *mac_addr, 84359f10207SOphir Munk int vf_index) 84459f10207SOphir Munk { 84559f10207SOphir Munk (void)priv; 84659f10207SOphir Munk (void)iface_idx; 84759f10207SOphir Munk (void)mac_addr; 84859f10207SOphir Munk (void)vf_index; 84959f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 85059f10207SOphir Munk return -ENOTSUP; 85159f10207SOphir Munk } 85259f10207SOphir Munk 85359f10207SOphir Munk /** 85459f10207SOphir Munk * Set device promiscuous mode 85559f10207SOphir Munk * Currently it has no support under Windows. 85659f10207SOphir Munk * 85759f10207SOphir Munk * @param dev 85859f10207SOphir Munk * Pointer to Ethernet device structure. 85959f10207SOphir Munk * @param enable 86059f10207SOphir Munk * 0 - promiscuous is disabled, otherwise - enabled 86159f10207SOphir Munk * 86259f10207SOphir Munk * @return 86359f10207SOphir Munk * 0 on success, a negative error value otherwise 86459f10207SOphir Munk */ 86559f10207SOphir Munk int 86659f10207SOphir Munk mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable) 86759f10207SOphir Munk { 86859f10207SOphir Munk (void)dev; 86959f10207SOphir Munk (void)enable; 87059f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 87159f10207SOphir Munk return -ENOTSUP; 87259f10207SOphir Munk } 87359f10207SOphir Munk 87459f10207SOphir Munk /** 87559f10207SOphir Munk * Set device allmulti mode 87659f10207SOphir Munk * 87759f10207SOphir Munk * @param dev 87859f10207SOphir Munk * Pointer to Ethernet device structure. 87959f10207SOphir Munk * @param enable 88059f10207SOphir Munk * 0 - all multicase is disabled, otherwise - enabled 88159f10207SOphir Munk * 88259f10207SOphir Munk * @return 88359f10207SOphir Munk * 0 on success, a negative error value otherwise 88459f10207SOphir Munk */ 88559f10207SOphir Munk int 88659f10207SOphir Munk mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable) 88759f10207SOphir Munk { 88859f10207SOphir Munk (void)dev; 88959f10207SOphir Munk (void)enable; 89059f10207SOphir Munk DRV_LOG(WARNING, "%s: is not supported", __func__); 89159f10207SOphir Munk return -ENOTSUP; 89259f10207SOphir Munk } 89359f10207SOphir Munk 89438e8684aSOphir Munk /** 895980826dcSTal Shnaiderman * DPDK callback to register a PCI device. 896980826dcSTal Shnaiderman * 897919488fbSXueming Li * This function spawns Ethernet devices out of a given device. 898980826dcSTal Shnaiderman * 899919488fbSXueming Li * @param[in] dev 9007af08c8fSMichael Baum * Pointer to the common device. 901980826dcSTal Shnaiderman * 902980826dcSTal Shnaiderman * @return 903980826dcSTal Shnaiderman * 0 on success, a negative errno value otherwise and rte_errno is set. 904980826dcSTal Shnaiderman */ 905980826dcSTal Shnaiderman int 9067af08c8fSMichael Baum mlx5_os_net_probe(struct mlx5_common_device *cdev) 907980826dcSTal Shnaiderman { 9087af08c8fSMichael Baum struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev); 9095021ce20SMichael Baum struct mlx5_dev_spawn_data spawn = { 9105021ce20SMichael Baum .pf_bond = -1, 9115021ce20SMichael Baum .max_port = 1, 9125021ce20SMichael Baum .phys_port = 1, 913ca1418ceSMichael Baum .phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx), 9145021ce20SMichael Baum .pci_dev = pci_dev, 9155021ce20SMichael Baum .cdev = cdev, 9165021ce20SMichael Baum .ifindex = -1, /* Spawn will assign */ 9175021ce20SMichael Baum .info = (struct mlx5_switch_info){ 9185021ce20SMichael Baum .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK, 9195021ce20SMichael Baum }, 9205021ce20SMichael Baum }; 9215021ce20SMichael Baum struct mlx5_dev_config dev_config = { 9225021ce20SMichael Baum .rx_vec_en = 1, 9235021ce20SMichael Baum .txq_inline_max = MLX5_ARG_UNSET, 9245021ce20SMichael Baum .txq_inline_min = MLX5_ARG_UNSET, 9255021ce20SMichael Baum .txq_inline_mpw = MLX5_ARG_UNSET, 9265021ce20SMichael Baum .txqs_inline = MLX5_ARG_UNSET, 9275021ce20SMichael Baum .mprq = { 9285021ce20SMichael Baum .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, 9295021ce20SMichael Baum .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, 9305021ce20SMichael Baum }, 9315021ce20SMichael Baum .dv_flow_en = 1, 9325021ce20SMichael Baum .log_hp_size = MLX5_ARG_UNSET, 9335021ce20SMichael Baum }; 9345021ce20SMichael Baum int ret; 935980826dcSTal Shnaiderman uint32_t restore; 936980826dcSTal Shnaiderman 937980826dcSTal Shnaiderman if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 938980826dcSTal Shnaiderman DRV_LOG(ERR, "Secondary process is not supported on Windows."); 939980826dcSTal Shnaiderman return -ENOTSUP; 940980826dcSTal Shnaiderman } 941980826dcSTal Shnaiderman ret = mlx5_init_once(); 942980826dcSTal Shnaiderman if (ret) { 943980826dcSTal Shnaiderman DRV_LOG(ERR, "unable to init PMD global data: %s", 944980826dcSTal Shnaiderman strerror(rte_errno)); 945980826dcSTal Shnaiderman return -rte_errno; 946980826dcSTal Shnaiderman } 947980826dcSTal Shnaiderman /* Device specific configuration. */ 948980826dcSTal Shnaiderman switch (pci_dev->id.device_id) { 949980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 950980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 951980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 952980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 953980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 954980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 955980826dcSTal Shnaiderman case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: 9565021ce20SMichael Baum dev_config.vf = 1; 957980826dcSTal Shnaiderman break; 958980826dcSTal Shnaiderman default: 9595021ce20SMichael Baum dev_config.vf = 0; 960980826dcSTal Shnaiderman break; 961980826dcSTal Shnaiderman } 9625021ce20SMichael Baum spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config); 963ca1418ceSMichael Baum if (!spawn.eth_dev) 964887183efSMichael Baum return -rte_errno; 9655021ce20SMichael Baum restore = spawn.eth_dev->data->dev_flags; 9665021ce20SMichael Baum rte_eth_copy_pci_info(spawn.eth_dev, pci_dev); 967980826dcSTal Shnaiderman /* Restore non-PCI flags cleared by the above call. */ 9685021ce20SMichael Baum spawn.eth_dev->data->dev_flags |= restore; 9695021ce20SMichael Baum rte_eth_dev_probing_finish(spawn.eth_dev); 970887183efSMichael Baum return 0; 971980826dcSTal Shnaiderman } 972980826dcSTal Shnaiderman 973ea823b2cSDmitry Kozlyuk /** 974ea823b2cSDmitry Kozlyuk * Cleanup resources when the last device is closed. 975ea823b2cSDmitry Kozlyuk */ 976ea823b2cSDmitry Kozlyuk void 977ea823b2cSDmitry Kozlyuk mlx5_os_net_cleanup(void) 978ea823b2cSDmitry Kozlyuk { 979ea823b2cSDmitry Kozlyuk } 980ea823b2cSDmitry Kozlyuk 98159f10207SOphir Munk const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0}; 982