/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_verbs.h"
#include "mlx5_nl.h"
#include "mlx5_devx.h"

#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;
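/*
 * Note: mlx5_shared_data lives in a memzone shared between the primary
 * and secondary processes (see mlx5_init_shared_data() below), while
 * mlx5_local_data above is private to the current process.
 */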
/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 *
 * @param[in] fd
 *   The completion channel file descriptor of the RQ channel object
 *   (representing the interrupt).
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL);
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

/**
 * Get mlx5 device attributes. The glue function query_device_ex() is called
 * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5
 * device attributes are then filled in from the glue out parameter.
 *
 * @param ctx
 *   Pointer to ibv context.
 *
 * @param device_attr
 *   Pointer to mlx5 device attributes.
 *
 * @return
 *   0 on success, a non-zero error number otherwise.
 */
int
mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
{
	int err;
	struct ibv_device_attr_ex attr_ex;

	memset(device_attr, 0, sizeof(*device_attr));
	err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (err)
		return err;

	device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
	device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
	device_attr->max_sge = attr_ex.orig_attr.max_sge;
	device_attr->max_cq = attr_ex.orig_attr.max_cq;
	device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
	device_attr->max_mr = attr_ex.orig_attr.max_mr;
	device_attr->max_pd = attr_ex.orig_attr.max_pd;
	device_attr->max_qp = attr_ex.orig_attr.max_qp;
	device_attr->max_srq = attr_ex.orig_attr.max_srq;
	device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
	device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
	device_attr->max_rwq_indirection_table_size =
		attr_ex.rss_caps.max_rwq_indirection_table_size;
	device_attr->max_tso = attr_ex.tso_caps.max_tso;
	device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;

	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	err = mlx5_glue->dv_query_device(ctx, &dv_attr);
	if (err)
		return err;

	device_attr->flags = dv_attr.flags;
	device_attr->comp_mask = dv_attr.comp_mask;
#ifdef HAVE_IBV_MLX5_MOD_SWP
	device_attr->sw_parsing_offloads =
		dv_attr.sw_parsing_caps.sw_parsing_offloads;
#endif
	device_attr->min_single_stride_log_num_of_bytes =
		dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
	device_attr->max_single_stride_log_num_of_bytes =
		dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
	device_attr->min_single_wqe_log_num_of_strides =
		dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
	device_attr->max_single_wqe_log_num_of_strides =
		dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
	device_attr->stride_supported_qpts =
		dv_attr.striding_rq_caps.supported_qpts;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
#endif

	return err;
}

/**
 * Verbs callback to allocate memory. This function should allocate the
 * space according to the size provided, residing inside a huge page.
 * Please note that all allocations must respect the alignment from
 * libmlx5 (i.e. currently rte_mem_page_size()).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_dev_ctx_shared *sh = data;
	void *ret;
	size_t alignment = rte_mem_page_size();

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_ASSERT(data != NULL);
	ret = mlx5_malloc(0, size, alignment, sh->numa_node);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	MLX5_ASSERT(data != NULL);
	mlx5_free(ptr);
}
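/*
 * Usage sketch for the two callbacks above (illustrative only): they are
 * meant to be registered on the Verbs context so that libmlx5 allocates
 * its buffers from PMD-managed memory, presumably along these lines:
 *
 *	struct mlx5dv_ctx_allocators alctr = {
 *		.alloc = &mlx5_alloc_verbs_buf,
 *		.free = &mlx5_free_verbs_buf,
 *		.data = sh,
 *	};
 *	mlx5_glue->dv_set_context_attr(sh->ctx,
 *				       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
 *				       (void *)((uintptr_t)&alctr));
 */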
/**
 * Initialize DR related data within the private structure.
 * The routine checks the reference counter and does the actual resource
 * creation/initialization only if the counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_HLIST_NAMESIZE] __rte_unused;
	int err;

	MLX5_ASSERT(sh && sh->refcnt);
	if (sh->refcnt > 1)
		return 0;
	err = mlx5_alloc_table_hash_list(priv);
	if (err)
		goto error;
	/* The resources below are only valid with DV support. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* Init port id action cache list. */
	snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
			     flow_dv_port_id_create_cb,
			     flow_dv_port_id_match_cb,
			     flow_dv_port_id_remove_cb);
	/* Init push vlan action cache list. */
	snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
			     flow_dv_push_vlan_create_cb,
			     flow_dv_push_vlan_match_cb,
			     flow_dv_push_vlan_remove_cb);
	/* Init sample action cache list. */
	snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
			     flow_dv_sample_create_cb,
			     flow_dv_sample_match_cb,
			     flow_dv_sample_remove_cb);
	/* Init dest array action cache list. */
	snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
			     flow_dv_dest_array_create_cb,
			     flow_dv_dest_array_match_cb,
			     flow_dv_dest_array_remove_cb);
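	/*
	 * All cache lists above follow the same pattern: an entry is
	 * created via the *_create_cb callback on a lookup miss, compared
	 * with *_match_cb on subsequent lookups, and freed with
	 * *_remove_cb once the last reference is dropped; "sh" is passed
	 * through as the callback context.
	 */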
	/* Create tags hash list table. */
	snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
	sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
					  MLX5_HLIST_WRITE_MOST,
					  flow_dv_tag_create_cb,
					  flow_dv_tag_match_cb,
					  flow_dv_tag_remove_cb);
	if (!sh->tag_table) {
		DRV_LOG(ERR, "tags hash list creation failed.");
		err = ENOMEM;
		goto error;
	}
	sh->tag_table->ctx = sh;
	snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
	sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
					    0, MLX5_HLIST_WRITE_MOST |
					    MLX5_HLIST_DIRECT_KEY,
					    flow_dv_modify_create_cb,
					    flow_dv_modify_match_cb,
					    flow_dv_modify_remove_cb);
	if (!sh->modify_cmds) {
		DRV_LOG(ERR, "hdr modify hash list creation failed");
		err = ENOMEM;
		goto error;
	}
	sh->modify_cmds->ctx = sh;
	snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
	sh->encaps_decaps = mlx5_hlist_create(s,
					      MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
					      0, MLX5_HLIST_DIRECT_KEY |
					      MLX5_HLIST_WRITE_MOST,
					      flow_dv_encap_decap_create_cb,
					      flow_dv_encap_decap_match_cb,
					      flow_dv_encap_decap_remove_cb);
	if (!sh->encaps_decaps) {
		DRV_LOG(ERR, "encap decap hash list creation failed");
		err = ENOMEM;
		goto error;
	}
	sh->encaps_decaps->ctx = sh;
#endif
#ifdef HAVE_MLX5DV_DR
	void *domain;

	/* Reference counter is zero, we should initialize structures. */
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!domain) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->rx_domain = domain;
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
	if (!domain) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (priv->config.dv_esw_en) {
		domain = mlx5_glue->dr_create_domain
			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
		if (!domain) {
			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
			err = errno;
			goto error;
		}
		sh->fdb_domain = domain;
		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
	}
#endif
	if (!sh->tunnel_hub)
		err = mlx5_alloc_tunnel_hub(sh);
	if (err) {
		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
		goto error;
	}
	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
		if (sh->fdb_domain)
			mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
	}
	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
#endif /* HAVE_MLX5DV_DR */
	sh->default_miss_action =
			mlx5_glue->dr_create_flow_action_default_miss();
	if (!sh->default_miss_action)
		DRV_LOG(WARNING, "Default miss action is not supported.");
	return 0;
error:
	/* Rollback the created objects. */
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
	if (sh->encaps_decaps) {
		mlx5_hlist_destroy(sh->encaps_decaps);
		sh->encaps_decaps = NULL;
	}
	if (sh->modify_cmds) {
		mlx5_hlist_destroy(sh->modify_cmds);
		sh->modify_cmds = NULL;
	}
	if (sh->tag_table) {
		/* Tags should be destroyed together with their flows beforehand. */
		mlx5_hlist_destroy(sh->tag_table);
		sh->tag_table = NULL;
	}
	if (sh->tunnel_hub) {
		mlx5_release_tunnel_hub(sh, priv->dev_port);
		sh->tunnel_hub = NULL;
	}
	mlx5_free_table_hash_list(priv);
	return err;
}
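/*
 * mlx5_alloc_shared_dr() above and mlx5_os_free_shared_dr() below are
 * paired on sh->refcnt: only the first port referencing the shared
 * context creates the DR resources, and only the last port releasing
 * the context tears them down.
 */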
/**
 * Destroy DR related data within the private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	MLX5_ASSERT(sh && sh->refcnt);
	if (sh->refcnt > 1)
		return;
#ifdef HAVE_MLX5DV_DR
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
#endif
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
#endif /* HAVE_MLX5DV_DR */
	if (sh->default_miss_action)
		mlx5_glue->destroy_flow_action
				(sh->default_miss_action);
	if (sh->encaps_decaps) {
		mlx5_hlist_destroy(sh->encaps_decaps);
		sh->encaps_decaps = NULL;
	}
	if (sh->modify_cmds) {
		mlx5_hlist_destroy(sh->modify_cmds);
		sh->modify_cmds = NULL;
	}
	if (sh->tag_table) {
		/* Tags should be destroyed together with their flows beforehand. */
		mlx5_hlist_destroy(sh->tag_table);
		sh->tag_table = NULL;
	}
	if (sh->tunnel_hub) {
		mlx5_release_tunnel_hub(sh, priv->dev_port);
		sh->tunnel_hub = NULL;
	}
	mlx5_cache_list_destroy(&sh->port_id_action_list);
	mlx5_cache_list_destroy(&sh->push_vlan_action_list);
	mlx5_free_table_hash_list(priv);
}

/**
 * Initialize shared data between the primary and secondary processes.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}
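/*
 * The memzone above is the rendezvous point between processes: the
 * primary reserves "mlx5_pmd_shared_data", secondaries look it up by
 * name, and every process therefore resolves the same mlx5_shared_data
 * pointer while keeping its own mlx5_local_data.
 */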
/**
 * PMD global initialization.
 *
 * Independent of any individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	MLX5_ASSERT(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
					   mlx5_mp_os_primary_handle);
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
					     mlx5_mp_os_secondary_handle);
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * Create the Tx queue DevX/Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_devx_obj_new(dev, idx);
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!priv->config.dv_esw_en)
		return mlx5_txq_devx_obj_new(dev, idx);
#endif
	return mlx5_txq_ibv_obj_new(dev, idx);
}
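/*
 * Selection note for the function above: hairpin Tx queues always take
 * the DevX path; regular Tx queues take it only when rdma-core exposes
 * the UAR offset (HAVE_MLX5DV_DEVX_UAR_OFFSET) and E-Switch is
 * disabled, and fall back to Verbs objects otherwise. The release
 * function below mirrors the same selection.
 */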
/**
 * Release a Tx DevX/Verbs queue object.
 *
 * @param txq_obj
 *   DevX/Verbs Tx queue object.
 */
static void
mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		mlx5_txq_devx_obj_release(txq_obj);
		return;
	}
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
		mlx5_txq_devx_obj_release(txq_obj);
		return;
	}
#endif
	mlx5_txq_ibv_obj_release(txq_obj);
}

/**
 * Detect and configure the DV flow counter mode.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
static void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!priv->config.devx || !priv->config.dv_flow_en ||
	    !priv->config.hca_attr.flow_counters_dump ||
	    !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			priv->config.hca_attr.flow_counters_dump,
			priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has a different fallback mode "
			"than other ports: %d.", PORT_ID(priv), fallback);
#endif
}
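/*
 * Note: the counter mode is a property of the shared context, so all
 * ports on the same device are expected to resolve the same fallback
 * decision; the warning above flags a port that disagrees with the
 * port that initialized sh.
 */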
/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EBUSY: device is not supposed to be spawned.
 *   EEXIST: device is already spawned.
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config *config)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct ibv_port_attr port_attr;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int hw_padding = 0;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int cqe_pad = 0;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	unsigned int i;
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
#endif

	/* Determine if this port representor is supposed to be spawned. */
	if (switch_info->representor && dpdk_dev->devargs) {
		struct rte_eth_devargs eth_da;

		err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
		if (err) {
			rte_errno = -err;
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(rte_errno));
			return NULL;
		}
		for (i = 0; i < eth_da.nb_representor_ports; ++i)
			if (eth_da.representor_ports[i] ==
			    (uint16_t)switch_info->port_name)
				break;
		if (i == eth_da.nb_representor_ports) {
			rte_errno = EBUSY;
			return NULL;
		}
	}
	/* Build device name. */
	if (spawn->pf_bond < 0) {
		/* Single device. */
		if (!switch_info->representor)
			strlcpy(name, dpdk_dev->name, sizeof(name));
		else
			snprintf(name, sizeof(name), "%s_representor_%u",
				 dpdk_dev->name, switch_info->port_name);
	} else {
		/* Bonding device. */
		if (!switch_info->representor)
			snprintf(name, sizeof(name), "%s_%s",
				 dpdk_dev->name,
				 mlx5_os_get_dev_device_name(spawn->phys_dev));
		else
			snprintf(name, sizeof(name), "%s_%s_representor_%u",
				 dpdk_dev->name,
				 mlx5_os_get_dev_device_name(spawn->phys_dev),
				 switch_info->port_name);
	}
	/* Check if the device is already spawned. */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct mlx5_mp_id mp_id;

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "cannot attach rte ethdev");
			rte_errno = ENOMEM;
			return NULL;
		}
		priv = eth_dev->data->dev_private;
		if (priv->sh->bond_dev != UINT16_MAX)
			/* For bonding port, use primary PCI device. */
			eth_dev->device =
				rte_eth_devices[priv->sh->bond_dev].device;
		else
			eth_dev->device = dpdk_dev;
		eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
		eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
		eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
		err = mlx5_proc_priv_init(eth_dev);
		if (err)
			return NULL;
		mp_id.port_id = eth_dev->data->port_id;
		strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
		/* Receive command fd from primary process. */
		err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
		if (err < 0)
			goto err_secondary;
		/* Remap UAR for Tx queues. */
		err = mlx5_tx_uar_init_secondary(eth_dev, err);
		if (err)
			goto err_secondary;
		/*
		 * Ethdev pointer is still required as input since
		 * the primary device is not accessible from the
		 * secondary process.
		 */
		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
		return eth_dev;
err_secondary:
		mlx5_dev_close(eth_dev);
		return NULL;
	}
	/*
	 * Some parameters ("tx_db_nc" in particular) are needed in
	 * advance to create the dv/verbs device context. We process the
	 * devargs here to get them, and later process the devargs again
	 * to override some hardware settings.
	 */
	err = mlx5_args(config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	if (config->dv_miss_info) {
		if (switch_info->master || switch_info->representor)
			config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
	}
	mlx5_malloc_mem_select(config->sys_mem_en);
	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
	if (!sh)
		return NULL;
	config->devx = sh->devx;
#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
	config->dest_tir = 1;
#endif
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config->swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
	}
#endif
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	config->cqe_comp = cqe_comp;
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	/* Whether device supports 128B Rx CQE padding. */
	cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
		  (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	config->tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config->mpls_en = mpls_en;
	/* Check port status. */
	err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
	if (err) {
		DRV_LOG(ERR, "port query failed: %s", strerror(err));
		goto error;
	}
	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
		DRV_LOG(ERR, "port is not configured in Ethernet mode");
		err = EINVAL;
		goto error;
	}
	if (port_attr.state != IBV_PORT_ACTIVE)
		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
			mlx5_glue->port_state_str(port_attr.state),
			port_attr.state);
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	priv->mp_id.port_id = port_id;
	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
	/* Some internal functions rely on Netlink sockets, open them now. */
	priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
	priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	/*
	 * The DevX port query API is implemented. E-Switch may use
	 * either the vport or the reg_c[0] metadata register to match on
	 * the vport index. The engaged part of the metadata register is
	 * defined by the mask.
	 */
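	/*
	 * For illustration only (the mask value here is an assumption; the
	 * real one is returned by the query below): with reg_c_0.mask equal
	 * to 0xffff0000 only the upper 16 bits of reg_c[0] would carry the
	 * vport tag, leaving the lower 16 bits free for application
	 * metadata.
	 */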
	if (switch_info->representor || switch_info->master) {
		devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
				      MLX5DV_DEVX_PORT_MATCH_REG_C_0;
		err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
						 &devx_port);
		if (err) {
			DRV_LOG(WARNING,
				"can't query devx port %d on device %s",
				spawn->phys_port,
				mlx5_os_get_dev_device_name(spawn->phys_dev));
			devx_port.comp_mask = 0;
		}
	}
	if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
		priv->vport_meta_tag = devx_port.reg_c_0.value;
		priv->vport_meta_mask = devx_port.reg_c_0.mask;
		if (!priv->vport_meta_mask) {
			DRV_LOG(ERR, "vport zero mask for port %d"
				" on bonding device %s",
				spawn->phys_port,
				mlx5_os_get_dev_device_name(spawn->phys_dev));
			err = ENOTSUP;
			goto error;
		}
		if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
			DRV_LOG(ERR, "invalid vport tag for port %d"
				" on bonding device %s",
				spawn->phys_port,
				mlx5_os_get_dev_device_name(spawn->phys_dev));
			err = ENOTSUP;
			goto error;
		}
	}
	if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
		priv->vport_id = devx_port.vport_num;
	} else if (spawn->pf_bond >= 0) {
		DRV_LOG(ERR, "can't deduce vport index for port %d"
			" on bonding device %s",
			spawn->phys_port,
			mlx5_os_get_dev_device_name(spawn->phys_dev));
		err = ENOTSUP;
		goto error;
	} else {
		/* Deduce the vport index in the compatible way. */
		priv->vport_id = switch_info->representor ?
				 switch_info->port_name + 1 : -1;
	}
#else
	/*
	 * Kernel/rdma_core support only single-E-Switch-per-PF
	 * configurations, and the vport_id field contains the vport index
	 * of the associated VF, which is deduced from the representor port
	 * name. For example, if IB device port 10 has the attached network
	 * device eth0 with port name attribute pf0vf2, we can deduce the
	 * VF number as 2 and set the vport index to 3 (2 + 1). This
	 * assignment scheme should be changed if multiple E-Switch
	 * instances per PF and/or PCI subfunctions are added.
	 */
	priv->vport_id = switch_info->representor ?
			 switch_info->port_name + 1 : -1;
#endif
	/* The representor_id field keeps the unmodified VF index. */
	priv->representor_id = switch_info->representor ?
			       switch_info->port_name : -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Override some values set by hardware configuration. */
	mlx5_args(config, dpdk_dev->devargs);
	err = mlx5_dev_check_sibling_config(priv, config);
	if (err)
		goto error;
	config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
			    IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config->hw_csum ? "" : "not "));
#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	DRV_LOG(DEBUG, "counters are not supported");
#endif
#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
	if (config->dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported");
		config->dv_flow_en = 0;
	}
#endif
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config->ind_table_max_size);
	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config->hw_vlan_strip ? "" : "not "));
	config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
	hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
	hw_padding = !!(sh->device_attr.device_cap_flags_ex &
			IBV_DEVICE_PCI_WRITE_END_PADDING);
#endif
	if (config->hw_padding && !hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
		config->hw_padding = 0;
	} else if (config->hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
	}
	config->tso = (sh->device_attr.max_tso > 0 &&
		      (sh->device_attr.tso_supported_qpts &
		       (1 << IBV_QPT_RAW_PACKET)));
	if (config->tso)
		config->tso_max_payload_sz = sh->device_attr.max_tso;
	/*
	 * MPW is disabled by default, while the Enhanced MPW is enabled
	 * by default.
	 */
	if (config->mps == MLX5_ARG_UNSET)
		config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
							   MLX5_MPW_DISABLED;
	else
		config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
	DRV_LOG(INFO, "%sMPS is %s",
		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
		config->mps == MLX5_MPW ? "legacy " : "",
		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config->cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported");
		config->cqe_comp = 0;
	}
	if (config->cqe_pad && !cqe_pad) {
		DRV_LOG(WARNING, "Rx CQE padding isn't supported");
		config->cqe_pad = 0;
	} else if (config->cqe_pad) {
		DRV_LOG(INFO, "Rx CQE padding is enabled");
	}
	if (config->devx) {
		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
		if (err) {
			err = -err;
			goto error;
		}
		/* Check relaxed ordering support. */
*/
1132e82ddd28STal Shnaiderman if (!haswell_broadwell_cpu) {
1133e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_write =
1134e82ddd28STal Shnaiderman config->hca_attr.relaxed_ordering_write;
1135e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_read =
1136e82ddd28STal Shnaiderman config->hca_attr.relaxed_ordering_read;
1137e82ddd28STal Shnaiderman } else {
1138e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_read = 0;
1139e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_write = 0;
1140e82ddd28STal Shnaiderman }
11412eb4d010SOphir Munk /* Check for LRO support. */
1142d462a83cSMichael Baum if (config->dest_tir && config->hca_attr.lro_cap &&
1143d462a83cSMichael Baum config->dv_flow_en) {
11442eb4d010SOphir Munk /* TBD check tunnel lro caps. */
1145d462a83cSMichael Baum config->lro.supported = config->hca_attr.lro_cap;
11462eb4d010SOphir Munk DRV_LOG(DEBUG, "Device supports LRO");
11472eb4d010SOphir Munk /*
11482eb4d010SOphir Munk * If LRO timeout is not configured by the application,
11492eb4d010SOphir Munk * use the minimal supported value.
11502eb4d010SOphir Munk */
1151d462a83cSMichael Baum if (!config->lro.timeout)
1152d462a83cSMichael Baum config->lro.timeout =
1153d462a83cSMichael Baum config->hca_attr.lro_timer_supported_periods[0];
11542eb4d010SOphir Munk DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1155d462a83cSMichael Baum config->lro.timeout);
1156613d64e4SDekel Peled DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
1157613d64e4SDekel Peled "required for coalescing is %d bytes",
1158613d64e4SDekel Peled config->hca_attr.lro_min_mss_size);
11592eb4d010SOphir Munk }
11602eb4d010SOphir Munk #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
1161d462a83cSMichael Baum if (config->hca_attr.qos.sup &&
1162d462a83cSMichael Baum config->hca_attr.qos.srtcm_sup &&
1163d462a83cSMichael Baum config->dv_flow_en) {
11642eb4d010SOphir Munk uint8_t reg_c_mask =
1165d462a83cSMichael Baum config->hca_attr.qos.flow_meter_reg_c_ids;
11662eb4d010SOphir Munk /*
11672eb4d010SOphir Munk * Meter needs two REG_C's for color match and pre-sfx
11682eb4d010SOphir Munk * flow match. Here get the REG_C for color match.
11692eb4d010SOphir Munk * REG_C_0 and REG_C_1 are reserved for the metadata feature.
11702eb4d010SOphir Munk */
11712eb4d010SOphir Munk reg_c_mask &= 0xfc;
11722eb4d010SOphir Munk if (__builtin_popcount(reg_c_mask) < 1) {
11732eb4d010SOphir Munk priv->mtr_en = 0;
11742eb4d010SOphir Munk DRV_LOG(WARNING, "No available register for"
11752eb4d010SOphir Munk " meter.");
11762eb4d010SOphir Munk } else {
117731ef2982SDekel Peled /*
117831ef2982SDekel Peled * The meter color register is used by the
117931ef2982SDekel Peled * flow-hit feature as well.
118031ef2982SDekel Peled * The flow-hit feature must use REG_C_3.
118131ef2982SDekel Peled * Prefer REG_C_3 if it is available.
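 *
 * Worked example (mask value illustrative): with reg_c_mask == 0xf4
 * the REG_C_3 bit (1 << 3) is clear, so the fallback below picks the
 * lowest remaining register: ffs(0xf4) - 1 + REG_C_0 == REG_C_2.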
118231ef2982SDekel Peled */
118331ef2982SDekel Peled if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
118431ef2982SDekel Peled priv->mtr_color_reg = REG_C_3;
118531ef2982SDekel Peled else
118631ef2982SDekel Peled priv->mtr_color_reg = ffs(reg_c_mask)
118731ef2982SDekel Peled - 1 + REG_C_0;
11882eb4d010SOphir Munk priv->mtr_en = 1;
11892eb4d010SOphir Munk priv->mtr_reg_share =
1190d462a83cSMichael Baum config->hca_attr.qos.flow_meter_reg_share;
11912eb4d010SOphir Munk DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
11922eb4d010SOphir Munk priv->mtr_color_reg);
11932eb4d010SOphir Munk }
11942eb4d010SOphir Munk }
11952eb4d010SOphir Munk #endif
1196a2999c7bSDekel Peled #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
119731ef2982SDekel Peled if (config->hca_attr.flow_hit_aso &&
119831ef2982SDekel Peled priv->mtr_color_reg == REG_C_3) {
119931ef2982SDekel Peled sh->flow_hit_aso_en = 1;
120031ef2982SDekel Peled err = mlx5_flow_aso_age_mng_init(sh);
120131ef2982SDekel Peled if (err) {
120231ef2982SDekel Peled err = -err;
120331ef2982SDekel Peled goto error;
120431ef2982SDekel Peled }
120531ef2982SDekel Peled DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
120631ef2982SDekel Peled }
1207a2999c7bSDekel Peled #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
120896b1f027SJiawei Wang #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
120996b1f027SJiawei Wang if (config->hca_attr.log_max_ft_sampler_num > 0 &&
121096b1f027SJiawei Wang config->dv_flow_en) {
121196b1f027SJiawei Wang priv->sampler_en = 1;
121296b1f027SJiawei Wang DRV_LOG(DEBUG, "The Sampler is enabled.");
121396b1f027SJiawei Wang } else {
121496b1f027SJiawei Wang priv->sampler_en = 0;
121596b1f027SJiawei Wang if (!config->hca_attr.log_max_ft_sampler_num)
121696b1f027SJiawei Wang DRV_LOG(WARNING, "No available register for"
121796b1f027SJiawei Wang " Sampler.");
121896b1f027SJiawei Wang else
121996b1f027SJiawei Wang DRV_LOG(DEBUG, "DV flow is not supported.");
122096b1f027SJiawei Wang }
122196b1f027SJiawei Wang #endif
12222eb4d010SOphir Munk }
1223d462a83cSMichael Baum if (config->tx_pp) {
12248f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
1225d462a83cSMichael Baum config->hca_attr.dev_freq_khz);
12268f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Packet pacing is %ssupported",
1227d462a83cSMichael Baum config->hca_attr.qos.packet_pacing ? "" : "not ");
12288f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
1229d462a83cSMichael Baum config->hca_attr.cross_channel ? "" : "not ");
12308f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1231d462a83cSMichael Baum config->hca_attr.wqe_index_ignore ? "" : "not ");
12328f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1233d462a83cSMichael Baum config->hca_attr.non_wire_sq ? "" : "not ");
12348f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1235d462a83cSMichael Baum config->hca_attr.log_max_static_sq_wq ? "" : "not ",
1236d462a83cSMichael Baum config->hca_attr.log_max_static_sq_wq);
12378f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1238d462a83cSMichael Baum config->hca_attr.qos.wqe_rate_pp ?
"" : "not "); 1239d462a83cSMichael Baum if (!config->devx) { 12408f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "DevX is required for packet pacing"); 12418f848f32SViacheslav Ovsiienko err = ENODEV; 12428f848f32SViacheslav Ovsiienko goto error; 12438f848f32SViacheslav Ovsiienko } 1244d462a83cSMichael Baum if (!config->hca_attr.qos.packet_pacing) { 12458f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Packet pacing is not supported"); 12468f848f32SViacheslav Ovsiienko err = ENODEV; 12478f848f32SViacheslav Ovsiienko goto error; 12488f848f32SViacheslav Ovsiienko } 1249d462a83cSMichael Baum if (!config->hca_attr.cross_channel) { 12508f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Cross channel operations are" 12518f848f32SViacheslav Ovsiienko " required for packet pacing"); 12528f848f32SViacheslav Ovsiienko err = ENODEV; 12538f848f32SViacheslav Ovsiienko goto error; 12548f848f32SViacheslav Ovsiienko } 1255d462a83cSMichael Baum if (!config->hca_attr.wqe_index_ignore) { 12568f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "WQE index ignore feature is" 12578f848f32SViacheslav Ovsiienko " required for packet pacing"); 12588f848f32SViacheslav Ovsiienko err = ENODEV; 12598f848f32SViacheslav Ovsiienko goto error; 12608f848f32SViacheslav Ovsiienko } 1261d462a83cSMichael Baum if (!config->hca_attr.non_wire_sq) { 12628f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Non-wire SQ feature is" 12638f848f32SViacheslav Ovsiienko " required for packet pacing"); 12648f848f32SViacheslav Ovsiienko err = ENODEV; 12658f848f32SViacheslav Ovsiienko goto error; 12668f848f32SViacheslav Ovsiienko } 1267d462a83cSMichael Baum if (!config->hca_attr.log_max_static_sq_wq) { 12688f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Static WQE SQ feature is" 12698f848f32SViacheslav Ovsiienko " required for packet pacing"); 12708f848f32SViacheslav Ovsiienko err = ENODEV; 12718f848f32SViacheslav Ovsiienko goto error; 12728f848f32SViacheslav Ovsiienko } 1273d462a83cSMichael Baum if (!config->hca_attr.qos.wqe_rate_pp) { 12748f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "WQE rate mode is required" 12758f848f32SViacheslav Ovsiienko " for packet pacing"); 12768f848f32SViacheslav Ovsiienko err = ENODEV; 12778f848f32SViacheslav Ovsiienko goto error; 12788f848f32SViacheslav Ovsiienko } 12798f848f32SViacheslav Ovsiienko #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 12808f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "DevX does not provide UAR offset," 12818f848f32SViacheslav Ovsiienko " can't create queues for packet pacing"); 12828f848f32SViacheslav Ovsiienko err = ENODEV; 12838f848f32SViacheslav Ovsiienko goto error; 12848f848f32SViacheslav Ovsiienko #endif 12858f848f32SViacheslav Ovsiienko } 1286d462a83cSMichael Baum if (config->devx) { 1287a2854c4dSViacheslav Ovsiienko uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)]; 1288a2854c4dSViacheslav Ovsiienko 1289972a1bf8SViacheslav Ovsiienko err = config->hca_attr.access_register_user ? 1290972a1bf8SViacheslav Ovsiienko mlx5_devx_cmd_register_read 1291a2854c4dSViacheslav Ovsiienko (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0, 1292972a1bf8SViacheslav Ovsiienko reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP; 1293a2854c4dSViacheslav Ovsiienko if (!err) { 1294a2854c4dSViacheslav Ovsiienko uint32_t ts_mode; 1295a2854c4dSViacheslav Ovsiienko 1296a2854c4dSViacheslav Ovsiienko /* MTUTC register is read successfully. 
*/ 1297a2854c4dSViacheslav Ovsiienko ts_mode = MLX5_GET(register_mtutc, reg, 1298a2854c4dSViacheslav Ovsiienko time_stamp_mode); 1299a2854c4dSViacheslav Ovsiienko if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) 1300d462a83cSMichael Baum config->rt_timestamp = 1; 1301a2854c4dSViacheslav Ovsiienko } else { 1302a2854c4dSViacheslav Ovsiienko /* Kernel does not support register reading. */ 1303d462a83cSMichael Baum if (config->hca_attr.dev_freq_khz == 1304a2854c4dSViacheslav Ovsiienko (NS_PER_S / MS_PER_S)) 1305d462a83cSMichael Baum config->rt_timestamp = 1; 1306a2854c4dSViacheslav Ovsiienko } 1307a2854c4dSViacheslav Ovsiienko } 130850f95b23SSuanming Mou /* 130950f95b23SSuanming Mou * If HW has bug working with tunnel packet decapsulation and 131050f95b23SSuanming Mou * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip 131150f95b23SSuanming Mou * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore. 131250f95b23SSuanming Mou */ 1313d462a83cSMichael Baum if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en) 1314d462a83cSMichael Baum config->hw_fcs_strip = 0; 131550f95b23SSuanming Mou DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 1316d462a83cSMichael Baum (config->hw_fcs_strip ? "" : "not ")); 1317d462a83cSMichael Baum if (config->mprq.enabled && mprq) { 1318d462a83cSMichael Baum if (config->mprq.stride_num_n && 1319d462a83cSMichael Baum (config->mprq.stride_num_n > mprq_max_stride_num_n || 1320d462a83cSMichael Baum config->mprq.stride_num_n < mprq_min_stride_num_n)) { 1321d462a83cSMichael Baum config->mprq.stride_num_n = 13222eb4d010SOphir Munk RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 13232eb4d010SOphir Munk mprq_min_stride_num_n), 13242eb4d010SOphir Munk mprq_max_stride_num_n); 13252eb4d010SOphir Munk DRV_LOG(WARNING, 13262eb4d010SOphir Munk "the number of strides" 13272eb4d010SOphir Munk " for Multi-Packet RQ is out of range," 13282eb4d010SOphir Munk " setting default value (%u)", 1329d462a83cSMichael Baum 1 << config->mprq.stride_num_n); 13302eb4d010SOphir Munk } 1331d462a83cSMichael Baum if (config->mprq.stride_size_n && 1332d462a83cSMichael Baum (config->mprq.stride_size_n > mprq_max_stride_size_n || 1333d462a83cSMichael Baum config->mprq.stride_size_n < mprq_min_stride_size_n)) { 1334d462a83cSMichael Baum config->mprq.stride_size_n = 13352eb4d010SOphir Munk RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N, 13362eb4d010SOphir Munk mprq_min_stride_size_n), 13372eb4d010SOphir Munk mprq_max_stride_size_n); 13382eb4d010SOphir Munk DRV_LOG(WARNING, 13392eb4d010SOphir Munk "the size of a stride" 13402eb4d010SOphir Munk " for Multi-Packet RQ is out of range," 13412eb4d010SOphir Munk " setting default value (%u)", 1342d462a83cSMichael Baum 1 << config->mprq.stride_size_n); 13432eb4d010SOphir Munk } 1344d462a83cSMichael Baum config->mprq.min_stride_size_n = mprq_min_stride_size_n; 1345d462a83cSMichael Baum config->mprq.max_stride_size_n = mprq_max_stride_size_n; 1346d462a83cSMichael Baum } else if (config->mprq.enabled && !mprq) { 13472eb4d010SOphir Munk DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 1348d462a83cSMichael Baum config->mprq.enabled = 0; 13492eb4d010SOphir Munk } 1350d462a83cSMichael Baum if (config->max_dump_files_num == 0) 1351d462a83cSMichael Baum config->max_dump_files_num = 128; 13522eb4d010SOphir Munk eth_dev = rte_eth_dev_allocate(name); 13532eb4d010SOphir Munk if (eth_dev == NULL) { 13542eb4d010SOphir Munk DRV_LOG(ERR, "can not allocate rte ethdev"); 13552eb4d010SOphir Munk err = ENOMEM; 13562eb4d010SOphir Munk goto error; 
13572eb4d010SOphir Munk } 13582eb4d010SOphir Munk if (priv->representor) { 13592eb4d010SOphir Munk eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 13602eb4d010SOphir Munk eth_dev->data->representor_id = priv->representor_id; 13612eb4d010SOphir Munk } 13622eb4d010SOphir Munk /* 13632eb4d010SOphir Munk * Store associated network device interface index. This index 13642eb4d010SOphir Munk * is permanent throughout the lifetime of device. So, we may store 13652eb4d010SOphir Munk * the ifindex here and use the cached value further. 13662eb4d010SOphir Munk */ 13672eb4d010SOphir Munk MLX5_ASSERT(spawn->ifindex); 13682eb4d010SOphir Munk priv->if_index = spawn->ifindex; 1369c21e5facSXueming Li if (priv->pf_bond >= 0 && priv->master) { 1370c21e5facSXueming Li /* Get bond interface info */ 1371c21e5facSXueming Li err = mlx5_sysfs_bond_info(priv->if_index, 1372c21e5facSXueming Li &priv->bond_ifindex, 1373c21e5facSXueming Li priv->bond_name); 1374c21e5facSXueming Li if (err) 1375c21e5facSXueming Li DRV_LOG(ERR, "unable to get bond info: %s", 1376c21e5facSXueming Li strerror(rte_errno)); 1377c21e5facSXueming Li else 1378c21e5facSXueming Li DRV_LOG(INFO, "PF device %u, bond device %u(%s)", 1379c21e5facSXueming Li priv->if_index, priv->bond_ifindex, 1380c21e5facSXueming Li priv->bond_name); 1381c21e5facSXueming Li } 13822eb4d010SOphir Munk eth_dev->data->dev_private = priv; 13832eb4d010SOphir Munk priv->dev_data = eth_dev->data; 13842eb4d010SOphir Munk eth_dev->data->mac_addrs = priv->mac; 1385e6818853SXueming Li if (spawn->pf_bond < 0) { 13862eb4d010SOphir Munk eth_dev->device = dpdk_dev; 1387e6818853SXueming Li } else { 1388e6818853SXueming Li /* Use primary bond PCI as device. */ 1389e6818853SXueming Li if (sh->bond_dev == UINT16_MAX) { 1390e6818853SXueming Li sh->bond_dev = eth_dev->data->port_id; 1391e6818853SXueming Li eth_dev->device = dpdk_dev; 1392e6818853SXueming Li } else { 1393e6818853SXueming Li eth_dev->device = rte_eth_devices[sh->bond_dev].device; 1394e6818853SXueming Li } 1395e6818853SXueming Li } 1396f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 13972eb4d010SOphir Munk /* Configure the first MAC address by default. */ 13982eb4d010SOphir Munk if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 13992eb4d010SOphir Munk DRV_LOG(ERR, 14002eb4d010SOphir Munk "port %u cannot get MAC address, is mlx5_en" 14012eb4d010SOphir Munk " loaded? (errno: %s)", 14022eb4d010SOphir Munk eth_dev->data->port_id, strerror(rte_errno)); 14032eb4d010SOphir Munk err = ENODEV; 14042eb4d010SOphir Munk goto error; 14052eb4d010SOphir Munk } 14062eb4d010SOphir Munk DRV_LOG(INFO, 14072eb4d010SOphir Munk "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 14082eb4d010SOphir Munk eth_dev->data->port_id, 14092eb4d010SOphir Munk mac.addr_bytes[0], mac.addr_bytes[1], 14102eb4d010SOphir Munk mac.addr_bytes[2], mac.addr_bytes[3], 14112eb4d010SOphir Munk mac.addr_bytes[4], mac.addr_bytes[5]); 14122eb4d010SOphir Munk #ifdef RTE_LIBRTE_MLX5_DEBUG 14132eb4d010SOphir Munk { 14142eb4d010SOphir Munk char ifname[IF_NAMESIZE]; 14152eb4d010SOphir Munk 14162eb4d010SOphir Munk if (mlx5_get_ifname(eth_dev, &ifname) == 0) 14172eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 14182eb4d010SOphir Munk eth_dev->data->port_id, ifname); 14192eb4d010SOphir Munk else 14202eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u ifname is unknown", 14212eb4d010SOphir Munk eth_dev->data->port_id); 14222eb4d010SOphir Munk } 14232eb4d010SOphir Munk #endif 14242eb4d010SOphir Munk /* Get actual MTU if possible. 
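 * On Linux this boils down to the standard SIOCGIFMTU ioctl;
 * a standalone sketch (illustrative, assuming an open AF_INET
 * socket "sock" and the interface name in "ifname"):
 *
 *	struct ifreq ifr;
 *
 *	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
 *	if (ioctl(sock, SIOCGIFMTU, &ifr) == 0)
 *		mtu = ifr.ifr_mtu;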
*/ 14252eb4d010SOphir Munk err = mlx5_get_mtu(eth_dev, &priv->mtu); 14262eb4d010SOphir Munk if (err) { 14272eb4d010SOphir Munk err = rte_errno; 14282eb4d010SOphir Munk goto error; 14292eb4d010SOphir Munk } 14302eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 14312eb4d010SOphir Munk priv->mtu); 14322eb4d010SOphir Munk /* Initialize burst functions to prevent crashes before link-up. */ 14332eb4d010SOphir Munk eth_dev->rx_pkt_burst = removed_rx_burst; 14342eb4d010SOphir Munk eth_dev->tx_pkt_burst = removed_tx_burst; 1435042f5c94SOphir Munk eth_dev->dev_ops = &mlx5_os_dev_ops; 1436cbfc6111SFerruh Yigit eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 1437cbfc6111SFerruh Yigit eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 1438cbfc6111SFerruh Yigit eth_dev->rx_queue_count = mlx5_rx_queue_count; 14392eb4d010SOphir Munk /* Register MAC address. */ 14402eb4d010SOphir Munk claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 1441d462a83cSMichael Baum if (config->vf && config->vf_nl_en) 14422eb4d010SOphir Munk mlx5_nl_mac_addr_sync(priv->nl_socket_route, 14432eb4d010SOphir Munk mlx5_ifindex(eth_dev), 14442eb4d010SOphir Munk eth_dev->data->mac_addrs, 14452eb4d010SOphir Munk MLX5_MAX_MAC_ADDRESSES); 14462eb4d010SOphir Munk priv->flows = 0; 14472eb4d010SOphir Munk priv->ctrl_flows = 0; 1448d163fc2dSXueming Li rte_spinlock_init(&priv->flow_list_lock); 14492eb4d010SOphir Munk TAILQ_INIT(&priv->flow_meters); 14502eb4d010SOphir Munk TAILQ_INIT(&priv->flow_meter_profiles); 14512eb4d010SOphir Munk /* Hint libmlx5 to use PMD allocator for data plane resources */ 145236dabceaSMichael Baum mlx5_glue->dv_set_context_attr(sh->ctx, 145336dabceaSMichael Baum MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 145436dabceaSMichael Baum (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ 14552eb4d010SOphir Munk .alloc = &mlx5_alloc_verbs_buf, 14562eb4d010SOphir Munk .free = &mlx5_free_verbs_buf, 145781c3b977SViacheslav Ovsiienko .data = sh, 145836dabceaSMichael Baum })); 14592eb4d010SOphir Munk /* Bring Ethernet device up. */ 14602eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 14612eb4d010SOphir Munk eth_dev->data->port_id); 14622eb4d010SOphir Munk mlx5_set_link_up(eth_dev); 14632eb4d010SOphir Munk /* 14642eb4d010SOphir Munk * Even though the interrupt handler is not installed yet, 14652eb4d010SOphir Munk * interrupts will still trigger on the async_fd from 14662eb4d010SOphir Munk * Verbs context returned by ibv_open_device(). 14672eb4d010SOphir Munk */ 14682eb4d010SOphir Munk mlx5_link_update(eth_dev, 0); 14692eb4d010SOphir Munk #ifdef HAVE_MLX5DV_DR_ESWITCH 1470d462a83cSMichael Baum if (!(config->hca_attr.eswitch_manager && config->dv_flow_en && 14712eb4d010SOphir Munk (switch_info->representor || switch_info->master))) 1472d462a83cSMichael Baum config->dv_esw_en = 0; 14732eb4d010SOphir Munk #else 1474d462a83cSMichael Baum config->dv_esw_en = 0; 14752eb4d010SOphir Munk #endif 14762eb4d010SOphir Munk /* Detect minimal data bytes to inline. */ 1477d462a83cSMichael Baum mlx5_set_min_inline(spawn, config); 14782eb4d010SOphir Munk /* Store device configuration on private structure. */ 1479d462a83cSMichael Baum priv->config = *config; 14802eb4d010SOphir Munk /* Create context for virtual machine VLAN workaround. 
*/ 14812eb4d010SOphir Munk priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 1482d462a83cSMichael Baum if (config->dv_flow_en) { 14832eb4d010SOphir Munk err = mlx5_alloc_shared_dr(priv); 14842eb4d010SOphir Munk if (err) 14852eb4d010SOphir Munk goto error; 14862eb4d010SOphir Munk } 14877aa9892fSMichael Baum if (config->devx && config->dv_flow_en && config->dest_tir) { 14885eaf882eSMichael Baum priv->obj_ops = devx_obj_ops; 14890c762e81SMichael Baum priv->obj_ops.drop_action_create = 14900c762e81SMichael Baum ibv_obj_ops.drop_action_create; 14910c762e81SMichael Baum priv->obj_ops.drop_action_destroy = 14920c762e81SMichael Baum ibv_obj_ops.drop_action_destroy; 14935d9f3c3fSMichael Baum #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 14945d9f3c3fSMichael Baum priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify; 14955d9f3c3fSMichael Baum #else 14963ec73abeSMatan Azrad if (config->dv_esw_en) 14975d9f3c3fSMichael Baum priv->obj_ops.txq_obj_modify = 14985d9f3c3fSMichael Baum ibv_obj_ops.txq_obj_modify; 14995d9f3c3fSMichael Baum #endif 15003ec73abeSMatan Azrad /* Use specific wrappers for Tx object. */ 15013ec73abeSMatan Azrad priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new; 15023ec73abeSMatan Azrad priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release; 15033ec73abeSMatan Azrad 15045eaf882eSMichael Baum } else { 15055eaf882eSMichael Baum priv->obj_ops = ibv_obj_ops; 15065eaf882eSMichael Baum } 150765b3cd0dSSuanming Mou priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev); 150865b3cd0dSSuanming Mou if (!priv->drop_queue.hrxq) 150965b3cd0dSSuanming Mou goto error; 15102eb4d010SOphir Munk /* Supported Verbs flow priority number detection. */ 15112eb4d010SOphir Munk err = mlx5_flow_discover_priorities(eth_dev); 15122eb4d010SOphir Munk if (err < 0) { 15132eb4d010SOphir Munk err = -err; 15142eb4d010SOphir Munk goto error; 15152eb4d010SOphir Munk } 15162eb4d010SOphir Munk priv->config.flow_prio = err; 15172eb4d010SOphir Munk if (!priv->config.dv_esw_en && 15182eb4d010SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 15192eb4d010SOphir Munk DRV_LOG(WARNING, "metadata mode %u is not supported " 15202eb4d010SOphir Munk "(no E-Switch)", priv->config.dv_xmeta_en); 15212eb4d010SOphir Munk priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; 15222eb4d010SOphir Munk } 15232eb4d010SOphir Munk mlx5_set_metadata_mask(eth_dev); 15242eb4d010SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 15252eb4d010SOphir Munk !priv->sh->dv_regc0_mask) { 15262eb4d010SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 15272eb4d010SOphir Munk "(no metadata reg_c[0] is available)", 15282eb4d010SOphir Munk priv->config.dv_xmeta_en); 15292eb4d010SOphir Munk err = ENOTSUP; 15302eb4d010SOphir Munk goto error; 15312eb4d010SOphir Munk } 1532e1592b6cSSuanming Mou mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev, 1533e1592b6cSSuanming Mou mlx5_hrxq_create_cb, 1534e1592b6cSSuanming Mou mlx5_hrxq_match_cb, 1535e1592b6cSSuanming Mou mlx5_hrxq_remove_cb); 15362eb4d010SOphir Munk /* Query availability of metadata reg_c's. 
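 * Discovery is done by trial (a sketch of the idea, not the exact
 * implementation): for each candidate register a flow rule copying
 * the register to itself is created and destroyed, and success
 * marks the register as usable, e.g.:
 *
 *	for (idx = REG_C_2; idx <= REG_C_7; idx++)
 *		usable[idx] = try_copy_mreg_c_flow(dev, idx) == 0;
 *
 * where try_copy_mreg_c_flow() is a hypothetical name for the probe.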
*/ 15372eb4d010SOphir Munk err = mlx5_flow_discover_mreg_c(eth_dev); 15382eb4d010SOphir Munk if (err < 0) { 15392eb4d010SOphir Munk err = -err; 15402eb4d010SOphir Munk goto error; 15412eb4d010SOphir Munk } 15422eb4d010SOphir Munk if (!mlx5_flow_ext_mreg_supported(eth_dev)) { 15432eb4d010SOphir Munk DRV_LOG(DEBUG, 15442eb4d010SOphir Munk "port %u extensive metadata register is not supported", 15452eb4d010SOphir Munk eth_dev->data->port_id); 15462eb4d010SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 15472eb4d010SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 15482eb4d010SOphir Munk "(no metadata registers available)", 15492eb4d010SOphir Munk priv->config.dv_xmeta_en); 15502eb4d010SOphir Munk err = ENOTSUP; 15512eb4d010SOphir Munk goto error; 15522eb4d010SOphir Munk } 15532eb4d010SOphir Munk } 15542eb4d010SOphir Munk if (priv->config.dv_flow_en && 15552eb4d010SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 15562eb4d010SOphir Munk mlx5_flow_ext_mreg_supported(eth_dev) && 15572eb4d010SOphir Munk priv->sh->dv_regc0_mask) { 15582eb4d010SOphir Munk priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, 1559e69a5922SXueming Li MLX5_FLOW_MREG_HTABLE_SZ, 1560e69a5922SXueming Li 0, 0, 1561f7f73ac1SXueming Li flow_dv_mreg_create_cb, 1562f5b0aed2SSuanming Mou flow_dv_mreg_match_cb, 1563f7f73ac1SXueming Li flow_dv_mreg_remove_cb); 15642eb4d010SOphir Munk if (!priv->mreg_cp_tbl) { 15652eb4d010SOphir Munk err = ENOMEM; 15662eb4d010SOphir Munk goto error; 15672eb4d010SOphir Munk } 1568f7f73ac1SXueming Li priv->mreg_cp_tbl->ctx = eth_dev; 15692eb4d010SOphir Munk } 1570cc608e4dSSuanming Mou rte_spinlock_init(&priv->shared_act_sl); 1571994829e6SSuanming Mou mlx5_flow_counter_mode_config(eth_dev); 15729fbe97f0SXueming Li if (priv->config.dv_flow_en) 15739fbe97f0SXueming Li eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 15742eb4d010SOphir Munk return eth_dev; 15752eb4d010SOphir Munk error: 15762eb4d010SOphir Munk if (priv) { 15772eb4d010SOphir Munk if (priv->mreg_cp_tbl) 1578e69a5922SXueming Li mlx5_hlist_destroy(priv->mreg_cp_tbl); 15792eb4d010SOphir Munk if (priv->sh) 15802eb4d010SOphir Munk mlx5_os_free_shared_dr(priv); 15812eb4d010SOphir Munk if (priv->nl_socket_route >= 0) 15822eb4d010SOphir Munk close(priv->nl_socket_route); 15832eb4d010SOphir Munk if (priv->nl_socket_rdma >= 0) 15842eb4d010SOphir Munk close(priv->nl_socket_rdma); 15852eb4d010SOphir Munk if (priv->vmwa_context) 15862eb4d010SOphir Munk mlx5_vlan_vmwa_exit(priv->vmwa_context); 158765b3cd0dSSuanming Mou if (eth_dev && priv->drop_queue.hrxq) 158865b3cd0dSSuanming Mou mlx5_drop_action_destroy(eth_dev); 15892eb4d010SOphir Munk if (own_domain_id) 15902eb4d010SOphir Munk claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 1591e1592b6cSSuanming Mou mlx5_cache_list_destroy(&priv->hrxqs); 15922175c4dcSSuanming Mou mlx5_free(priv); 15932eb4d010SOphir Munk if (eth_dev != NULL) 15942eb4d010SOphir Munk eth_dev->data->dev_private = NULL; 15952eb4d010SOphir Munk } 15962eb4d010SOphir Munk if (eth_dev != NULL) { 15972eb4d010SOphir Munk /* mac_addrs must not be freed alone because part of 15982eb4d010SOphir Munk * dev_private 15992eb4d010SOphir Munk **/ 16002eb4d010SOphir Munk eth_dev->data->mac_addrs = NULL; 16012eb4d010SOphir Munk rte_eth_dev_release_port(eth_dev); 16022eb4d010SOphir Munk } 16032eb4d010SOphir Munk if (sh) 160491389890SOphir Munk mlx5_free_shared_dev_ctx(sh); 16052eb4d010SOphir Munk MLX5_ASSERT(err > 0); 16062eb4d010SOphir Munk rte_errno = err; 16072eb4d010SOphir 
Munk return NULL; 16082eb4d010SOphir Munk } 16092eb4d010SOphir Munk 16102eb4d010SOphir Munk /** 16112eb4d010SOphir Munk * Comparison callback to sort device data. 16122eb4d010SOphir Munk * 16132eb4d010SOphir Munk * This is meant to be used with qsort(). 16142eb4d010SOphir Munk * 16152eb4d010SOphir Munk * @param a[in] 16162eb4d010SOphir Munk * Pointer to pointer to first data object. 16172eb4d010SOphir Munk * @param b[in] 16182eb4d010SOphir Munk * Pointer to pointer to second data object. 16192eb4d010SOphir Munk * 16202eb4d010SOphir Munk * @return 16212eb4d010SOphir Munk * 0 if both objects are equal, less than 0 if the first argument is less 16222eb4d010SOphir Munk * than the second, greater than 0 otherwise. 16232eb4d010SOphir Munk */ 16242eb4d010SOphir Munk static int 16252eb4d010SOphir Munk mlx5_dev_spawn_data_cmp(const void *a, const void *b) 16262eb4d010SOphir Munk { 16272eb4d010SOphir Munk const struct mlx5_switch_info *si_a = 16282eb4d010SOphir Munk &((const struct mlx5_dev_spawn_data *)a)->info; 16292eb4d010SOphir Munk const struct mlx5_switch_info *si_b = 16302eb4d010SOphir Munk &((const struct mlx5_dev_spawn_data *)b)->info; 16312eb4d010SOphir Munk int ret; 16322eb4d010SOphir Munk 16332eb4d010SOphir Munk /* Master device first. */ 16342eb4d010SOphir Munk ret = si_b->master - si_a->master; 16352eb4d010SOphir Munk if (ret) 16362eb4d010SOphir Munk return ret; 16372eb4d010SOphir Munk /* Then representor devices. */ 16382eb4d010SOphir Munk ret = si_b->representor - si_a->representor; 16392eb4d010SOphir Munk if (ret) 16402eb4d010SOphir Munk return ret; 16412eb4d010SOphir Munk /* Unidentified devices come last in no specific order. */ 16422eb4d010SOphir Munk if (!si_a->representor) 16432eb4d010SOphir Munk return 0; 16442eb4d010SOphir Munk /* Order representors by name. */ 16452eb4d010SOphir Munk return si_a->port_name - si_b->port_name; 16462eb4d010SOphir Munk } 16472eb4d010SOphir Munk 16482eb4d010SOphir Munk /** 16492eb4d010SOphir Munk * Match PCI information for possible slaves of bonding device. 16502eb4d010SOphir Munk * 16512eb4d010SOphir Munk * @param[in] ibv_dev 16522eb4d010SOphir Munk * Pointer to Infiniband device structure. 16532eb4d010SOphir Munk * @param[in] pci_dev 16542eb4d010SOphir Munk * Pointer to PCI device structure to match PCI address. 16552eb4d010SOphir Munk * @param[in] nl_rdma 16562eb4d010SOphir Munk * Netlink RDMA group socket handle. 16572eb4d010SOphir Munk * 16582eb4d010SOphir Munk * @return 16592eb4d010SOphir Munk * negative value if no bonding device found, otherwise 16602eb4d010SOphir Munk * positive index of slave PF in bonding. 16612eb4d010SOphir Munk */ 16622eb4d010SOphir Munk static int 16632eb4d010SOphir Munk mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev, 16642eb4d010SOphir Munk const struct rte_pci_device *pci_dev, 16652eb4d010SOphir Munk int nl_rdma) 16662eb4d010SOphir Munk { 16672eb4d010SOphir Munk char ifname[IF_NAMESIZE + 1]; 16682eb4d010SOphir Munk unsigned int ifindex; 16692eb4d010SOphir Munk unsigned int np, i; 16702eb4d010SOphir Munk FILE *file = NULL; 16712eb4d010SOphir Munk int pf = -1; 16722eb4d010SOphir Munk 16732eb4d010SOphir Munk /* 16742eb4d010SOphir Munk * Try to get master device name. If something goes 16752eb4d010SOphir Munk * wrong suppose the lack of kernel support and no 16762eb4d010SOphir Munk * bonding devices. 
16772eb4d010SOphir Munk */
16782eb4d010SOphir Munk if (nl_rdma < 0)
16792eb4d010SOphir Munk return -1;
16802eb4d010SOphir Munk if (!strstr(ibv_dev->name, "bond"))
16812eb4d010SOphir Munk return -1;
16822eb4d010SOphir Munk np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
16832eb4d010SOphir Munk if (!np)
16842eb4d010SOphir Munk return -1;
16852eb4d010SOphir Munk /*
16862eb4d010SOphir Munk * The Master device might not be on the predefined
16872eb4d010SOphir Munk * port (not on port index 1, this is not guaranteed),
16882eb4d010SOphir Munk * we have to scan all Infiniband device ports and
16892eb4d010SOphir Munk * find master.
16902eb4d010SOphir Munk */
16912eb4d010SOphir Munk for (i = 1; i <= np; ++i) {
16922eb4d010SOphir Munk /* Check whether Infiniband port is populated. */
16932eb4d010SOphir Munk ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
16942eb4d010SOphir Munk if (!ifindex)
16952eb4d010SOphir Munk continue;
16962eb4d010SOphir Munk if (!if_indextoname(ifindex, ifname))
16972eb4d010SOphir Munk continue;
16982eb4d010SOphir Munk /* Try to read bonding slave names from sysfs. */
16992eb4d010SOphir Munk MKSTR(slaves,
17002eb4d010SOphir Munk "/sys/class/net/%s/master/bonding/slaves", ifname);
17012eb4d010SOphir Munk file = fopen(slaves, "r");
17022eb4d010SOphir Munk if (file)
17032eb4d010SOphir Munk break;
17042eb4d010SOphir Munk }
17052eb4d010SOphir Munk if (!file)
17062eb4d010SOphir Munk return -1;
17072eb4d010SOphir Munk /* Use safe format to check maximal buffer length. */
17082eb4d010SOphir Munk MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
17092eb4d010SOphir Munk while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
17102eb4d010SOphir Munk char tmp_str[IF_NAMESIZE + 32];
17112eb4d010SOphir Munk struct rte_pci_addr pci_addr;
17122eb4d010SOphir Munk struct mlx5_switch_info info;
17132eb4d010SOphir Munk 
17142eb4d010SOphir Munk /* Process slave interface names in the loop. */
17152eb4d010SOphir Munk snprintf(tmp_str, sizeof(tmp_str),
17162eb4d010SOphir Munk "/sys/class/net/%s", ifname);
17172eb4d010SOphir Munk if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
17182eb4d010SOphir Munk DRV_LOG(WARNING, "cannot get PCI address"
17192eb4d010SOphir Munk " for netdev \"%s\"", ifname);
17202eb4d010SOphir Munk continue;
17212eb4d010SOphir Munk }
17222eb4d010SOphir Munk if (pci_dev->addr.domain != pci_addr.domain ||
17232eb4d010SOphir Munk pci_dev->addr.bus != pci_addr.bus ||
17242eb4d010SOphir Munk pci_dev->addr.devid != pci_addr.devid ||
17252eb4d010SOphir Munk pci_dev->addr.function != pci_addr.function)
17262eb4d010SOphir Munk continue;
17272eb4d010SOphir Munk /* Slave interface PCI address match found.
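 * The slave role is then classified from its phys_port_name below;
 * examples of the kernel naming scheme (values illustrative):
 *	"0"      - legacy naming, physical port 0
 *	"p0"     - uplink port 0
 *	"pf0vf2" - VF 2 of PF 0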
*/ 17282eb4d010SOphir Munk fclose(file); 17292eb4d010SOphir Munk snprintf(tmp_str, sizeof(tmp_str), 17302eb4d010SOphir Munk "/sys/class/net/%s/phys_port_name", ifname); 17312eb4d010SOphir Munk file = fopen(tmp_str, "rb"); 17322eb4d010SOphir Munk if (!file) 17332eb4d010SOphir Munk break; 17342eb4d010SOphir Munk info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; 17352eb4d010SOphir Munk if (fscanf(file, "%32s", tmp_str) == 1) 17362eb4d010SOphir Munk mlx5_translate_port_name(tmp_str, &info); 17372eb4d010SOphir Munk if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY || 17382eb4d010SOphir Munk info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) 17392eb4d010SOphir Munk pf = info.port_name; 17402eb4d010SOphir Munk break; 17412eb4d010SOphir Munk } 17422eb4d010SOphir Munk if (file) 17432eb4d010SOphir Munk fclose(file); 17442eb4d010SOphir Munk return pf; 17452eb4d010SOphir Munk } 17462eb4d010SOphir Munk 17472eb4d010SOphir Munk /** 17482eb4d010SOphir Munk * DPDK callback to register a PCI device. 17492eb4d010SOphir Munk * 17502eb4d010SOphir Munk * This function spawns Ethernet devices out of a given PCI device. 17512eb4d010SOphir Munk * 17522eb4d010SOphir Munk * @param[in] pci_drv 17532eb4d010SOphir Munk * PCI driver structure (mlx5_driver). 17542eb4d010SOphir Munk * @param[in] pci_dev 17552eb4d010SOphir Munk * PCI device information. 17562eb4d010SOphir Munk * 17572eb4d010SOphir Munk * @return 17582eb4d010SOphir Munk * 0 on success, a negative errno value otherwise and rte_errno is set. 17592eb4d010SOphir Munk */ 17602eb4d010SOphir Munk int 17612eb4d010SOphir Munk mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 17622eb4d010SOphir Munk struct rte_pci_device *pci_dev) 17632eb4d010SOphir Munk { 17642eb4d010SOphir Munk struct ibv_device **ibv_list; 17652eb4d010SOphir Munk /* 17662eb4d010SOphir Munk * Number of found IB Devices matching with requested PCI BDF. 17672eb4d010SOphir Munk * nd != 1 means there are multiple IB devices over the same 17682eb4d010SOphir Munk * PCI device and we have representors and master. 17692eb4d010SOphir Munk */ 17702eb4d010SOphir Munk unsigned int nd = 0; 17712eb4d010SOphir Munk /* 17722eb4d010SOphir Munk * Number of found IB device Ports. nd = 1 and np = 1..n means 17732eb4d010SOphir Munk * we have the single multiport IB device, and there may be 17742eb4d010SOphir Munk * representors attached to some of found ports. 17752eb4d010SOphir Munk */ 17762eb4d010SOphir Munk unsigned int np = 0; 17772eb4d010SOphir Munk /* 17782eb4d010SOphir Munk * Number of DPDK ethernet devices to Spawn - either over 17792eb4d010SOphir Munk * multiple IB devices or multiple ports of single IB device. 17802eb4d010SOphir Munk * Actually this is the number of iterations to spawn. 
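 *
 * Example (illustrative): a single dual-port IB device gives
 * nd == 1 and np == 2, so up to two ports are spawned; two
 * single-port IB devices give nd == 2 with the same total.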
17812eb4d010SOphir Munk */ 17822eb4d010SOphir Munk unsigned int ns = 0; 17832eb4d010SOphir Munk /* 17842eb4d010SOphir Munk * Bonding device 17852eb4d010SOphir Munk * < 0 - no bonding device (single one) 17862eb4d010SOphir Munk * >= 0 - bonding device (value is slave PF index) 17872eb4d010SOphir Munk */ 17882eb4d010SOphir Munk int bd = -1; 17892eb4d010SOphir Munk struct mlx5_dev_spawn_data *list = NULL; 17902eb4d010SOphir Munk struct mlx5_dev_config dev_config; 1791d462a83cSMichael Baum unsigned int dev_config_vf; 17922eb4d010SOphir Munk int ret; 17932eb4d010SOphir Munk 17942eb4d010SOphir Munk if (rte_eal_process_type() == RTE_PROC_PRIMARY) 17952eb4d010SOphir Munk mlx5_pmd_socket_init(); 17962eb4d010SOphir Munk ret = mlx5_init_once(); 17972eb4d010SOphir Munk if (ret) { 17982eb4d010SOphir Munk DRV_LOG(ERR, "unable to init PMD global data: %s", 17992eb4d010SOphir Munk strerror(rte_errno)); 18002eb4d010SOphir Munk return -rte_errno; 18012eb4d010SOphir Munk } 18022eb4d010SOphir Munk errno = 0; 18032eb4d010SOphir Munk ibv_list = mlx5_glue->get_device_list(&ret); 18042eb4d010SOphir Munk if (!ibv_list) { 18052eb4d010SOphir Munk rte_errno = errno ? errno : ENOSYS; 18062eb4d010SOphir Munk DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 18072eb4d010SOphir Munk return -rte_errno; 18082eb4d010SOphir Munk } 18092eb4d010SOphir Munk /* 18102eb4d010SOphir Munk * First scan the list of all Infiniband devices to find 18112eb4d010SOphir Munk * matching ones, gathering into the list. 18122eb4d010SOphir Munk */ 18132eb4d010SOphir Munk struct ibv_device *ibv_match[ret + 1]; 18142eb4d010SOphir Munk int nl_route = mlx5_nl_init(NETLINK_ROUTE); 18152eb4d010SOphir Munk int nl_rdma = mlx5_nl_init(NETLINK_RDMA); 18162eb4d010SOphir Munk unsigned int i; 18172eb4d010SOphir Munk 18182eb4d010SOphir Munk while (ret-- > 0) { 18192eb4d010SOphir Munk struct rte_pci_addr pci_addr; 18202eb4d010SOphir Munk 18212eb4d010SOphir Munk DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 18222eb4d010SOphir Munk bd = mlx5_device_bond_pci_match 18232eb4d010SOphir Munk (ibv_list[ret], pci_dev, nl_rdma); 18242eb4d010SOphir Munk if (bd >= 0) { 18252eb4d010SOphir Munk /* 18262eb4d010SOphir Munk * Bonding device detected. Only one match is allowed, 18272eb4d010SOphir Munk * the bonding is supported over multi-port IB device, 18282eb4d010SOphir Munk * there should be no matches on representor PCI 18292eb4d010SOphir Munk * functions or non VF LAG bonding devices with 18302eb4d010SOphir Munk * specified address. 
18312eb4d010SOphir Munk */
18322eb4d010SOphir Munk if (nd) {
18332eb4d010SOphir Munk DRV_LOG(ERR,
18342eb4d010SOphir Munk "multiple PCI match on bonding device"
18352eb4d010SOphir Munk " \"%s\" found", ibv_list[ret]->name);
18362eb4d010SOphir Munk rte_errno = ENOENT;
18372eb4d010SOphir Munk ret = -rte_errno;
18382eb4d010SOphir Munk goto exit;
18392eb4d010SOphir Munk }
18402eb4d010SOphir Munk DRV_LOG(INFO, "PCI information matches for"
18412eb4d010SOphir Munk " slave %d bonding device \"%s\"",
18422eb4d010SOphir Munk bd, ibv_list[ret]->name);
18432eb4d010SOphir Munk ibv_match[nd++] = ibv_list[ret];
18442eb4d010SOphir Munk break;
18452eb4d010SOphir Munk }
18462eb4d010SOphir Munk if (mlx5_dev_to_pci_addr
18472eb4d010SOphir Munk (ibv_list[ret]->ibdev_path, &pci_addr))
18482eb4d010SOphir Munk continue;
18492eb4d010SOphir Munk if (pci_dev->addr.domain != pci_addr.domain ||
18502eb4d010SOphir Munk pci_dev->addr.bus != pci_addr.bus ||
18512eb4d010SOphir Munk pci_dev->addr.devid != pci_addr.devid ||
18522eb4d010SOphir Munk pci_dev->addr.function != pci_addr.function)
18532eb4d010SOphir Munk continue;
18542eb4d010SOphir Munk DRV_LOG(INFO, "PCI information matches for device \"%s\"",
18552eb4d010SOphir Munk ibv_list[ret]->name);
18562eb4d010SOphir Munk ibv_match[nd++] = ibv_list[ret];
18572eb4d010SOphir Munk }
18582eb4d010SOphir Munk ibv_match[nd] = NULL;
18592eb4d010SOphir Munk if (!nd) {
18602eb4d010SOphir Munk /* No device matches, just complain and bail out. */
18612eb4d010SOphir Munk DRV_LOG(WARNING,
18622eb4d010SOphir Munk "no Verbs device matches PCI device " PCI_PRI_FMT ","
18632eb4d010SOphir Munk " are kernel drivers loaded?",
18642eb4d010SOphir Munk pci_dev->addr.domain, pci_dev->addr.bus,
18652eb4d010SOphir Munk pci_dev->addr.devid, pci_dev->addr.function);
18662eb4d010SOphir Munk rte_errno = ENOENT;
18672eb4d010SOphir Munk ret = -rte_errno;
18682eb4d010SOphir Munk goto exit;
18692eb4d010SOphir Munk }
18702eb4d010SOphir Munk if (nd == 1) {
18712eb4d010SOphir Munk /*
18722eb4d010SOphir Munk * The single matching device found may have multiple ports.
18732eb4d010SOphir Munk * Each port may be a representor, so we have to check the port
18742eb4d010SOphir Munk * number and check for the existence of representors.
18752eb4d010SOphir Munk */
18762eb4d010SOphir Munk if (nl_rdma >= 0)
18772eb4d010SOphir Munk np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
18782eb4d010SOphir Munk if (!np)
18792eb4d010SOphir Munk DRV_LOG(WARNING, "cannot get number of ports"
18802eb4d010SOphir Munk " for IB device \"%s\"", ibv_match[0]->name);
18812eb4d010SOphir Munk if (bd >= 0 && !np) {
18822eb4d010SOphir Munk DRV_LOG(ERR, "cannot get ports"
18832eb4d010SOphir Munk " for bonding device");
18842eb4d010SOphir Munk rte_errno = ENOENT;
18852eb4d010SOphir Munk ret = -rte_errno;
18862eb4d010SOphir Munk goto exit;
18872eb4d010SOphir Munk }
18882eb4d010SOphir Munk }
18892eb4d010SOphir Munk #ifndef HAVE_MLX5DV_DR_DEVX_PORT
18902eb4d010SOphir Munk if (bd >= 0) {
18912eb4d010SOphir Munk /*
18922eb4d010SOphir Munk * This may happen if there is VF LAG kernel support and
18932eb4d010SOphir Munk * the application is compiled with an older rdma_core library.
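 * HAVE_MLX5DV_DR_DEVX_PORT is probed from the rdma_core headers at
 * build time, so the remedy is to rebuild DPDK against a newer
 * rdma_core.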
18942eb4d010SOphir Munk */ 18952eb4d010SOphir Munk DRV_LOG(ERR, 18962eb4d010SOphir Munk "No kernel/verbs support for VF LAG bonding found."); 18972eb4d010SOphir Munk rte_errno = ENOTSUP; 18982eb4d010SOphir Munk ret = -rte_errno; 18992eb4d010SOphir Munk goto exit; 19002eb4d010SOphir Munk } 19012eb4d010SOphir Munk #endif 19022eb4d010SOphir Munk /* 19032eb4d010SOphir Munk * Now we can determine the maximal 19042eb4d010SOphir Munk * amount of devices to be spawned. 19052eb4d010SOphir Munk */ 19062175c4dcSSuanming Mou list = mlx5_malloc(MLX5_MEM_ZERO, 19072eb4d010SOphir Munk sizeof(struct mlx5_dev_spawn_data) * 19082eb4d010SOphir Munk (np ? np : nd), 19092175c4dcSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 19102eb4d010SOphir Munk if (!list) { 19112eb4d010SOphir Munk DRV_LOG(ERR, "spawn data array allocation failure"); 19122eb4d010SOphir Munk rte_errno = ENOMEM; 19132eb4d010SOphir Munk ret = -rte_errno; 19142eb4d010SOphir Munk goto exit; 19152eb4d010SOphir Munk } 19162eb4d010SOphir Munk if (bd >= 0 || np > 1) { 19172eb4d010SOphir Munk /* 19182eb4d010SOphir Munk * Single IB device with multiple ports found, 19192eb4d010SOphir Munk * it may be E-Switch master device and representors. 19202eb4d010SOphir Munk * We have to perform identification through the ports. 19212eb4d010SOphir Munk */ 19222eb4d010SOphir Munk MLX5_ASSERT(nl_rdma >= 0); 19232eb4d010SOphir Munk MLX5_ASSERT(ns == 0); 19242eb4d010SOphir Munk MLX5_ASSERT(nd == 1); 19252eb4d010SOphir Munk MLX5_ASSERT(np); 19262eb4d010SOphir Munk for (i = 1; i <= np; ++i) { 19272eb4d010SOphir Munk list[ns].max_port = np; 1928834a9019SOphir Munk list[ns].phys_port = i; 1929834a9019SOphir Munk list[ns].phys_dev = ibv_match[0]; 19302eb4d010SOphir Munk list[ns].eth_dev = NULL; 19312eb4d010SOphir Munk list[ns].pci_dev = pci_dev; 19322eb4d010SOphir Munk list[ns].pf_bond = bd; 19332eb4d010SOphir Munk list[ns].ifindex = mlx5_nl_ifindex 1934834a9019SOphir Munk (nl_rdma, 1935834a9019SOphir Munk mlx5_os_get_dev_device_name 1936834a9019SOphir Munk (list[ns].phys_dev), i); 19372eb4d010SOphir Munk if (!list[ns].ifindex) { 19382eb4d010SOphir Munk /* 19392eb4d010SOphir Munk * No network interface index found for the 19402eb4d010SOphir Munk * specified port, it means there is no 19412eb4d010SOphir Munk * representor on this port. It's OK, 19422eb4d010SOphir Munk * there can be disabled ports, for example 19432eb4d010SOphir Munk * if sriov_numvfs < sriov_totalvfs. 19442eb4d010SOphir Munk */ 19452eb4d010SOphir Munk continue; 19462eb4d010SOphir Munk } 19472eb4d010SOphir Munk ret = -1; 19482eb4d010SOphir Munk if (nl_route >= 0) 19492eb4d010SOphir Munk ret = mlx5_nl_switch_info 19502eb4d010SOphir Munk (nl_route, 19512eb4d010SOphir Munk list[ns].ifindex, 19522eb4d010SOphir Munk &list[ns].info); 19532eb4d010SOphir Munk if (ret || (!list[ns].info.representor && 19542eb4d010SOphir Munk !list[ns].info.master)) { 19552eb4d010SOphir Munk /* 19562eb4d010SOphir Munk * We failed to recognize representors with 19572eb4d010SOphir Munk * Netlink, let's try to perform the task 19582eb4d010SOphir Munk * with sysfs. 
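 * The sysfs fallback classifies the port from the standard
 * phys_port_name and phys_switch_id attributes under
 * /sys/class/net/<ifname>/.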
19592eb4d010SOphir Munk */ 19602eb4d010SOphir Munk ret = mlx5_sysfs_switch_info 19612eb4d010SOphir Munk (list[ns].ifindex, 19622eb4d010SOphir Munk &list[ns].info); 19632eb4d010SOphir Munk } 19642a87415cSMichael Baum #ifdef HAVE_MLX5DV_DR_DEVX_PORT 19652eb4d010SOphir Munk if (!ret && bd >= 0) { 19662eb4d010SOphir Munk switch (list[ns].info.name_type) { 19672eb4d010SOphir Munk case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: 19682eb4d010SOphir Munk if (list[ns].info.port_name == bd) 19692eb4d010SOphir Munk ns++; 19702eb4d010SOphir Munk break; 1971420bbdaeSViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: 1972420bbdaeSViacheslav Ovsiienko /* Fallthrough */ 19732eb4d010SOphir Munk case MLX5_PHYS_PORT_NAME_TYPE_PFVF: 19742eb4d010SOphir Munk if (list[ns].info.pf_num == bd) 19752eb4d010SOphir Munk ns++; 19762eb4d010SOphir Munk break; 19772eb4d010SOphir Munk default: 19782eb4d010SOphir Munk break; 19792eb4d010SOphir Munk } 19802eb4d010SOphir Munk continue; 19812eb4d010SOphir Munk } 19822a87415cSMichael Baum #endif 19832eb4d010SOphir Munk if (!ret && (list[ns].info.representor ^ 19842eb4d010SOphir Munk list[ns].info.master)) 19852eb4d010SOphir Munk ns++; 19862eb4d010SOphir Munk } 19872eb4d010SOphir Munk if (!ns) { 19882eb4d010SOphir Munk DRV_LOG(ERR, 19892eb4d010SOphir Munk "unable to recognize master/representors" 19902eb4d010SOphir Munk " on the IB device with multiple ports"); 19912eb4d010SOphir Munk rte_errno = ENOENT; 19922eb4d010SOphir Munk ret = -rte_errno; 19932eb4d010SOphir Munk goto exit; 19942eb4d010SOphir Munk } 19952eb4d010SOphir Munk } else { 19962eb4d010SOphir Munk /* 19972eb4d010SOphir Munk * The existence of several matching entries (nd > 1) means 19982eb4d010SOphir Munk * port representors have been instantiated. No existing Verbs 19992eb4d010SOphir Munk * call nor sysfs entries can tell them apart, this can only 20002eb4d010SOphir Munk * be done through Netlink calls assuming kernel drivers are 20012eb4d010SOphir Munk * recent enough to support them. 20022eb4d010SOphir Munk * 20032eb4d010SOphir Munk * In the event of identification failure through Netlink, 20042eb4d010SOphir Munk * try again through sysfs, then: 20052eb4d010SOphir Munk * 20062eb4d010SOphir Munk * 1. A single IB device matches (nd == 1) with single 20072eb4d010SOphir Munk * port (np=0/1) and is not a representor, assume 20082eb4d010SOphir Munk * no switch support. 20092eb4d010SOphir Munk * 20102eb4d010SOphir Munk * 2. Otherwise no safe assumptions can be made; 20112eb4d010SOphir Munk * complain louder and bail out. 20122eb4d010SOphir Munk */ 20132eb4d010SOphir Munk for (i = 0; i != nd; ++i) { 20142eb4d010SOphir Munk memset(&list[ns].info, 0, sizeof(list[ns].info)); 20152eb4d010SOphir Munk list[ns].max_port = 1; 2016834a9019SOphir Munk list[ns].phys_port = 1; 2017834a9019SOphir Munk list[ns].phys_dev = ibv_match[i]; 20182eb4d010SOphir Munk list[ns].eth_dev = NULL; 20192eb4d010SOphir Munk list[ns].pci_dev = pci_dev; 20202eb4d010SOphir Munk list[ns].pf_bond = -1; 20212eb4d010SOphir Munk list[ns].ifindex = 0; 20222eb4d010SOphir Munk if (nl_rdma >= 0) 20232eb4d010SOphir Munk list[ns].ifindex = mlx5_nl_ifindex 2024834a9019SOphir Munk (nl_rdma, 2025834a9019SOphir Munk mlx5_os_get_dev_device_name 2026834a9019SOphir Munk (list[ns].phys_dev), 1); 20272eb4d010SOphir Munk if (!list[ns].ifindex) { 20282eb4d010SOphir Munk char ifname[IF_NAMESIZE]; 20292eb4d010SOphir Munk 20302eb4d010SOphir Munk /* 20312eb4d010SOphir Munk * Netlink failed, it may happen with old 20322eb4d010SOphir Munk * ib_core kernel driver (before 4.16). 
20332eb4d010SOphir Munk * We can assume an old driver is in use because
20342eb4d010SOphir Munk * here we are processing single-port IB
20352eb4d010SOphir Munk * devices. Let's try sysfs to retrieve
20362eb4d010SOphir Munk * the ifindex. The method works for
20372eb4d010SOphir Munk * master device only.
20382eb4d010SOphir Munk */
20392eb4d010SOphir Munk if (nd > 1) {
20402eb4d010SOphir Munk /*
20412eb4d010SOphir Munk * Multiple devices found, assume
20422eb4d010SOphir Munk * representors, cannot distinguish
20432eb4d010SOphir Munk * master/representor and retrieve
20442eb4d010SOphir Munk * ifindex via sysfs.
20452eb4d010SOphir Munk */
20462eb4d010SOphir Munk continue;
20472eb4d010SOphir Munk }
2048aec086c9SMatan Azrad ret = mlx5_get_ifname_sysfs
2049aec086c9SMatan Azrad (ibv_match[i]->ibdev_path, ifname);
20502eb4d010SOphir Munk if (!ret)
20512eb4d010SOphir Munk list[ns].ifindex =
20522eb4d010SOphir Munk if_nametoindex(ifname);
20532eb4d010SOphir Munk if (!list[ns].ifindex) {
20542eb4d010SOphir Munk /*
20552eb4d010SOphir Munk * No network interface index found
20562eb4d010SOphir Munk * for the specified device, it means
20572eb4d010SOphir Munk * it is neither a representor
20582eb4d010SOphir Munk * nor a master.
20592eb4d010SOphir Munk */
20602eb4d010SOphir Munk continue;
20612eb4d010SOphir Munk }
20622eb4d010SOphir Munk }
20632eb4d010SOphir Munk ret = -1;
20642eb4d010SOphir Munk if (nl_route >= 0)
20652eb4d010SOphir Munk ret = mlx5_nl_switch_info
20662eb4d010SOphir Munk (nl_route,
20672eb4d010SOphir Munk list[ns].ifindex,
20682eb4d010SOphir Munk &list[ns].info);
20692eb4d010SOphir Munk if (ret || (!list[ns].info.representor &&
20702eb4d010SOphir Munk !list[ns].info.master)) {
20712eb4d010SOphir Munk /*
20722eb4d010SOphir Munk * We failed to recognize representors with
20732eb4d010SOphir Munk * Netlink, let's try to perform the task
20742eb4d010SOphir Munk * with sysfs.
20752eb4d010SOphir Munk */
20762eb4d010SOphir Munk ret = mlx5_sysfs_switch_info
20772eb4d010SOphir Munk (list[ns].ifindex,
20782eb4d010SOphir Munk &list[ns].info);
20792eb4d010SOphir Munk }
20802eb4d010SOphir Munk if (!ret && (list[ns].info.representor ^
20812eb4d010SOphir Munk list[ns].info.master)) {
20822eb4d010SOphir Munk ns++;
20832eb4d010SOphir Munk } else if ((nd == 1) &&
20842eb4d010SOphir Munk !list[ns].info.representor &&
20852eb4d010SOphir Munk !list[ns].info.master) {
20862eb4d010SOphir Munk /*
20872eb4d010SOphir Munk * Single IB device with
20882eb4d010SOphir Munk * one physical port and
20892eb4d010SOphir Munk * attached network device.
20902eb4d010SOphir Munk * Maybe SR-IOV is not enabled
20912eb4d010SOphir Munk * or there are no representors.
20922eb4d010SOphir Munk */
20932eb4d010SOphir Munk DRV_LOG(INFO, "no E-Switch support detected");
20942eb4d010SOphir Munk ns++;
20952eb4d010SOphir Munk break;
20962eb4d010SOphir Munk }
20972eb4d010SOphir Munk }
20982eb4d010SOphir Munk if (!ns) {
20992eb4d010SOphir Munk DRV_LOG(ERR,
21002eb4d010SOphir Munk "unable to recognize master/representors"
21012eb4d010SOphir Munk " on multiple IB devices");
21022eb4d010SOphir Munk rte_errno = ENOENT;
21032eb4d010SOphir Munk ret = -rte_errno;
21042eb4d010SOphir Munk goto exit;
21052eb4d010SOphir Munk }
21062eb4d010SOphir Munk }
21072eb4d010SOphir Munk MLX5_ASSERT(ns);
21082eb4d010SOphir Munk /*
21092eb4d010SOphir Munk * Sort list to probe devices in natural order for the user's convenience
21102eb4d010SOphir Munk * (i.e. master first, then representors from lowest to highest ID).
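 *
 * Worked example (illustrative): a spawn list collected as
 * {representor #2, master, representor #0} is probed as
 * {master, representor #0, representor #2} after the qsort() below.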
21112eb4d010SOphir Munk */ 21122eb4d010SOphir Munk qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp); 21132eb4d010SOphir Munk /* Device specific configuration. */ 21142eb4d010SOphir Munk switch (pci_dev->id.device_id) { 21152eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 21162eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 21172eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 21182eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 21192eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 21202eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 21213ea12cadSRaslan Darawsheh case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: 2122d462a83cSMichael Baum dev_config_vf = 1; 21232eb4d010SOphir Munk break; 21242eb4d010SOphir Munk default: 2125d462a83cSMichael Baum dev_config_vf = 0; 21262eb4d010SOphir Munk break; 21272eb4d010SOphir Munk } 21282eb4d010SOphir Munk for (i = 0; i != ns; ++i) { 21292eb4d010SOphir Munk uint32_t restore; 21302eb4d010SOphir Munk 2131d462a83cSMichael Baum /* Default configuration. */ 2132d462a83cSMichael Baum memset(&dev_config, 0, sizeof(struct mlx5_dev_config)); 2133d462a83cSMichael Baum dev_config.vf = dev_config_vf; 2134d462a83cSMichael Baum dev_config.mps = MLX5_ARG_UNSET; 2135d462a83cSMichael Baum dev_config.dbnc = MLX5_ARG_UNSET; 2136d462a83cSMichael Baum dev_config.rx_vec_en = 1; 2137d462a83cSMichael Baum dev_config.txq_inline_max = MLX5_ARG_UNSET; 2138d462a83cSMichael Baum dev_config.txq_inline_min = MLX5_ARG_UNSET; 2139d462a83cSMichael Baum dev_config.txq_inline_mpw = MLX5_ARG_UNSET; 2140d462a83cSMichael Baum dev_config.txqs_inline = MLX5_ARG_UNSET; 2141d462a83cSMichael Baum dev_config.vf_nl_en = 1; 2142d462a83cSMichael Baum dev_config.mr_ext_memseg_en = 1; 2143d462a83cSMichael Baum dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; 2144d462a83cSMichael Baum dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; 2145d462a83cSMichael Baum dev_config.dv_esw_en = 1; 2146d462a83cSMichael Baum dev_config.dv_flow_en = 1; 2147d462a83cSMichael Baum dev_config.decap_en = 1; 2148d462a83cSMichael Baum dev_config.log_hp_size = MLX5_ARG_UNSET; 21492eb4d010SOphir Munk list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 21502eb4d010SOphir Munk &list[i], 2151d462a83cSMichael Baum &dev_config); 21522eb4d010SOphir Munk if (!list[i].eth_dev) { 21532eb4d010SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 21542eb4d010SOphir Munk break; 21552eb4d010SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 21562eb4d010SOphir Munk continue; 21572eb4d010SOphir Munk } 21582eb4d010SOphir Munk restore = list[i].eth_dev->data->dev_flags; 21592eb4d010SOphir Munk rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 21602eb4d010SOphir Munk /* Restore non-PCI flags cleared by the above call. */ 21612eb4d010SOphir Munk list[i].eth_dev->data->dev_flags |= restore; 21622eb4d010SOphir Munk rte_eth_dev_probing_finish(list[i].eth_dev); 21632eb4d010SOphir Munk } 21642eb4d010SOphir Munk if (i != ns) { 21652eb4d010SOphir Munk DRV_LOG(ERR, 21662eb4d010SOphir Munk "probe of PCI device " PCI_PRI_FMT " aborted after" 21672eb4d010SOphir Munk " encountering an error: %s", 21682eb4d010SOphir Munk pci_dev->addr.domain, pci_dev->addr.bus, 21692eb4d010SOphir Munk pci_dev->addr.devid, pci_dev->addr.function, 21702eb4d010SOphir Munk strerror(rte_errno)); 21712eb4d010SOphir Munk ret = -rte_errno; 21722eb4d010SOphir Munk /* Roll back. 
*/ 21732eb4d010SOphir Munk while (i--) { 21742eb4d010SOphir Munk if (!list[i].eth_dev) 21752eb4d010SOphir Munk continue; 21762eb4d010SOphir Munk mlx5_dev_close(list[i].eth_dev); 21772eb4d010SOphir Munk /* mac_addrs must not be freed because in dev_private */ 21782eb4d010SOphir Munk list[i].eth_dev->data->mac_addrs = NULL; 21792eb4d010SOphir Munk claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 21802eb4d010SOphir Munk } 21812eb4d010SOphir Munk /* Restore original error. */ 21822eb4d010SOphir Munk rte_errno = -ret; 21832eb4d010SOphir Munk } else { 21842eb4d010SOphir Munk ret = 0; 21852eb4d010SOphir Munk } 21862eb4d010SOphir Munk exit: 21872eb4d010SOphir Munk /* 21882eb4d010SOphir Munk * Do the routine cleanup: 21892eb4d010SOphir Munk * - close opened Netlink sockets 21902eb4d010SOphir Munk * - free allocated spawn data array 21912eb4d010SOphir Munk * - free the Infiniband device list 21922eb4d010SOphir Munk */ 21932eb4d010SOphir Munk if (nl_rdma >= 0) 21942eb4d010SOphir Munk close(nl_rdma); 21952eb4d010SOphir Munk if (nl_route >= 0) 21962eb4d010SOphir Munk close(nl_route); 21972eb4d010SOphir Munk if (list) 21982175c4dcSSuanming Mou mlx5_free(list); 21992eb4d010SOphir Munk MLX5_ASSERT(ibv_list); 22002eb4d010SOphir Munk mlx5_glue->free_device_list(ibv_list); 22012eb4d010SOphir Munk return ret; 22022eb4d010SOphir Munk } 22032eb4d010SOphir Munk 22042eb4d010SOphir Munk static int 22052eb4d010SOphir Munk mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config) 22062eb4d010SOphir Munk { 22072eb4d010SOphir Munk char *env; 22082eb4d010SOphir Munk int value; 22092eb4d010SOphir Munk 22102eb4d010SOphir Munk MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 22112eb4d010SOphir Munk /* Get environment variable to store. */ 22122eb4d010SOphir Munk env = getenv(MLX5_SHUT_UP_BF); 22132eb4d010SOphir Munk value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET; 22142eb4d010SOphir Munk if (config->dbnc == MLX5_ARG_UNSET) 22152eb4d010SOphir Munk setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1); 22162eb4d010SOphir Munk else 22172eb4d010SOphir Munk setenv(MLX5_SHUT_UP_BF, 22182eb4d010SOphir Munk config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1); 22192eb4d010SOphir Munk return value; 22202eb4d010SOphir Munk } 22212eb4d010SOphir Munk 22222eb4d010SOphir Munk static void 22232eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(int value) 22242eb4d010SOphir Munk { 22252eb4d010SOphir Munk MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 22262eb4d010SOphir Munk /* Restore the original environment variable state. */ 22272eb4d010SOphir Munk if (value == MLX5_ARG_UNSET) 22282eb4d010SOphir Munk unsetenv(MLX5_SHUT_UP_BF); 22292eb4d010SOphir Munk else 22302eb4d010SOphir Munk setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1); 22312eb4d010SOphir Munk } 22322eb4d010SOphir Munk 22332eb4d010SOphir Munk /** 22342eb4d010SOphir Munk * Extract pdn of PD object using DV API. 22352eb4d010SOphir Munk * 22362eb4d010SOphir Munk * @param[in] pd 22372eb4d010SOphir Munk * Pointer to the verbs PD object. 22382eb4d010SOphir Munk * @param[out] pdn 22392eb4d010SOphir Munk * Pointer to the PD object number variable. 22402eb4d010SOphir Munk * 22412eb4d010SOphir Munk * @return 22422eb4d010SOphir Munk * 0 on success, error value otherwise. 
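 *
 * Usage sketch (illustrative):
 *
 *	uint32_t pdn;
 *
 *	if (mlx5_os_get_pdn(pd, &pdn) == 0)
 *		(pdn may then be passed to DevX object creation)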
22432eb4d010SOphir Munk */ 22442eb4d010SOphir Munk int 22452eb4d010SOphir Munk mlx5_os_get_pdn(void *pd, uint32_t *pdn) 22462eb4d010SOphir Munk { 22472eb4d010SOphir Munk #ifdef HAVE_IBV_FLOW_DV_SUPPORT 22482eb4d010SOphir Munk struct mlx5dv_obj obj; 22492eb4d010SOphir Munk struct mlx5dv_pd pd_info; 22502eb4d010SOphir Munk int ret = 0; 22512eb4d010SOphir Munk 22522eb4d010SOphir Munk obj.pd.in = pd; 22532eb4d010SOphir Munk obj.pd.out = &pd_info; 22542eb4d010SOphir Munk ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); 22552eb4d010SOphir Munk if (ret) { 22562eb4d010SOphir Munk DRV_LOG(DEBUG, "Fail to get PD object info"); 22572eb4d010SOphir Munk return ret; 22582eb4d010SOphir Munk } 22592eb4d010SOphir Munk *pdn = pd_info.pdn; 22602eb4d010SOphir Munk return 0; 22612eb4d010SOphir Munk #else 22622eb4d010SOphir Munk (void)pd; 22632eb4d010SOphir Munk (void)pdn; 22642eb4d010SOphir Munk return -ENOTSUP; 22652eb4d010SOphir Munk #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 22662eb4d010SOphir Munk } 22672eb4d010SOphir Munk 22682eb4d010SOphir Munk /** 22692eb4d010SOphir Munk * Function API to open IB device. 22702eb4d010SOphir Munk * 22712eb4d010SOphir Munk * This function calls the Linux glue APIs to open a device. 22722eb4d010SOphir Munk * 22732eb4d010SOphir Munk * @param[in] spawn 22742eb4d010SOphir Munk * Pointer to the IB device attributes (name, port, etc). 22752eb4d010SOphir Munk * @param[out] config 22762eb4d010SOphir Munk * Pointer to device configuration structure. 22772eb4d010SOphir Munk * @param[out] sh 22782eb4d010SOphir Munk * Pointer to shared context structure. 22792eb4d010SOphir Munk * 22802eb4d010SOphir Munk * @return 22812eb4d010SOphir Munk * 0 on success, a positive error value otherwise. 22822eb4d010SOphir Munk */ 22832eb4d010SOphir Munk int 22842eb4d010SOphir Munk mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, 22852eb4d010SOphir Munk const struct mlx5_dev_config *config, 22862eb4d010SOphir Munk struct mlx5_dev_ctx_shared *sh) 22872eb4d010SOphir Munk { 22882eb4d010SOphir Munk int dbmap_env; 22892eb4d010SOphir Munk int err = 0; 2290d133f4cdSViacheslav Ovsiienko 2291d133f4cdSViacheslav Ovsiienko sh->numa_node = spawn->pci_dev->device.numa_node; 2292d133f4cdSViacheslav Ovsiienko pthread_mutex_init(&sh->txpp.mutex, NULL); 22932eb4d010SOphir Munk /* 22942eb4d010SOphir Munk * Configure environment variable "MLX5_BF_SHUT_UP" 22952eb4d010SOphir Munk * before the device creation. The rdma_core library 22962eb4d010SOphir Munk * checks the variable at device creation and 22972eb4d010SOphir Munk * stores the result internally. 22982eb4d010SOphir Munk */ 22992eb4d010SOphir Munk dbmap_env = mlx5_config_doorbell_mapping_env(config); 23002eb4d010SOphir Munk /* Try to open IB device with DV first, then usual Verbs. */ 23012eb4d010SOphir Munk errno = 0; 2302834a9019SOphir Munk sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev); 23032eb4d010SOphir Munk if (sh->ctx) { 23042eb4d010SOphir Munk sh->devx = 1; 23052eb4d010SOphir Munk DRV_LOG(DEBUG, "DevX is supported"); 23062eb4d010SOphir Munk /* The device is created, no need for environment. */ 23072eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(dbmap_env); 23082eb4d010SOphir Munk } else { 23092eb4d010SOphir Munk /* The environment variable is still configured. */ 2310834a9019SOphir Munk sh->ctx = mlx5_glue->open_device(spawn->phys_dev); 23112eb4d010SOphir Munk err = errno ? 
errno : ENODEV; 23122eb4d010SOphir Munk /* 23132eb4d010SOphir Munk * The environment variable is not needed anymore, 23142eb4d010SOphir Munk * all device creation attempts are completed. 23152eb4d010SOphir Munk */ 23162eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(dbmap_env); 23172eb4d010SOphir Munk if (!sh->ctx) 23182eb4d010SOphir Munk return err; 23192eb4d010SOphir Munk DRV_LOG(DEBUG, "DevX is NOT supported"); 23202eb4d010SOphir Munk err = 0; 23212eb4d010SOphir Munk } 232281c3b977SViacheslav Ovsiienko if (!err && sh->ctx) { 232381c3b977SViacheslav Ovsiienko /* Hint libmlx5 to use PMD allocator for data plane resources */ 232481c3b977SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 232581c3b977SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 232681c3b977SViacheslav Ovsiienko (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ 232781c3b977SViacheslav Ovsiienko .alloc = &mlx5_alloc_verbs_buf, 232881c3b977SViacheslav Ovsiienko .free = &mlx5_free_verbs_buf, 232981c3b977SViacheslav Ovsiienko .data = sh, 233081c3b977SViacheslav Ovsiienko })); 233181c3b977SViacheslav Ovsiienko } 23322eb4d010SOphir Munk return err; 23332eb4d010SOphir Munk } 23342eb4d010SOphir Munk 23352eb4d010SOphir Munk /** 23362eb4d010SOphir Munk * Install shared asynchronous device events handler. 23372eb4d010SOphir Munk * This function is implemented to support event sharing 23382eb4d010SOphir Munk * between multiple ports of single IB device. 23392eb4d010SOphir Munk * 23402eb4d010SOphir Munk * @param sh 23412eb4d010SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 23422eb4d010SOphir Munk */ 23432eb4d010SOphir Munk void 23442eb4d010SOphir Munk mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 23452eb4d010SOphir Munk { 23462eb4d010SOphir Munk int ret; 23472eb4d010SOphir Munk int flags; 23482eb4d010SOphir Munk 23492eb4d010SOphir Munk sh->intr_handle.fd = -1; 23502eb4d010SOphir Munk flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL); 23512eb4d010SOphir Munk ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd, 23522eb4d010SOphir Munk F_SETFL, flags | O_NONBLOCK); 23532eb4d010SOphir Munk if (ret) { 23542eb4d010SOphir Munk DRV_LOG(INFO, "failed to change file descriptor async event" 23552eb4d010SOphir Munk " queue"); 23562eb4d010SOphir Munk } else { 23572eb4d010SOphir Munk sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd; 23582eb4d010SOphir Munk sh->intr_handle.type = RTE_INTR_HANDLE_EXT; 23592eb4d010SOphir Munk if (rte_intr_callback_register(&sh->intr_handle, 23602eb4d010SOphir Munk mlx5_dev_interrupt_handler, sh)) { 23612eb4d010SOphir Munk DRV_LOG(INFO, "Fail to install the shared interrupt."); 23622eb4d010SOphir Munk sh->intr_handle.fd = -1; 23632eb4d010SOphir Munk } 23642eb4d010SOphir Munk } 23652eb4d010SOphir Munk if (sh->devx) { 23662eb4d010SOphir Munk #ifdef HAVE_IBV_DEVX_ASYNC 23672eb4d010SOphir Munk sh->intr_handle_devx.fd = -1; 236821b7c452SOphir Munk sh->devx_comp = 236921b7c452SOphir Munk (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx); 237021b7c452SOphir Munk struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp; 237121b7c452SOphir Munk if (!devx_comp) { 23722eb4d010SOphir Munk DRV_LOG(INFO, "failed to allocate devx_comp."); 23732eb4d010SOphir Munk return; 23742eb4d010SOphir Munk } 237521b7c452SOphir Munk flags = fcntl(devx_comp->fd, F_GETFL); 237621b7c452SOphir Munk ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK); 23772eb4d010SOphir Munk if (ret) { 23782eb4d010SOphir Munk DRV_LOG(INFO, "failed to change file descriptor" 
23792eb4d010SOphir Munk " devx comp"); 23802eb4d010SOphir Munk return; 23812eb4d010SOphir Munk } 238221b7c452SOphir Munk sh->intr_handle_devx.fd = devx_comp->fd; 23832eb4d010SOphir Munk sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT; 23842eb4d010SOphir Munk if (rte_intr_callback_register(&sh->intr_handle_devx, 23852eb4d010SOphir Munk mlx5_dev_interrupt_handler_devx, sh)) { 23862eb4d010SOphir Munk DRV_LOG(INFO, "Fail to install the devx shared" 23872eb4d010SOphir Munk " interrupt."); 23882eb4d010SOphir Munk sh->intr_handle_devx.fd = -1; 23892eb4d010SOphir Munk } 23902eb4d010SOphir Munk #endif /* HAVE_IBV_DEVX_ASYNC */ 23912eb4d010SOphir Munk } 23922eb4d010SOphir Munk } 23932eb4d010SOphir Munk 23942eb4d010SOphir Munk /** 23952eb4d010SOphir Munk * Uninstall shared asynchronous device events handler. 23962eb4d010SOphir Munk * This function is implemented to support event sharing 23972eb4d010SOphir Munk * between multiple ports of single IB device. 23982eb4d010SOphir Munk * 23992eb4d010SOphir Munk * @param dev 24002eb4d010SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 24012eb4d010SOphir Munk */ 24022eb4d010SOphir Munk void 24032eb4d010SOphir Munk mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) 24042eb4d010SOphir Munk { 24052eb4d010SOphir Munk if (sh->intr_handle.fd >= 0) 24062eb4d010SOphir Munk mlx5_intr_callback_unregister(&sh->intr_handle, 24072eb4d010SOphir Munk mlx5_dev_interrupt_handler, sh); 24082eb4d010SOphir Munk #ifdef HAVE_IBV_DEVX_ASYNC 24092eb4d010SOphir Munk if (sh->intr_handle_devx.fd >= 0) 24102eb4d010SOphir Munk rte_intr_callback_unregister(&sh->intr_handle_devx, 24112eb4d010SOphir Munk mlx5_dev_interrupt_handler_devx, sh); 24122eb4d010SOphir Munk if (sh->devx_comp) 24132eb4d010SOphir Munk mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); 24142eb4d010SOphir Munk #endif 24152eb4d010SOphir Munk } 2416042f5c94SOphir Munk 241773bf9235SOphir Munk /** 241873bf9235SOphir Munk * Read statistics by a named counter. 241973bf9235SOphir Munk * 242073bf9235SOphir Munk * @param[in] priv 242173bf9235SOphir Munk * Pointer to the private device data structure. 242273bf9235SOphir Munk * @param[in] ctr_name 242373bf9235SOphir Munk * Pointer to the name of the statistic counter to read. 242473bf9235SOphir Munk * @param[out] stat 242573bf9235SOphir Munk * Pointer to read statistic value. 242673bf9235SOphir Munk * @return 242773bf9235SOphir Munk * 0 on success and stat is valid, 1 if failed to read the value and 242873bf9235SOphir Munk * rte_errno is set. 242973bf9235SOphir Munk * 243073bf9235SOphir Munk */ 243173bf9235SOphir Munk int 243273bf9235SOphir Munk mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, 243373bf9235SOphir Munk uint64_t *stat) 243473bf9235SOphir Munk { 243573bf9235SOphir Munk int fd; 243673bf9235SOphir Munk 243773bf9235SOphir Munk if (priv->sh) { 243873bf9235SOphir Munk MKSTR(path, "%s/ports/%d/hw_counters/%s", 243973bf9235SOphir Munk priv->sh->ibdev_path, 244073bf9235SOphir Munk priv->dev_port, 244173bf9235SOphir Munk ctr_name); 244273bf9235SOphir Munk fd = open(path, O_RDONLY); 2443038e7fc0SShy Shyman /* 2444038e7fc0SShy Shyman * in switchdev the file location is not per port 2445038e7fc0SShy Shyman * but rather in <ibdev_path>/hw_counters/<file_name>.
2446038e7fc0SShy Shyman */ 2447038e7fc0SShy Shyman if (fd == -1) { 2448038e7fc0SShy Shyman MKSTR(path1, "%s/hw_counters/%s", 2449038e7fc0SShy Shyman priv->sh->ibdev_path, 2450038e7fc0SShy Shyman ctr_name); 2451038e7fc0SShy Shyman fd = open(path1, O_RDONLY); 2452038e7fc0SShy Shyman } 245373bf9235SOphir Munk if (fd != -1) { 245473bf9235SOphir Munk char buf[21] = {'\0'}; 245573bf9235SOphir Munk ssize_t n = read(fd, buf, sizeof(buf)); 245673bf9235SOphir Munk 245773bf9235SOphir Munk close(fd); 245873bf9235SOphir Munk if (n != -1) { 245973bf9235SOphir Munk *stat = strtoull(buf, NULL, 10); 246073bf9235SOphir Munk return 0; 246173bf9235SOphir Munk } 246273bf9235SOphir Munk } 246373bf9235SOphir Munk } 246473bf9235SOphir Munk *stat = 0; 246573bf9235SOphir Munk return 1; 246673bf9235SOphir Munk } 246773bf9235SOphir Munk 246873bf9235SOphir Munk /** 2469d5ed8aa9SOphir Munk * Set the reg_mr and dereg_mr callbacks 2470d5ed8aa9SOphir Munk * 2471d5ed8aa9SOphir Munk * @param[out] reg_mr_cb 2472d5ed8aa9SOphir Munk * Pointer to reg_mr func 2473d5ed8aa9SOphir Munk * @param[out] dereg_mr_cb 2474d5ed8aa9SOphir Munk * Pointer to dereg_mr func 2475d5ed8aa9SOphir Munk * 2476d5ed8aa9SOphir Munk */ 2477d5ed8aa9SOphir Munk void 2478d5ed8aa9SOphir Munk mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, 2479d5ed8aa9SOphir Munk mlx5_dereg_mr_t *dereg_mr_cb) 2480d5ed8aa9SOphir Munk { 2481*db12615bSOphir Munk *reg_mr_cb = mlx5_mr_verbs_ops.reg_mr; 2482*db12615bSOphir Munk *dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr; 2483d5ed8aa9SOphir Munk } 2484d5ed8aa9SOphir Munk 2485ab27cdd9SOphir Munk /** 2486ab27cdd9SOphir Munk * Remove a MAC address from device 2487ab27cdd9SOphir Munk * 2488ab27cdd9SOphir Munk * @param dev 2489ab27cdd9SOphir Munk * Pointer to Ethernet device structure. 2490ab27cdd9SOphir Munk * @param index 2491ab27cdd9SOphir Munk * MAC address index. 2492ab27cdd9SOphir Munk */ 2493ab27cdd9SOphir Munk void 2494ab27cdd9SOphir Munk mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 2495ab27cdd9SOphir Munk { 2496ab27cdd9SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 2497ab27cdd9SOphir Munk const int vf = priv->config.vf; 2498ab27cdd9SOphir Munk 2499ab27cdd9SOphir Munk if (vf) 2500ab27cdd9SOphir Munk mlx5_nl_mac_addr_remove(priv->nl_socket_route, 2501ab27cdd9SOphir Munk mlx5_ifindex(dev), priv->mac_own, 2502ab27cdd9SOphir Munk &dev->data->mac_addrs[index], index); 2503ab27cdd9SOphir Munk } 2504ab27cdd9SOphir Munk 2505ab27cdd9SOphir Munk /** 2506ab27cdd9SOphir Munk * Add a MAC address to the device 2507ab27cdd9SOphir Munk * 2508ab27cdd9SOphir Munk * @param dev 2509ab27cdd9SOphir Munk * Pointer to Ethernet device structure. 2510ab27cdd9SOphir Munk * @param mac_addr 2511ab27cdd9SOphir Munk * MAC address to register. 2512ab27cdd9SOphir Munk * @param index 2513ab27cdd9SOphir Munk * MAC address index.
2514ab27cdd9SOphir Munk * 2515ab27cdd9SOphir Munk * @return 2516ab27cdd9SOphir Munk * 0 on success, a negative errno value otherwise 2517ab27cdd9SOphir Munk */ 2518ab27cdd9SOphir Munk int 2519ab27cdd9SOphir Munk mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, 2520ab27cdd9SOphir Munk uint32_t index) 2521ab27cdd9SOphir Munk { 2522ab27cdd9SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 2523ab27cdd9SOphir Munk const int vf = priv->config.vf; 2524ab27cdd9SOphir Munk int ret = 0; 2525ab27cdd9SOphir Munk 2526ab27cdd9SOphir Munk if (vf) 2527ab27cdd9SOphir Munk ret = mlx5_nl_mac_addr_add(priv->nl_socket_route, 2528ab27cdd9SOphir Munk mlx5_ifindex(dev), priv->mac_own, 2529ab27cdd9SOphir Munk mac, index); 2530ab27cdd9SOphir Munk return ret; 2531ab27cdd9SOphir Munk } 2532ab27cdd9SOphir Munk 2533ab27cdd9SOphir Munk /** 2534ab27cdd9SOphir Munk * Modify a VF MAC address 2535ab27cdd9SOphir Munk * 2536ab27cdd9SOphir Munk * @param priv 2537ab27cdd9SOphir Munk * Pointer to device private data. 2538ab27cdd9SOphir Munk * @param mac_addr 2539ab27cdd9SOphir Munk * MAC address to modify into. 2540ab27cdd9SOphir Munk * @param iface_idx 2541ab27cdd9SOphir Munk * Net device interface index 2542ab27cdd9SOphir Munk * @param vf_index 2543ab27cdd9SOphir Munk * VF index 2544ab27cdd9SOphir Munk * 2545ab27cdd9SOphir Munk * @return 2546ab27cdd9SOphir Munk * 0 on success, a negative errno value otherwise 2547ab27cdd9SOphir Munk */ 2548ab27cdd9SOphir Munk int 2549ab27cdd9SOphir Munk mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, 2550ab27cdd9SOphir Munk unsigned int iface_idx, 2551ab27cdd9SOphir Munk struct rte_ether_addr *mac_addr, 2552ab27cdd9SOphir Munk int vf_index) 2553ab27cdd9SOphir Munk { 2554ab27cdd9SOphir Munk return mlx5_nl_vf_mac_addr_modify 2555ab27cdd9SOphir Munk (priv->nl_socket_route, iface_idx, mac_addr, vf_index); 2556ab27cdd9SOphir Munk } 2557ab27cdd9SOphir Munk 25584d18abd1SOphir Munk /** 25594d18abd1SOphir Munk * Set device promiscuous mode 25604d18abd1SOphir Munk * 25614d18abd1SOphir Munk * @param dev 25624d18abd1SOphir Munk * Pointer to Ethernet device structure. 25634d18abd1SOphir Munk * @param enable 25644d18abd1SOphir Munk * 0 - promiscuous is disabled, otherwise - enabled 25654d18abd1SOphir Munk * 25664d18abd1SOphir Munk * @return 25674d18abd1SOphir Munk * 0 on success, a negative error value otherwise 25684d18abd1SOphir Munk */ 25694d18abd1SOphir Munk int 25704d18abd1SOphir Munk mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable) 25714d18abd1SOphir Munk { 25724d18abd1SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 25734d18abd1SOphir Munk 25744d18abd1SOphir Munk return mlx5_nl_promisc(priv->nl_socket_route, 25754d18abd1SOphir Munk mlx5_ifindex(dev), !!enable); 25764d18abd1SOphir Munk } 25774d18abd1SOphir Munk 25784d18abd1SOphir Munk /** 25794d18abd1SOphir Munk * Set device allmulticast mode 25804d18abd1SOphir Munk * 25814d18abd1SOphir Munk * @param dev 25824d18abd1SOphir Munk * Pointer to Ethernet device structure.
25834d18abd1SOphir Munk * @param enable 25844d18abd1SOphir Munk * 0 - all multicast is disabled, otherwise - enabled 25854d18abd1SOphir Munk * 25864d18abd1SOphir Munk * @return 25874d18abd1SOphir Munk * 0 on success, a negative error value otherwise 25884d18abd1SOphir Munk */ 25894d18abd1SOphir Munk int 25904d18abd1SOphir Munk mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable) 25914d18abd1SOphir Munk { 25924d18abd1SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 25934d18abd1SOphir Munk 25944d18abd1SOphir Munk return mlx5_nl_allmulti(priv->nl_socket_route, 25954d18abd1SOphir Munk mlx5_ifindex(dev), !!enable); 25964d18abd1SOphir Munk } 25974d18abd1SOphir Munk 2598f00f6562SOphir Munk /** 2599f00f6562SOphir Munk * Flush device MAC addresses 2600f00f6562SOphir Munk * 2601f00f6562SOphir Munk * @param dev 2602f00f6562SOphir Munk * Pointer to Ethernet device structure. 2603f00f6562SOphir Munk * 2604f00f6562SOphir Munk */ 2605f00f6562SOphir Munk void 2606f00f6562SOphir Munk mlx5_os_mac_addr_flush(struct rte_eth_dev *dev) 2607f00f6562SOphir Munk { 2608f00f6562SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 2609f00f6562SOphir Munk 2610f00f6562SOphir Munk mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev), 2611f00f6562SOphir Munk dev->data->mac_addrs, 2612f00f6562SOphir Munk MLX5_MAX_MAC_ADDRESSES, priv->mac_own); 2613f00f6562SOphir Munk } 2614f00f6562SOphir Munk 2615042f5c94SOphir Munk const struct eth_dev_ops mlx5_os_dev_ops = { 2616042f5c94SOphir Munk .dev_configure = mlx5_dev_configure, 2617042f5c94SOphir Munk .dev_start = mlx5_dev_start, 2618042f5c94SOphir Munk .dev_stop = mlx5_dev_stop, 2619042f5c94SOphir Munk .dev_set_link_down = mlx5_set_link_down, 2620042f5c94SOphir Munk .dev_set_link_up = mlx5_set_link_up, 2621042f5c94SOphir Munk .dev_close = mlx5_dev_close, 2622042f5c94SOphir Munk .promiscuous_enable = mlx5_promiscuous_enable, 2623042f5c94SOphir Munk .promiscuous_disable = mlx5_promiscuous_disable, 2624042f5c94SOphir Munk .allmulticast_enable = mlx5_allmulticast_enable, 2625042f5c94SOphir Munk .allmulticast_disable = mlx5_allmulticast_disable, 2626042f5c94SOphir Munk .link_update = mlx5_link_update, 2627042f5c94SOphir Munk .stats_get = mlx5_stats_get, 2628042f5c94SOphir Munk .stats_reset = mlx5_stats_reset, 2629042f5c94SOphir Munk .xstats_get = mlx5_xstats_get, 2630042f5c94SOphir Munk .xstats_reset = mlx5_xstats_reset, 2631042f5c94SOphir Munk .xstats_get_names = mlx5_xstats_get_names, 2632042f5c94SOphir Munk .fw_version_get = mlx5_fw_version_get, 2633042f5c94SOphir Munk .dev_infos_get = mlx5_dev_infos_get, 2634b94d93caSViacheslav Ovsiienko .read_clock = mlx5_txpp_read_clock, 2635042f5c94SOphir Munk .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 2636042f5c94SOphir Munk .vlan_filter_set = mlx5_vlan_filter_set, 2637042f5c94SOphir Munk .rx_queue_setup = mlx5_rx_queue_setup, 2638042f5c94SOphir Munk .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 2639042f5c94SOphir Munk .tx_queue_setup = mlx5_tx_queue_setup, 2640042f5c94SOphir Munk .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 2641042f5c94SOphir Munk .rx_queue_release = mlx5_rx_queue_release, 2642042f5c94SOphir Munk .tx_queue_release = mlx5_tx_queue_release, 2643161d103bSViacheslav Ovsiienko .rx_queue_start = mlx5_rx_queue_start, 2644161d103bSViacheslav Ovsiienko .rx_queue_stop = mlx5_rx_queue_stop, 2645161d103bSViacheslav Ovsiienko .tx_queue_start = mlx5_tx_queue_start, 2646161d103bSViacheslav Ovsiienko .tx_queue_stop = mlx5_tx_queue_stop, 2647042f5c94SOphir Munk 
.flow_ctrl_get = mlx5_dev_get_flow_ctrl, 2648042f5c94SOphir Munk .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 2649042f5c94SOphir Munk .mac_addr_remove = mlx5_mac_addr_remove, 2650042f5c94SOphir Munk .mac_addr_add = mlx5_mac_addr_add, 2651042f5c94SOphir Munk .mac_addr_set = mlx5_mac_addr_set, 2652042f5c94SOphir Munk .set_mc_addr_list = mlx5_set_mc_addr_list, 2653042f5c94SOphir Munk .mtu_set = mlx5_dev_set_mtu, 2654042f5c94SOphir Munk .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 2655042f5c94SOphir Munk .vlan_offload_set = mlx5_vlan_offload_set, 2656042f5c94SOphir Munk .reta_update = mlx5_dev_rss_reta_update, 2657042f5c94SOphir Munk .reta_query = mlx5_dev_rss_reta_query, 2658042f5c94SOphir Munk .rss_hash_update = mlx5_rss_hash_update, 2659042f5c94SOphir Munk .rss_hash_conf_get = mlx5_rss_hash_conf_get, 2660042f5c94SOphir Munk .filter_ctrl = mlx5_dev_filter_ctrl, 2661042f5c94SOphir Munk .rxq_info_get = mlx5_rxq_info_get, 2662042f5c94SOphir Munk .txq_info_get = mlx5_txq_info_get, 2663042f5c94SOphir Munk .rx_burst_mode_get = mlx5_rx_burst_mode_get, 2664042f5c94SOphir Munk .tx_burst_mode_get = mlx5_tx_burst_mode_get, 2665042f5c94SOphir Munk .rx_queue_intr_enable = mlx5_rx_intr_enable, 2666042f5c94SOphir Munk .rx_queue_intr_disable = mlx5_rx_intr_disable, 2667042f5c94SOphir Munk .is_removed = mlx5_is_removed, 2668042f5c94SOphir Munk .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, 2669042f5c94SOphir Munk .get_module_info = mlx5_get_module_info, 2670042f5c94SOphir Munk .get_module_eeprom = mlx5_get_module_eeprom, 2671042f5c94SOphir Munk .hairpin_cap_get = mlx5_hairpin_cap_get, 2672042f5c94SOphir Munk .mtr_ops_get = mlx5_flow_meter_ops_get, 267337cd4501SBing Zhao .hairpin_bind = mlx5_hairpin_bind, 267437cd4501SBing Zhao .hairpin_unbind = mlx5_hairpin_unbind, 267502109eaeSBing Zhao .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, 267637cd4501SBing Zhao .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, 267737cd4501SBing Zhao .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, 267837cd4501SBing Zhao .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, 2679042f5c94SOphir Munk }; 2680042f5c94SOphir Munk 2681042f5c94SOphir Munk /* Available operations from secondary process. 
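 * Only statistics, device information, queue state and module data query callbacks are exposed to secondary processes.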
*/ 2682042f5c94SOphir Munk const struct eth_dev_ops mlx5_os_dev_sec_ops = { 2683042f5c94SOphir Munk .stats_get = mlx5_stats_get, 2684042f5c94SOphir Munk .stats_reset = mlx5_stats_reset, 2685042f5c94SOphir Munk .xstats_get = mlx5_xstats_get, 2686042f5c94SOphir Munk .xstats_reset = mlx5_xstats_reset, 2687042f5c94SOphir Munk .xstats_get_names = mlx5_xstats_get_names, 2688042f5c94SOphir Munk .fw_version_get = mlx5_fw_version_get, 2689042f5c94SOphir Munk .dev_infos_get = mlx5_dev_infos_get, 2690b94d93caSViacheslav Ovsiienko .read_clock = mlx5_txpp_read_clock, 2691161d103bSViacheslav Ovsiienko .rx_queue_start = mlx5_rx_queue_start, 2692161d103bSViacheslav Ovsiienko .rx_queue_stop = mlx5_rx_queue_stop, 2693161d103bSViacheslav Ovsiienko .tx_queue_start = mlx5_tx_queue_start, 2694161d103bSViacheslav Ovsiienko .tx_queue_stop = mlx5_tx_queue_stop, 2695042f5c94SOphir Munk .rxq_info_get = mlx5_rxq_info_get, 2696042f5c94SOphir Munk .txq_info_get = mlx5_txq_info_get, 2697042f5c94SOphir Munk .rx_burst_mode_get = mlx5_rx_burst_mode_get, 2698042f5c94SOphir Munk .tx_burst_mode_get = mlx5_tx_burst_mode_get, 2699042f5c94SOphir Munk .get_module_info = mlx5_get_module_info, 2700042f5c94SOphir Munk .get_module_eeprom = mlx5_get_module_eeprom, 2701042f5c94SOphir Munk }; 2702042f5c94SOphir Munk 2703042f5c94SOphir Munk /* Available operations in flow isolated mode. */ 2704042f5c94SOphir Munk const struct eth_dev_ops mlx5_os_dev_ops_isolate = { 2705042f5c94SOphir Munk .dev_configure = mlx5_dev_configure, 2706042f5c94SOphir Munk .dev_start = mlx5_dev_start, 2707042f5c94SOphir Munk .dev_stop = mlx5_dev_stop, 2708042f5c94SOphir Munk .dev_set_link_down = mlx5_set_link_down, 2709042f5c94SOphir Munk .dev_set_link_up = mlx5_set_link_up, 2710042f5c94SOphir Munk .dev_close = mlx5_dev_close, 2711042f5c94SOphir Munk .promiscuous_enable = mlx5_promiscuous_enable, 2712042f5c94SOphir Munk .promiscuous_disable = mlx5_promiscuous_disable, 2713042f5c94SOphir Munk .allmulticast_enable = mlx5_allmulticast_enable, 2714042f5c94SOphir Munk .allmulticast_disable = mlx5_allmulticast_disable, 2715042f5c94SOphir Munk .link_update = mlx5_link_update, 2716042f5c94SOphir Munk .stats_get = mlx5_stats_get, 2717042f5c94SOphir Munk .stats_reset = mlx5_stats_reset, 2718042f5c94SOphir Munk .xstats_get = mlx5_xstats_get, 2719042f5c94SOphir Munk .xstats_reset = mlx5_xstats_reset, 2720042f5c94SOphir Munk .xstats_get_names = mlx5_xstats_get_names, 2721042f5c94SOphir Munk .fw_version_get = mlx5_fw_version_get, 2722042f5c94SOphir Munk .dev_infos_get = mlx5_dev_infos_get, 2723b94d93caSViacheslav Ovsiienko .read_clock = mlx5_txpp_read_clock, 2724042f5c94SOphir Munk .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 2725042f5c94SOphir Munk .vlan_filter_set = mlx5_vlan_filter_set, 2726042f5c94SOphir Munk .rx_queue_setup = mlx5_rx_queue_setup, 2727042f5c94SOphir Munk .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, 2728042f5c94SOphir Munk .tx_queue_setup = mlx5_tx_queue_setup, 2729042f5c94SOphir Munk .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, 2730042f5c94SOphir Munk .rx_queue_release = mlx5_rx_queue_release, 2731042f5c94SOphir Munk .tx_queue_release = mlx5_tx_queue_release, 2732161d103bSViacheslav Ovsiienko .rx_queue_start = mlx5_rx_queue_start, 2733161d103bSViacheslav Ovsiienko .rx_queue_stop = mlx5_rx_queue_stop, 2734161d103bSViacheslav Ovsiienko .tx_queue_start = mlx5_tx_queue_start, 2735161d103bSViacheslav Ovsiienko .tx_queue_stop = mlx5_tx_queue_stop, 2736042f5c94SOphir Munk .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 
2737042f5c94SOphir Munk .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 2738042f5c94SOphir Munk .mac_addr_remove = mlx5_mac_addr_remove, 2739042f5c94SOphir Munk .mac_addr_add = mlx5_mac_addr_add, 2740042f5c94SOphir Munk .mac_addr_set = mlx5_mac_addr_set, 2741042f5c94SOphir Munk .set_mc_addr_list = mlx5_set_mc_addr_list, 2742042f5c94SOphir Munk .mtu_set = mlx5_dev_set_mtu, 2743042f5c94SOphir Munk .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 2744042f5c94SOphir Munk .vlan_offload_set = mlx5_vlan_offload_set, 2745042f5c94SOphir Munk .filter_ctrl = mlx5_dev_filter_ctrl, 2746042f5c94SOphir Munk .rxq_info_get = mlx5_rxq_info_get, 2747042f5c94SOphir Munk .txq_info_get = mlx5_txq_info_get, 2748042f5c94SOphir Munk .rx_burst_mode_get = mlx5_rx_burst_mode_get, 2749042f5c94SOphir Munk .tx_burst_mode_get = mlx5_tx_burst_mode_get, 2750042f5c94SOphir Munk .rx_queue_intr_enable = mlx5_rx_intr_enable, 2751042f5c94SOphir Munk .rx_queue_intr_disable = mlx5_rx_intr_disable, 2752042f5c94SOphir Munk .is_removed = mlx5_is_removed, 2753042f5c94SOphir Munk .get_module_info = mlx5_get_module_info, 2754042f5c94SOphir Munk .get_module_eeprom = mlx5_get_module_eeprom, 2755042f5c94SOphir Munk .hairpin_cap_get = mlx5_hairpin_cap_get, 2756042f5c94SOphir Munk .mtr_ops_get = mlx5_flow_meter_ops_get, 275737cd4501SBing Zhao .hairpin_bind = mlx5_hairpin_bind, 275837cd4501SBing Zhao .hairpin_unbind = mlx5_hairpin_unbind, 275902109eaeSBing Zhao .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports, 276037cd4501SBing Zhao .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, 276137cd4501SBing Zhao .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, 276237cd4501SBing Zhao .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, 2763042f5c94SOphir Munk }; 2764
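/*
 * Usage sketch for mlx5_os_read_dev_stat() above, assuming "priv" points to an
 * already probed port's struct mlx5_priv. The counter name "out_of_buffer" is
 * only an example of a file expected under
 * <ibdev_path>/ports/<port>/hw_counters/; the switchdev fallback location is
 * probed by the function itself:
 *
 *	uint64_t oob = 0;
 *
 *	if (mlx5_os_read_dev_stat(priv, "out_of_buffer", &oob) == 0)
 *		DRV_LOG(DEBUG, "out_of_buffer counter: %" PRIu64, oob);
 *	else
 *		DRV_LOG(DEBUG, "out_of_buffer counter is not readable");
 */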