/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_verbs.h"
#include "mlx5_nl.h"
#include "mlx5_devx.h"

#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 *
 * @param[in] rxq_obj
 *   Pointer to RQ channel object, which includes the channel fd
 *
 * @param[out] fd
 *   The file descriptor (representing the interrupt) used in this channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL);
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

/**
 * Get mlx5 device attributes. The glue function query_device_ex() is called
 * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
 * device attributes from the glue out parameter.
 *
 * @param ctx
 *   Pointer to ibv context.
 *
 * @param device_attr
 *   Pointer to mlx5 device attributes.
 *
 * @return
 *   0 on success, non-zero error number otherwise.
 */
int
mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
{
	int err;
	struct ibv_device_attr_ex attr_ex;

	memset(device_attr, 0, sizeof(*device_attr));
	err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (err)
		return err;

	device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
	device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
	device_attr->max_sge = attr_ex.orig_attr.max_sge;
	device_attr->max_cq = attr_ex.orig_attr.max_cq;
	device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
	device_attr->max_mr = attr_ex.orig_attr.max_mr;
	device_attr->max_pd = attr_ex.orig_attr.max_pd;
	device_attr->max_qp = attr_ex.orig_attr.max_qp;
	device_attr->max_srq = attr_ex.orig_attr.max_srq;
	device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
	device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
	device_attr->max_rwq_indirection_table_size =
		attr_ex.rss_caps.max_rwq_indirection_table_size;
	device_attr->max_tso = attr_ex.tso_caps.max_tso;
	device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;

	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	err = mlx5_glue->dv_query_device(ctx, &dv_attr);
	if (err)
		return err;

	device_attr->flags = dv_attr.flags;
	device_attr->comp_mask = dv_attr.comp_mask;
#ifdef HAVE_IBV_MLX5_MOD_SWP
	device_attr->sw_parsing_offloads =
		dv_attr.sw_parsing_caps.sw_parsing_offloads;
#endif
	device_attr->min_single_stride_log_num_of_bytes =
		dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
	device_attr->max_single_stride_log_num_of_bytes =
		dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
	device_attr->min_single_wqe_log_num_of_strides =
		dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
	device_attr->max_single_wqe_log_num_of_strides =
		dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
	device_attr->stride_supported_qpts =
		dv_attr.striding_rq_caps.supported_qpts;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
#endif

	return err;
}

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently rte_mem_page_size()).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_dev_ctx_shared *sh = data;
	void *ret;
	size_t alignment = rte_mem_page_size();
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_ASSERT(data != NULL);
	ret = mlx5_malloc(0, size, alignment, sh->numa_node);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	MLX5_ASSERT(data != NULL);
	mlx5_free(ptr);
}

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_HLIST_NAMESIZE] __rte_unused;
	int err;

	MLX5_ASSERT(sh && sh->refcnt);
	if (sh->refcnt > 1)
		return 0;
	err = mlx5_alloc_table_hash_list(priv);
	if (err)
		goto error;
	/* The resources below are only valid with DV support. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* Init port id action cache list. */
	snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
			     flow_dv_port_id_create_cb,
			     flow_dv_port_id_match_cb,
			     flow_dv_port_id_remove_cb);
	/* Init push vlan action cache list. */
	snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
			     flow_dv_push_vlan_create_cb,
			     flow_dv_push_vlan_match_cb,
			     flow_dv_push_vlan_remove_cb);
	/* Init sample action cache list. */
	snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
			     flow_dv_sample_create_cb,
			     flow_dv_sample_match_cb,
			     flow_dv_sample_remove_cb);
	/* Init dest array action cache list. */
	snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
	mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
			     flow_dv_dest_array_create_cb,
			     flow_dv_dest_array_match_cb,
			     flow_dv_dest_array_remove_cb);
	/* Create tags hash list table. */
	snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
	sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
					  MLX5_HLIST_WRITE_MOST,
					  flow_dv_tag_create_cb,
					  flow_dv_tag_match_cb,
					  flow_dv_tag_remove_cb);
	if (!sh->tag_table) {
		DRV_LOG(ERR, "tags with hash creation failed.");
		err = ENOMEM;
		goto error;
	}
	sh->tag_table->ctx = sh;
	snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
	sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
					    0, MLX5_HLIST_WRITE_MOST |
					    MLX5_HLIST_DIRECT_KEY,
					    flow_dv_modify_create_cb,
					    flow_dv_modify_match_cb,
					    flow_dv_modify_remove_cb);
	if (!sh->modify_cmds) {
		DRV_LOG(ERR, "hdr modify hash creation failed");
		err = ENOMEM;
		goto error;
	}
	sh->modify_cmds->ctx = sh;
	snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
	sh->encaps_decaps = mlx5_hlist_create(s,
					      MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
					      0, MLX5_HLIST_DIRECT_KEY |
					      MLX5_HLIST_WRITE_MOST,
					      flow_dv_encap_decap_create_cb,
					      flow_dv_encap_decap_match_cb,
					      flow_dv_encap_decap_remove_cb);
	if (!sh->encaps_decaps) {
		DRV_LOG(ERR, "encap decap hash creation failed");
		err = ENOMEM;
		goto error;
	}
	sh->encaps_decaps->ctx = sh;
#endif
#ifdef HAVE_MLX5DV_DR
	void *domain;

	/* Reference counter is zero, we should initialize structures. */
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!domain) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->rx_domain = domain;
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
	if (!domain) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (priv->config.dv_esw_en) {
		domain = mlx5_glue->dr_create_domain
			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
		if (!domain) {
			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
			err = errno;
			goto error;
		}
		sh->fdb_domain = domain;
		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
	}
#endif
	if (!sh->tunnel_hub)
		err = mlx5_alloc_tunnel_hub(sh);
	if (err) {
		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
		goto error;
	}
	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
		if (sh->fdb_domain)
			mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
	}
	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
#endif /* HAVE_MLX5DV_DR */
	sh->default_miss_action =
		mlx5_glue->dr_create_flow_action_default_miss();
	if (!sh->default_miss_action)
		DRV_LOG(WARNING, "Default miss action is not supported.");
	return 0;
error:
	/* Rollback the created objects. */
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
	if (sh->encaps_decaps) {
		mlx5_hlist_destroy(sh->encaps_decaps);
		sh->encaps_decaps = NULL;
	}
	if (sh->modify_cmds) {
		mlx5_hlist_destroy(sh->modify_cmds);
		sh->modify_cmds = NULL;
	}
	if (sh->tag_table) {
		/* tags should be destroyed with flow before. */
		mlx5_hlist_destroy(sh->tag_table);
		sh->tag_table = NULL;
	}
	if (sh->tunnel_hub) {
		mlx5_release_tunnel_hub(sh, priv->dev_port);
		sh->tunnel_hub = NULL;
	}
	mlx5_free_table_hash_list(priv);
	return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	MLX5_ASSERT(sh && sh->refcnt);
	if (sh->refcnt > 1)
		return;
#ifdef HAVE_MLX5DV_DR
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
#endif
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
#endif /* HAVE_MLX5DV_DR */
	if (sh->default_miss_action)
		mlx5_glue->destroy_flow_action
				(sh->default_miss_action);
	if (sh->encaps_decaps) {
		mlx5_hlist_destroy(sh->encaps_decaps);
		sh->encaps_decaps = NULL;
	}
	if (sh->modify_cmds) {
		mlx5_hlist_destroy(sh->modify_cmds);
		sh->modify_cmds = NULL;
	}
	if (sh->tag_table) {
		/* tags should be destroyed with flow before. */
		mlx5_hlist_destroy(sh->tag_table);
		sh->tag_table = NULL;
	}
	if (sh->tunnel_hub) {
		mlx5_release_tunnel_hub(sh, priv->dev_port);
		sh->tunnel_hub = NULL;
	}
	mlx5_cache_list_destroy(&sh->port_id_action_list);
	mlx5_cache_list_destroy(&sh->push_vlan_action_list);
	mlx5_free_table_hash_list(priv);
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	MLX5_ASSERT(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
					   mlx5_mp_os_primary_handle);
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
					     mlx5_mp_os_secondary_handle);
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * Create the Tx queue DevX/Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_devx_obj_new(dev, idx);
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!priv->config.dv_esw_en)
		return mlx5_txq_devx_obj_new(dev, idx);
#endif
	return mlx5_txq_ibv_obj_new(dev, idx);
}

/**
 * Release a Tx DevX/Verbs queue object.
 *
 * @param txq_obj
 *   DevX/Verbs Tx queue object.
 */
static void
mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		mlx5_txq_devx_obj_release(txq_obj);
		return;
	}
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
		mlx5_txq_devx_obj_release(txq_obj);
		return;
	}
#endif
	mlx5_txq_ibv_obj_release(txq_obj);
}

/**
 * DV flow counter mode detect and config.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 */
static void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!priv->config.devx || !priv->config.dv_flow_en ||
	    !priv->config.hca_attr.flow_counters_dump ||
	    !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			priv->config.hca_attr.flow_counters_dump,
			priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"with others:%d.", PORT_ID(priv), fallback);
#endif
}

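/**
 * Prepare the queue counter ID used for Rx queue statistics.
 *
 * Try to allocate a dedicated DevX queue counter object. If that is not
 * possible, fall back to the kernel driver global queue counter: a temporary
 * Verbs CQ/WQ pair is created and moved to RDY state only to query the
 * counter ID assigned by the kernel, then both objects are destroyed. If no
 * counter ID can be obtained, part of the port statistics will not be
 * available.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */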
static void
mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	void *ctx = priv->sh->ctx;

	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
	if (!priv->q_counters) {
		struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
		struct ibv_wq *wq;

		DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
			"by DevX - fall-back to use the kernel driver global "
			"queue counter.", dev->data->port_id);
		/* Create WQ by kernel and query its queue counter ID. */
		if (cq) {
			wq = mlx5_glue->create_wq(ctx,
				&(struct ibv_wq_init_attr){
					.wq_type = IBV_WQT_RQ,
					.max_wr = 1,
					.max_sge = 1,
					.pd = priv->sh->pd,
					.cq = cq,
				});
			if (wq) {
				/* Counter is assigned only on RDY state. */
				int ret = mlx5_glue->modify_wq(wq,
						&(struct ibv_wq_attr){
							.attr_mask = IBV_WQ_ATTR_STATE,
							.wq_state = IBV_WQS_RDY,
						});

				if (ret == 0)
					mlx5_devx_cmd_wq_query(wq,
							&priv->counter_set_id);
				claim_zero(mlx5_glue->destroy_wq(wq));
			}
			claim_zero(mlx5_glue->destroy_cq(cq));
		}
	} else {
		priv->counter_set_id = priv->q_counters->id;
	}
	if (priv->counter_set_id == 0)
		DRV_LOG(INFO, "Part of the port %d statistics will not be "
			"available.", dev->data->port_id);
}

/**
 * Check if representor spawn info matches devargs.
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param eth_da
 *   Device devargs to probe.
 *
 * @return
 *   Match result.
 */
static bool
mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
		       struct rte_eth_devargs *eth_da)
{
	struct mlx5_switch_info *switch_info = &spawn->info;
	unsigned int p, f;
	uint16_t id;
	uint16_t repr_id = mlx5_representor_id_encode(switch_info);

	switch (eth_da->type) {
	case RTE_ETH_REPRESENTOR_SF:
		if (switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF) {
			rte_errno = EBUSY;
			return false;
		}
		break;
	case RTE_ETH_REPRESENTOR_VF:
		/* Allows HPF representor index -1 as exception. */
		if (!(spawn->info.port_name == -1 &&
		      switch_info->name_type ==
		      MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
		    switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF) {
			rte_errno = EBUSY;
			return false;
		}
		break;
	case RTE_ETH_REPRESENTOR_NONE:
		rte_errno = EBUSY;
		return false;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR, "unsupported representor type");
		return false;
	}
	/* Check representor ID: */
	for (p = 0; p < eth_da->nb_ports; ++p) {
		if (spawn->pf_bond < 0) {
			/* For non-LAG mode, allow and ignore pf. */
			switch_info->pf_num = eth_da->ports[p];
			repr_id = mlx5_representor_id_encode(switch_info);
		}
		for (f = 0; f < eth_da->nb_representor_ports; ++f) {
			id = MLX5_REPRESENTOR_ID
				(eth_da->ports[p], eth_da->type,
				 eth_da->representor_ports[f]);
			if (repr_id == id)
				return true;
		}
	}
	rte_errno = EBUSY;
	return false;
}

/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 * @param eth_da
 *   Device arguments.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EBUSY: device is not supposed to be spawned.
 *   EEXIST: device is already spawned
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config *config,
	       struct rte_eth_devargs *eth_da)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct ibv_port_attr port_attr;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int hw_padding = 0;
	unsigned int mps;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
#endif

	/* Determine if this port representor is supposed to be spawned. */
	if (switch_info->representor && dpdk_dev->devargs &&
	    !mlx5_representor_match(spawn, eth_da))
		return NULL;
	/* Build device name. */
	if (spawn->pf_bond < 0) {
		/* Single device. */
		if (!switch_info->representor)
			strlcpy(name, dpdk_dev->name, sizeof(name));
		else
			err = snprintf(name, sizeof(name),
				       "%s_representor_%s%u",
				       dpdk_dev->name,
				       switch_info->name_type ==
				       MLX5_PHYS_PORT_NAME_TYPE_PFSF ?
				       "sf" : "vf",
				       switch_info->port_name);
	} else {
		/* Bonding device. */
		if (!switch_info->representor) {
			err = snprintf(name, sizeof(name), "%s_%s",
				       dpdk_dev->name,
				       mlx5_os_get_dev_device_name
				       (spawn->phys_dev));
		} else {
			err = snprintf(name, sizeof(name),
				       "%s_%s_representor_c%dpf%d%s%u",
				       dpdk_dev->name,
				       mlx5_os_get_dev_device_name
				       (spawn->phys_dev),
				       switch_info->ctrl_num,
				       switch_info->pf_num,
				       switch_info->name_type ==
				       MLX5_PHYS_PORT_NAME_TYPE_PFSF ?
				       "sf" : "vf",
"sf" : "vf", 8392eb4d010SOphir Munk switch_info->port_name); 8402eb4d010SOphir Munk } 841f926cce3SXueming Li } 842f926cce3SXueming Li if (err >= (int)sizeof(name)) 843f926cce3SXueming Li DRV_LOG(WARNING, "device name overflow %s", name); 8442eb4d010SOphir Munk /* check if the device is already spawned */ 8452eb4d010SOphir Munk if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 8462eb4d010SOphir Munk rte_errno = EEXIST; 8472eb4d010SOphir Munk return NULL; 8482eb4d010SOphir Munk } 8492eb4d010SOphir Munk DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 8502eb4d010SOphir Munk if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 8512eb4d010SOphir Munk struct mlx5_mp_id mp_id; 8522eb4d010SOphir Munk 8532eb4d010SOphir Munk eth_dev = rte_eth_dev_attach_secondary(name); 8542eb4d010SOphir Munk if (eth_dev == NULL) { 8552eb4d010SOphir Munk DRV_LOG(ERR, "can not attach rte ethdev"); 8562eb4d010SOphir Munk rte_errno = ENOMEM; 8572eb4d010SOphir Munk return NULL; 8582eb4d010SOphir Munk } 8592eb4d010SOphir Munk eth_dev->device = dpdk_dev; 860b012b4ceSOphir Munk eth_dev->dev_ops = &mlx5_dev_sec_ops; 861cbfc6111SFerruh Yigit eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 862cbfc6111SFerruh Yigit eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 8632eb4d010SOphir Munk err = mlx5_proc_priv_init(eth_dev); 8642eb4d010SOphir Munk if (err) 8652eb4d010SOphir Munk return NULL; 8662eb4d010SOphir Munk mp_id.port_id = eth_dev->data->port_id; 8672eb4d010SOphir Munk strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 8682eb4d010SOphir Munk /* Receive command fd from primary process */ 8692eb4d010SOphir Munk err = mlx5_mp_req_verbs_cmd_fd(&mp_id); 8702eb4d010SOphir Munk if (err < 0) 8712eb4d010SOphir Munk goto err_secondary; 8722eb4d010SOphir Munk /* Remap UAR for Tx queues. */ 8732eb4d010SOphir Munk err = mlx5_tx_uar_init_secondary(eth_dev, err); 8742eb4d010SOphir Munk if (err) 8752eb4d010SOphir Munk goto err_secondary; 8762eb4d010SOphir Munk /* 8772eb4d010SOphir Munk * Ethdev pointer is still required as input since 8782eb4d010SOphir Munk * the primary device is not accessible from the 8792eb4d010SOphir Munk * secondary process. 8802eb4d010SOphir Munk */ 8812eb4d010SOphir Munk eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); 8822eb4d010SOphir Munk eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); 8832eb4d010SOphir Munk return eth_dev; 8842eb4d010SOphir Munk err_secondary: 8852eb4d010SOphir Munk mlx5_dev_close(eth_dev); 8862eb4d010SOphir Munk return NULL; 8872eb4d010SOphir Munk } 8882eb4d010SOphir Munk /* 8892eb4d010SOphir Munk * Some parameters ("tx_db_nc" in particularly) are needed in 8902eb4d010SOphir Munk * advance to create dv/verbs device context. We proceed the 8912eb4d010SOphir Munk * devargs here to get ones, and later proceed devargs again 8922eb4d010SOphir Munk * to override some hardware settings. 
	 */
	err = mlx5_args(config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	if (config->dv_miss_info) {
		if (switch_info->master || switch_info->representor)
			config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
	}
	mlx5_malloc_mem_select(config->sys_mem_en);
	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
	if (!sh)
		return NULL;
	config->devx = sh->devx;
#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
	config->dest_tir = 1;
#endif
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config->swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
	}
#endif
	/* Rx CQE compression is enabled by default. */
	config->cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	config->tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config->mpls_en = mpls_en;
	/* Check port status. */
	err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
	if (err) {
		DRV_LOG(ERR, "port query failed: %s", strerror(err));
		goto error;
	}
	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
		DRV_LOG(ERR, "port is not configured in Ethernet mode");
		err = EINVAL;
		goto error;
	}
	if (port_attr.state != IBV_PORT_ACTIVE)
		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
			mlx5_glue->port_state_str(port_attr.state),
			port_attr.state);
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	/* Some internal functions rely on Netlink sockets, open them now. */
	priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
	priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	/*
	 * The DevX port query API is implemented. E-Switch may use
	 * either vport or reg_c[0] metadata register to match on
	 * vport index. The engaged part of metadata register is
	 * defined by mask.
	 */
	if (switch_info->representor || switch_info->master) {
		devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
				      MLX5DV_DEVX_PORT_MATCH_REG_C_0;
		err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
						 &devx_port);
		if (err) {
			DRV_LOG(WARNING,
				"can't query devx port %d on device %s",
				spawn->phys_port,
				mlx5_os_get_dev_device_name(spawn->phys_dev));
			devx_port.comp_mask = 0;
		}
	}
	if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
		priv->vport_meta_tag = devx_port.reg_c_0.value;
		priv->vport_meta_mask = devx_port.reg_c_0.mask;
		if (!priv->vport_meta_mask) {
			DRV_LOG(ERR, "vport zero mask for port %d"
				" on bonding device %s",
				spawn->phys_port,
				mlx5_os_get_dev_device_name
				(spawn->phys_dev));
			err = ENOTSUP;
			goto error;
		}
		if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
			DRV_LOG(ERR, "invalid vport tag for port %d"
				" on bonding device %s",
				spawn->phys_port,
				mlx5_os_get_dev_device_name
				(spawn->phys_dev));
			err = ENOTSUP;
			goto error;
		}
	}
	if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
		priv->vport_id = devx_port.vport_num;
	} else if (spawn->pf_bond >= 0) {
		DRV_LOG(ERR, "can't deduce vport index for port %d"
			" on bonding device %s",
			spawn->phys_port,
			mlx5_os_get_dev_device_name(spawn->phys_dev));
		err = ENOTSUP;
		goto error;
	} else {
		/* Suppose vport index in compatible way. */
		priv->vport_id = switch_info->representor ?
				 switch_info->port_name + 1 : -1;
	}
#else
	/*
	 * Kernel/rdma_core support single E-Switch per PF configurations
	 * only and vport_id field contains the vport index for
	 * associated VF, which is deduced from representor port name.
	 * For example, let's have the IB device port 10, it has
	 * attached network device eth0, which has port name attribute
	 * pf0vf2, we can deduce the VF number as 2, and set vport index
	 * as 3 (2+1). This assigning schema should be changed if the
	 * multiple E-Switch instances per PF configurations or/and PCI
	 * subfunctions are added.
	 */
	priv->vport_id = switch_info->representor ?
			 switch_info->port_name + 1 : -1;
#endif
	priv->representor_id = mlx5_representor_id_encode(switch_info);
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Override some values set by hardware configuration. */
	mlx5_args(config, dpdk_dev->devargs);
	err = mlx5_dev_check_sibling_config(priv, config);
	if (err)
		goto error;
	config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
			    IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config->hw_csum ? "" : "not "));
"" : "not ")); 11462eb4d010SOphir Munk #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 11472eb4d010SOphir Munk !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 11482eb4d010SOphir Munk DRV_LOG(DEBUG, "counters are not supported"); 11492eb4d010SOphir Munk #endif 11502eb4d010SOphir Munk #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR) 1151d462a83cSMichael Baum if (config->dv_flow_en) { 11522eb4d010SOphir Munk DRV_LOG(WARNING, "DV flow is not supported"); 1153d462a83cSMichael Baum config->dv_flow_en = 0; 11542eb4d010SOphir Munk } 11552eb4d010SOphir Munk #endif 1156d462a83cSMichael Baum config->ind_table_max_size = 11572eb4d010SOphir Munk sh->device_attr.max_rwq_indirection_table_size; 11582eb4d010SOphir Munk /* 11592eb4d010SOphir Munk * Remove this check once DPDK supports larger/variable 11602eb4d010SOphir Munk * indirection tables. 11612eb4d010SOphir Munk */ 1162d462a83cSMichael Baum if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 1163d462a83cSMichael Baum config->ind_table_max_size = ETH_RSS_RETA_SIZE_512; 11642eb4d010SOphir Munk DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 1165d462a83cSMichael Baum config->ind_table_max_size); 1166d462a83cSMichael Baum config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 11672eb4d010SOphir Munk IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 11682eb4d010SOphir Munk DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 1169d462a83cSMichael Baum (config->hw_vlan_strip ? "" : "not ")); 1170d462a83cSMichael Baum config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 11712eb4d010SOphir Munk IBV_RAW_PACKET_CAP_SCATTER_FCS); 11722eb4d010SOphir Munk #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 11732eb4d010SOphir Munk hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 11742eb4d010SOphir Munk #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 11752eb4d010SOphir Munk hw_padding = !!(sh->device_attr.device_cap_flags_ex & 11762eb4d010SOphir Munk IBV_DEVICE_PCI_WRITE_END_PADDING); 11772eb4d010SOphir Munk #endif 1178d462a83cSMichael Baum if (config->hw_padding && !hw_padding) { 11792eb4d010SOphir Munk DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 1180d462a83cSMichael Baum config->hw_padding = 0; 1181d462a83cSMichael Baum } else if (config->hw_padding) { 11822eb4d010SOphir Munk DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 11832eb4d010SOphir Munk } 1184d462a83cSMichael Baum config->tso = (sh->device_attr.max_tso > 0 && 11852eb4d010SOphir Munk (sh->device_attr.tso_supported_qpts & 11862eb4d010SOphir Munk (1 << IBV_QPT_RAW_PACKET))); 1187d462a83cSMichael Baum if (config->tso) 1188d462a83cSMichael Baum config->tso_max_payload_sz = sh->device_attr.max_tso; 11892eb4d010SOphir Munk /* 11902eb4d010SOphir Munk * MPW is disabled by default, while the Enhanced MPW is enabled 11912eb4d010SOphir Munk * by default. 11922eb4d010SOphir Munk */ 1193d462a83cSMichael Baum if (config->mps == MLX5_ARG_UNSET) 1194d462a83cSMichael Baum config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 11952eb4d010SOphir Munk MLX5_MPW_DISABLED; 11962eb4d010SOphir Munk else 1197d462a83cSMichael Baum config->mps = config->mps ? mps : MLX5_MPW_DISABLED; 11982eb4d010SOphir Munk DRV_LOG(INFO, "%sMPS is %s", 1199d462a83cSMichael Baum config->mps == MLX5_MPW_ENHANCED ? "enhanced " : 1200d462a83cSMichael Baum config->mps == MLX5_MPW ? "legacy " : "", 1201d462a83cSMichael Baum config->mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); 1202d462a83cSMichael Baum if (config->devx) { 1203d462a83cSMichael Baum err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr); 12042eb4d010SOphir Munk if (err) { 12052eb4d010SOphir Munk err = -err; 12062eb4d010SOphir Munk goto error; 12072eb4d010SOphir Munk } 12083aa27915SSuanming Mou /* Check relax ordering support. */ 1209e82ddd28STal Shnaiderman if (!haswell_broadwell_cpu) { 1210e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_write = 1211e82ddd28STal Shnaiderman config->hca_attr.relaxed_ordering_write; 1212e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_read = 1213e82ddd28STal Shnaiderman config->hca_attr.relaxed_ordering_read; 1214e82ddd28STal Shnaiderman } else { 1215e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_read = 0; 1216e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_write = 0; 1217e82ddd28STal Shnaiderman } 1218d61381adSViacheslav Ovsiienko sh->rq_ts_format = config->hca_attr.rq_ts_format; 1219d61381adSViacheslav Ovsiienko sh->sq_ts_format = config->hca_attr.sq_ts_format; 1220d61381adSViacheslav Ovsiienko sh->qp_ts_format = config->hca_attr.qp_ts_format; 12212eb4d010SOphir Munk /* Check for LRO support. */ 1222d462a83cSMichael Baum if (config->dest_tir && config->hca_attr.lro_cap && 1223d462a83cSMichael Baum config->dv_flow_en) { 12242eb4d010SOphir Munk /* TBD check tunnel lro caps. */ 1225d462a83cSMichael Baum config->lro.supported = config->hca_attr.lro_cap; 12262eb4d010SOphir Munk DRV_LOG(DEBUG, "Device supports LRO"); 12272eb4d010SOphir Munk /* 12282eb4d010SOphir Munk * If LRO timeout is not configured by application, 12292eb4d010SOphir Munk * use the minimal supported value. 12302eb4d010SOphir Munk */ 1231d462a83cSMichael Baum if (!config->lro.timeout) 1232d462a83cSMichael Baum config->lro.timeout = 1233d462a83cSMichael Baum config->hca_attr.lro_timer_supported_periods[0]; 12342eb4d010SOphir Munk DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 1235d462a83cSMichael Baum config->lro.timeout); 1236613d64e4SDekel Peled DRV_LOG(DEBUG, "LRO minimal size of TCP segment " 1237613d64e4SDekel Peled "required for coalescing is %d bytes", 1238613d64e4SDekel Peled config->hca_attr.lro_min_mss_size); 12392eb4d010SOphir Munk } 12402eb4d010SOphir Munk #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) 1241d462a83cSMichael Baum if (config->hca_attr.qos.sup && 1242b6505738SDekel Peled config->hca_attr.qos.flow_meter_old && 1243d462a83cSMichael Baum config->dv_flow_en) { 12442eb4d010SOphir Munk uint8_t reg_c_mask = 1245d462a83cSMichael Baum config->hca_attr.qos.flow_meter_reg_c_ids; 12462eb4d010SOphir Munk /* 12472eb4d010SOphir Munk * Meter needs two REG_C's for color match and pre-sfx 12482eb4d010SOphir Munk * flow match. Here get the REG_C for color match. 12492eb4d010SOphir Munk * REG_C_0 and REG_C_1 is reserved for metadata feature. 12502eb4d010SOphir Munk */ 12512eb4d010SOphir Munk reg_c_mask &= 0xfc; 12522eb4d010SOphir Munk if (__builtin_popcount(reg_c_mask) < 1) { 12532eb4d010SOphir Munk priv->mtr_en = 0; 12542eb4d010SOphir Munk DRV_LOG(WARNING, "No available register for" 12552eb4d010SOphir Munk " meter."); 12562eb4d010SOphir Munk } else { 125731ef2982SDekel Peled /* 125831ef2982SDekel Peled * The meter color register is used by the 125931ef2982SDekel Peled * flow-hit feature as well. 126031ef2982SDekel Peled * The flow-hit feature must use REG_C_3 126131ef2982SDekel Peled * Prefer REG_C_3 if it is available. 
126231ef2982SDekel Peled */ 126331ef2982SDekel Peled if (reg_c_mask & (1 << (REG_C_3 - REG_C_0))) 126431ef2982SDekel Peled priv->mtr_color_reg = REG_C_3; 126531ef2982SDekel Peled else 126631ef2982SDekel Peled priv->mtr_color_reg = ffs(reg_c_mask) 126731ef2982SDekel Peled - 1 + REG_C_0; 12682eb4d010SOphir Munk priv->mtr_en = 1; 12692eb4d010SOphir Munk priv->mtr_reg_share = 1270b6505738SDekel Peled config->hca_attr.qos.flow_meter; 12712eb4d010SOphir Munk DRV_LOG(DEBUG, "The REG_C meter uses is %d", 12722eb4d010SOphir Munk priv->mtr_color_reg); 12732eb4d010SOphir Munk } 12742eb4d010SOphir Munk } 12752eb4d010SOphir Munk #endif 1276a2999c7bSDekel Peled #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO 127731ef2982SDekel Peled if (config->hca_attr.flow_hit_aso && 127831ef2982SDekel Peled priv->mtr_color_reg == REG_C_3) { 127931ef2982SDekel Peled sh->flow_hit_aso_en = 1; 128031ef2982SDekel Peled err = mlx5_flow_aso_age_mng_init(sh); 128131ef2982SDekel Peled if (err) { 128231ef2982SDekel Peled err = -err; 128331ef2982SDekel Peled goto error; 128431ef2982SDekel Peled } 128531ef2982SDekel Peled DRV_LOG(DEBUG, "Flow Hit ASO is supported."); 128631ef2982SDekel Peled } 1287a2999c7bSDekel Peled #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */ 128896b1f027SJiawei Wang #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE) 128996b1f027SJiawei Wang if (config->hca_attr.log_max_ft_sampler_num > 0 && 129096b1f027SJiawei Wang config->dv_flow_en) { 129196b1f027SJiawei Wang priv->sampler_en = 1; 12921b9e9826SThomas Monjalon DRV_LOG(DEBUG, "Sampler enabled!"); 129396b1f027SJiawei Wang } else { 129496b1f027SJiawei Wang priv->sampler_en = 0; 129596b1f027SJiawei Wang if (!config->hca_attr.log_max_ft_sampler_num) 12961b9e9826SThomas Monjalon DRV_LOG(WARNING, 12971b9e9826SThomas Monjalon "No available register for sampler."); 129896b1f027SJiawei Wang else 12991b9e9826SThomas Monjalon DRV_LOG(DEBUG, "DV flow is not supported!"); 130096b1f027SJiawei Wang } 130196b1f027SJiawei Wang #endif 13022eb4d010SOphir Munk } 13033d3f4e6dSAlexander Kozyrev if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 && 13043d3f4e6dSAlexander Kozyrev !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) { 13053d3f4e6dSAlexander Kozyrev DRV_LOG(WARNING, "Rx CQE 128B compression is not supported"); 13063d3f4e6dSAlexander Kozyrev config->cqe_comp = 0; 13073d3f4e6dSAlexander Kozyrev } 13083d3f4e6dSAlexander Kozyrev if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX && 13093d3f4e6dSAlexander Kozyrev (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) { 13103d3f4e6dSAlexander Kozyrev DRV_LOG(WARNING, "Flow Tag CQE compression" 13113d3f4e6dSAlexander Kozyrev " format isn't supported."); 13123d3f4e6dSAlexander Kozyrev config->cqe_comp = 0; 13133d3f4e6dSAlexander Kozyrev } 13143d3f4e6dSAlexander Kozyrev if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX && 13153d3f4e6dSAlexander Kozyrev (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) { 13163d3f4e6dSAlexander Kozyrev DRV_LOG(WARNING, "L3/L4 Header CQE compression" 13173d3f4e6dSAlexander Kozyrev " format isn't supported."); 13183d3f4e6dSAlexander Kozyrev config->cqe_comp = 0; 13193d3f4e6dSAlexander Kozyrev } 13203d3f4e6dSAlexander Kozyrev DRV_LOG(DEBUG, "Rx CQE compression is %ssupported", 13213d3f4e6dSAlexander Kozyrev config->cqe_comp ? 
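/*
 * The meter color register selection earlier in this block follows a
 * simple rule: REG_C_0/REG_C_1 stay reserved for metadata, REG_C_3 is
 * preferred because the flow-hit (ASO age) feature also needs it, and
 * otherwise the lowest available register is taken. A standalone sketch
 * (illustrative only; the helper name is an assumption, ffs() comes
 * from <strings.h>):
 */
#if 0	/* Illustrative sketch, not compiled. */
static int
example_pick_meter_color_reg(uint8_t reg_c_mask)
{
	reg_c_mask &= 0xfc;	/* Drop REG_C_0 and REG_C_1. */
	if (reg_c_mask == 0)
		return -1;	/* No register left for the meter color. */
	if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
		return REG_C_3;
	return ffs(reg_c_mask) - 1 + REG_C_0;
}
#endif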
"" : "not "); 1322d462a83cSMichael Baum if (config->tx_pp) { 13238f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz", 1324d462a83cSMichael Baum config->hca_attr.dev_freq_khz); 13258f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Packet pacing is %ssupported", 1326d462a83cSMichael Baum config->hca_attr.qos.packet_pacing ? "" : "not "); 13278f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Cross channel ops are %ssupported", 1328d462a83cSMichael Baum config->hca_attr.cross_channel ? "" : "not "); 13298f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "WQE index ignore is %ssupported", 1330d462a83cSMichael Baum config->hca_attr.wqe_index_ignore ? "" : "not "); 13318f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported", 1332d462a83cSMichael Baum config->hca_attr.non_wire_sq ? "" : "not "); 13338f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)", 1334d462a83cSMichael Baum config->hca_attr.log_max_static_sq_wq ? "" : "not ", 1335d462a83cSMichael Baum config->hca_attr.log_max_static_sq_wq); 13368f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported", 1337d462a83cSMichael Baum config->hca_attr.qos.wqe_rate_pp ? "" : "not "); 1338d462a83cSMichael Baum if (!config->devx) { 13398f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "DevX is required for packet pacing"); 13408f848f32SViacheslav Ovsiienko err = ENODEV; 13418f848f32SViacheslav Ovsiienko goto error; 13428f848f32SViacheslav Ovsiienko } 1343d462a83cSMichael Baum if (!config->hca_attr.qos.packet_pacing) { 13448f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Packet pacing is not supported"); 13458f848f32SViacheslav Ovsiienko err = ENODEV; 13468f848f32SViacheslav Ovsiienko goto error; 13478f848f32SViacheslav Ovsiienko } 1348d462a83cSMichael Baum if (!config->hca_attr.cross_channel) { 13498f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Cross channel operations are" 13508f848f32SViacheslav Ovsiienko " required for packet pacing"); 13518f848f32SViacheslav Ovsiienko err = ENODEV; 13528f848f32SViacheslav Ovsiienko goto error; 13538f848f32SViacheslav Ovsiienko } 1354d462a83cSMichael Baum if (!config->hca_attr.wqe_index_ignore) { 13558f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "WQE index ignore feature is" 13568f848f32SViacheslav Ovsiienko " required for packet pacing"); 13578f848f32SViacheslav Ovsiienko err = ENODEV; 13588f848f32SViacheslav Ovsiienko goto error; 13598f848f32SViacheslav Ovsiienko } 1360d462a83cSMichael Baum if (!config->hca_attr.non_wire_sq) { 13618f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Non-wire SQ feature is" 13628f848f32SViacheslav Ovsiienko " required for packet pacing"); 13638f848f32SViacheslav Ovsiienko err = ENODEV; 13648f848f32SViacheslav Ovsiienko goto error; 13658f848f32SViacheslav Ovsiienko } 1366d462a83cSMichael Baum if (!config->hca_attr.log_max_static_sq_wq) { 13678f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Static WQE SQ feature is" 13688f848f32SViacheslav Ovsiienko " required for packet pacing"); 13698f848f32SViacheslav Ovsiienko err = ENODEV; 13708f848f32SViacheslav Ovsiienko goto error; 13718f848f32SViacheslav Ovsiienko } 1372d462a83cSMichael Baum if (!config->hca_attr.qos.wqe_rate_pp) { 13738f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "WQE rate mode is required" 13748f848f32SViacheslav Ovsiienko " for packet pacing"); 13758f848f32SViacheslav Ovsiienko err = ENODEV; 13768f848f32SViacheslav Ovsiienko goto error; 13778f848f32SViacheslav Ovsiienko } 13788f848f32SViacheslav Ovsiienko #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 
13798f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "DevX does not provide UAR offset," 13808f848f32SViacheslav Ovsiienko " can't create queues for packet pacing"); 13818f848f32SViacheslav Ovsiienko err = ENODEV; 13828f848f32SViacheslav Ovsiienko goto error; 13838f848f32SViacheslav Ovsiienko #endif 13848f848f32SViacheslav Ovsiienko } 1385d462a83cSMichael Baum if (config->devx) { 1386a2854c4dSViacheslav Ovsiienko uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)]; 1387a2854c4dSViacheslav Ovsiienko 1388972a1bf8SViacheslav Ovsiienko err = config->hca_attr.access_register_user ? 1389972a1bf8SViacheslav Ovsiienko mlx5_devx_cmd_register_read 1390a2854c4dSViacheslav Ovsiienko (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0, 1391972a1bf8SViacheslav Ovsiienko reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP; 1392a2854c4dSViacheslav Ovsiienko if (!err) { 1393a2854c4dSViacheslav Ovsiienko uint32_t ts_mode; 1394a2854c4dSViacheslav Ovsiienko 1395a2854c4dSViacheslav Ovsiienko /* MTUTC register is read successfully. */ 1396a2854c4dSViacheslav Ovsiienko ts_mode = MLX5_GET(register_mtutc, reg, 1397a2854c4dSViacheslav Ovsiienko time_stamp_mode); 1398a2854c4dSViacheslav Ovsiienko if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) 1399d462a83cSMichael Baum config->rt_timestamp = 1; 1400a2854c4dSViacheslav Ovsiienko } else { 1401a2854c4dSViacheslav Ovsiienko /* Kernel does not support register reading. */ 1402d462a83cSMichael Baum if (config->hca_attr.dev_freq_khz == 1403a2854c4dSViacheslav Ovsiienko (NS_PER_S / MS_PER_S)) 1404d462a83cSMichael Baum config->rt_timestamp = 1; 1405a2854c4dSViacheslav Ovsiienko } 1406a2854c4dSViacheslav Ovsiienko } 140750f95b23SSuanming Mou /* 140850f95b23SSuanming Mou * If HW has bug working with tunnel packet decapsulation and 140950f95b23SSuanming Mou * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip 141050f95b23SSuanming Mou * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore. 141150f95b23SSuanming Mou */ 1412d462a83cSMichael Baum if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en) 1413d462a83cSMichael Baum config->hw_fcs_strip = 0; 141450f95b23SSuanming Mou DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 1415d462a83cSMichael Baum (config->hw_fcs_strip ? 
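/*
 * The real-time timestamp detection above follows a two-step rule:
 * trust the MTUTC register when the kernel can read it, otherwise fall
 * back to the heuristic that a 1GHz (NS_PER_S / MS_PER_S kHz) device
 * clock implies real-time mode. A standalone sketch (illustrative only;
 * the helper name and flat parameters are assumptions):
 */
#if 0	/* Illustrative sketch, not compiled. */
static int
example_is_rt_timestamp(int mtutc_readable, uint32_t ts_mode,
			uint32_t dev_freq_khz)
{
	if (mtutc_readable)
		return ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME;
	return dev_freq_khz == (NS_PER_S / MS_PER_S);
}
#endif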
"" : "not ")); 1416d462a83cSMichael Baum if (config->mprq.enabled && mprq) { 1417d462a83cSMichael Baum if (config->mprq.stride_num_n && 1418d462a83cSMichael Baum (config->mprq.stride_num_n > mprq_max_stride_num_n || 1419d462a83cSMichael Baum config->mprq.stride_num_n < mprq_min_stride_num_n)) { 1420d462a83cSMichael Baum config->mprq.stride_num_n = 14212eb4d010SOphir Munk RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 14222eb4d010SOphir Munk mprq_min_stride_num_n), 14232eb4d010SOphir Munk mprq_max_stride_num_n); 14242eb4d010SOphir Munk DRV_LOG(WARNING, 14252eb4d010SOphir Munk "the number of strides" 14262eb4d010SOphir Munk " for Multi-Packet RQ is out of range," 14272eb4d010SOphir Munk " setting default value (%u)", 1428d462a83cSMichael Baum 1 << config->mprq.stride_num_n); 14292eb4d010SOphir Munk } 1430d462a83cSMichael Baum if (config->mprq.stride_size_n && 1431d462a83cSMichael Baum (config->mprq.stride_size_n > mprq_max_stride_size_n || 1432d462a83cSMichael Baum config->mprq.stride_size_n < mprq_min_stride_size_n)) { 1433d462a83cSMichael Baum config->mprq.stride_size_n = 14342eb4d010SOphir Munk RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N, 14352eb4d010SOphir Munk mprq_min_stride_size_n), 14362eb4d010SOphir Munk mprq_max_stride_size_n); 14372eb4d010SOphir Munk DRV_LOG(WARNING, 14382eb4d010SOphir Munk "the size of a stride" 14392eb4d010SOphir Munk " for Multi-Packet RQ is out of range," 14402eb4d010SOphir Munk " setting default value (%u)", 1441d462a83cSMichael Baum 1 << config->mprq.stride_size_n); 14422eb4d010SOphir Munk } 1443d462a83cSMichael Baum config->mprq.min_stride_size_n = mprq_min_stride_size_n; 1444d462a83cSMichael Baum config->mprq.max_stride_size_n = mprq_max_stride_size_n; 1445d462a83cSMichael Baum } else if (config->mprq.enabled && !mprq) { 14462eb4d010SOphir Munk DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 1447d462a83cSMichael Baum config->mprq.enabled = 0; 14482eb4d010SOphir Munk } 1449d462a83cSMichael Baum if (config->max_dump_files_num == 0) 1450d462a83cSMichael Baum config->max_dump_files_num = 128; 14512eb4d010SOphir Munk eth_dev = rte_eth_dev_allocate(name); 14522eb4d010SOphir Munk if (eth_dev == NULL) { 14532eb4d010SOphir Munk DRV_LOG(ERR, "can not allocate rte ethdev"); 14542eb4d010SOphir Munk err = ENOMEM; 14552eb4d010SOphir Munk goto error; 14562eb4d010SOphir Munk } 14572eb4d010SOphir Munk if (priv->representor) { 14582eb4d010SOphir Munk eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 14592eb4d010SOphir Munk eth_dev->data->representor_id = priv->representor_id; 14602eb4d010SOphir Munk } 146139ae7577SSuanming Mou priv->mp_id.port_id = eth_dev->data->port_id; 146239ae7577SSuanming Mou strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 14632eb4d010SOphir Munk /* 14642eb4d010SOphir Munk * Store associated network device interface index. This index 14652eb4d010SOphir Munk * is permanent throughout the lifetime of device. So, we may store 14662eb4d010SOphir Munk * the ifindex here and use the cached value further. 
14672eb4d010SOphir Munk */ 14682eb4d010SOphir Munk MLX5_ASSERT(spawn->ifindex); 14692eb4d010SOphir Munk priv->if_index = spawn->ifindex; 1470c21e5facSXueming Li if (priv->pf_bond >= 0 && priv->master) { 1471c21e5facSXueming Li /* Get bond interface info */ 1472c21e5facSXueming Li err = mlx5_sysfs_bond_info(priv->if_index, 1473c21e5facSXueming Li &priv->bond_ifindex, 1474c21e5facSXueming Li priv->bond_name); 1475c21e5facSXueming Li if (err) 1476c21e5facSXueming Li DRV_LOG(ERR, "unable to get bond info: %s", 1477c21e5facSXueming Li strerror(rte_errno)); 1478c21e5facSXueming Li else 1479c21e5facSXueming Li DRV_LOG(INFO, "PF device %u, bond device %u(%s)", 1480c21e5facSXueming Li priv->if_index, priv->bond_ifindex, 1481c21e5facSXueming Li priv->bond_name); 1482c21e5facSXueming Li } 14832eb4d010SOphir Munk eth_dev->data->dev_private = priv; 14842eb4d010SOphir Munk priv->dev_data = eth_dev->data; 14852eb4d010SOphir Munk eth_dev->data->mac_addrs = priv->mac; 14862eb4d010SOphir Munk eth_dev->device = dpdk_dev; 1487f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 14882eb4d010SOphir Munk /* Configure the first MAC address by default. */ 14892eb4d010SOphir Munk if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 14902eb4d010SOphir Munk DRV_LOG(ERR, 14912eb4d010SOphir Munk "port %u cannot get MAC address, is mlx5_en" 14922eb4d010SOphir Munk " loaded? (errno: %s)", 14932eb4d010SOphir Munk eth_dev->data->port_id, strerror(rte_errno)); 14942eb4d010SOphir Munk err = ENODEV; 14952eb4d010SOphir Munk goto error; 14962eb4d010SOphir Munk } 14972eb4d010SOphir Munk DRV_LOG(INFO, 14982eb4d010SOphir Munk "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 14992eb4d010SOphir Munk eth_dev->data->port_id, 15002eb4d010SOphir Munk mac.addr_bytes[0], mac.addr_bytes[1], 15012eb4d010SOphir Munk mac.addr_bytes[2], mac.addr_bytes[3], 15022eb4d010SOphir Munk mac.addr_bytes[4], mac.addr_bytes[5]); 15032eb4d010SOphir Munk #ifdef RTE_LIBRTE_MLX5_DEBUG 15042eb4d010SOphir Munk { 150528743807STal Shnaiderman char ifname[MLX5_NAMESIZE]; 15062eb4d010SOphir Munk 15072eb4d010SOphir Munk if (mlx5_get_ifname(eth_dev, &ifname) == 0) 15082eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 15092eb4d010SOphir Munk eth_dev->data->port_id, ifname); 15102eb4d010SOphir Munk else 15112eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u ifname is unknown", 15122eb4d010SOphir Munk eth_dev->data->port_id); 15132eb4d010SOphir Munk } 15142eb4d010SOphir Munk #endif 15152eb4d010SOphir Munk /* Get actual MTU if possible. */ 15162eb4d010SOphir Munk err = mlx5_get_mtu(eth_dev, &priv->mtu); 15172eb4d010SOphir Munk if (err) { 15182eb4d010SOphir Munk err = rte_errno; 15192eb4d010SOphir Munk goto error; 15202eb4d010SOphir Munk } 15212eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 15222eb4d010SOphir Munk priv->mtu); 15232eb4d010SOphir Munk /* Initialize burst functions to prevent crashes before link-up. */ 15242eb4d010SOphir Munk eth_dev->rx_pkt_burst = removed_rx_burst; 15252eb4d010SOphir Munk eth_dev->tx_pkt_burst = removed_tx_burst; 1526b012b4ceSOphir Munk eth_dev->dev_ops = &mlx5_dev_ops; 1527cbfc6111SFerruh Yigit eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 1528cbfc6111SFerruh Yigit eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 1529cbfc6111SFerruh Yigit eth_dev->rx_queue_count = mlx5_rx_queue_count; 15302eb4d010SOphir Munk /* Register MAC address. 
*/ 15312eb4d010SOphir Munk claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 1532d462a83cSMichael Baum if (config->vf && config->vf_nl_en) 15332eb4d010SOphir Munk mlx5_nl_mac_addr_sync(priv->nl_socket_route, 15342eb4d010SOphir Munk mlx5_ifindex(eth_dev), 15352eb4d010SOphir Munk eth_dev->data->mac_addrs, 15362eb4d010SOphir Munk MLX5_MAX_MAC_ADDRESSES); 15372eb4d010SOphir Munk priv->flows = 0; 15382eb4d010SOphir Munk priv->ctrl_flows = 0; 1539d163fc2dSXueming Li rte_spinlock_init(&priv->flow_list_lock); 15402eb4d010SOphir Munk TAILQ_INIT(&priv->flow_meters); 15412eb4d010SOphir Munk TAILQ_INIT(&priv->flow_meter_profiles); 15422eb4d010SOphir Munk /* Hint libmlx5 to use PMD allocator for data plane resources */ 154336dabceaSMichael Baum mlx5_glue->dv_set_context_attr(sh->ctx, 154436dabceaSMichael Baum MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 154536dabceaSMichael Baum (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ 15462eb4d010SOphir Munk .alloc = &mlx5_alloc_verbs_buf, 15472eb4d010SOphir Munk .free = &mlx5_free_verbs_buf, 154881c3b977SViacheslav Ovsiienko .data = sh, 154936dabceaSMichael Baum })); 15502eb4d010SOphir Munk /* Bring Ethernet device up. */ 15512eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 15522eb4d010SOphir Munk eth_dev->data->port_id); 15532eb4d010SOphir Munk mlx5_set_link_up(eth_dev); 15542eb4d010SOphir Munk /* 15552eb4d010SOphir Munk * Even though the interrupt handler is not installed yet, 15562eb4d010SOphir Munk * interrupts will still trigger on the async_fd from 15572eb4d010SOphir Munk * Verbs context returned by ibv_open_device(). 15582eb4d010SOphir Munk */ 15592eb4d010SOphir Munk mlx5_link_update(eth_dev, 0); 15602eb4d010SOphir Munk #ifdef HAVE_MLX5DV_DR_ESWITCH 1561d462a83cSMichael Baum if (!(config->hca_attr.eswitch_manager && config->dv_flow_en && 15622eb4d010SOphir Munk (switch_info->representor || switch_info->master))) 1563d462a83cSMichael Baum config->dv_esw_en = 0; 15642eb4d010SOphir Munk #else 1565d462a83cSMichael Baum config->dv_esw_en = 0; 15662eb4d010SOphir Munk #endif 15672eb4d010SOphir Munk /* Detect minimal data bytes to inline. */ 1568d462a83cSMichael Baum mlx5_set_min_inline(spawn, config); 15692eb4d010SOphir Munk /* Store device configuration on private structure. */ 1570d462a83cSMichael Baum priv->config = *config; 15712eb4d010SOphir Munk /* Create context for virtual machine VLAN workaround. */ 15722eb4d010SOphir Munk priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 1573d462a83cSMichael Baum if (config->dv_flow_en) { 15742eb4d010SOphir Munk err = mlx5_alloc_shared_dr(priv); 15752eb4d010SOphir Munk if (err) 15762eb4d010SOphir Munk goto error; 15772eb4d010SOphir Munk } 15787aa9892fSMichael Baum if (config->devx && config->dv_flow_en && config->dest_tir) { 15795eaf882eSMichael Baum priv->obj_ops = devx_obj_ops; 15800c762e81SMichael Baum priv->obj_ops.drop_action_create = 15810c762e81SMichael Baum ibv_obj_ops.drop_action_create; 15820c762e81SMichael Baum priv->obj_ops.drop_action_destroy = 15830c762e81SMichael Baum ibv_obj_ops.drop_action_destroy; 15845d9f3c3fSMichael Baum #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 15855d9f3c3fSMichael Baum priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify; 15865d9f3c3fSMichael Baum #else 15873ec73abeSMatan Azrad if (config->dv_esw_en) 15885d9f3c3fSMichael Baum priv->obj_ops.txq_obj_modify = 15895d9f3c3fSMichael Baum ibv_obj_ops.txq_obj_modify; 15905d9f3c3fSMichael Baum #endif 15913ec73abeSMatan Azrad /* Use specific wrappers for Tx object. 
*/ 15923ec73abeSMatan Azrad priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new; 15933ec73abeSMatan Azrad priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release; 1594e6988afdSMatan Azrad mlx5_queue_counter_id_prepare(eth_dev); 15953ec73abeSMatan Azrad 15965eaf882eSMichael Baum } else { 15975eaf882eSMichael Baum priv->obj_ops = ibv_obj_ops; 15985eaf882eSMichael Baum } 159965b3cd0dSSuanming Mou priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev); 160065b3cd0dSSuanming Mou if (!priv->drop_queue.hrxq) 160165b3cd0dSSuanming Mou goto error; 16022eb4d010SOphir Munk /* Supported Verbs flow priority number detection. */ 16032eb4d010SOphir Munk err = mlx5_flow_discover_priorities(eth_dev); 16042eb4d010SOphir Munk if (err < 0) { 16052eb4d010SOphir Munk err = -err; 16062eb4d010SOphir Munk goto error; 16072eb4d010SOphir Munk } 16082eb4d010SOphir Munk priv->config.flow_prio = err; 16092eb4d010SOphir Munk if (!priv->config.dv_esw_en && 16102eb4d010SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 16112eb4d010SOphir Munk DRV_LOG(WARNING, "metadata mode %u is not supported " 16122eb4d010SOphir Munk "(no E-Switch)", priv->config.dv_xmeta_en); 16132eb4d010SOphir Munk priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; 16142eb4d010SOphir Munk } 16152eb4d010SOphir Munk mlx5_set_metadata_mask(eth_dev); 16162eb4d010SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 16172eb4d010SOphir Munk !priv->sh->dv_regc0_mask) { 16182eb4d010SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 16192eb4d010SOphir Munk "(no metadata reg_c[0] is available)", 16202eb4d010SOphir Munk priv->config.dv_xmeta_en); 16212eb4d010SOphir Munk err = ENOTSUP; 16222eb4d010SOphir Munk goto error; 16232eb4d010SOphir Munk } 1624e1592b6cSSuanming Mou mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev, 1625e1592b6cSSuanming Mou mlx5_hrxq_create_cb, 1626e1592b6cSSuanming Mou mlx5_hrxq_match_cb, 1627e1592b6cSSuanming Mou mlx5_hrxq_remove_cb); 16282eb4d010SOphir Munk /* Query availability of metadata reg_c's. 
*/ 16292eb4d010SOphir Munk err = mlx5_flow_discover_mreg_c(eth_dev); 16302eb4d010SOphir Munk if (err < 0) { 16312eb4d010SOphir Munk err = -err; 16322eb4d010SOphir Munk goto error; 16332eb4d010SOphir Munk } 16342eb4d010SOphir Munk if (!mlx5_flow_ext_mreg_supported(eth_dev)) { 16352eb4d010SOphir Munk DRV_LOG(DEBUG, 16362eb4d010SOphir Munk "port %u extensive metadata register is not supported", 16372eb4d010SOphir Munk eth_dev->data->port_id); 16382eb4d010SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 16392eb4d010SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 16402eb4d010SOphir Munk "(no metadata registers available)", 16412eb4d010SOphir Munk priv->config.dv_xmeta_en); 16422eb4d010SOphir Munk err = ENOTSUP; 16432eb4d010SOphir Munk goto error; 16442eb4d010SOphir Munk } 16452eb4d010SOphir Munk } 16462eb4d010SOphir Munk if (priv->config.dv_flow_en && 16472eb4d010SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 16482eb4d010SOphir Munk mlx5_flow_ext_mreg_supported(eth_dev) && 16492eb4d010SOphir Munk priv->sh->dv_regc0_mask) { 16502eb4d010SOphir Munk priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, 1651e69a5922SXueming Li MLX5_FLOW_MREG_HTABLE_SZ, 1652e69a5922SXueming Li 0, 0, 1653f7f73ac1SXueming Li flow_dv_mreg_create_cb, 1654f5b0aed2SSuanming Mou flow_dv_mreg_match_cb, 1655f7f73ac1SXueming Li flow_dv_mreg_remove_cb); 16562eb4d010SOphir Munk if (!priv->mreg_cp_tbl) { 16572eb4d010SOphir Munk err = ENOMEM; 16582eb4d010SOphir Munk goto error; 16592eb4d010SOphir Munk } 1660f7f73ac1SXueming Li priv->mreg_cp_tbl->ctx = eth_dev; 16612eb4d010SOphir Munk } 1662cc608e4dSSuanming Mou rte_spinlock_init(&priv->shared_act_sl); 1663994829e6SSuanming Mou mlx5_flow_counter_mode_config(eth_dev); 16649fbe97f0SXueming Li if (priv->config.dv_flow_en) 16659fbe97f0SXueming Li eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 16662eb4d010SOphir Munk return eth_dev; 16672eb4d010SOphir Munk error: 16682eb4d010SOphir Munk if (priv) { 16692eb4d010SOphir Munk if (priv->mreg_cp_tbl) 1670e69a5922SXueming Li mlx5_hlist_destroy(priv->mreg_cp_tbl); 16712eb4d010SOphir Munk if (priv->sh) 16722eb4d010SOphir Munk mlx5_os_free_shared_dr(priv); 16732eb4d010SOphir Munk if (priv->nl_socket_route >= 0) 16742eb4d010SOphir Munk close(priv->nl_socket_route); 16752eb4d010SOphir Munk if (priv->nl_socket_rdma >= 0) 16762eb4d010SOphir Munk close(priv->nl_socket_rdma); 16772eb4d010SOphir Munk if (priv->vmwa_context) 16782eb4d010SOphir Munk mlx5_vlan_vmwa_exit(priv->vmwa_context); 167965b3cd0dSSuanming Mou if (eth_dev && priv->drop_queue.hrxq) 168065b3cd0dSSuanming Mou mlx5_drop_action_destroy(eth_dev); 16812eb4d010SOphir Munk if (own_domain_id) 16822eb4d010SOphir Munk claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 1683e1592b6cSSuanming Mou mlx5_cache_list_destroy(&priv->hrxqs); 16842175c4dcSSuanming Mou mlx5_free(priv); 16852eb4d010SOphir Munk if (eth_dev != NULL) 16862eb4d010SOphir Munk eth_dev->data->dev_private = NULL; 16872eb4d010SOphir Munk } 16882eb4d010SOphir Munk if (eth_dev != NULL) { 16892eb4d010SOphir Munk /* mac_addrs must not be freed alone because part of 16902eb4d010SOphir Munk * dev_private 16912eb4d010SOphir Munk **/ 16922eb4d010SOphir Munk eth_dev->data->mac_addrs = NULL; 16932eb4d010SOphir Munk rte_eth_dev_release_port(eth_dev); 16942eb4d010SOphir Munk } 16952eb4d010SOphir Munk if (sh) 169691389890SOphir Munk mlx5_free_shared_dev_ctx(sh); 16972eb4d010SOphir Munk MLX5_ASSERT(err > 0); 16982eb4d010SOphir Munk rte_errno = err; 16992eb4d010SOphir 
Munk return NULL;
17002eb4d010SOphir Munk }
17012eb4d010SOphir Munk
17022eb4d010SOphir Munk /**
17032eb4d010SOphir Munk * Comparison callback to sort device data.
17042eb4d010SOphir Munk *
17052eb4d010SOphir Munk * This is meant to be used with qsort().
17062eb4d010SOphir Munk *
17072eb4d010SOphir Munk * @param a[in]
17082eb4d010SOphir Munk * Pointer to pointer to first data object.
17092eb4d010SOphir Munk * @param b[in]
17102eb4d010SOphir Munk * Pointer to pointer to second data object.
17112eb4d010SOphir Munk *
17122eb4d010SOphir Munk * @return
17132eb4d010SOphir Munk * 0 if both objects are equal, less than 0 if the first argument is less
17142eb4d010SOphir Munk * than the second, greater than 0 otherwise.
17152eb4d010SOphir Munk */
17162eb4d010SOphir Munk static int
17172eb4d010SOphir Munk mlx5_dev_spawn_data_cmp(const void *a, const void *b)
17182eb4d010SOphir Munk {
17192eb4d010SOphir Munk const struct mlx5_switch_info *si_a =
17202eb4d010SOphir Munk &((const struct mlx5_dev_spawn_data *)a)->info;
17212eb4d010SOphir Munk const struct mlx5_switch_info *si_b =
17222eb4d010SOphir Munk &((const struct mlx5_dev_spawn_data *)b)->info;
17232eb4d010SOphir Munk int ret;
17242eb4d010SOphir Munk
17252eb4d010SOphir Munk /* Master device first. */
17262eb4d010SOphir Munk ret = si_b->master - si_a->master;
17272eb4d010SOphir Munk if (ret)
17282eb4d010SOphir Munk return ret;
17292eb4d010SOphir Munk /* Then representor devices. */
17302eb4d010SOphir Munk ret = si_b->representor - si_a->representor;
17312eb4d010SOphir Munk if (ret)
17322eb4d010SOphir Munk return ret;
17332eb4d010SOphir Munk /* Unidentified devices come last in no specific order. */
17342eb4d010SOphir Munk if (!si_a->representor)
17352eb4d010SOphir Munk return 0;
17362eb4d010SOphir Munk /* Order representors by name. */
17372eb4d010SOphir Munk return si_a->port_name - si_b->port_name;
17382eb4d010SOphir Munk }
17392eb4d010SOphir Munk
17402eb4d010SOphir Munk /**
17412eb4d010SOphir Munk * Match PCI information for possible slaves of a bonding device.
17422eb4d010SOphir Munk *
17432eb4d010SOphir Munk * @param[in] ibv_dev
17442eb4d010SOphir Munk * Pointer to Infiniband device structure.
17452eb4d010SOphir Munk * @param[in] pci_dev
1746f926cce3SXueming Li * Pointer to primary PCI address structure to match.
17472eb4d010SOphir Munk * @param[in] nl_rdma
17482eb4d010SOphir Munk * Netlink RDMA group socket handle.
1749f926cce3SXueming Li * @param[in] owner
1750f926cce3SXueming Li * Representor owner PF index.
17512eb4d010SOphir Munk *
17522eb4d010SOphir Munk * @return
17532eb4d010SOphir Munk * Negative value if no bonding device is found, otherwise the
17542eb4d010SOphir Munk * positive index of the slave PF in bonding.
17552eb4d010SOphir Munk */
17562eb4d010SOphir Munk static int
17572eb4d010SOphir Munk mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1758f926cce3SXueming Li const struct rte_pci_addr *pci_dev,
1759f926cce3SXueming Li int nl_rdma, uint16_t owner)
17602eb4d010SOphir Munk {
17612eb4d010SOphir Munk char ifname[IF_NAMESIZE + 1];
17622eb4d010SOphir Munk unsigned int ifindex;
17632eb4d010SOphir Munk unsigned int np, i;
17642eb4d010SOphir Munk FILE *file = NULL;
17652eb4d010SOphir Munk int pf = -1;
17662eb4d010SOphir Munk
17672eb4d010SOphir Munk /*
17682eb4d010SOphir Munk * Try to get master device name. If something goes
17692eb4d010SOphir Munk * wrong, assume the lack of kernel support and no
17702eb4d010SOphir Munk * bonding devices.
17712eb4d010SOphir Munk */
17722eb4d010SOphir Munk if (nl_rdma < 0)
17732eb4d010SOphir Munk return -1;
17742eb4d010SOphir Munk if (!strstr(ibv_dev->name, "bond"))
17752eb4d010SOphir Munk return -1;
17762eb4d010SOphir Munk np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
17772eb4d010SOphir Munk if (!np)
17782eb4d010SOphir Munk return -1;
17792eb4d010SOphir Munk /*
17802eb4d010SOphir Munk * The master device might not be on the predefined
17812eb4d010SOphir Munk * port (not on port index 1, it is not guaranteed),
17822eb4d010SOphir Munk * we have to scan all Infiniband device ports and
17832eb4d010SOphir Munk * find the master.
17842eb4d010SOphir Munk */
17852eb4d010SOphir Munk for (i = 1; i <= np; ++i) {
17862eb4d010SOphir Munk /* Check whether Infiniband port is populated. */
17872eb4d010SOphir Munk ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
17882eb4d010SOphir Munk if (!ifindex)
17892eb4d010SOphir Munk continue;
17902eb4d010SOphir Munk if (!if_indextoname(ifindex, ifname))
17912eb4d010SOphir Munk continue;
17922eb4d010SOphir Munk /* Try to read bonding slave names from sysfs. */
17932eb4d010SOphir Munk MKSTR(slaves,
17942eb4d010SOphir Munk "/sys/class/net/%s/master/bonding/slaves", ifname);
17952eb4d010SOphir Munk file = fopen(slaves, "r");
17962eb4d010SOphir Munk if (file)
17972eb4d010SOphir Munk break;
17982eb4d010SOphir Munk }
17992eb4d010SOphir Munk if (!file)
18002eb4d010SOphir Munk return -1;
18012eb4d010SOphir Munk /* Use safe format to check maximal buffer length. */
18022eb4d010SOphir Munk MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
18032eb4d010SOphir Munk while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
18042eb4d010SOphir Munk char tmp_str[IF_NAMESIZE + 32];
18052eb4d010SOphir Munk struct rte_pci_addr pci_addr;
18062eb4d010SOphir Munk struct mlx5_switch_info info;
18072eb4d010SOphir Munk
18082eb4d010SOphir Munk /* Process slave interface names in the loop. */
18092eb4d010SOphir Munk snprintf(tmp_str, sizeof(tmp_str),
18102eb4d010SOphir Munk "/sys/class/net/%s", ifname);
18112eb4d010SOphir Munk if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
18122eb4d010SOphir Munk DRV_LOG(WARNING, "can not get PCI address"
18132eb4d010SOphir Munk " for netdev \"%s\"", ifname);
18142eb4d010SOphir Munk continue;
18152eb4d010SOphir Munk }
1816f926cce3SXueming Li if (pci_dev->domain != pci_addr.domain ||
1817f926cce3SXueming Li pci_dev->bus != pci_addr.bus ||
1818f926cce3SXueming Li pci_dev->devid != pci_addr.devid ||
1819f926cce3SXueming Li pci_dev->function + owner != pci_addr.function)
18202eb4d010SOphir Munk continue;
18212eb4d010SOphir Munk /* Slave interface PCI address match found.
*/ 18222eb4d010SOphir Munk fclose(file); 18232eb4d010SOphir Munk snprintf(tmp_str, sizeof(tmp_str), 18242eb4d010SOphir Munk "/sys/class/net/%s/phys_port_name", ifname); 18252eb4d010SOphir Munk file = fopen(tmp_str, "rb"); 18262eb4d010SOphir Munk if (!file) 18272eb4d010SOphir Munk break; 18282eb4d010SOphir Munk info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; 18292eb4d010SOphir Munk if (fscanf(file, "%32s", tmp_str) == 1) 18302eb4d010SOphir Munk mlx5_translate_port_name(tmp_str, &info); 18312eb4d010SOphir Munk if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY || 18322eb4d010SOphir Munk info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) 18332eb4d010SOphir Munk pf = info.port_name; 18342eb4d010SOphir Munk break; 18352eb4d010SOphir Munk } 18362eb4d010SOphir Munk if (file) 18372eb4d010SOphir Munk fclose(file); 18382eb4d010SOphir Munk return pf; 18392eb4d010SOphir Munk } 18402eb4d010SOphir Munk 18412eb4d010SOphir Munk /** 1842*08c2772fSXueming Li * Register a PCI device within bonding. 18432eb4d010SOphir Munk * 1844*08c2772fSXueming Li * This function spawns Ethernet devices out of a given PCI device and 1845*08c2772fSXueming Li * bonding owner PF index. 18462eb4d010SOphir Munk * 18472eb4d010SOphir Munk * @param[in] pci_dev 18482eb4d010SOphir Munk * PCI device information. 1849*08c2772fSXueming Li * @param[in] req_eth_da 1850*08c2772fSXueming Li * Requested ethdev device argument. 1851*08c2772fSXueming Li * @param[in] owner_id 1852*08c2772fSXueming Li * Requested owner PF port ID within bonding device, default to 0. 18532eb4d010SOphir Munk * 18542eb4d010SOphir Munk * @return 18552eb4d010SOphir Munk * 0 on success, a negative errno value otherwise and rte_errno is set. 18562eb4d010SOphir Munk */ 1857*08c2772fSXueming Li static int 1858*08c2772fSXueming Li mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev, 1859*08c2772fSXueming Li struct rte_eth_devargs *req_eth_da, 1860*08c2772fSXueming Li uint16_t owner_id) 18612eb4d010SOphir Munk { 18622eb4d010SOphir Munk struct ibv_device **ibv_list; 18632eb4d010SOphir Munk /* 18642eb4d010SOphir Munk * Number of found IB Devices matching with requested PCI BDF. 18652eb4d010SOphir Munk * nd != 1 means there are multiple IB devices over the same 18662eb4d010SOphir Munk * PCI device and we have representors and master. 18672eb4d010SOphir Munk */ 18682eb4d010SOphir Munk unsigned int nd = 0; 18692eb4d010SOphir Munk /* 18702eb4d010SOphir Munk * Number of found IB device Ports. nd = 1 and np = 1..n means 18712eb4d010SOphir Munk * we have the single multiport IB device, and there may be 18722eb4d010SOphir Munk * representors attached to some of found ports. 18732eb4d010SOphir Munk */ 18742eb4d010SOphir Munk unsigned int np = 0; 18752eb4d010SOphir Munk /* 18762eb4d010SOphir Munk * Number of DPDK ethernet devices to Spawn - either over 18772eb4d010SOphir Munk * multiple IB devices or multiple ports of single IB device. 18782eb4d010SOphir Munk * Actually this is the number of iterations to spawn. 
18792eb4d010SOphir Munk */ 18802eb4d010SOphir Munk unsigned int ns = 0; 18812eb4d010SOphir Munk /* 18822eb4d010SOphir Munk * Bonding device 18832eb4d010SOphir Munk * < 0 - no bonding device (single one) 18842eb4d010SOphir Munk * >= 0 - bonding device (value is slave PF index) 18852eb4d010SOphir Munk */ 18862eb4d010SOphir Munk int bd = -1; 18872eb4d010SOphir Munk struct mlx5_dev_spawn_data *list = NULL; 18882eb4d010SOphir Munk struct mlx5_dev_config dev_config; 1889d462a83cSMichael Baum unsigned int dev_config_vf; 1890*08c2772fSXueming Li struct rte_eth_devargs eth_da = *req_eth_da; 1891f926cce3SXueming Li struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */ 1892f926cce3SXueming Li int ret = -1; 18932eb4d010SOphir Munk 18942eb4d010SOphir Munk if (rte_eal_process_type() == RTE_PROC_PRIMARY) 18952eb4d010SOphir Munk mlx5_pmd_socket_init(); 18962eb4d010SOphir Munk ret = mlx5_init_once(); 18972eb4d010SOphir Munk if (ret) { 18982eb4d010SOphir Munk DRV_LOG(ERR, "unable to init PMD global data: %s", 18992eb4d010SOphir Munk strerror(rte_errno)); 19002eb4d010SOphir Munk return -rte_errno; 19012eb4d010SOphir Munk } 19022eb4d010SOphir Munk errno = 0; 19032eb4d010SOphir Munk ibv_list = mlx5_glue->get_device_list(&ret); 19042eb4d010SOphir Munk if (!ibv_list) { 19052eb4d010SOphir Munk rte_errno = errno ? errno : ENOSYS; 19062eb4d010SOphir Munk DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 19072eb4d010SOphir Munk return -rte_errno; 19082eb4d010SOphir Munk } 19092eb4d010SOphir Munk /* 19102eb4d010SOphir Munk * First scan the list of all Infiniband devices to find 19112eb4d010SOphir Munk * matching ones, gathering into the list. 19122eb4d010SOphir Munk */ 19132eb4d010SOphir Munk struct ibv_device *ibv_match[ret + 1]; 19142eb4d010SOphir Munk int nl_route = mlx5_nl_init(NETLINK_ROUTE); 19152eb4d010SOphir Munk int nl_rdma = mlx5_nl_init(NETLINK_RDMA); 19162eb4d010SOphir Munk unsigned int i; 19172eb4d010SOphir Munk 19182eb4d010SOphir Munk while (ret-- > 0) { 19192eb4d010SOphir Munk struct rte_pci_addr pci_addr; 19202eb4d010SOphir Munk 19212eb4d010SOphir Munk DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 19222eb4d010SOphir Munk bd = mlx5_device_bond_pci_match 1923*08c2772fSXueming Li (ibv_list[ret], &owner_pci, nl_rdma, owner_id); 19242eb4d010SOphir Munk if (bd >= 0) { 19252eb4d010SOphir Munk /* 19262eb4d010SOphir Munk * Bonding device detected. Only one match is allowed, 19272eb4d010SOphir Munk * the bonding is supported over multi-port IB device, 19282eb4d010SOphir Munk * there should be no matches on representor PCI 19292eb4d010SOphir Munk * functions or non VF LAG bonding devices with 19302eb4d010SOphir Munk * specified address. 19312eb4d010SOphir Munk */ 19322eb4d010SOphir Munk if (nd) { 19332eb4d010SOphir Munk DRV_LOG(ERR, 19342eb4d010SOphir Munk "multiple PCI match on bonding device" 19352eb4d010SOphir Munk "\"%s\" found", ibv_list[ret]->name); 19362eb4d010SOphir Munk rte_errno = ENOENT; 19372eb4d010SOphir Munk ret = -rte_errno; 19382eb4d010SOphir Munk goto exit; 19392eb4d010SOphir Munk } 1940f926cce3SXueming Li /* Amend owner pci address if owner PF ID specified. 
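 * For example, when the representor devargs select owner PF #1 on a
 * bonding device, the primary PCI function is advanced by that owner
 * index (function += owner_id), so the spawned ports are reported
 * under the requested owner PF.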
*/ 1941f926cce3SXueming Li if (eth_da.nb_representor_ports) 1942*08c2772fSXueming Li owner_pci.function += owner_id; 19432eb4d010SOphir Munk DRV_LOG(INFO, "PCI information matches for" 19442eb4d010SOphir Munk " slave %d bonding device \"%s\"", 19452eb4d010SOphir Munk bd, ibv_list[ret]->name); 19462eb4d010SOphir Munk ibv_match[nd++] = ibv_list[ret]; 19472eb4d010SOphir Munk break; 1948f926cce3SXueming Li } else { 1949f926cce3SXueming Li /* Bonding device not found. */ 19502eb4d010SOphir Munk if (mlx5_dev_to_pci_addr 19512eb4d010SOphir Munk (ibv_list[ret]->ibdev_path, &pci_addr)) 19522eb4d010SOphir Munk continue; 1953f926cce3SXueming Li if (owner_pci.domain != pci_addr.domain || 1954f926cce3SXueming Li owner_pci.bus != pci_addr.bus || 1955f926cce3SXueming Li owner_pci.devid != pci_addr.devid || 1956f926cce3SXueming Li owner_pci.function != pci_addr.function) 19572eb4d010SOphir Munk continue; 19582eb4d010SOphir Munk DRV_LOG(INFO, "PCI information matches for device \"%s\"", 19592eb4d010SOphir Munk ibv_list[ret]->name); 19602eb4d010SOphir Munk ibv_match[nd++] = ibv_list[ret]; 19612eb4d010SOphir Munk } 1962f926cce3SXueming Li } 19632eb4d010SOphir Munk ibv_match[nd] = NULL; 19642eb4d010SOphir Munk if (!nd) { 19652eb4d010SOphir Munk /* No device matches, just complain and bail out. */ 19662eb4d010SOphir Munk DRV_LOG(WARNING, 19672eb4d010SOphir Munk "no Verbs device matches PCI device " PCI_PRI_FMT "," 19682eb4d010SOphir Munk " are kernel drivers loaded?", 1969f926cce3SXueming Li owner_pci.domain, owner_pci.bus, 1970f926cce3SXueming Li owner_pci.devid, owner_pci.function); 19712eb4d010SOphir Munk rte_errno = ENOENT; 19722eb4d010SOphir Munk ret = -rte_errno; 19732eb4d010SOphir Munk goto exit; 19742eb4d010SOphir Munk } 19752eb4d010SOphir Munk if (nd == 1) { 19762eb4d010SOphir Munk /* 19772eb4d010SOphir Munk * Found single matching device may have multiple ports. 19782eb4d010SOphir Munk * Each port may be representor, we have to check the port 19792eb4d010SOphir Munk * number and check the representors existence. 19802eb4d010SOphir Munk */ 19812eb4d010SOphir Munk if (nl_rdma >= 0) 19822eb4d010SOphir Munk np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name); 19832eb4d010SOphir Munk if (!np) 19842eb4d010SOphir Munk DRV_LOG(WARNING, "can not get IB device \"%s\"" 19852eb4d010SOphir Munk " ports number", ibv_match[0]->name); 19862eb4d010SOphir Munk if (bd >= 0 && !np) { 19872eb4d010SOphir Munk DRV_LOG(ERR, "can not get ports" 19882eb4d010SOphir Munk " for bonding device"); 19892eb4d010SOphir Munk rte_errno = ENOENT; 19902eb4d010SOphir Munk ret = -rte_errno; 19912eb4d010SOphir Munk goto exit; 19922eb4d010SOphir Munk } 19932eb4d010SOphir Munk } 19942eb4d010SOphir Munk #ifndef HAVE_MLX5DV_DR_DEVX_PORT 19952eb4d010SOphir Munk if (bd >= 0) { 19962eb4d010SOphir Munk /* 19972eb4d010SOphir Munk * This may happen if there is VF LAG kernel support and 19982eb4d010SOphir Munk * application is compiled with older rdma_core library. 19992eb4d010SOphir Munk */ 20002eb4d010SOphir Munk DRV_LOG(ERR, 20012eb4d010SOphir Munk "No kernel/verbs support for VF LAG bonding found."); 20022eb4d010SOphir Munk rte_errno = ENOTSUP; 20032eb4d010SOphir Munk ret = -rte_errno; 20042eb4d010SOphir Munk goto exit; 20052eb4d010SOphir Munk } 20062eb4d010SOphir Munk #endif 20072eb4d010SOphir Munk /* 20082eb4d010SOphir Munk * Now we can determine the maximal 20092eb4d010SOphir Munk * amount of devices to be spawned. 
20102eb4d010SOphir Munk */ 20112175c4dcSSuanming Mou list = mlx5_malloc(MLX5_MEM_ZERO, 20122eb4d010SOphir Munk sizeof(struct mlx5_dev_spawn_data) * 20132eb4d010SOphir Munk (np ? np : nd), 20142175c4dcSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 20152eb4d010SOphir Munk if (!list) { 20162eb4d010SOphir Munk DRV_LOG(ERR, "spawn data array allocation failure"); 20172eb4d010SOphir Munk rte_errno = ENOMEM; 20182eb4d010SOphir Munk ret = -rte_errno; 20192eb4d010SOphir Munk goto exit; 20202eb4d010SOphir Munk } 20212eb4d010SOphir Munk if (bd >= 0 || np > 1) { 20222eb4d010SOphir Munk /* 20232eb4d010SOphir Munk * Single IB device with multiple ports found, 20242eb4d010SOphir Munk * it may be E-Switch master device and representors. 20252eb4d010SOphir Munk * We have to perform identification through the ports. 20262eb4d010SOphir Munk */ 20272eb4d010SOphir Munk MLX5_ASSERT(nl_rdma >= 0); 20282eb4d010SOphir Munk MLX5_ASSERT(ns == 0); 20292eb4d010SOphir Munk MLX5_ASSERT(nd == 1); 20302eb4d010SOphir Munk MLX5_ASSERT(np); 20312eb4d010SOphir Munk for (i = 1; i <= np; ++i) { 20322eb4d010SOphir Munk list[ns].max_port = np; 2033834a9019SOphir Munk list[ns].phys_port = i; 2034834a9019SOphir Munk list[ns].phys_dev = ibv_match[0]; 20352eb4d010SOphir Munk list[ns].eth_dev = NULL; 20362eb4d010SOphir Munk list[ns].pci_dev = pci_dev; 20372eb4d010SOphir Munk list[ns].pf_bond = bd; 20382eb4d010SOphir Munk list[ns].ifindex = mlx5_nl_ifindex 2039834a9019SOphir Munk (nl_rdma, 2040834a9019SOphir Munk mlx5_os_get_dev_device_name 2041834a9019SOphir Munk (list[ns].phys_dev), i); 20422eb4d010SOphir Munk if (!list[ns].ifindex) { 20432eb4d010SOphir Munk /* 20442eb4d010SOphir Munk * No network interface index found for the 20452eb4d010SOphir Munk * specified port, it means there is no 20462eb4d010SOphir Munk * representor on this port. It's OK, 20472eb4d010SOphir Munk * there can be disabled ports, for example 20482eb4d010SOphir Munk * if sriov_numvfs < sriov_totalvfs. 20492eb4d010SOphir Munk */ 20502eb4d010SOphir Munk continue; 20512eb4d010SOphir Munk } 20522eb4d010SOphir Munk ret = -1; 20532eb4d010SOphir Munk if (nl_route >= 0) 20542eb4d010SOphir Munk ret = mlx5_nl_switch_info 20552eb4d010SOphir Munk (nl_route, 20562eb4d010SOphir Munk list[ns].ifindex, 20572eb4d010SOphir Munk &list[ns].info); 20582eb4d010SOphir Munk if (ret || (!list[ns].info.representor && 20592eb4d010SOphir Munk !list[ns].info.master)) { 20602eb4d010SOphir Munk /* 20612eb4d010SOphir Munk * We failed to recognize representors with 20622eb4d010SOphir Munk * Netlink, let's try to perform the task 20632eb4d010SOphir Munk * with sysfs. 
20642eb4d010SOphir Munk */ 20652eb4d010SOphir Munk ret = mlx5_sysfs_switch_info 20662eb4d010SOphir Munk (list[ns].ifindex, 20672eb4d010SOphir Munk &list[ns].info); 20682eb4d010SOphir Munk } 20692a87415cSMichael Baum #ifdef HAVE_MLX5DV_DR_DEVX_PORT 20702eb4d010SOphir Munk if (!ret && bd >= 0) { 20712eb4d010SOphir Munk switch (list[ns].info.name_type) { 20722eb4d010SOphir Munk case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: 20732eb4d010SOphir Munk if (list[ns].info.port_name == bd) 20742eb4d010SOphir Munk ns++; 20752eb4d010SOphir Munk break; 2076420bbdaeSViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: 2077420bbdaeSViacheslav Ovsiienko /* Fallthrough */ 20782eb4d010SOphir Munk case MLX5_PHYS_PORT_NAME_TYPE_PFVF: 2079cb95feefSXueming Li /* Fallthrough */ 2080cb95feefSXueming Li case MLX5_PHYS_PORT_NAME_TYPE_PFSF: 20812eb4d010SOphir Munk if (list[ns].info.pf_num == bd) 20822eb4d010SOphir Munk ns++; 20832eb4d010SOphir Munk break; 20842eb4d010SOphir Munk default: 20852eb4d010SOphir Munk break; 20862eb4d010SOphir Munk } 20872eb4d010SOphir Munk continue; 20882eb4d010SOphir Munk } 20892a87415cSMichael Baum #endif 20902eb4d010SOphir Munk if (!ret && (list[ns].info.representor ^ 20912eb4d010SOphir Munk list[ns].info.master)) 20922eb4d010SOphir Munk ns++; 20932eb4d010SOphir Munk } 20942eb4d010SOphir Munk if (!ns) { 20952eb4d010SOphir Munk DRV_LOG(ERR, 20962eb4d010SOphir Munk "unable to recognize master/representors" 20972eb4d010SOphir Munk " on the IB device with multiple ports"); 20982eb4d010SOphir Munk rte_errno = ENOENT; 20992eb4d010SOphir Munk ret = -rte_errno; 21002eb4d010SOphir Munk goto exit; 21012eb4d010SOphir Munk } 21022eb4d010SOphir Munk } else { 21032eb4d010SOphir Munk /* 21042eb4d010SOphir Munk * The existence of several matching entries (nd > 1) means 21052eb4d010SOphir Munk * port representors have been instantiated. No existing Verbs 21062eb4d010SOphir Munk * call nor sysfs entries can tell them apart, this can only 21072eb4d010SOphir Munk * be done through Netlink calls assuming kernel drivers are 21082eb4d010SOphir Munk * recent enough to support them. 21092eb4d010SOphir Munk * 21102eb4d010SOphir Munk * In the event of identification failure through Netlink, 21112eb4d010SOphir Munk * try again through sysfs, then: 21122eb4d010SOphir Munk * 21132eb4d010SOphir Munk * 1. A single IB device matches (nd == 1) with single 21142eb4d010SOphir Munk * port (np=0/1) and is not a representor, assume 21152eb4d010SOphir Munk * no switch support. 21162eb4d010SOphir Munk * 21172eb4d010SOphir Munk * 2. Otherwise no safe assumptions can be made; 21182eb4d010SOphir Munk * complain louder and bail out. 
21192eb4d010SOphir Munk */ 21202eb4d010SOphir Munk for (i = 0; i != nd; ++i) { 21212eb4d010SOphir Munk memset(&list[ns].info, 0, sizeof(list[ns].info)); 21222eb4d010SOphir Munk list[ns].max_port = 1; 2123834a9019SOphir Munk list[ns].phys_port = 1; 2124834a9019SOphir Munk list[ns].phys_dev = ibv_match[i]; 21252eb4d010SOphir Munk list[ns].eth_dev = NULL; 21262eb4d010SOphir Munk list[ns].pci_dev = pci_dev; 21272eb4d010SOphir Munk list[ns].pf_bond = -1; 21282eb4d010SOphir Munk list[ns].ifindex = 0; 21292eb4d010SOphir Munk if (nl_rdma >= 0) 21302eb4d010SOphir Munk list[ns].ifindex = mlx5_nl_ifindex 2131834a9019SOphir Munk (nl_rdma, 2132834a9019SOphir Munk mlx5_os_get_dev_device_name 2133834a9019SOphir Munk (list[ns].phys_dev), 1); 21342eb4d010SOphir Munk if (!list[ns].ifindex) { 21352eb4d010SOphir Munk char ifname[IF_NAMESIZE]; 21362eb4d010SOphir Munk 21372eb4d010SOphir Munk /* 21382eb4d010SOphir Munk * Netlink failed, it may happen with old 21392eb4d010SOphir Munk * ib_core kernel driver (before 4.16). 21402eb4d010SOphir Munk * We can assume there is old driver because 21412eb4d010SOphir Munk * here we are processing single ports IB 21422eb4d010SOphir Munk * devices. Let's try sysfs to retrieve 21432eb4d010SOphir Munk * the ifindex. The method works for 21442eb4d010SOphir Munk * master device only. 21452eb4d010SOphir Munk */ 21462eb4d010SOphir Munk if (nd > 1) { 21472eb4d010SOphir Munk /* 21482eb4d010SOphir Munk * Multiple devices found, assume 21492eb4d010SOphir Munk * representors, can not distinguish 21502eb4d010SOphir Munk * master/representor and retrieve 21512eb4d010SOphir Munk * ifindex via sysfs. 21522eb4d010SOphir Munk */ 21532eb4d010SOphir Munk continue; 21542eb4d010SOphir Munk } 2155aec086c9SMatan Azrad ret = mlx5_get_ifname_sysfs 2156aec086c9SMatan Azrad (ibv_match[i]->ibdev_path, ifname); 21572eb4d010SOphir Munk if (!ret) 21582eb4d010SOphir Munk list[ns].ifindex = 21592eb4d010SOphir Munk if_nametoindex(ifname); 21602eb4d010SOphir Munk if (!list[ns].ifindex) { 21612eb4d010SOphir Munk /* 21622eb4d010SOphir Munk * No network interface index found 21632eb4d010SOphir Munk * for the specified device, it means 21642eb4d010SOphir Munk * there it is neither representor 21652eb4d010SOphir Munk * nor master. 21662eb4d010SOphir Munk */ 21672eb4d010SOphir Munk continue; 21682eb4d010SOphir Munk } 21692eb4d010SOphir Munk } 21702eb4d010SOphir Munk ret = -1; 21712eb4d010SOphir Munk if (nl_route >= 0) 21722eb4d010SOphir Munk ret = mlx5_nl_switch_info 21732eb4d010SOphir Munk (nl_route, 21742eb4d010SOphir Munk list[ns].ifindex, 21752eb4d010SOphir Munk &list[ns].info); 21762eb4d010SOphir Munk if (ret || (!list[ns].info.representor && 21772eb4d010SOphir Munk !list[ns].info.master)) { 21782eb4d010SOphir Munk /* 21792eb4d010SOphir Munk * We failed to recognize representors with 21802eb4d010SOphir Munk * Netlink, let's try to perform the task 21812eb4d010SOphir Munk * with sysfs. 
21822eb4d010SOphir Munk */ 21832eb4d010SOphir Munk ret = mlx5_sysfs_switch_info 21842eb4d010SOphir Munk (list[ns].ifindex, 21852eb4d010SOphir Munk &list[ns].info); 21862eb4d010SOphir Munk } 21872eb4d010SOphir Munk if (!ret && (list[ns].info.representor ^ 21882eb4d010SOphir Munk list[ns].info.master)) { 21892eb4d010SOphir Munk ns++; 21902eb4d010SOphir Munk } else if ((nd == 1) && 21912eb4d010SOphir Munk !list[ns].info.representor && 21922eb4d010SOphir Munk !list[ns].info.master) { 21932eb4d010SOphir Munk /* 21942eb4d010SOphir Munk * Single IB device with 21952eb4d010SOphir Munk * one physical port and 21962eb4d010SOphir Munk * attached network device. 21972eb4d010SOphir Munk * May be SRIOV is not enabled 21982eb4d010SOphir Munk * or there is no representors. 21992eb4d010SOphir Munk */ 22002eb4d010SOphir Munk DRV_LOG(INFO, "no E-Switch support detected"); 22012eb4d010SOphir Munk ns++; 22022eb4d010SOphir Munk break; 22032eb4d010SOphir Munk } 22042eb4d010SOphir Munk } 22052eb4d010SOphir Munk if (!ns) { 22062eb4d010SOphir Munk DRV_LOG(ERR, 22072eb4d010SOphir Munk "unable to recognize master/representors" 22082eb4d010SOphir Munk " on the multiple IB devices"); 22092eb4d010SOphir Munk rte_errno = ENOENT; 22102eb4d010SOphir Munk ret = -rte_errno; 22112eb4d010SOphir Munk goto exit; 22122eb4d010SOphir Munk } 22132eb4d010SOphir Munk } 22142eb4d010SOphir Munk MLX5_ASSERT(ns); 22152eb4d010SOphir Munk /* 22162eb4d010SOphir Munk * Sort list to probe devices in natural order for users convenience 22172eb4d010SOphir Munk * (i.e. master first, then representors from lowest to highest ID). 22182eb4d010SOphir Munk */ 22192eb4d010SOphir Munk qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp); 22202eb4d010SOphir Munk /* Device specific configuration. */ 22212eb4d010SOphir Munk switch (pci_dev->id.device_id) { 22222eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 22232eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 22242eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 22252eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 22262eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 22272eb4d010SOphir Munk case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 22283ea12cadSRaslan Darawsheh case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: 2229d462a83cSMichael Baum dev_config_vf = 1; 22302eb4d010SOphir Munk break; 22312eb4d010SOphir Munk default: 2232d462a83cSMichael Baum dev_config_vf = 0; 22332eb4d010SOphir Munk break; 22342eb4d010SOphir Munk } 2235f926cce3SXueming Li if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) { 2236f926cce3SXueming Li /* Set devargs default values. 
*/ 2237f926cce3SXueming Li if (eth_da.nb_mh_controllers == 0) { 2238f926cce3SXueming Li eth_da.nb_mh_controllers = 1; 2239f926cce3SXueming Li eth_da.mh_controllers[0] = 0; 2240f926cce3SXueming Li } 2241f926cce3SXueming Li if (eth_da.nb_ports == 0 && ns > 0) { 2242f926cce3SXueming Li if (list[0].pf_bond >= 0 && list[0].info.representor) 2243f926cce3SXueming Li DRV_LOG(WARNING, "Representor on Bonding device should use pf#vf# syntax: %s", 2244f926cce3SXueming Li pci_dev->device.devargs->args); 2245f926cce3SXueming Li eth_da.nb_ports = 1; 2246f926cce3SXueming Li eth_da.ports[0] = list[0].info.pf_num; 2247f926cce3SXueming Li } 2248f926cce3SXueming Li if (eth_da.nb_representor_ports == 0) { 2249f926cce3SXueming Li eth_da.nb_representor_ports = 1; 2250f926cce3SXueming Li eth_da.representor_ports[0] = 0; 2251f926cce3SXueming Li } 2252f926cce3SXueming Li } 22532eb4d010SOphir Munk for (i = 0; i != ns; ++i) { 22542eb4d010SOphir Munk uint32_t restore; 22552eb4d010SOphir Munk 2256d462a83cSMichael Baum /* Default configuration. */ 2257d462a83cSMichael Baum memset(&dev_config, 0, sizeof(struct mlx5_dev_config)); 2258d462a83cSMichael Baum dev_config.vf = dev_config_vf; 2259d462a83cSMichael Baum dev_config.mps = MLX5_ARG_UNSET; 2260d462a83cSMichael Baum dev_config.dbnc = MLX5_ARG_UNSET; 2261d462a83cSMichael Baum dev_config.rx_vec_en = 1; 2262d462a83cSMichael Baum dev_config.txq_inline_max = MLX5_ARG_UNSET; 2263d462a83cSMichael Baum dev_config.txq_inline_min = MLX5_ARG_UNSET; 2264d462a83cSMichael Baum dev_config.txq_inline_mpw = MLX5_ARG_UNSET; 2265d462a83cSMichael Baum dev_config.txqs_inline = MLX5_ARG_UNSET; 2266d462a83cSMichael Baum dev_config.vf_nl_en = 1; 2267d462a83cSMichael Baum dev_config.mr_ext_memseg_en = 1; 2268d462a83cSMichael Baum dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; 2269d462a83cSMichael Baum dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; 2270d462a83cSMichael Baum dev_config.dv_esw_en = 1; 2271d462a83cSMichael Baum dev_config.dv_flow_en = 1; 2272d462a83cSMichael Baum dev_config.decap_en = 1; 2273d462a83cSMichael Baum dev_config.log_hp_size = MLX5_ARG_UNSET; 22742eb4d010SOphir Munk list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, 22752eb4d010SOphir Munk &list[i], 2276cb95feefSXueming Li &dev_config, 2277cb95feefSXueming Li ð_da); 22782eb4d010SOphir Munk if (!list[i].eth_dev) { 22792eb4d010SOphir Munk if (rte_errno != EBUSY && rte_errno != EEXIST) 22802eb4d010SOphir Munk break; 22812eb4d010SOphir Munk /* Device is disabled or already spawned. Ignore it. */ 22822eb4d010SOphir Munk continue; 22832eb4d010SOphir Munk } 22842eb4d010SOphir Munk restore = list[i].eth_dev->data->dev_flags; 22852eb4d010SOphir Munk rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); 22862eb4d010SOphir Munk /* Restore non-PCI flags cleared by the above call. */ 22872eb4d010SOphir Munk list[i].eth_dev->data->dev_flags |= restore; 22882eb4d010SOphir Munk rte_eth_dev_probing_finish(list[i].eth_dev); 22892eb4d010SOphir Munk } 22902eb4d010SOphir Munk if (i != ns) { 22912eb4d010SOphir Munk DRV_LOG(ERR, 22922eb4d010SOphir Munk "probe of PCI device " PCI_PRI_FMT " aborted after" 22932eb4d010SOphir Munk " encountering an error: %s", 2294f926cce3SXueming Li owner_pci.domain, owner_pci.bus, 2295f926cce3SXueming Li owner_pci.devid, owner_pci.function, 22962eb4d010SOphir Munk strerror(rte_errno)); 22972eb4d010SOphir Munk ret = -rte_errno; 22982eb4d010SOphir Munk /* Roll back. 
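 * Close and release every port spawned so far; mac_addrs points
 * into dev_private, so it is cleared first to keep
 * rte_eth_dev_release_port() from freeing it.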
*/ 22992eb4d010SOphir Munk while (i--) { 23002eb4d010SOphir Munk if (!list[i].eth_dev) 23012eb4d010SOphir Munk continue; 23022eb4d010SOphir Munk mlx5_dev_close(list[i].eth_dev); 23032eb4d010SOphir Munk /* mac_addrs must not be freed because in dev_private */ 23042eb4d010SOphir Munk list[i].eth_dev->data->mac_addrs = NULL; 23052eb4d010SOphir Munk claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); 23062eb4d010SOphir Munk } 23072eb4d010SOphir Munk /* Restore original error. */ 23082eb4d010SOphir Munk rte_errno = -ret; 23092eb4d010SOphir Munk } else { 23102eb4d010SOphir Munk ret = 0; 23112eb4d010SOphir Munk } 23122eb4d010SOphir Munk exit: 23132eb4d010SOphir Munk /* 23142eb4d010SOphir Munk * Do the routine cleanup: 23152eb4d010SOphir Munk * - close opened Netlink sockets 23162eb4d010SOphir Munk * - free allocated spawn data array 23172eb4d010SOphir Munk * - free the Infiniband device list 23182eb4d010SOphir Munk */ 23192eb4d010SOphir Munk if (nl_rdma >= 0) 23202eb4d010SOphir Munk close(nl_rdma); 23212eb4d010SOphir Munk if (nl_route >= 0) 23222eb4d010SOphir Munk close(nl_route); 23232eb4d010SOphir Munk if (list) 23242175c4dcSSuanming Mou mlx5_free(list); 23252eb4d010SOphir Munk MLX5_ASSERT(ibv_list); 23262eb4d010SOphir Munk mlx5_glue->free_device_list(ibv_list); 23272eb4d010SOphir Munk return ret; 23282eb4d010SOphir Munk } 23292eb4d010SOphir Munk 2330*08c2772fSXueming Li /** 2331*08c2772fSXueming Li * DPDK callback to register a PCI device. 2332*08c2772fSXueming Li * 2333*08c2772fSXueming Li * This function spawns Ethernet devices out of a given PCI device. 2334*08c2772fSXueming Li * 2335*08c2772fSXueming Li * @param[in] pci_drv 2336*08c2772fSXueming Li * PCI driver structure (mlx5_driver). 2337*08c2772fSXueming Li * @param[in] pci_dev 2338*08c2772fSXueming Li * PCI device information. 2339*08c2772fSXueming Li * 2340*08c2772fSXueming Li * @return 2341*08c2772fSXueming Li * 0 on success, a negative errno value otherwise and rte_errno is set. 2342*08c2772fSXueming Li */ 2343*08c2772fSXueming Li int 2344*08c2772fSXueming Li mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2345*08c2772fSXueming Li struct rte_pci_device *pci_dev) 2346*08c2772fSXueming Li { 2347*08c2772fSXueming Li struct rte_eth_devargs eth_da = { .type = RTE_ETH_REPRESENTOR_NONE }; 2348*08c2772fSXueming Li int ret = 0; 2349*08c2772fSXueming Li uint16_t p; 2350*08c2772fSXueming Li 2351*08c2772fSXueming Li if (pci_dev->device.devargs) { 2352*08c2772fSXueming Li /* Parse representor information from device argument. 
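 * The class string is parsed first; the plain argument list
 * (presumably the legacy "representor=..." syntax) is consulted only
 * when the class string did not yield a representor specification.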
*/ 2353*08c2772fSXueming Li if (pci_dev->device.devargs->cls_str) 2354*08c2772fSXueming Li ret = rte_eth_devargs_parse 2355*08c2772fSXueming Li (pci_dev->device.devargs->cls_str, ð_da); 2356*08c2772fSXueming Li if (ret) { 2357*08c2772fSXueming Li DRV_LOG(ERR, "failed to parse device arguments: %s", 2358*08c2772fSXueming Li pci_dev->device.devargs->cls_str); 2359*08c2772fSXueming Li return -rte_errno; 2360*08c2772fSXueming Li } 2361*08c2772fSXueming Li if (eth_da.type == RTE_ETH_REPRESENTOR_NONE) { 2362*08c2772fSXueming Li /* Support legacy device argument */ 2363*08c2772fSXueming Li ret = rte_eth_devargs_parse 2364*08c2772fSXueming Li (pci_dev->device.devargs->args, ð_da); 2365*08c2772fSXueming Li if (ret) { 2366*08c2772fSXueming Li DRV_LOG(ERR, "failed to parse device arguments: %s", 2367*08c2772fSXueming Li pci_dev->device.devargs->args); 2368*08c2772fSXueming Li return -rte_errno; 2369*08c2772fSXueming Li } 2370*08c2772fSXueming Li } 2371*08c2772fSXueming Li } 2372*08c2772fSXueming Li 2373*08c2772fSXueming Li if (eth_da.nb_ports > 0) { 2374*08c2772fSXueming Li /* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */ 2375*08c2772fSXueming Li for (p = 0; p < eth_da.nb_ports; p++) 2376*08c2772fSXueming Li ret = mlx5_os_pci_probe_pf(pci_dev, ð_da, 2377*08c2772fSXueming Li eth_da.ports[p]); 2378*08c2772fSXueming Li } else { 2379*08c2772fSXueming Li ret = mlx5_os_pci_probe_pf(pci_dev, ð_da, 0); 2380*08c2772fSXueming Li } 2381*08c2772fSXueming Li return ret; 2382*08c2772fSXueming Li } 2383*08c2772fSXueming Li 23842eb4d010SOphir Munk static int 23852eb4d010SOphir Munk mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config) 23862eb4d010SOphir Munk { 23872eb4d010SOphir Munk char *env; 23882eb4d010SOphir Munk int value; 23892eb4d010SOphir Munk 23902eb4d010SOphir Munk MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 23912eb4d010SOphir Munk /* Get environment variable to store. */ 23922eb4d010SOphir Munk env = getenv(MLX5_SHUT_UP_BF); 23932eb4d010SOphir Munk value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET; 23942eb4d010SOphir Munk if (config->dbnc == MLX5_ARG_UNSET) 23952eb4d010SOphir Munk setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1); 23962eb4d010SOphir Munk else 23972eb4d010SOphir Munk setenv(MLX5_SHUT_UP_BF, 23982eb4d010SOphir Munk config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1); 23992eb4d010SOphir Munk return value; 24002eb4d010SOphir Munk } 24012eb4d010SOphir Munk 24022eb4d010SOphir Munk static void 24032eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(int value) 24042eb4d010SOphir Munk { 24052eb4d010SOphir Munk MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); 24062eb4d010SOphir Munk /* Restore the original environment variable state. */ 24072eb4d010SOphir Munk if (value == MLX5_ARG_UNSET) 24082eb4d010SOphir Munk unsetenv(MLX5_SHUT_UP_BF); 24092eb4d010SOphir Munk else 24102eb4d010SOphir Munk setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1); 24112eb4d010SOphir Munk } 24122eb4d010SOphir Munk 24132eb4d010SOphir Munk /** 24142eb4d010SOphir Munk * Extract pdn of PD object using DV API. 24152eb4d010SOphir Munk * 24162eb4d010SOphir Munk * @param[in] pd 24172eb4d010SOphir Munk * Pointer to the verbs PD object. 24182eb4d010SOphir Munk * @param[out] pdn 24192eb4d010SOphir Munk * Pointer to the PD object number variable. 24202eb4d010SOphir Munk * 24212eb4d010SOphir Munk * @return 24222eb4d010SOphir Munk * 0 on success, error value otherwise. 
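 * (-ENOTSUP is returned when the PMD is built without
 * HAVE_IBV_FLOW_DV_SUPPORT, see the fallback branch below.)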
24232eb4d010SOphir Munk */ 24242eb4d010SOphir Munk int 24252eb4d010SOphir Munk mlx5_os_get_pdn(void *pd, uint32_t *pdn) 24262eb4d010SOphir Munk { 24272eb4d010SOphir Munk #ifdef HAVE_IBV_FLOW_DV_SUPPORT 24282eb4d010SOphir Munk struct mlx5dv_obj obj; 24292eb4d010SOphir Munk struct mlx5dv_pd pd_info; 24302eb4d010SOphir Munk int ret = 0; 24312eb4d010SOphir Munk 24322eb4d010SOphir Munk obj.pd.in = pd; 24332eb4d010SOphir Munk obj.pd.out = &pd_info; 24342eb4d010SOphir Munk ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); 24352eb4d010SOphir Munk if (ret) { 24362eb4d010SOphir Munk DRV_LOG(DEBUG, "Fail to get PD object info"); 24372eb4d010SOphir Munk return ret; 24382eb4d010SOphir Munk } 24392eb4d010SOphir Munk *pdn = pd_info.pdn; 24402eb4d010SOphir Munk return 0; 24412eb4d010SOphir Munk #else 24422eb4d010SOphir Munk (void)pd; 24432eb4d010SOphir Munk (void)pdn; 24442eb4d010SOphir Munk return -ENOTSUP; 24452eb4d010SOphir Munk #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 24462eb4d010SOphir Munk } 24472eb4d010SOphir Munk 24482eb4d010SOphir Munk /** 24492eb4d010SOphir Munk * Function API to open IB device. 24502eb4d010SOphir Munk * 24512eb4d010SOphir Munk * This function calls the Linux glue APIs to open a device. 24522eb4d010SOphir Munk * 24532eb4d010SOphir Munk * @param[in] spawn 24542eb4d010SOphir Munk * Pointer to the IB device attributes (name, port, etc). 24552eb4d010SOphir Munk * @param[out] config 24562eb4d010SOphir Munk * Pointer to device configuration structure. 24572eb4d010SOphir Munk * @param[out] sh 24582eb4d010SOphir Munk * Pointer to shared context structure. 24592eb4d010SOphir Munk * 24602eb4d010SOphir Munk * @return 24612eb4d010SOphir Munk * 0 on success, a positive error value otherwise. 24622eb4d010SOphir Munk */ 24632eb4d010SOphir Munk int 24642eb4d010SOphir Munk mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, 24652eb4d010SOphir Munk const struct mlx5_dev_config *config, 24662eb4d010SOphir Munk struct mlx5_dev_ctx_shared *sh) 24672eb4d010SOphir Munk { 24682eb4d010SOphir Munk int dbmap_env; 24692eb4d010SOphir Munk int err = 0; 2470d133f4cdSViacheslav Ovsiienko 2471d133f4cdSViacheslav Ovsiienko sh->numa_node = spawn->pci_dev->device.numa_node; 2472d133f4cdSViacheslav Ovsiienko pthread_mutex_init(&sh->txpp.mutex, NULL); 24732eb4d010SOphir Munk /* 24742eb4d010SOphir Munk * Configure environment variable "MLX5_BF_SHUT_UP" 24752eb4d010SOphir Munk * before the device creation. The rdma_core library 24762eb4d010SOphir Munk * checks the variable at device creation and 24772eb4d010SOphir Munk * stores the result internally. 24782eb4d010SOphir Munk */ 24792eb4d010SOphir Munk dbmap_env = mlx5_config_doorbell_mapping_env(config); 24802eb4d010SOphir Munk /* Try to open IB device with DV first, then usual Verbs. */ 24812eb4d010SOphir Munk errno = 0; 2482834a9019SOphir Munk sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev); 24832eb4d010SOphir Munk if (sh->ctx) { 24842eb4d010SOphir Munk sh->devx = 1; 24852eb4d010SOphir Munk DRV_LOG(DEBUG, "DevX is supported"); 24862eb4d010SOphir Munk /* The device is created, no need for environment. */ 24872eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(dbmap_env); 24882eb4d010SOphir Munk } else { 24892eb4d010SOphir Munk /* The environment variable is still configured. */ 2490834a9019SOphir Munk sh->ctx = mlx5_glue->open_device(spawn->phys_dev); 24912eb4d010SOphir Munk err = errno ? 
errno : ENODEV; 24922eb4d010SOphir Munk /* 24932eb4d010SOphir Munk * The environment variable is not needed anymore, 24942eb4d010SOphir Munk * all device creation attempts are completed. 24952eb4d010SOphir Munk */ 24962eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(dbmap_env); 24972eb4d010SOphir Munk if (!sh->ctx) 24982eb4d010SOphir Munk return err; 24992eb4d010SOphir Munk DRV_LOG(DEBUG, "DevX is NOT supported"); 25002eb4d010SOphir Munk err = 0; 25012eb4d010SOphir Munk } 250281c3b977SViacheslav Ovsiienko if (!err && sh->ctx) { 250381c3b977SViacheslav Ovsiienko /* Hint libmlx5 to use PMD allocator for data plane resources */ 250481c3b977SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 250581c3b977SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 250681c3b977SViacheslav Ovsiienko (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ 250781c3b977SViacheslav Ovsiienko .alloc = &mlx5_alloc_verbs_buf, 250881c3b977SViacheslav Ovsiienko .free = &mlx5_free_verbs_buf, 250981c3b977SViacheslav Ovsiienko .data = sh, 251081c3b977SViacheslav Ovsiienko })); 251181c3b977SViacheslav Ovsiienko } 25122eb4d010SOphir Munk return err; 25132eb4d010SOphir Munk } 25142eb4d010SOphir Munk 25152eb4d010SOphir Munk /** 25162eb4d010SOphir Munk * Install shared asynchronous device events handler. 25172eb4d010SOphir Munk * This function is implemented to support event sharing 25182eb4d010SOphir Munk * between multiple ports of single IB device. 25192eb4d010SOphir Munk * 25202eb4d010SOphir Munk * @param sh 25212eb4d010SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 25222eb4d010SOphir Munk */ 25232eb4d010SOphir Munk void 25242eb4d010SOphir Munk mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 25252eb4d010SOphir Munk { 25262eb4d010SOphir Munk int ret; 25272eb4d010SOphir Munk int flags; 25282eb4d010SOphir Munk 25292eb4d010SOphir Munk sh->intr_handle.fd = -1; 25302eb4d010SOphir Munk flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL); 25312eb4d010SOphir Munk ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd, 25322eb4d010SOphir Munk F_SETFL, flags | O_NONBLOCK); 25332eb4d010SOphir Munk if (ret) { 25342eb4d010SOphir Munk DRV_LOG(INFO, "failed to change file descriptor async event" 25352eb4d010SOphir Munk " queue"); 25362eb4d010SOphir Munk } else { 25372eb4d010SOphir Munk sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd; 25382eb4d010SOphir Munk sh->intr_handle.type = RTE_INTR_HANDLE_EXT; 25392eb4d010SOphir Munk if (rte_intr_callback_register(&sh->intr_handle, 25402eb4d010SOphir Munk mlx5_dev_interrupt_handler, sh)) { 25412eb4d010SOphir Munk DRV_LOG(INFO, "Fail to install the shared interrupt."); 25422eb4d010SOphir Munk sh->intr_handle.fd = -1; 25432eb4d010SOphir Munk } 25442eb4d010SOphir Munk } 25452eb4d010SOphir Munk if (sh->devx) { 25462eb4d010SOphir Munk #ifdef HAVE_IBV_DEVX_ASYNC 25472eb4d010SOphir Munk sh->intr_handle_devx.fd = -1; 254821b7c452SOphir Munk sh->devx_comp = 254921b7c452SOphir Munk (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx); 255021b7c452SOphir Munk struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp; 255121b7c452SOphir Munk if (!devx_comp) { 25522eb4d010SOphir Munk DRV_LOG(INFO, "failed to allocate devx_comp."); 25532eb4d010SOphir Munk return; 25542eb4d010SOphir Munk } 255521b7c452SOphir Munk flags = fcntl(devx_comp->fd, F_GETFL); 255621b7c452SOphir Munk ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK); 25572eb4d010SOphir Munk if (ret) { 25582eb4d010SOphir Munk DRV_LOG(INFO, "failed to change file descriptor" 
25592eb4d010SOphir Munk " devx comp");
25602eb4d010SOphir Munk return;
25612eb4d010SOphir Munk }
256221b7c452SOphir Munk sh->intr_handle_devx.fd = devx_comp->fd;
25632eb4d010SOphir Munk sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
25642eb4d010SOphir Munk if (rte_intr_callback_register(&sh->intr_handle_devx,
25652eb4d010SOphir Munk mlx5_dev_interrupt_handler_devx, sh)) {
25662eb4d010SOphir Munk DRV_LOG(INFO, "Fail to install the devx shared"
25672eb4d010SOphir Munk " interrupt.");
25682eb4d010SOphir Munk sh->intr_handle_devx.fd = -1;
25692eb4d010SOphir Munk }
25702eb4d010SOphir Munk #endif /* HAVE_IBV_DEVX_ASYNC */
25712eb4d010SOphir Munk }
25722eb4d010SOphir Munk }
25732eb4d010SOphir Munk
25742eb4d010SOphir Munk /**
25752eb4d010SOphir Munk * Uninstall shared asynchronous device events handler.
25762eb4d010SOphir Munk * This function is implemented to support event sharing
25772eb4d010SOphir Munk * between multiple ports of single IB device.
25782eb4d010SOphir Munk *
25792eb4d010SOphir Munk * @param sh
25802eb4d010SOphir Munk * Pointer to mlx5_dev_ctx_shared object.
25812eb4d010SOphir Munk */
25822eb4d010SOphir Munk void
25832eb4d010SOphir Munk mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
25842eb4d010SOphir Munk {
25852eb4d010SOphir Munk if (sh->intr_handle.fd >= 0)
25862eb4d010SOphir Munk mlx5_intr_callback_unregister(&sh->intr_handle,
25872eb4d010SOphir Munk mlx5_dev_interrupt_handler, sh);
25882eb4d010SOphir Munk #ifdef HAVE_IBV_DEVX_ASYNC
25892eb4d010SOphir Munk if (sh->intr_handle_devx.fd >= 0)
25902eb4d010SOphir Munk rte_intr_callback_unregister(&sh->intr_handle_devx,
25912eb4d010SOphir Munk mlx5_dev_interrupt_handler_devx, sh);
25922eb4d010SOphir Munk if (sh->devx_comp)
25932eb4d010SOphir Munk mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
25942eb4d010SOphir Munk #endif
25952eb4d010SOphir Munk }
2596042f5c94SOphir Munk
259773bf9235SOphir Munk /**
259873bf9235SOphir Munk * Read statistics by a named counter.
259973bf9235SOphir Munk *
260073bf9235SOphir Munk * @param[in] priv
260173bf9235SOphir Munk * Pointer to the private device data structure.
260273bf9235SOphir Munk * @param[in] ctr_name
260373bf9235SOphir Munk * Pointer to the name of the statistic counter to read.
260473bf9235SOphir Munk * @param[out] stat
260573bf9235SOphir Munk * Pointer to read statistic value.
260673bf9235SOphir Munk * @return
260773bf9235SOphir Munk * 0 on success and stat is valid, 1 if it failed to read the value
260873bf9235SOphir Munk * and rte_errno is set.
260973bf9235SOphir Munk *
261073bf9235SOphir Munk */
261173bf9235SOphir Munk int
261273bf9235SOphir Munk mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
261373bf9235SOphir Munk uint64_t *stat)
261473bf9235SOphir Munk {
261573bf9235SOphir Munk int fd;
261673bf9235SOphir Munk
261773bf9235SOphir Munk if (priv->sh) {
2618e6988afdSMatan Azrad if (priv->q_counters != NULL &&
2619e6988afdSMatan Azrad strcmp(ctr_name, "out_of_buffer") == 0)
2620e6988afdSMatan Azrad return mlx5_devx_cmd_queue_counter_query(priv->sh->ctx,
2621e6988afdSMatan Azrad 0, (uint32_t *)stat);
262273bf9235SOphir Munk MKSTR(path, "%s/ports/%d/hw_counters/%s",
262373bf9235SOphir Munk priv->sh->ibdev_path,
262473bf9235SOphir Munk priv->dev_port,
262573bf9235SOphir Munk ctr_name);
262673bf9235SOphir Munk fd = open(path, O_RDONLY);
2627038e7fc0SShy Shyman /*
2628038e7fc0SShy Shyman * In switchdev the file location is not per port
2629038e7fc0SShy Shyman * but rather in <ibdev_path>/hw_counters/<file_name>.
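 * If the per-port open above failed, retry with that device-level
 * path before giving up.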
2630038e7fc0SShy Shyman */ 2631038e7fc0SShy Shyman if (fd == -1) { 2632038e7fc0SShy Shyman MKSTR(path1, "%s/hw_counters/%s", 2633038e7fc0SShy Shyman priv->sh->ibdev_path, 2634038e7fc0SShy Shyman ctr_name); 2635038e7fc0SShy Shyman fd = open(path1, O_RDONLY); 2636038e7fc0SShy Shyman } 263773bf9235SOphir Munk if (fd != -1) { 263873bf9235SOphir Munk char buf[21] = {'\0'}; 263973bf9235SOphir Munk ssize_t n = read(fd, buf, sizeof(buf)); 264073bf9235SOphir Munk 264173bf9235SOphir Munk close(fd); 264273bf9235SOphir Munk if (n != -1) { 264373bf9235SOphir Munk *stat = strtoull(buf, NULL, 10); 264473bf9235SOphir Munk return 0; 264573bf9235SOphir Munk } 264673bf9235SOphir Munk } 264773bf9235SOphir Munk } 264873bf9235SOphir Munk *stat = 0; 264973bf9235SOphir Munk return 1; 265073bf9235SOphir Munk } 265173bf9235SOphir Munk 265273bf9235SOphir Munk /** 2653d5ed8aa9SOphir Munk * Set the reg_mr and dereg_mr call backs 2654d5ed8aa9SOphir Munk * 2655d5ed8aa9SOphir Munk * @param reg_mr_cb[out] 2656d5ed8aa9SOphir Munk * Pointer to reg_mr func 2657d5ed8aa9SOphir Munk * @param dereg_mr_cb[out] 2658d5ed8aa9SOphir Munk * Pointer to dereg_mr func 2659d5ed8aa9SOphir Munk * 2660d5ed8aa9SOphir Munk */ 2661d5ed8aa9SOphir Munk void 2662d5ed8aa9SOphir Munk mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, 2663d5ed8aa9SOphir Munk mlx5_dereg_mr_t *dereg_mr_cb) 2664d5ed8aa9SOphir Munk { 2665db12615bSOphir Munk *reg_mr_cb = mlx5_mr_verbs_ops.reg_mr; 2666db12615bSOphir Munk *dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr; 2667d5ed8aa9SOphir Munk } 2668d5ed8aa9SOphir Munk 2669ab27cdd9SOphir Munk /** 2670ab27cdd9SOphir Munk * Remove a MAC address from device 2671ab27cdd9SOphir Munk * 2672ab27cdd9SOphir Munk * @param dev 2673ab27cdd9SOphir Munk * Pointer to Ethernet device structure. 2674ab27cdd9SOphir Munk * @param index 2675ab27cdd9SOphir Munk * MAC address index. 2676ab27cdd9SOphir Munk */ 2677ab27cdd9SOphir Munk void 2678ab27cdd9SOphir Munk mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 2679ab27cdd9SOphir Munk { 2680ab27cdd9SOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 2681ab27cdd9SOphir Munk const int vf = priv->config.vf; 2682ab27cdd9SOphir Munk 2683ab27cdd9SOphir Munk if (vf) 2684ab27cdd9SOphir Munk mlx5_nl_mac_addr_remove(priv->nl_socket_route, 2685ab27cdd9SOphir Munk mlx5_ifindex(dev), priv->mac_own, 2686ab27cdd9SOphir Munk &dev->data->mac_addrs[index], index); 2687ab27cdd9SOphir Munk } 2688ab27cdd9SOphir Munk 2689ab27cdd9SOphir Munk /** 2690ab27cdd9SOphir Munk * Adds a MAC address to the device 2691ab27cdd9SOphir Munk * 2692ab27cdd9SOphir Munk * @param dev 2693ab27cdd9SOphir Munk * Pointer to Ethernet device structure. 2694ab27cdd9SOphir Munk * @param mac_addr 2695ab27cdd9SOphir Munk * MAC address to register. 2696ab27cdd9SOphir Munk * @param index 2697ab27cdd9SOphir Munk * MAC address index. 
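 * Note: the address is programmed through Netlink only on VF ports;
 * otherwise the function is a no-op and returns 0.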
2698ab27cdd9SOphir Munk *
2699ab27cdd9SOphir Munk * @return
2700ab27cdd9SOphir Munk * 0 on success, a negative errno value otherwise
2701ab27cdd9SOphir Munk */
2702ab27cdd9SOphir Munk int
2703ab27cdd9SOphir Munk mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2704ab27cdd9SOphir Munk uint32_t index)
2705ab27cdd9SOphir Munk {
2706ab27cdd9SOphir Munk struct mlx5_priv *priv = dev->data->dev_private;
2707ab27cdd9SOphir Munk const int vf = priv->config.vf;
2708ab27cdd9SOphir Munk int ret = 0;
2709ab27cdd9SOphir Munk
2710ab27cdd9SOphir Munk if (vf)
2711ab27cdd9SOphir Munk ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
2712ab27cdd9SOphir Munk mlx5_ifindex(dev), priv->mac_own,
2713ab27cdd9SOphir Munk mac, index);
2714ab27cdd9SOphir Munk return ret;
2715ab27cdd9SOphir Munk }
2716ab27cdd9SOphir Munk
2717ab27cdd9SOphir Munk /**
2718ab27cdd9SOphir Munk * Modify a VF MAC address
2719ab27cdd9SOphir Munk *
2720ab27cdd9SOphir Munk * @param priv
2721ab27cdd9SOphir Munk * Pointer to device private data.
2722ab27cdd9SOphir Munk * @param mac_addr
2723ab27cdd9SOphir Munk * MAC address to modify into.
2724ab27cdd9SOphir Munk * @param iface_idx
2725ab27cdd9SOphir Munk * Net device interface index
2726ab27cdd9SOphir Munk * @param vf_index
2727ab27cdd9SOphir Munk * VF index
2728ab27cdd9SOphir Munk *
2729ab27cdd9SOphir Munk * @return
2730ab27cdd9SOphir Munk * 0 on success, a negative errno value otherwise
2731ab27cdd9SOphir Munk */
2732ab27cdd9SOphir Munk int
2733ab27cdd9SOphir Munk mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
2734ab27cdd9SOphir Munk unsigned int iface_idx,
2735ab27cdd9SOphir Munk struct rte_ether_addr *mac_addr,
2736ab27cdd9SOphir Munk int vf_index)
2737ab27cdd9SOphir Munk {
2738ab27cdd9SOphir Munk return mlx5_nl_vf_mac_addr_modify
2739ab27cdd9SOphir Munk (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
2740ab27cdd9SOphir Munk }
2741ab27cdd9SOphir Munk
27424d18abd1SOphir Munk /**
27434d18abd1SOphir Munk * Set device promiscuous mode
27444d18abd1SOphir Munk *
27454d18abd1SOphir Munk * @param dev
27464d18abd1SOphir Munk * Pointer to Ethernet device structure.
27474d18abd1SOphir Munk * @param enable
27484d18abd1SOphir Munk * 0 - promiscuous is disabled, otherwise - enabled
27494d18abd1SOphir Munk *
27504d18abd1SOphir Munk * @return
27514d18abd1SOphir Munk * 0 on success, a negative error value otherwise
27524d18abd1SOphir Munk */
27534d18abd1SOphir Munk int
27544d18abd1SOphir Munk mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
27554d18abd1SOphir Munk {
27564d18abd1SOphir Munk struct mlx5_priv *priv = dev->data->dev_private;
27574d18abd1SOphir Munk
27584d18abd1SOphir Munk return mlx5_nl_promisc(priv->nl_socket_route,
27594d18abd1SOphir Munk mlx5_ifindex(dev), !!enable);
27604d18abd1SOphir Munk }
27614d18abd1SOphir Munk
27624d18abd1SOphir Munk /**
27634d18abd1SOphir Munk * Set device allmulticast mode
27644d18abd1SOphir Munk *
27654d18abd1SOphir Munk * @param dev
27664d18abd1SOphir Munk * Pointer to Ethernet device structure.
27674d18abd1SOphir Munk * @param enable
27684d18abd1SOphir Munk * 0 - all multicast is disabled, otherwise - enabled
27694d18abd1SOphir Munk *
27704d18abd1SOphir Munk * @return
27714d18abd1SOphir Munk * 0 on success, a negative error value otherwise
27724d18abd1SOphir Munk */
27734d18abd1SOphir Munk int
27744d18abd1SOphir Munk mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
27754d18abd1SOphir Munk {
27764d18abd1SOphir Munk struct mlx5_priv *priv = dev->data->dev_private;
27774d18abd1SOphir Munk
27784d18abd1SOphir Munk return mlx5_nl_allmulti(priv->nl_socket_route,
27794d18abd1SOphir Munk mlx5_ifindex(dev), !!enable);
27804d18abd1SOphir Munk }
27814d18abd1SOphir Munk
2782f00f6562SOphir Munk /**
2783f00f6562SOphir Munk * Flush device MAC addresses
2784f00f6562SOphir Munk *
2785f00f6562SOphir Munk * @param dev
2786f00f6562SOphir Munk * Pointer to Ethernet device structure.
2787f00f6562SOphir Munk *
2788f00f6562SOphir Munk */
2789f00f6562SOphir Munk void
2790f00f6562SOphir Munk mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
2791f00f6562SOphir Munk {
2792f00f6562SOphir Munk struct mlx5_priv *priv = dev->data->dev_private;
2793f00f6562SOphir Munk
2794f00f6562SOphir Munk mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
2795f00f6562SOphir Munk dev->data->mac_addrs,
2796f00f6562SOphir Munk MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
2797f00f6562SOphir Munk }
2798
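/*
 * Illustrative usage sketch (not part of the driver): the MAC and
 * filtering helpers above are thin wrappers over the Netlink layer,
 * so a PMD-internal caller holding an rte_eth_dev could combine them
 * as below. The MAC value is a made-up locally administered address
 * and the index (1) is only an example slot.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (mlx5_os_mac_addr_add(dev, &mac, 1) == 0 &&
 *	    mlx5_os_set_promisc(dev, 1) == 0)
 *		mlx5_os_set_allmulti(dev, 1);
 */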