/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_bus_auxiliary.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_verbs.h"
#include "mlx5_nl.h"
#include "mlx5_devx.h"

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/* rte flow indexed pool configuration. */
static struct mlx5_indexed_pool_config icfg[] = {
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "ctl_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 1 << 14,
		.type = "rte_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "mcp_flow_ipool",
	},
};
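
/*
 * Illustrative sketch only (not part of this file): each icfg[] entry is
 * meant to be turned into an indexed pool with mlx5_ipool_create(); the
 * "flows" array name below is an assumption for illustration.
 *
 *   for (i = 0; i < (int)RTE_DIM(icfg); i++)
 *       flows[i] = mlx5_ipool_create(&icfg[i]);
 */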

/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 *
 * @param[in] fd
 *   The file descriptor (representing the interrupt) of the completion
 *   channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL);
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

/**
 * Get mlx5 device attributes. The glue function query_device_ex() is called
 * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
 * device attributes from the glue out parameter.
 *
 * @param ctx
 *   Pointer to ibv context.
 *
 * @param device_attr
 *   Pointer to mlx5 device attributes.
 *
 * @return
 *   0 on success, non-zero error number otherwise.
 */
int
mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
{
	int err;
	struct ibv_device_attr_ex attr_ex;

	memset(device_attr, 0, sizeof(*device_attr));
	err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (err)
		return err;
	device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
	device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
	device_attr->max_sge = attr_ex.orig_attr.max_sge;
	device_attr->max_cq = attr_ex.orig_attr.max_cq;
	device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
	device_attr->max_mr = attr_ex.orig_attr.max_mr;
	device_attr->max_pd = attr_ex.orig_attr.max_pd;
	device_attr->max_qp = attr_ex.orig_attr.max_qp;
	device_attr->max_srq = attr_ex.orig_attr.max_srq;
	device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
	device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
	device_attr->max_rwq_indirection_table_size =
		attr_ex.rss_caps.max_rwq_indirection_table_size;
	device_attr->max_tso = attr_ex.tso_caps.max_tso;
	device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;

	struct mlx5dv_context dv_attr = { .comp_mask = 0 };

	err = mlx5_glue->dv_query_device(ctx, &dv_attr);
	if (err)
		return err;
	device_attr->flags = dv_attr.flags;
	device_attr->comp_mask = dv_attr.comp_mask;
#ifdef HAVE_IBV_MLX5_MOD_SWP
	device_attr->sw_parsing_offloads =
		dv_attr.sw_parsing_caps.sw_parsing_offloads;
#endif
	device_attr->min_single_stride_log_num_of_bytes =
		dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
	device_attr->max_single_stride_log_num_of_bytes =
		dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
	device_attr->min_single_wqe_log_num_of_strides =
		dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
	device_attr->max_single_wqe_log_num_of_strides =
		dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
	device_attr->stride_supported_qpts =
		dv_attr.striding_rq_caps.supported_qpts;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
#endif
	strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
		sizeof(device_attr->fw_ver));

	return err;
}
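
/*
 * Illustrative usage sketch (hypothetical caller, names assumed for
 * illustration): query the attributes once per shared context and log a
 * couple of the Verbs limits filled in above.
 *
 *   struct mlx5_dev_attr attr;
 *
 *   if (mlx5_os_get_dev_attr(sh->ctx, &attr) == 0)
 *       DRV_LOG(DEBUG, "max_qp_wr=%d max_sge=%d fw=%s",
 *               attr.max_qp_wr, attr.max_sge, attr.fw_ver);
 */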

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently rte_mem_page_size()).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_dev_ctx_shared *sh = data;
	void *ret;
	size_t alignment = rte_mem_page_size();
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_ASSERT(data != NULL);
	ret = mlx5_malloc(0, size, alignment, sh->numa_node);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Detect whether misc5 matching is supported.
 *
 * @param[in] priv
 *   Device private data pointer.
 */
#ifdef HAVE_MLX5DV_DR
static void
__mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* Dummy VxLAN matcher to detect rdma-core misc5 cap.
	 * Case: IPv4--->UDP--->VxLAN--->vni
	 */
	void *tbl;
	struct mlx5_flow_dv_match_params matcher_mask;
	void *match_m;
	void *matcher;
	void *headers_m;
	void *misc5_m;
	uint32_t *tunnel_header_m;
	struct mlx5dv_flow_matcher_attr dv_attr;

	memset(&matcher_mask, 0, sizeof(matcher_mask));
	matcher_mask.size = sizeof(matcher_mask.buf);
	match_m = matcher_mask.buf;
	headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
	misc5_m = MLX5_ADDR_OF(fte_match_param,
			       match_m, misc_parameters_5);
	tunnel_header_m = (uint32_t *)
				MLX5_ADDR_OF(fte_match_set_misc5,
				misc5_m, tunnel_header_1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
	*tunnel_header_m = 0xffffff;
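	/*
	 * At this point the mask requests an exact match on outer IPv4
	 * (ip_version 4), the full IP protocol byte, the UDP destination
	 * port, and the 24 low bits of tunnel_header_1 (the VxLAN VNI,
	 * hence the 0xffffff mask).
	 */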
	tbl = mlx5_glue->dr_create_flow_tbl(priv->sh->rx_domain, 1);
	if (!tbl) {
		DRV_LOG(INFO, "No SW steering support");
		return;
	}
	dv_attr.type = IBV_FLOW_ATTR_NORMAL,
	dv_attr.match_mask = (void *)&matcher_mask,
	dv_attr.match_criteria_enable =
			(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
			(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT);
	dv_attr.priority = 3;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	void *misc2_m;
	if (priv->config.dv_esw_en) {
		/* FDB enabled reg_c_0 */
		dv_attr.match_criteria_enable |=
				(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
		misc2_m = MLX5_ADDR_OF(fte_match_param,
				       match_m, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2_m,
			 metadata_reg_c_0, 0xffff);
	}
#endif
	matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->ctx,
						    &dv_attr, tbl);
	if (matcher) {
		priv->sh->misc5_cap = 1;
		mlx5_glue->dv_destroy_flow_matcher(matcher);
	}
	mlx5_glue->dr_destroy_flow_tbl(tbl);
#else
	RTE_SET_USED(priv);
#endif
}
#endif

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	MLX5_ASSERT(data != NULL);
	mlx5_free(ptr);
}
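
/*
 * Sketch of how the two callbacks above are typically wired into rdma-core
 * (assumption for illustration; the exact call site is elsewhere in the
 * driver):
 *
 *   struct mlx5dv_ctx_allocators alctr = {
 *       .alloc = &mlx5_alloc_verbs_buf,
 *       .free = &mlx5_free_verbs_buf,
 *       .data = sh,
 *   };
 *   mlx5_glue->dv_set_context_attr(sh->ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
 *                                  (void *)((uintptr_t)&alctr));
 */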

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only for the first reference.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_NAME_SIZE] __rte_unused;
	int err;

	MLX5_ASSERT(sh && sh->refcnt);
	if (sh->refcnt > 1)
		return 0;
	err = mlx5_alloc_table_hash_list(priv);
	if (err)
		goto error;
	/* The resources below are only valid with DV support. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* Init port id action list. */
	snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
	sh->port_id_action_list = mlx5_list_create(s, sh, true,
						   flow_dv_port_id_create_cb,
						   flow_dv_port_id_match_cb,
						   flow_dv_port_id_remove_cb,
						   flow_dv_port_id_clone_cb,
						 flow_dv_port_id_clone_free_cb);
	if (!sh->port_id_action_list)
		goto error;
	/* Init push vlan action list. */
	snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
	sh->push_vlan_action_list = mlx5_list_create(s, sh, true,
						    flow_dv_push_vlan_create_cb,
						    flow_dv_push_vlan_match_cb,
						    flow_dv_push_vlan_remove_cb,
						    flow_dv_push_vlan_clone_cb,
					       flow_dv_push_vlan_clone_free_cb);
	if (!sh->push_vlan_action_list)
		goto error;
	/* Init sample action list. */
	snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
	sh->sample_action_list = mlx5_list_create(s, sh, true,
						  flow_dv_sample_create_cb,
						  flow_dv_sample_match_cb,
						  flow_dv_sample_remove_cb,
						  flow_dv_sample_clone_cb,
						  flow_dv_sample_clone_free_cb);
	if (!sh->sample_action_list)
		goto error;
	/* Init dest array action list. */
	snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
	sh->dest_array_list = mlx5_list_create(s, sh, true,
					       flow_dv_dest_array_create_cb,
					       flow_dv_dest_array_match_cb,
					       flow_dv_dest_array_remove_cb,
					       flow_dv_dest_array_clone_cb,
					      flow_dv_dest_array_clone_free_cb);
	if (!sh->dest_array_list)
		goto error;
#endif
#ifdef HAVE_MLX5DV_DR
	void *domain;

	/* Reference counter is zero, we should initialize structures. */
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!domain) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->rx_domain = domain;
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
	if (!domain) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (priv->config.dv_esw_en) {
		domain = mlx5_glue->dr_create_domain
			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
		if (!domain) {
			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
			err = errno;
			goto error;
		}
		sh->fdb_domain = domain;
	}
	/*
	 * The drop action is just some dummy placeholder in rdma-core. It
	 * does not belong to any domain and has no attributes, and can be
	 * shared by the entire device.
	 */
	sh->dr_drop_action = mlx5_glue->dr_create_flow_action_drop();
	if (!sh->dr_drop_action) {
		DRV_LOG(ERR, "FDB mlx5dv_dr_create_flow_action_drop");
		err = errno;
		goto error;
	}
#endif
	if (!sh->tunnel_hub && priv->config.dv_miss_info)
		err = mlx5_alloc_tunnel_hub(sh);
	if (err) {
		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
		goto error;
	}
	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
		if (sh->fdb_domain)
			mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
	}
	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
	if (!priv->config.allow_duplicate_pattern) {
#ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
		DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
#endif
		mlx5_glue->dr_allow_duplicate_rules(sh->rx_domain, 0);
		mlx5_glue->dr_allow_duplicate_rules(sh->tx_domain, 0);
		if (sh->fdb_domain)
			mlx5_glue->dr_allow_duplicate_rules(sh->fdb_domain, 0);
	}

	__mlx5_discovery_misc5_cap(priv);
#endif /* HAVE_MLX5DV_DR */
	sh->default_miss_action =
			mlx5_glue->dr_create_flow_action_default_miss();
	if (!sh->default_miss_action)
		DRV_LOG(WARNING, "Default miss action is not supported.");
	return 0;
error:
	/* Rollback the created objects. */
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->dr_drop_action) {
		mlx5_glue->destroy_flow_action(sh->dr_drop_action);
		sh->dr_drop_action = NULL;
	}
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
	if (sh->encaps_decaps) {
		mlx5_hlist_destroy(sh->encaps_decaps);
		sh->encaps_decaps = NULL;
	}
	if (sh->modify_cmds) {
		mlx5_hlist_destroy(sh->modify_cmds);
		sh->modify_cmds = NULL;
	}
	if (sh->tag_table) {
		/* tags should be destroyed with flow before. */
		mlx5_hlist_destroy(sh->tag_table);
		sh->tag_table = NULL;
	}
	if (sh->tunnel_hub) {
		mlx5_release_tunnel_hub(sh, priv->dev_port);
		sh->tunnel_hub = NULL;
	}
	mlx5_free_table_hash_list(priv);
	if (sh->port_id_action_list) {
		mlx5_list_destroy(sh->port_id_action_list);
		sh->port_id_action_list = NULL;
	}
	if (sh->push_vlan_action_list) {
		mlx5_list_destroy(sh->push_vlan_action_list);
		sh->push_vlan_action_list = NULL;
	}
	if (sh->sample_action_list) {
		mlx5_list_destroy(sh->sample_action_list);
		sh->sample_action_list = NULL;
	}
	if (sh->dest_array_list) {
		mlx5_list_destroy(sh->dest_array_list);
		sh->dest_array_list = NULL;
	}
	return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	MLX5_ASSERT(sh && sh->refcnt);
	if (sh->refcnt > 1)
		return;
#ifdef HAVE_MLX5DV_DR
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->dr_drop_action) {
		mlx5_glue->destroy_flow_action(sh->dr_drop_action);
		sh->dr_drop_action = NULL;
	}
#endif
	if (sh->pop_vlan_action) {
		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
		sh->pop_vlan_action = NULL;
	}
#endif /* HAVE_MLX5DV_DR */
	if (sh->default_miss_action)
		mlx5_glue->destroy_flow_action
				(sh->default_miss_action);
	if (sh->encaps_decaps) {
		mlx5_hlist_destroy(sh->encaps_decaps);
		sh->encaps_decaps = NULL;
	}
	if (sh->modify_cmds) {
		mlx5_hlist_destroy(sh->modify_cmds);
		sh->modify_cmds = NULL;
	}
	if (sh->tag_table) {
		/* tags should be destroyed with flow before. */
		mlx5_hlist_destroy(sh->tag_table);
		sh->tag_table = NULL;
	}
	if (sh->tunnel_hub) {
		mlx5_release_tunnel_hub(sh, priv->dev_port);
		sh->tunnel_hub = NULL;
	}
	mlx5_free_table_hash_list(priv);
	if (sh->port_id_action_list) {
		mlx5_list_destroy(sh->port_id_action_list);
		sh->port_id_action_list = NULL;
	}
	if (sh->push_vlan_action_list) {
		mlx5_list_destroy(sh->push_vlan_action_list);
		sh->push_vlan_action_list = NULL;
	}
	if (sh->sample_action_list) {
		mlx5_list_destroy(sh->sample_action_list);
		sh->sample_action_list = NULL;
	}
	if (sh->dest_array_list) {
		mlx5_list_destroy(sh->dest_array_list);
		sh->dest_array_list = NULL;
	}
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}
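
/*
 * Illustrative invariant (assumption, for clarity): after a successful call
 * in either process type, the same memzone backs mlx5_shared_data in all
 * processes, so a sanity check like the following would hold:
 *
 *   if (mlx5_init_shared_data() == 0)
 *       MLX5_ASSERT(rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA) != NULL);
 */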

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	MLX5_ASSERT(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
					   mlx5_mp_os_primary_handle);
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
					     mlx5_mp_os_secondary_handle);
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * Create the Tx queue DevX/Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_devx_obj_new(dev, idx);
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!priv->config.dv_esw_en)
		return mlx5_txq_devx_obj_new(dev, idx);
#endif
	return mlx5_txq_ibv_obj_new(dev, idx);
}
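
/*
 * Selection summary (descriptive note): hairpin queues always take the DevX
 * path; when the DevX UAR offset is available and E-Switch is disabled, the
 * regular queues use DevX as well; otherwise the Verbs object is created.
 */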

/**
 * Release a Tx DevX/Verbs queue object.
 *
 * @param txq_obj
 *   DevX/Verbs Tx queue object.
 */
static void
mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		mlx5_txq_devx_obj_release(txq_obj);
		return;
	}
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
		mlx5_txq_devx_obj_release(txq_obj);
		return;
	}
#endif
	mlx5_txq_ibv_obj_release(txq_obj);
}

/**
 * DV flow counter mode detect and config.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 */
static void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!priv->config.devx || !priv->config.dv_flow_en ||
	    !priv->config.hca_attr.flow_counters_dump ||
	    !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			priv->config.hca_attr.flow_counters_dump,
			priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"with others:%d.", PORT_ID(priv), fallback);
#endif
}
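
/*
 * Note (informal summary): in fallback mode counters are handled with the
 * legacy per-counter queries instead of DevX asynchronous bulk reads, so all
 * ports sharing the same context must agree on the mode; the warning above
 * flags a mismatch.
 */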

/**
 * DR flow drop action support detect.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 */
static void
mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.dv_flow_en || !priv->sh->dr_drop_action)
		return;
	/*
	 * Use the DR drop action placeholder when it is supported;
	 * otherwise, use the queue drop action.
	 */
	if (mlx5_flow_discover_dr_action_support(dev))
		priv->root_drop_action = priv->drop_queue.hrxq->action;
	else
		priv->root_drop_action = priv->sh->dr_drop_action;
#endif
}

static void
mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	void *ctx = priv->sh->ctx;

	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
	if (!priv->q_counters) {
		struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
		struct ibv_wq *wq;

		DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
			"by DevX - fall-back to use the kernel driver global "
			"queue counter.", dev->data->port_id);
		/* Create WQ by kernel and query its queue counter ID. */
		if (cq) {
			wq = mlx5_glue->create_wq(ctx,
						  &(struct ibv_wq_init_attr){
						    .wq_type = IBV_WQT_RQ,
						    .max_wr = 1,
						    .max_sge = 1,
						    .pd = priv->sh->pd,
						    .cq = cq,
						});
			if (wq) {
				/* Counter is assigned only on RDY state. */
				int ret = mlx5_glue->modify_wq(wq,
						 &(struct ibv_wq_attr){
						 .attr_mask = IBV_WQ_ATTR_STATE,
						 .wq_state = IBV_WQS_RDY,
						});

				if (ret == 0)
					mlx5_devx_cmd_wq_query(wq,
							 &priv->counter_set_id);
				claim_zero(mlx5_glue->destroy_wq(wq));
			}
			claim_zero(mlx5_glue->destroy_cq(cq));
		}
	} else {
		priv->counter_set_id = priv->q_counters->id;
	}
	if (priv->counter_set_id == 0)
		DRV_LOG(INFO, "Part of the port %d statistics will not be "
			"available.", dev->data->port_id);
}
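
/*
 * Descriptive note: the temporary CQ/WQ pair above exists only so the kernel
 * assigns its global queue counter, whose ID is then read back with
 * mlx5_devx_cmd_wq_query(); both objects are destroyed right after the query.
 */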

/**
 * Check if representor spawn info matches devargs.
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param eth_da
 *   Device devargs to probe.
 *
 * @return
 *   Match result.
 */
static bool
mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
		       struct rte_eth_devargs *eth_da)
{
	struct mlx5_switch_info *switch_info = &spawn->info;
	unsigned int p, f;
	uint16_t id;
	uint16_t repr_id = mlx5_representor_id_encode(switch_info,
						      eth_da->type);

	switch (eth_da->type) {
	case RTE_ETH_REPRESENTOR_SF:
		if (!(spawn->info.port_name == -1 &&
		      switch_info->name_type ==
				MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
		    switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF) {
			rte_errno = EBUSY;
			return false;
		}
		break;
	case RTE_ETH_REPRESENTOR_VF:
		/* Allows HPF representor index -1 as exception. */
		if (!(spawn->info.port_name == -1 &&
		      switch_info->name_type ==
				MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
		    switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF) {
			rte_errno = EBUSY;
			return false;
		}
		break;
	case RTE_ETH_REPRESENTOR_NONE:
		rte_errno = EBUSY;
		return false;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR, "unsupported representor type");
		return false;
	}
	/* Check representor ID: */
	for (p = 0; p < eth_da->nb_ports; ++p) {
		if (spawn->pf_bond < 0) {
			/* For non-LAG mode, allow and ignore pf. */
			switch_info->pf_num = eth_da->ports[p];
			repr_id = mlx5_representor_id_encode(switch_info,
							     eth_da->type);
		}
		for (f = 0; f < eth_da->nb_representor_ports; ++f) {
			id = MLX5_REPRESENTOR_ID
				(eth_da->ports[p], eth_da->type,
				 eth_da->representor_ports[f]);
			if (repr_id == id)
				return true;
		}
	}
	rte_errno = EBUSY;
	return false;
}
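
/*
 * Worked example (hypothetical values): for devargs "representor=vf2" on
 * PF 0, eth_da->type is RTE_ETH_REPRESENTOR_VF and the loop above compares
 * the spawn's encoded representor ID against
 * MLX5_REPRESENTOR_ID(0, RTE_ETH_REPRESENTOR_VF, 2).
 */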

/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 * @param eth_da
 *   Device arguments.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EBUSY: device is not supposed to be spawned.
 *   EEXIST: device is already spawned.
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config *config,
	       struct rte_eth_devargs *eth_da)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct ibv_port_attr port_attr;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int hw_padding = 0;
	unsigned int mps;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	struct mlx5_port_info vport_info = { .query_flags = 0 };
	int i;

	/* Determine if this port representor is supposed to be spawned. */
	if (switch_info->representor && dpdk_dev->devargs &&
	    !mlx5_representor_match(spawn, eth_da))
		return NULL;
	/* Build device name. */
	if (spawn->pf_bond < 0) {
		/* Single device. */
		if (!switch_info->representor)
			strlcpy(name, dpdk_dev->name, sizeof(name));
		else
			err = snprintf(name, sizeof(name), "%s_representor_%s%u",
				 dpdk_dev->name,
				 switch_info->name_type ==
				 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
				 switch_info->port_name);
	} else {
		/* Bonding device. */
		if (!switch_info->representor) {
			err = snprintf(name, sizeof(name), "%s_%s",
				 dpdk_dev->name,
				 mlx5_os_get_dev_device_name(spawn->phys_dev));
		} else {
			err = snprintf(name, sizeof(name), "%s_%s_representor_c%dpf%d%s%u",
				dpdk_dev->name,
				mlx5_os_get_dev_device_name(spawn->phys_dev),
				switch_info->ctrl_num,
				switch_info->pf_num,
				switch_info->name_type ==
				MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
				switch_info->port_name);
		}
	}
	if (err >= (int)sizeof(name))
		DRV_LOG(WARNING, "device name overflow %s", name);
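	/*
	 * Name examples (illustrative): VF representor 2 on a standalone
	 * device yields "<dev>_representor_vf2"; the same representor behind
	 * a bonding device yields "<dev>_<ibdev>_representor_c0pf0vf2".
	 */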
	/* check if the device is already spawned */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct mlx5_mp_id mp_id;

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "can not attach rte ethdev");
			rte_errno = ENOMEM;
			return NULL;
		}
		eth_dev->device = dpdk_dev;
		eth_dev->dev_ops = &mlx5_dev_sec_ops;
		eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
		eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
		err = mlx5_proc_priv_init(eth_dev);
		if (err)
			return NULL;
		mlx5_mp_id_init(&mp_id, eth_dev->data->port_id);
		/* Receive command fd from primary process */
		err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
		if (err < 0)
			goto err_secondary;
		/* Remap UAR for Tx queues. */
		err = mlx5_tx_uar_init_secondary(eth_dev, err);
		if (err)
			goto err_secondary;
		/*
		 * Ethdev pointer is still required as input since
		 * the primary device is not accessible from the
		 * secondary process.
		 */
		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
		return eth_dev;
err_secondary:
		mlx5_dev_close(eth_dev);
		return NULL;
	}
	/*
	 * Some parameters ("tx_db_nc" in particular) are needed in
	 * advance to create the dv/verbs device context. We process the
	 * devargs here to get them, and process the devargs again later
	 * to override some hardware settings.
	 */
	err = mlx5_args(config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	if (config->dv_miss_info) {
		if (switch_info->master || switch_info->representor)
			config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
	}
	mlx5_malloc_mem_select(config->sys_mem_en);
	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
	if (!sh)
		return NULL;
	config->devx = sh->devx;
#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
	config->dest_tir = 1;
#endif
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
		MLX5_SW_PARSING_TSO_CAP);
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
	}
#endif
	/* Rx CQE compression is enabled by default. */
	config->cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		config->tunnel_en = dv_attr.tunnel_offloads_caps &
			     (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
	}
	if (config->tunnel_en) {
		DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
			config->tunnel_en &
			MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ? "[VXLAN]" : "",
			config->tunnel_en &
			MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ? "[GRE]" : "",
			config->tunnel_en &
			MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ? "[GENEVE]" : ""
		);
	} else {
		DRV_LOG(DEBUG, "tunnel offloading is not supported");
	}
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config->mpls_en = mpls_en;
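	/*
	 * Note (descriptive): config->tunnel_en stores the raw
	 * MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_* bits, which is why the
	 * same masks are tested again when composing the debug log above.
	 */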
*/ 1180834a9019SOphir Munk err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr); 11812eb4d010SOphir Munk if (err) { 11822eb4d010SOphir Munk DRV_LOG(ERR, "port query failed: %s", strerror(err)); 11832eb4d010SOphir Munk goto error; 11842eb4d010SOphir Munk } 11852eb4d010SOphir Munk if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 11862eb4d010SOphir Munk DRV_LOG(ERR, "port is not configured in Ethernet mode"); 11872eb4d010SOphir Munk err = EINVAL; 11882eb4d010SOphir Munk goto error; 11892eb4d010SOphir Munk } 11902eb4d010SOphir Munk if (port_attr.state != IBV_PORT_ACTIVE) 11912eb4d010SOphir Munk DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", 11922eb4d010SOphir Munk mlx5_glue->port_state_str(port_attr.state), 11932eb4d010SOphir Munk port_attr.state); 11942eb4d010SOphir Munk /* Allocate private eth device data. */ 11952175c4dcSSuanming Mou priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, 11962eb4d010SOphir Munk sizeof(*priv), 11972175c4dcSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 11982eb4d010SOphir Munk if (priv == NULL) { 11992eb4d010SOphir Munk DRV_LOG(ERR, "priv allocation failure"); 12002eb4d010SOphir Munk err = ENOMEM; 12012eb4d010SOphir Munk goto error; 12022eb4d010SOphir Munk } 12032eb4d010SOphir Munk priv->sh = sh; 120491389890SOphir Munk priv->dev_port = spawn->phys_port; 12052eb4d010SOphir Munk priv->pci_dev = spawn->pci_dev; 12062eb4d010SOphir Munk priv->mtu = RTE_ETHER_MTU; 12072eb4d010SOphir Munk /* Some internal functions rely on Netlink sockets, open them now. */ 12082eb4d010SOphir Munk priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); 12092eb4d010SOphir Munk priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); 12102eb4d010SOphir Munk priv->representor = !!switch_info->representor; 12112eb4d010SOphir Munk priv->master = !!switch_info->master; 12122eb4d010SOphir Munk priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 12132eb4d010SOphir Munk priv->vport_meta_tag = 0; 12142eb4d010SOphir Munk priv->vport_meta_mask = 0; 12152eb4d010SOphir Munk priv->pf_bond = spawn->pf_bond; 1216ce4062cbSGregory Etelson 1217ce4062cbSGregory Etelson DRV_LOG(DEBUG, 1218ce4062cbSGregory Etelson "dev_port=%u bus=%s pci=%s master=%d representor=%d pf_bond=%d\n", 1219ce4062cbSGregory Etelson priv->dev_port, dpdk_dev->bus->name, 1220ce4062cbSGregory Etelson priv->pci_dev ? priv->pci_dev->name : "NONE", 1221ce4062cbSGregory Etelson priv->master, priv->representor, priv->pf_bond); 1222ce4062cbSGregory Etelson 12232eb4d010SOphir Munk /* 1224d0cf77e8SViacheslav Ovsiienko * If we have E-Switch we should determine the vport attributes. 1225d0cf77e8SViacheslav Ovsiienko * E-Switch may use either source vport field or reg_c[0] metadata 1226d0cf77e8SViacheslav Ovsiienko * register to match on vport index. The engaged part of metadata 1227d0cf77e8SViacheslav Ovsiienko * register is defined by mask. 
12282eb4d010SOphir Munk */
12292eb4d010SOphir Munk if (switch_info->representor || switch_info->master) {
1230d0cf77e8SViacheslav Ovsiienko err = mlx5_glue->devx_port_query(sh->ctx,
1231d0cf77e8SViacheslav Ovsiienko spawn->phys_port,
1232d0cf77e8SViacheslav Ovsiienko &vport_info);
12332eb4d010SOphir Munk if (err) {
12342eb4d010SOphir Munk DRV_LOG(WARNING,
12352eb4d010SOphir Munk "can't query devx port %d on device %s",
1236834a9019SOphir Munk spawn->phys_port,
1237834a9019SOphir Munk mlx5_os_get_dev_device_name(spawn->phys_dev));
1238d0cf77e8SViacheslav Ovsiienko vport_info.query_flags = 0;
12392eb4d010SOphir Munk }
12402eb4d010SOphir Munk }
1241d0cf77e8SViacheslav Ovsiienko if (vport_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1242d0cf77e8SViacheslav Ovsiienko priv->vport_meta_tag = vport_info.vport_meta_tag;
1243d0cf77e8SViacheslav Ovsiienko priv->vport_meta_mask = vport_info.vport_meta_mask;
12442eb4d010SOphir Munk if (!priv->vport_meta_mask) {
12452eb4d010SOphir Munk DRV_LOG(ERR, "vport zero mask for port %d"
12462eb4d010SOphir Munk " on bonding device %s",
1247834a9019SOphir Munk spawn->phys_port,
1248834a9019SOphir Munk mlx5_os_get_dev_device_name
1249834a9019SOphir Munk (spawn->phys_dev));
12502eb4d010SOphir Munk err = ENOTSUP;
12512eb4d010SOphir Munk goto error;
12522eb4d010SOphir Munk }
12532eb4d010SOphir Munk if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
12542eb4d010SOphir Munk DRV_LOG(ERR, "invalid vport tag for port %d"
12552eb4d010SOphir Munk " on bonding device %s",
1256834a9019SOphir Munk spawn->phys_port,
1257834a9019SOphir Munk mlx5_os_get_dev_device_name
1258834a9019SOphir Munk (spawn->phys_dev));
12592eb4d010SOphir Munk err = ENOTSUP;
12602eb4d010SOphir Munk goto error;
12612eb4d010SOphir Munk }
12622eb4d010SOphir Munk }
1263d0cf77e8SViacheslav Ovsiienko if (vport_info.query_flags & MLX5_PORT_QUERY_VPORT) {
1264d0cf77e8SViacheslav Ovsiienko priv->vport_id = vport_info.vport_id;
1265ecaee305SViacheslav Ovsiienko } else if (spawn->pf_bond >= 0 &&
1266ecaee305SViacheslav Ovsiienko (switch_info->representor || switch_info->master)) {
12672eb4d010SOphir Munk DRV_LOG(ERR, "can't deduce vport index for port %d"
12682eb4d010SOphir Munk " on bonding device %s",
1269834a9019SOphir Munk spawn->phys_port,
1270834a9019SOphir Munk mlx5_os_get_dev_device_name(spawn->phys_dev));
12712eb4d010SOphir Munk err = ENOTSUP;
12722eb4d010SOphir Munk goto error;
12732eb4d010SOphir Munk } else {
12742eb4d010SOphir Munk /*
1275d0cf77e8SViacheslav Ovsiienko * Derive the vport index in a compatible way. Kernel/rdma_core
1276d0cf77e8SViacheslav Ovsiienko * supports single E-Switch per PF configurations only, and the
1277d0cf77e8SViacheslav Ovsiienko * vport_id field contains the vport index for the associated
1278d0cf77e8SViacheslav Ovsiienko * VF, which is deduced from the representor port name.
12792eb4d010SOphir Munk * For example, take IB device port 10 with an attached network
12802eb4d010SOphir Munk * device eth0 whose port name attribute is pf0vf2: we deduce
12812eb4d010SOphir Munk * the VF number as 2 and set the vport index to 3 (2+1). This
12822eb4d010SOphir Munk * assignment schema should be changed if multiple E-Switch
12832eb4d010SOphir Munk * instances per PF configurations and/or PCI
12842eb4d010SOphir Munk * subfunctions are added.
12852eb4d010SOphir Munk */
12862eb4d010SOphir Munk priv->vport_id = switch_info->representor ?
12872eb4d010SOphir Munk switch_info->port_name + 1 : -1; 1288d0cf77e8SViacheslav Ovsiienko } 128991766faeSXueming Li priv->representor_id = mlx5_representor_id_encode(switch_info, 129091766faeSXueming Li eth_da->type); 12912eb4d010SOphir Munk /* 12922eb4d010SOphir Munk * Look for sibling devices in order to reuse their switch domain 12932eb4d010SOphir Munk * if any, otherwise allocate one. 12942eb4d010SOphir Munk */ 1295ce4062cbSGregory Etelson MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { 12962eb4d010SOphir Munk const struct mlx5_priv *opriv = 12972eb4d010SOphir Munk rte_eth_devices[port_id].data->dev_private; 12982eb4d010SOphir Munk 12992eb4d010SOphir Munk if (!opriv || 13002eb4d010SOphir Munk opriv->sh != priv->sh || 13012eb4d010SOphir Munk opriv->domain_id == 13022eb4d010SOphir Munk RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 13032eb4d010SOphir Munk continue; 13042eb4d010SOphir Munk priv->domain_id = opriv->domain_id; 1305ce4062cbSGregory Etelson DRV_LOG(DEBUG, "dev_port-%u inherit domain_id=%u\n", 1306ce4062cbSGregory Etelson priv->dev_port, priv->domain_id); 13072eb4d010SOphir Munk break; 13082eb4d010SOphir Munk } 13092eb4d010SOphir Munk if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 13102eb4d010SOphir Munk err = rte_eth_switch_domain_alloc(&priv->domain_id); 13112eb4d010SOphir Munk if (err) { 13122eb4d010SOphir Munk err = rte_errno; 13132eb4d010SOphir Munk DRV_LOG(ERR, "unable to allocate switch domain: %s", 13142eb4d010SOphir Munk strerror(rte_errno)); 13152eb4d010SOphir Munk goto error; 13162eb4d010SOphir Munk } 13172eb4d010SOphir Munk own_domain_id = 1; 1318ce4062cbSGregory Etelson DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n", 1319ce4062cbSGregory Etelson priv->dev_port, priv->domain_id); 13202eb4d010SOphir Munk } 13212eb4d010SOphir Munk /* Override some values set by hardware configuration. */ 1322d462a83cSMichael Baum mlx5_args(config, dpdk_dev->devargs); 1323e9d420dfSGregory Etelson err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev); 13242eb4d010SOphir Munk if (err) 13252eb4d010SOphir Munk goto error; 1326d462a83cSMichael Baum config->hw_csum = !!(sh->device_attr.device_cap_flags_ex & 13272eb4d010SOphir Munk IBV_DEVICE_RAW_IP_CSUM); 13282eb4d010SOphir Munk DRV_LOG(DEBUG, "checksum offloading is %ssupported", 1329d462a83cSMichael Baum (config->hw_csum ? "" : "not ")); 13302eb4d010SOphir Munk #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ 13312eb4d010SOphir Munk !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 13322eb4d010SOphir Munk DRV_LOG(DEBUG, "counters are not supported"); 13332eb4d010SOphir Munk #endif 13342eb4d010SOphir Munk #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR) 1335d462a83cSMichael Baum if (config->dv_flow_en) { 13362eb4d010SOphir Munk DRV_LOG(WARNING, "DV flow is not supported"); 1337d462a83cSMichael Baum config->dv_flow_en = 0; 13382eb4d010SOphir Munk } 13392eb4d010SOphir Munk #endif 1340cdfdb82dSXueming Li if (spawn->max_port > UINT8_MAX) { 1341cdfdb82dSXueming Li /* Verbs can't support ports larger than 255 by design. */ 1342cdfdb82dSXueming Li DRV_LOG(ERR, "can't support IB ports > UINT8_MAX"); 1343cdfdb82dSXueming Li err = EINVAL; 1344cdfdb82dSXueming Li goto error; 1345cdfdb82dSXueming Li } 1346d462a83cSMichael Baum config->ind_table_max_size = 13472eb4d010SOphir Munk sh->device_attr.max_rwq_indirection_table_size; 13482eb4d010SOphir Munk /* 13492eb4d010SOphir Munk * Remove this check once DPDK supports larger/variable 13502eb4d010SOphir Munk * indirection tables. 
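*/
/*
 * Editor's sketch (not upstream code): a worked instance of the clamp
 * performed right below. A device reporting
 * max_rwq_indirection_table_size of, say, 2048 still ends up
 * advertising ETH_RSS_RETA_SIZE_512, the largest RETA size the ethdev
 * API can program today. The MLX5_OS_DOC_SKETCH guard and the doc_*
 * name are hypothetical, illustration only.
 */
#ifdef MLX5_OS_DOC_SKETCH
	unsigned int doc_reta_size = RTE_MIN(2048u,
			(unsigned int)ETH_RSS_RETA_SIZE_512); /* == 512 */
	RTE_SET_USED(doc_reta_size);
#endif
/* The clamp itself: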
13512eb4d010SOphir Munk */ 1352d462a83cSMichael Baum if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 1353d462a83cSMichael Baum config->ind_table_max_size = ETH_RSS_RETA_SIZE_512; 13542eb4d010SOphir Munk DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 1355d462a83cSMichael Baum config->ind_table_max_size); 1356d462a83cSMichael Baum config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & 13572eb4d010SOphir Munk IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); 13582eb4d010SOphir Munk DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 1359d462a83cSMichael Baum (config->hw_vlan_strip ? "" : "not ")); 1360d462a83cSMichael Baum config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & 13612eb4d010SOphir Munk IBV_RAW_PACKET_CAP_SCATTER_FCS); 13622eb4d010SOphir Munk #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) 13632eb4d010SOphir Munk hw_padding = !!sh->device_attr.rx_pad_end_addr_align; 13642eb4d010SOphir Munk #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) 13652eb4d010SOphir Munk hw_padding = !!(sh->device_attr.device_cap_flags_ex & 13662eb4d010SOphir Munk IBV_DEVICE_PCI_WRITE_END_PADDING); 13672eb4d010SOphir Munk #endif 1368d462a83cSMichael Baum if (config->hw_padding && !hw_padding) { 13692eb4d010SOphir Munk DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 1370d462a83cSMichael Baum config->hw_padding = 0; 1371d462a83cSMichael Baum } else if (config->hw_padding) { 13722eb4d010SOphir Munk DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); 13732eb4d010SOphir Munk } 1374d462a83cSMichael Baum config->tso = (sh->device_attr.max_tso > 0 && 13752eb4d010SOphir Munk (sh->device_attr.tso_supported_qpts & 13762eb4d010SOphir Munk (1 << IBV_QPT_RAW_PACKET))); 1377d462a83cSMichael Baum if (config->tso) 1378d462a83cSMichael Baum config->tso_max_payload_sz = sh->device_attr.max_tso; 13792eb4d010SOphir Munk /* 13802eb4d010SOphir Munk * MPW is disabled by default, while the Enhanced MPW is enabled 13812eb4d010SOphir Munk * by default. 13822eb4d010SOphir Munk */ 1383d462a83cSMichael Baum if (config->mps == MLX5_ARG_UNSET) 1384d462a83cSMichael Baum config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : 13852eb4d010SOphir Munk MLX5_MPW_DISABLED; 13862eb4d010SOphir Munk else 1387d462a83cSMichael Baum config->mps = config->mps ? mps : MLX5_MPW_DISABLED; 13882eb4d010SOphir Munk DRV_LOG(INFO, "%sMPS is %s", 1389d462a83cSMichael Baum config->mps == MLX5_MPW_ENHANCED ? "enhanced " : 1390d462a83cSMichael Baum config->mps == MLX5_MPW ? "legacy " : "", 1391d462a83cSMichael Baum config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 1392d462a83cSMichael Baum if (config->devx) { 1393d462a83cSMichael Baum err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr); 13942eb4d010SOphir Munk if (err) { 13952eb4d010SOphir Munk err = -err; 13962eb4d010SOphir Munk goto error; 13972eb4d010SOphir Munk } 13983aa27915SSuanming Mou /* Check relax ordering support. 
*/ 1399e82ddd28STal Shnaiderman if (!haswell_broadwell_cpu) { 1400e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_write = 1401e82ddd28STal Shnaiderman config->hca_attr.relaxed_ordering_write; 1402e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_read = 1403e82ddd28STal Shnaiderman config->hca_attr.relaxed_ordering_read; 1404e82ddd28STal Shnaiderman } else { 1405e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_read = 0; 1406e82ddd28STal Shnaiderman sh->cmng.relaxed_ordering_write = 0; 1407e82ddd28STal Shnaiderman } 1408d61381adSViacheslav Ovsiienko sh->rq_ts_format = config->hca_attr.rq_ts_format; 1409d61381adSViacheslav Ovsiienko sh->sq_ts_format = config->hca_attr.sq_ts_format; 141096f85ec4SDong Zhou sh->steering_format_version = 141196f85ec4SDong Zhou config->hca_attr.steering_format_version; 1412d61381adSViacheslav Ovsiienko sh->qp_ts_format = config->hca_attr.qp_ts_format; 14132eb4d010SOphir Munk /* Check for LRO support. */ 1414d462a83cSMichael Baum if (config->dest_tir && config->hca_attr.lro_cap && 1415d462a83cSMichael Baum config->dv_flow_en) { 14162eb4d010SOphir Munk /* TBD check tunnel lro caps. */ 1417d462a83cSMichael Baum config->lro.supported = config->hca_attr.lro_cap; 14182eb4d010SOphir Munk DRV_LOG(DEBUG, "Device supports LRO"); 14192eb4d010SOphir Munk /* 14202eb4d010SOphir Munk * If LRO timeout is not configured by application, 14212eb4d010SOphir Munk * use the minimal supported value. 14222eb4d010SOphir Munk */ 1423d462a83cSMichael Baum if (!config->lro.timeout) 1424d462a83cSMichael Baum config->lro.timeout = 1425d462a83cSMichael Baum config->hca_attr.lro_timer_supported_periods[0]; 14262eb4d010SOphir Munk DRV_LOG(DEBUG, "LRO session timeout set to %d usec", 1427d462a83cSMichael Baum config->lro.timeout); 1428613d64e4SDekel Peled DRV_LOG(DEBUG, "LRO minimal size of TCP segment " 1429613d64e4SDekel Peled "required for coalescing is %d bytes", 1430613d64e4SDekel Peled config->hca_attr.lro_min_mss_size); 14312eb4d010SOphir Munk } 1432c99b4f8bSLi Zhang #if defined(HAVE_MLX5DV_DR) && \ 1433c99b4f8bSLi Zhang (defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \ 1434c99b4f8bSLi Zhang defined(HAVE_MLX5_DR_CREATE_ACTION_ASO)) 1435d462a83cSMichael Baum if (config->hca_attr.qos.sup && 1436b6505738SDekel Peled config->hca_attr.qos.flow_meter_old && 1437d462a83cSMichael Baum config->dv_flow_en) { 14382eb4d010SOphir Munk uint8_t reg_c_mask = 1439d462a83cSMichael Baum config->hca_attr.qos.flow_meter_reg_c_ids; 14402eb4d010SOphir Munk /* 14412eb4d010SOphir Munk * Meter needs two REG_C's for color match and pre-sfx 14422eb4d010SOphir Munk * flow match. Here get the REG_C for color match. 14432eb4d010SOphir Munk * REG_C_0 and REG_C_1 is reserved for metadata feature. 14442eb4d010SOphir Munk */ 14452eb4d010SOphir Munk reg_c_mask &= 0xfc; 14462eb4d010SOphir Munk if (__builtin_popcount(reg_c_mask) < 1) { 14472eb4d010SOphir Munk priv->mtr_en = 0; 14482eb4d010SOphir Munk DRV_LOG(WARNING, "No available register for" 14492eb4d010SOphir Munk " meter."); 14502eb4d010SOphir Munk } else { 145131ef2982SDekel Peled /* 145231ef2982SDekel Peled * The meter color register is used by the 145331ef2982SDekel Peled * flow-hit feature as well. 145431ef2982SDekel Peled * The flow-hit feature must use REG_C_3 145531ef2982SDekel Peled * Prefer REG_C_3 if it is available. 
145631ef2982SDekel Peled */
145731ef2982SDekel Peled if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
145831ef2982SDekel Peled priv->mtr_color_reg = REG_C_3;
145931ef2982SDekel Peled else
146031ef2982SDekel Peled priv->mtr_color_reg = ffs(reg_c_mask)
146131ef2982SDekel Peled - 1 + REG_C_0;
14622eb4d010SOphir Munk priv->mtr_en = 1;
14632eb4d010SOphir Munk priv->mtr_reg_share =
1464b6505738SDekel Peled config->hca_attr.qos.flow_meter;
14652eb4d010SOphir Munk DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
14662eb4d010SOphir Munk priv->mtr_color_reg);
14672eb4d010SOphir Munk }
14682eb4d010SOphir Munk }
146929efa63aSLi Zhang if (config->hca_attr.qos.sup &&
147029efa63aSLi Zhang config->hca_attr.qos.flow_meter_aso_sup) {
147129efa63aSLi Zhang uint32_t log_obj_size =
147229efa63aSLi Zhang rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
147329efa63aSLi Zhang if (log_obj_size >=
147429efa63aSLi Zhang config->hca_attr.qos.log_meter_aso_granularity &&
147529efa63aSLi Zhang log_obj_size <=
147644432018SLi Zhang config->hca_attr.qos.log_meter_aso_max_alloc)
147729efa63aSLi Zhang sh->meter_aso_en = 1;
147844432018SLi Zhang }
147944432018SLi Zhang if (priv->mtr_en) {
1480afb4aa4fSLi Zhang err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
148129efa63aSLi Zhang if (err) {
148229efa63aSLi Zhang err = -err;
148329efa63aSLi Zhang goto error;
148429efa63aSLi Zhang }
148529efa63aSLi Zhang }
1486630a587bSRongwei Liu if (config->hca_attr.flow.tunnel_header_0_1)
1487630a587bSRongwei Liu sh->tunnel_header_0_1 = 1;
14882eb4d010SOphir Munk #endif
1489a2999c7bSDekel Peled #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
149031ef2982SDekel Peled if (config->hca_attr.flow_hit_aso &&
149131ef2982SDekel Peled priv->mtr_color_reg == REG_C_3) {
149231ef2982SDekel Peled sh->flow_hit_aso_en = 1;
149331ef2982SDekel Peled err = mlx5_flow_aso_age_mng_init(sh);
149431ef2982SDekel Peled if (err) {
149531ef2982SDekel Peled err = -err;
149631ef2982SDekel Peled goto error;
149731ef2982SDekel Peled }
149831ef2982SDekel Peled DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
149931ef2982SDekel Peled }
1500a2999c7bSDekel Peled #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1501ee9e5fadSBing Zhao #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
1502ee9e5fadSBing Zhao defined(HAVE_MLX5_DR_ACTION_ASO_CT)
1503ee9e5fadSBing Zhao if (config->hca_attr.ct_offload &&
1504ee9e5fadSBing Zhao priv->mtr_color_reg == REG_C_3) {
1505ee9e5fadSBing Zhao err = mlx5_flow_aso_ct_mng_init(sh);
1506ee9e5fadSBing Zhao if (err) {
1507ee9e5fadSBing Zhao err = -err;
1508ee9e5fadSBing Zhao goto error;
1509ee9e5fadSBing Zhao }
1510ee9e5fadSBing Zhao DRV_LOG(DEBUG, "CT ASO is supported.");
1511ee9e5fadSBing Zhao sh->ct_aso_en = 1;
1512ee9e5fadSBing Zhao }
1513ee9e5fadSBing Zhao #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
151496b1f027SJiawei Wang #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
151596b1f027SJiawei Wang if (config->hca_attr.log_max_ft_sampler_num > 0 &&
151696b1f027SJiawei Wang config->dv_flow_en) {
151796b1f027SJiawei Wang priv->sampler_en = 1;
15181b9e9826SThomas Monjalon DRV_LOG(DEBUG, "Sampler enabled!");
151996b1f027SJiawei Wang } else {
152096b1f027SJiawei Wang priv->sampler_en = 0;
152196b1f027SJiawei Wang if (!config->hca_attr.log_max_ft_sampler_num)
15221b9e9826SThomas Monjalon DRV_LOG(WARNING,
15231b9e9826SThomas Monjalon "No available register for sampler.");
152496b1f027SJiawei Wang else
15251b9e9826SThomas Monjalon DRV_LOG(DEBUG, "DV flow is not supported!");
152696b1f027SJiawei Wang }
152796b1f027SJiawei Wang
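/*
 * Editor's sketch (not upstream code): a worked instance of the meter
 * color-register choice above. With flow_meter_reg_c_ids == 0x3c,
 * masking off REG_C_0/REG_C_1 keeps 0x3c; bit (REG_C_3 - REG_C_0) is
 * set, so REG_C_3 wins. With 0x34 the fallback ffs(0x34) - 1 + REG_C_0
 * selects REG_C_2. The MLX5_OS_DOC_SKETCH guard is hypothetical and
 * never defined, and the doc_* names are invented for illustration.
 */
#ifdef MLX5_OS_DOC_SKETCH
	uint8_t doc_mask = 0x34 & 0xfc; /* REG_C_0/REG_C_1 reserved */
	int doc_reg = (doc_mask & (1 << (REG_C_3 - REG_C_0))) ?
		      REG_C_3 : ffs(doc_mask) - 1 + REG_C_0; /* REG_C_2 */
	RTE_SET_USED(doc_reg);
#endif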
#endif 15282eb4d010SOphir Munk } 15293d3f4e6dSAlexander Kozyrev if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 && 15303d3f4e6dSAlexander Kozyrev !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) { 15313d3f4e6dSAlexander Kozyrev DRV_LOG(WARNING, "Rx CQE 128B compression is not supported"); 15323d3f4e6dSAlexander Kozyrev config->cqe_comp = 0; 15333d3f4e6dSAlexander Kozyrev } 15343d3f4e6dSAlexander Kozyrev if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX && 15353d3f4e6dSAlexander Kozyrev (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) { 15363d3f4e6dSAlexander Kozyrev DRV_LOG(WARNING, "Flow Tag CQE compression" 15373d3f4e6dSAlexander Kozyrev " format isn't supported."); 15383d3f4e6dSAlexander Kozyrev config->cqe_comp = 0; 15393d3f4e6dSAlexander Kozyrev } 15403d3f4e6dSAlexander Kozyrev if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX && 15413d3f4e6dSAlexander Kozyrev (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) { 15423d3f4e6dSAlexander Kozyrev DRV_LOG(WARNING, "L3/L4 Header CQE compression" 15433d3f4e6dSAlexander Kozyrev " format isn't supported."); 15443d3f4e6dSAlexander Kozyrev config->cqe_comp = 0; 15453d3f4e6dSAlexander Kozyrev } 15463d3f4e6dSAlexander Kozyrev DRV_LOG(DEBUG, "Rx CQE compression is %ssupported", 15473d3f4e6dSAlexander Kozyrev config->cqe_comp ? "" : "not "); 1548d462a83cSMichael Baum if (config->tx_pp) { 15498f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz", 1550d462a83cSMichael Baum config->hca_attr.dev_freq_khz); 15518f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Packet pacing is %ssupported", 1552d462a83cSMichael Baum config->hca_attr.qos.packet_pacing ? "" : "not "); 15538f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Cross channel ops are %ssupported", 1554d462a83cSMichael Baum config->hca_attr.cross_channel ? "" : "not "); 15558f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "WQE index ignore is %ssupported", 1556d462a83cSMichael Baum config->hca_attr.wqe_index_ignore ? "" : "not "); 15578f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported", 1558d462a83cSMichael Baum config->hca_attr.non_wire_sq ? "" : "not "); 15598f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)", 1560d462a83cSMichael Baum config->hca_attr.log_max_static_sq_wq ? "" : "not ", 1561d462a83cSMichael Baum config->hca_attr.log_max_static_sq_wq); 15628f848f32SViacheslav Ovsiienko DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported", 1563d462a83cSMichael Baum config->hca_attr.qos.wqe_rate_pp ? 
"" : "not "); 1564d462a83cSMichael Baum if (!config->devx) { 15658f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "DevX is required for packet pacing"); 15668f848f32SViacheslav Ovsiienko err = ENODEV; 15678f848f32SViacheslav Ovsiienko goto error; 15688f848f32SViacheslav Ovsiienko } 1569d462a83cSMichael Baum if (!config->hca_attr.qos.packet_pacing) { 15708f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Packet pacing is not supported"); 15718f848f32SViacheslav Ovsiienko err = ENODEV; 15728f848f32SViacheslav Ovsiienko goto error; 15738f848f32SViacheslav Ovsiienko } 1574d462a83cSMichael Baum if (!config->hca_attr.cross_channel) { 15758f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Cross channel operations are" 15768f848f32SViacheslav Ovsiienko " required for packet pacing"); 15778f848f32SViacheslav Ovsiienko err = ENODEV; 15788f848f32SViacheslav Ovsiienko goto error; 15798f848f32SViacheslav Ovsiienko } 1580d462a83cSMichael Baum if (!config->hca_attr.wqe_index_ignore) { 15818f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "WQE index ignore feature is" 15828f848f32SViacheslav Ovsiienko " required for packet pacing"); 15838f848f32SViacheslav Ovsiienko err = ENODEV; 15848f848f32SViacheslav Ovsiienko goto error; 15858f848f32SViacheslav Ovsiienko } 1586d462a83cSMichael Baum if (!config->hca_attr.non_wire_sq) { 15878f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Non-wire SQ feature is" 15888f848f32SViacheslav Ovsiienko " required for packet pacing"); 15898f848f32SViacheslav Ovsiienko err = ENODEV; 15908f848f32SViacheslav Ovsiienko goto error; 15918f848f32SViacheslav Ovsiienko } 1592d462a83cSMichael Baum if (!config->hca_attr.log_max_static_sq_wq) { 15938f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "Static WQE SQ feature is" 15948f848f32SViacheslav Ovsiienko " required for packet pacing"); 15958f848f32SViacheslav Ovsiienko err = ENODEV; 15968f848f32SViacheslav Ovsiienko goto error; 15978f848f32SViacheslav Ovsiienko } 1598d462a83cSMichael Baum if (!config->hca_attr.qos.wqe_rate_pp) { 15998f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "WQE rate mode is required" 16008f848f32SViacheslav Ovsiienko " for packet pacing"); 16018f848f32SViacheslav Ovsiienko err = ENODEV; 16028f848f32SViacheslav Ovsiienko goto error; 16038f848f32SViacheslav Ovsiienko } 16048f848f32SViacheslav Ovsiienko #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 16058f848f32SViacheslav Ovsiienko DRV_LOG(ERR, "DevX does not provide UAR offset," 16068f848f32SViacheslav Ovsiienko " can't create queues for packet pacing"); 16078f848f32SViacheslav Ovsiienko err = ENODEV; 16088f848f32SViacheslav Ovsiienko goto error; 16098f848f32SViacheslav Ovsiienko #endif 16108f848f32SViacheslav Ovsiienko } 1611d462a83cSMichael Baum if (config->devx) { 1612a2854c4dSViacheslav Ovsiienko uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)]; 1613a2854c4dSViacheslav Ovsiienko 1614972a1bf8SViacheslav Ovsiienko err = config->hca_attr.access_register_user ? 1615972a1bf8SViacheslav Ovsiienko mlx5_devx_cmd_register_read 1616a2854c4dSViacheslav Ovsiienko (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0, 1617972a1bf8SViacheslav Ovsiienko reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP; 1618a2854c4dSViacheslav Ovsiienko if (!err) { 1619a2854c4dSViacheslav Ovsiienko uint32_t ts_mode; 1620a2854c4dSViacheslav Ovsiienko 1621a2854c4dSViacheslav Ovsiienko /* MTUTC register is read successfully. 
*/ 1622a2854c4dSViacheslav Ovsiienko ts_mode = MLX5_GET(register_mtutc, reg, 1623a2854c4dSViacheslav Ovsiienko time_stamp_mode); 1624a2854c4dSViacheslav Ovsiienko if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) 1625d462a83cSMichael Baum config->rt_timestamp = 1; 1626a2854c4dSViacheslav Ovsiienko } else { 1627a2854c4dSViacheslav Ovsiienko /* Kernel does not support register reading. */ 1628d462a83cSMichael Baum if (config->hca_attr.dev_freq_khz == 1629a2854c4dSViacheslav Ovsiienko (NS_PER_S / MS_PER_S)) 1630d462a83cSMichael Baum config->rt_timestamp = 1; 1631a2854c4dSViacheslav Ovsiienko } 1632a2854c4dSViacheslav Ovsiienko } 163350f95b23SSuanming Mou /* 163450f95b23SSuanming Mou * If HW has bug working with tunnel packet decapsulation and 163550f95b23SSuanming Mou * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip 163650f95b23SSuanming Mou * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore. 163750f95b23SSuanming Mou */ 1638d462a83cSMichael Baum if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en) 1639d462a83cSMichael Baum config->hw_fcs_strip = 0; 164050f95b23SSuanming Mou DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", 1641d462a83cSMichael Baum (config->hw_fcs_strip ? "" : "not ")); 1642d462a83cSMichael Baum if (config->mprq.enabled && mprq) { 1643d462a83cSMichael Baum if (config->mprq.stride_num_n && 1644d462a83cSMichael Baum (config->mprq.stride_num_n > mprq_max_stride_num_n || 1645d462a83cSMichael Baum config->mprq.stride_num_n < mprq_min_stride_num_n)) { 1646d462a83cSMichael Baum config->mprq.stride_num_n = 16472eb4d010SOphir Munk RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, 16482eb4d010SOphir Munk mprq_min_stride_num_n), 16492eb4d010SOphir Munk mprq_max_stride_num_n); 16502eb4d010SOphir Munk DRV_LOG(WARNING, 16512eb4d010SOphir Munk "the number of strides" 16522eb4d010SOphir Munk " for Multi-Packet RQ is out of range," 16532eb4d010SOphir Munk " setting default value (%u)", 1654d462a83cSMichael Baum 1 << config->mprq.stride_num_n); 16552eb4d010SOphir Munk } 1656d462a83cSMichael Baum if (config->mprq.stride_size_n && 1657d462a83cSMichael Baum (config->mprq.stride_size_n > mprq_max_stride_size_n || 1658d462a83cSMichael Baum config->mprq.stride_size_n < mprq_min_stride_size_n)) { 1659d462a83cSMichael Baum config->mprq.stride_size_n = 16602eb4d010SOphir Munk RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N, 16612eb4d010SOphir Munk mprq_min_stride_size_n), 16622eb4d010SOphir Munk mprq_max_stride_size_n); 16632eb4d010SOphir Munk DRV_LOG(WARNING, 16642eb4d010SOphir Munk "the size of a stride" 16652eb4d010SOphir Munk " for Multi-Packet RQ is out of range," 16662eb4d010SOphir Munk " setting default value (%u)", 1667d462a83cSMichael Baum 1 << config->mprq.stride_size_n); 16682eb4d010SOphir Munk } 1669d462a83cSMichael Baum config->mprq.min_stride_size_n = mprq_min_stride_size_n; 1670d462a83cSMichael Baum config->mprq.max_stride_size_n = mprq_max_stride_size_n; 1671d462a83cSMichael Baum } else if (config->mprq.enabled && !mprq) { 16722eb4d010SOphir Munk DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 1673d462a83cSMichael Baum config->mprq.enabled = 0; 16742eb4d010SOphir Munk } 1675d462a83cSMichael Baum if (config->max_dump_files_num == 0) 1676d462a83cSMichael Baum config->max_dump_files_num = 128; 16772eb4d010SOphir Munk eth_dev = rte_eth_dev_allocate(name); 16782eb4d010SOphir Munk if (eth_dev == NULL) { 16792eb4d010SOphir Munk DRV_LOG(ERR, "can not allocate rte ethdev"); 16802eb4d010SOphir Munk err = ENOMEM; 16812eb4d010SOphir Munk goto error; 
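/*
 * Editor's sketch (not upstream code): a worked instance of the MPRQ
 * stride clamping a few lines above. Assuming a device range of
 * [3, 10] for log2 stride numbers and MLX5_MPRQ_STRIDE_NUM_N == 6
 * (the assumed default is illustrative), an out-of-range devarg is
 * replaced by RTE_MIN(RTE_MAX(6, 3), 10) == 6, i.e. 64 strides per
 * WQE. The MLX5_OS_DOC_SKETCH guard and doc_* name are hypothetical.
 */
#ifdef MLX5_OS_DOC_SKETCH
	uint32_t doc_log_num = RTE_MIN(RTE_MAX(6u, 3u), 10u);
	RTE_SET_USED(doc_log_num); /* 1 << 6 == 64 strides */
#endif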
16822eb4d010SOphir Munk } 16832eb4d010SOphir Munk if (priv->representor) { 16842eb4d010SOphir Munk eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 16852eb4d010SOphir Munk eth_dev->data->representor_id = priv->representor_id; 1686ff4e52efSViacheslav Galaktionov MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { 1687ff4e52efSViacheslav Galaktionov struct mlx5_priv *opriv = 1688ff4e52efSViacheslav Galaktionov rte_eth_devices[port_id].data->dev_private; 1689ff4e52efSViacheslav Galaktionov if (opriv && 1690ff4e52efSViacheslav Galaktionov opriv->master && 1691ff4e52efSViacheslav Galaktionov opriv->domain_id == priv->domain_id && 1692ff4e52efSViacheslav Galaktionov opriv->sh == priv->sh) { 1693ff4e52efSViacheslav Galaktionov eth_dev->data->backer_port_id = port_id; 1694ff4e52efSViacheslav Galaktionov break; 1695ff4e52efSViacheslav Galaktionov } 1696ff4e52efSViacheslav Galaktionov } 1697ff4e52efSViacheslav Galaktionov if (port_id >= RTE_MAX_ETHPORTS) 1698ff4e52efSViacheslav Galaktionov eth_dev->data->backer_port_id = eth_dev->data->port_id; 16992eb4d010SOphir Munk } 170039ae7577SSuanming Mou priv->mp_id.port_id = eth_dev->data->port_id; 170139ae7577SSuanming Mou strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 17022eb4d010SOphir Munk /* 17032eb4d010SOphir Munk * Store associated network device interface index. This index 17042eb4d010SOphir Munk * is permanent throughout the lifetime of device. So, we may store 17052eb4d010SOphir Munk * the ifindex here and use the cached value further. 17062eb4d010SOphir Munk */ 17072eb4d010SOphir Munk MLX5_ASSERT(spawn->ifindex); 17082eb4d010SOphir Munk priv->if_index = spawn->ifindex; 17092eb4d010SOphir Munk eth_dev->data->dev_private = priv; 17102eb4d010SOphir Munk priv->dev_data = eth_dev->data; 17112eb4d010SOphir Munk eth_dev->data->mac_addrs = priv->mac; 17122eb4d010SOphir Munk eth_dev->device = dpdk_dev; 1713f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 17142eb4d010SOphir Munk /* Configure the first MAC address by default. */ 17152eb4d010SOphir Munk if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 17162eb4d010SOphir Munk DRV_LOG(ERR, 17172eb4d010SOphir Munk "port %u cannot get MAC address, is mlx5_en" 17182eb4d010SOphir Munk " loaded? (errno: %s)", 17192eb4d010SOphir Munk eth_dev->data->port_id, strerror(rte_errno)); 17202eb4d010SOphir Munk err = ENODEV; 17212eb4d010SOphir Munk goto error; 17222eb4d010SOphir Munk } 17232eb4d010SOphir Munk DRV_LOG(INFO, 1724c2c4f87bSAman Deep Singh "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT, 1725a7db3afcSAman Deep Singh eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac)); 17262eb4d010SOphir Munk #ifdef RTE_LIBRTE_MLX5_DEBUG 17272eb4d010SOphir Munk { 172828743807STal Shnaiderman char ifname[MLX5_NAMESIZE]; 17292eb4d010SOphir Munk 17302eb4d010SOphir Munk if (mlx5_get_ifname(eth_dev, &ifname) == 0) 17312eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 17322eb4d010SOphir Munk eth_dev->data->port_id, ifname); 17332eb4d010SOphir Munk else 17342eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u ifname is unknown", 17352eb4d010SOphir Munk eth_dev->data->port_id); 17362eb4d010SOphir Munk } 17372eb4d010SOphir Munk #endif 17382eb4d010SOphir Munk /* Get actual MTU if possible. 
*/ 17392eb4d010SOphir Munk err = mlx5_get_mtu(eth_dev, &priv->mtu); 17402eb4d010SOphir Munk if (err) { 17412eb4d010SOphir Munk err = rte_errno; 17422eb4d010SOphir Munk goto error; 17432eb4d010SOphir Munk } 17442eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 17452eb4d010SOphir Munk priv->mtu); 17462eb4d010SOphir Munk /* Initialize burst functions to prevent crashes before link-up. */ 17472eb4d010SOphir Munk eth_dev->rx_pkt_burst = removed_rx_burst; 17482eb4d010SOphir Munk eth_dev->tx_pkt_burst = removed_tx_burst; 1749b012b4ceSOphir Munk eth_dev->dev_ops = &mlx5_dev_ops; 1750cbfc6111SFerruh Yigit eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 1751cbfc6111SFerruh Yigit eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 1752cbfc6111SFerruh Yigit eth_dev->rx_queue_count = mlx5_rx_queue_count; 17532eb4d010SOphir Munk /* Register MAC address. */ 17542eb4d010SOphir Munk claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 1755d462a83cSMichael Baum if (config->vf && config->vf_nl_en) 17562eb4d010SOphir Munk mlx5_nl_mac_addr_sync(priv->nl_socket_route, 17572eb4d010SOphir Munk mlx5_ifindex(eth_dev), 17582eb4d010SOphir Munk eth_dev->data->mac_addrs, 17592eb4d010SOphir Munk MLX5_MAX_MAC_ADDRESSES); 17602eb4d010SOphir Munk priv->ctrl_flows = 0; 1761d163fc2dSXueming Li rte_spinlock_init(&priv->flow_list_lock); 17622eb4d010SOphir Munk TAILQ_INIT(&priv->flow_meters); 1763a295c69aSShun Hao priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); 1764a295c69aSShun Hao if (!priv->mtr_profile_tbl) 1765a295c69aSShun Hao goto error; 17662eb4d010SOphir Munk /* Hint libmlx5 to use PMD allocator for data plane resources */ 176736dabceaSMichael Baum mlx5_glue->dv_set_context_attr(sh->ctx, 176836dabceaSMichael Baum MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 176936dabceaSMichael Baum (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ 17702eb4d010SOphir Munk .alloc = &mlx5_alloc_verbs_buf, 17712eb4d010SOphir Munk .free = &mlx5_free_verbs_buf, 177281c3b977SViacheslav Ovsiienko .data = sh, 177336dabceaSMichael Baum })); 17742eb4d010SOphir Munk /* Bring Ethernet device up. */ 17752eb4d010SOphir Munk DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 17762eb4d010SOphir Munk eth_dev->data->port_id); 17772eb4d010SOphir Munk mlx5_set_link_up(eth_dev); 17782eb4d010SOphir Munk /* 17792eb4d010SOphir Munk * Even though the interrupt handler is not installed yet, 17802eb4d010SOphir Munk * interrupts will still trigger on the async_fd from 17812eb4d010SOphir Munk * Verbs context returned by ibv_open_device(). 17822eb4d010SOphir Munk */ 17832eb4d010SOphir Munk mlx5_link_update(eth_dev, 0); 17842eb4d010SOphir Munk #ifdef HAVE_MLX5DV_DR_ESWITCH 1785d462a83cSMichael Baum if (!(config->hca_attr.eswitch_manager && config->dv_flow_en && 17862eb4d010SOphir Munk (switch_info->representor || switch_info->master))) 1787d462a83cSMichael Baum config->dv_esw_en = 0; 17882eb4d010SOphir Munk #else 1789d462a83cSMichael Baum config->dv_esw_en = 0; 17902eb4d010SOphir Munk #endif 17912eb4d010SOphir Munk /* Detect minimal data bytes to inline. */ 1792d462a83cSMichael Baum mlx5_set_min_inline(spawn, config); 17932eb4d010SOphir Munk /* Store device configuration on private structure. 
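*/
/*
 * Editor's sketch (not upstream code): the shape of the rdma-core
 * allocator hook installed above. mlx5dv invokes .alloc(size, .data)
 * for data-path buffers and .free(ptr, .data) on release, so both
 * callbacks must share the same backing allocator -- here the PMD
 * pool behind mlx5_alloc_verbs_buf()/mlx5_free_verbs_buf(). The
 * MLX5_OS_DOC_SKETCH guard and the doc_* name are hypothetical.
 */
#ifdef MLX5_OS_DOC_SKETCH
	struct mlx5dv_ctx_allocators doc_allocators = {
		.alloc = &mlx5_alloc_verbs_buf,
		.free = &mlx5_free_verbs_buf,
		.data = sh,
	};
	RTE_SET_USED(doc_allocators);
#endif
/* The store itself: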
*/ 1794d462a83cSMichael Baum priv->config = *config; 1795b4edeaf3SSuanming Mou for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { 1796b4edeaf3SSuanming Mou icfg[i].release_mem_en = !!config->reclaim_mode; 1797b4edeaf3SSuanming Mou if (config->reclaim_mode) 1798b4edeaf3SSuanming Mou icfg[i].per_core_cache = 0; 1799b4edeaf3SSuanming Mou priv->flows[i] = mlx5_ipool_create(&icfg[i]); 1800b4edeaf3SSuanming Mou if (!priv->flows[i]) 1801b4edeaf3SSuanming Mou goto error; 1802b4edeaf3SSuanming Mou } 18032eb4d010SOphir Munk /* Create context for virtual machine VLAN workaround. */ 18042eb4d010SOphir Munk priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); 1805d462a83cSMichael Baum if (config->dv_flow_en) { 18062eb4d010SOphir Munk err = mlx5_alloc_shared_dr(priv); 18072eb4d010SOphir Munk if (err) 18082eb4d010SOphir Munk goto error; 18092eb4d010SOphir Munk } 18107aa9892fSMichael Baum if (config->devx && config->dv_flow_en && config->dest_tir) { 18115eaf882eSMichael Baum priv->obj_ops = devx_obj_ops; 18120c762e81SMichael Baum priv->obj_ops.drop_action_create = 18130c762e81SMichael Baum ibv_obj_ops.drop_action_create; 18140c762e81SMichael Baum priv->obj_ops.drop_action_destroy = 18150c762e81SMichael Baum ibv_obj_ops.drop_action_destroy; 18165d9f3c3fSMichael Baum #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 18175d9f3c3fSMichael Baum priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify; 18185d9f3c3fSMichael Baum #else 18193ec73abeSMatan Azrad if (config->dv_esw_en) 18205d9f3c3fSMichael Baum priv->obj_ops.txq_obj_modify = 18215d9f3c3fSMichael Baum ibv_obj_ops.txq_obj_modify; 18225d9f3c3fSMichael Baum #endif 18233ec73abeSMatan Azrad /* Use specific wrappers for Tx object. */ 18243ec73abeSMatan Azrad priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new; 18253ec73abeSMatan Azrad priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release; 1826e6988afdSMatan Azrad mlx5_queue_counter_id_prepare(eth_dev); 182723233fd6SBing Zhao priv->obj_ops.lb_dummy_queue_create = 182823233fd6SBing Zhao mlx5_rxq_ibv_obj_dummy_lb_create; 182923233fd6SBing Zhao priv->obj_ops.lb_dummy_queue_release = 183023233fd6SBing Zhao mlx5_rxq_ibv_obj_dummy_lb_release; 18315eaf882eSMichael Baum } else { 18325eaf882eSMichael Baum priv->obj_ops = ibv_obj_ops; 18335eaf882eSMichael Baum } 1834f17e4b4fSViacheslav Ovsiienko if (config->tx_pp && 1835f17e4b4fSViacheslav Ovsiienko (priv->config.dv_esw_en || 1836f17e4b4fSViacheslav Ovsiienko priv->obj_ops.txq_obj_new != mlx5_os_txq_obj_new)) { 1837f17e4b4fSViacheslav Ovsiienko /* 1838f17e4b4fSViacheslav Ovsiienko * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support 1839f17e4b4fSViacheslav Ovsiienko * packet pacing and already checked above. 1840f17e4b4fSViacheslav Ovsiienko * Hence, we should only make sure the SQs will be created 1841f17e4b4fSViacheslav Ovsiienko * with DevX, not with Verbs. 1842f17e4b4fSViacheslav Ovsiienko * Verbs allocates the SQ UAR on its own and it can't be shared 1843f17e4b4fSViacheslav Ovsiienko * with Clock Queue UAR as required for Tx scheduling. 1844f17e4b4fSViacheslav Ovsiienko */ 1845f17e4b4fSViacheslav Ovsiienko DRV_LOG(ERR, "Verbs SQs, UAR can't be shared as required for packet pacing"); 1846f17e4b4fSViacheslav Ovsiienko err = ENODEV; 1847f17e4b4fSViacheslav Ovsiienko goto error; 1848f17e4b4fSViacheslav Ovsiienko } 184965b3cd0dSSuanming Mou priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev); 185065b3cd0dSSuanming Mou if (!priv->drop_queue.hrxq) 185165b3cd0dSSuanming Mou goto error; 18522eb4d010SOphir Munk /* Supported Verbs flow priority number detection. 
*/ 18532eb4d010SOphir Munk err = mlx5_flow_discover_priorities(eth_dev); 18542eb4d010SOphir Munk if (err < 0) { 18552eb4d010SOphir Munk err = -err; 18562eb4d010SOphir Munk goto error; 18572eb4d010SOphir Munk } 18582eb4d010SOphir Munk priv->config.flow_prio = err; 18592eb4d010SOphir Munk if (!priv->config.dv_esw_en && 18602eb4d010SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 18612eb4d010SOphir Munk DRV_LOG(WARNING, "metadata mode %u is not supported " 18622eb4d010SOphir Munk "(no E-Switch)", priv->config.dv_xmeta_en); 18632eb4d010SOphir Munk priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; 18642eb4d010SOphir Munk } 18652eb4d010SOphir Munk mlx5_set_metadata_mask(eth_dev); 18662eb4d010SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 18672eb4d010SOphir Munk !priv->sh->dv_regc0_mask) { 18682eb4d010SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 18692eb4d010SOphir Munk "(no metadata reg_c[0] is available)", 18702eb4d010SOphir Munk priv->config.dv_xmeta_en); 18712eb4d010SOphir Munk err = ENOTSUP; 18722eb4d010SOphir Munk goto error; 18732eb4d010SOphir Munk } 1874d03b7860SSuanming Mou priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true, 1875d03b7860SSuanming Mou mlx5_hrxq_create_cb, 1876e1592b6cSSuanming Mou mlx5_hrxq_match_cb, 1877491b7137SMatan Azrad mlx5_hrxq_remove_cb, 1878491b7137SMatan Azrad mlx5_hrxq_clone_cb, 1879491b7137SMatan Azrad mlx5_hrxq_clone_free_cb); 1880679f46c7SMatan Azrad if (!priv->hrxqs) 1881679f46c7SMatan Azrad goto error; 1882491b7137SMatan Azrad rte_rwlock_init(&priv->ind_tbls_lock); 18832eb4d010SOphir Munk /* Query availability of metadata reg_c's. */ 18842eb4d010SOphir Munk err = mlx5_flow_discover_mreg_c(eth_dev); 18852eb4d010SOphir Munk if (err < 0) { 18862eb4d010SOphir Munk err = -err; 18872eb4d010SOphir Munk goto error; 18882eb4d010SOphir Munk } 18892eb4d010SOphir Munk if (!mlx5_flow_ext_mreg_supported(eth_dev)) { 18902eb4d010SOphir Munk DRV_LOG(DEBUG, 18912eb4d010SOphir Munk "port %u extensive metadata register is not supported", 18922eb4d010SOphir Munk eth_dev->data->port_id); 18932eb4d010SOphir Munk if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 18942eb4d010SOphir Munk DRV_LOG(ERR, "metadata mode %u is not supported " 18952eb4d010SOphir Munk "(no metadata registers available)", 18962eb4d010SOphir Munk priv->config.dv_xmeta_en); 18972eb4d010SOphir Munk err = ENOTSUP; 18982eb4d010SOphir Munk goto error; 18992eb4d010SOphir Munk } 19002eb4d010SOphir Munk } 19012eb4d010SOphir Munk if (priv->config.dv_flow_en && 19022eb4d010SOphir Munk priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 19032eb4d010SOphir Munk mlx5_flow_ext_mreg_supported(eth_dev) && 19042eb4d010SOphir Munk priv->sh->dv_regc0_mask) { 19052eb4d010SOphir Munk priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, 1906e69a5922SXueming Li MLX5_FLOW_MREG_HTABLE_SZ, 1907961b6774SMatan Azrad false, true, eth_dev, 1908f7f73ac1SXueming Li flow_dv_mreg_create_cb, 1909f5b0aed2SSuanming Mou flow_dv_mreg_match_cb, 1910961b6774SMatan Azrad flow_dv_mreg_remove_cb, 1911961b6774SMatan Azrad flow_dv_mreg_clone_cb, 1912961b6774SMatan Azrad flow_dv_mreg_clone_free_cb); 19132eb4d010SOphir Munk if (!priv->mreg_cp_tbl) { 19142eb4d010SOphir Munk err = ENOMEM; 19152eb4d010SOphir Munk goto error; 19162eb4d010SOphir Munk } 19172eb4d010SOphir Munk } 1918cc608e4dSSuanming Mou rte_spinlock_init(&priv->shared_act_sl); 1919994829e6SSuanming Mou mlx5_flow_counter_mode_config(eth_dev); 192045633c46SSuanming Mou mlx5_flow_drop_action_config(eth_dev); 
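/*
 * Editor's sketch (not upstream code): the net effect of the
 * metadata-mode gating above. An extended dv_xmeta_en mode survives
 * only when the E-Switch is enabled, extensive metadata registers
 * were discovered and reg_c[0] exposes a usable mask; otherwise the
 * mode either falls back to MLX5_XMETA_MODE_LEGACY or the spawn fails
 * with ENOTSUP, so the invariant below holds at this point. The
 * MLX5_OS_DOC_SKETCH guard is hypothetical and never defined.
 */
#ifdef MLX5_OS_DOC_SKETCH
	MLX5_ASSERT(priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
		    priv->sh->dv_regc0_mask);
#endif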
19219fbe97f0SXueming Li if (priv->config.dv_flow_en)
19229fbe97f0SXueming Li eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
19232eb4d010SOphir Munk return eth_dev;
19242eb4d010SOphir Munk error:
19252eb4d010SOphir Munk if (priv) {
19262eb4d010SOphir Munk if (priv->mreg_cp_tbl)
1927e69a5922SXueming Li mlx5_hlist_destroy(priv->mreg_cp_tbl);
19282eb4d010SOphir Munk if (priv->sh)
19292eb4d010SOphir Munk mlx5_os_free_shared_dr(priv);
19302eb4d010SOphir Munk if (priv->nl_socket_route >= 0)
19312eb4d010SOphir Munk close(priv->nl_socket_route);
19322eb4d010SOphir Munk if (priv->nl_socket_rdma >= 0)
19332eb4d010SOphir Munk close(priv->nl_socket_rdma);
19342eb4d010SOphir Munk if (priv->vmwa_context)
19352eb4d010SOphir Munk mlx5_vlan_vmwa_exit(priv->vmwa_context);
193665b3cd0dSSuanming Mou if (eth_dev && priv->drop_queue.hrxq)
193765b3cd0dSSuanming Mou mlx5_drop_action_destroy(eth_dev);
1938a295c69aSShun Hao if (priv->mtr_profile_tbl)
1939a295c69aSShun Hao mlx5_l3t_destroy(priv->mtr_profile_tbl);
19402eb4d010SOphir Munk if (own_domain_id)
19412eb4d010SOphir Munk claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1942679f46c7SMatan Azrad if (priv->hrxqs)
1943679f46c7SMatan Azrad mlx5_list_destroy(priv->hrxqs);
19442175c4dcSSuanming Mou mlx5_free(priv);
19452eb4d010SOphir Munk if (eth_dev != NULL)
19462eb4d010SOphir Munk eth_dev->data->dev_private = NULL;
19472eb4d010SOphir Munk }
19482eb4d010SOphir Munk if (eth_dev != NULL) {
19492eb4d010SOphir Munk /* mac_addrs must not be freed alone because it is
19502eb4d010SOphir Munk * part of dev_private.
19512eb4d010SOphir Munk */
19522eb4d010SOphir Munk eth_dev->data->mac_addrs = NULL;
19532eb4d010SOphir Munk rte_eth_dev_release_port(eth_dev);
19542eb4d010SOphir Munk }
19552eb4d010SOphir Munk if (sh)
195691389890SOphir Munk mlx5_free_shared_dev_ctx(sh);
19572eb4d010SOphir Munk MLX5_ASSERT(err > 0);
19582eb4d010SOphir Munk rte_errno = err;
19592eb4d010SOphir Munk return NULL;
19602eb4d010SOphir Munk }
19612eb4d010SOphir Munk
19622eb4d010SOphir Munk /**
19632eb4d010SOphir Munk * Comparison callback to sort device data.
19642eb4d010SOphir Munk *
19652eb4d010SOphir Munk * This is meant to be used with qsort().
19662eb4d010SOphir Munk *
19672eb4d010SOphir Munk * @param a[in]
19682eb4d010SOphir Munk * Pointer to pointer to first data object.
19692eb4d010SOphir Munk * @param b[in]
19702eb4d010SOphir Munk * Pointer to pointer to second data object.
19712eb4d010SOphir Munk *
19722eb4d010SOphir Munk * @return
19732eb4d010SOphir Munk * 0 if both objects are equal, less than 0 if the first argument is less
19742eb4d010SOphir Munk * than the second, greater than 0 otherwise.
19752eb4d010SOphir Munk */
19762eb4d010SOphir Munk static int
19772eb4d010SOphir Munk mlx5_dev_spawn_data_cmp(const void *a, const void *b)
19782eb4d010SOphir Munk {
19792eb4d010SOphir Munk const struct mlx5_switch_info *si_a =
19802eb4d010SOphir Munk &((const struct mlx5_dev_spawn_data *)a)->info;
19812eb4d010SOphir Munk const struct mlx5_switch_info *si_b =
19822eb4d010SOphir Munk &((const struct mlx5_dev_spawn_data *)b)->info;
19832eb4d010SOphir Munk int ret;
19842eb4d010SOphir Munk
19852eb4d010SOphir Munk /* Master device first. */
19862eb4d010SOphir Munk ret = si_b->master - si_a->master;
19872eb4d010SOphir Munk if (ret)
19882eb4d010SOphir Munk return ret;
19892eb4d010SOphir Munk /* Then representor devices.
*/
19902eb4d010SOphir Munk ret = si_b->representor - si_a->representor;
19912eb4d010SOphir Munk if (ret)
19922eb4d010SOphir Munk return ret;
19932eb4d010SOphir Munk /* Unidentified devices come last in no specific order. */
19942eb4d010SOphir Munk if (!si_a->representor)
19952eb4d010SOphir Munk return 0;
19962eb4d010SOphir Munk /* Order representors by name. */
19972eb4d010SOphir Munk return si_a->port_name - si_b->port_name;
19982eb4d010SOphir Munk }
19992eb4d010SOphir Munk
20002eb4d010SOphir Munk /**
20012eb4d010SOphir Munk * Match PCI information for possible slaves of a bonding device.
20022eb4d010SOphir Munk *
20032eb4d010SOphir Munk * @param[in] ibv_dev
20042eb4d010SOphir Munk * Pointer to Infiniband device structure.
20052eb4d010SOphir Munk * @param[in] pci_dev
2006f926cce3SXueming Li * Pointer to primary PCI address structure to match.
20072eb4d010SOphir Munk * @param[in] nl_rdma
20082eb4d010SOphir Munk * Netlink RDMA group socket handle.
2009f926cce3SXueming Li * @param[in] owner
2010f926cce3SXueming Li * Representor owner PF index.
2011f5f4c482SXueming Li * @param[out] bond_info
2012f5f4c482SXueming Li * Pointer to bonding information.
20132eb4d010SOphir Munk *
20142eb4d010SOphir Munk * @return
20152eb4d010SOphir Munk * Negative value if no bonding device is found, otherwise
20162eb4d010SOphir Munk * the positive index of the slave PF in the bonding.
20172eb4d010SOphir Munk */
20182eb4d010SOphir Munk static int
20192eb4d010SOphir Munk mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
2020f926cce3SXueming Li const struct rte_pci_addr *pci_dev,
2021f5f4c482SXueming Li int nl_rdma, uint16_t owner,
2022f5f4c482SXueming Li struct mlx5_bond_info *bond_info)
20232eb4d010SOphir Munk {
20242eb4d010SOphir Munk char ifname[IF_NAMESIZE + 1];
20252eb4d010SOphir Munk unsigned int ifindex;
20262eb4d010SOphir Munk unsigned int np, i;
2027f5f4c482SXueming Li FILE *bond_file = NULL, *file;
20282eb4d010SOphir Munk int pf = -1;
2029f5f4c482SXueming Li int ret;
20302eb4d010SOphir Munk
20312eb4d010SOphir Munk /*
20322eb4d010SOphir Munk * Try to get the master device name. If something goes
20332eb4d010SOphir Munk * wrong, assume there is no kernel support and no
20342eb4d010SOphir Munk * bonding devices.
20352eb4d010SOphir Munk */
2036f5f4c482SXueming Li memset(bond_info, 0, sizeof(*bond_info));
20372eb4d010SOphir Munk if (nl_rdma < 0)
20382eb4d010SOphir Munk return -1;
20392eb4d010SOphir Munk if (!strstr(ibv_dev->name, "bond"))
20402eb4d010SOphir Munk return -1;
20412eb4d010SOphir Munk np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
20422eb4d010SOphir Munk if (!np)
20432eb4d010SOphir Munk return -1;
20442eb4d010SOphir Munk /*
20452eb4d010SOphir Munk * The master device might not be on the predefined
20462eb4d010SOphir Munk * port (not on port index 1, this is not guaranteed),
20472eb4d010SOphir Munk * so we have to scan all Infiniband device ports and
20482eb4d010SOphir Munk * find the master.
20492eb4d010SOphir Munk */
20502eb4d010SOphir Munk for (i = 1; i <= np; ++i) {
20512eb4d010SOphir Munk /* Check whether Infiniband port is populated. */
20522eb4d010SOphir Munk ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
20532eb4d010SOphir Munk if (!ifindex)
20542eb4d010SOphir Munk continue;
20552eb4d010SOphir Munk if (!if_indextoname(ifindex, ifname))
20562eb4d010SOphir Munk continue;
20572eb4d010SOphir Munk /* Try to read bonding slave names from sysfs.
*/ 20582eb4d010SOphir Munk MKSTR(slaves, 20592eb4d010SOphir Munk "/sys/class/net/%s/master/bonding/slaves", ifname); 2060f5f4c482SXueming Li bond_file = fopen(slaves, "r"); 2061f5f4c482SXueming Li if (bond_file) 20622eb4d010SOphir Munk break; 20632eb4d010SOphir Munk } 2064f5f4c482SXueming Li if (!bond_file) 20652eb4d010SOphir Munk return -1; 20662eb4d010SOphir Munk /* Use safe format to check maximal buffer length. */ 20672eb4d010SOphir Munk MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE); 2068f5f4c482SXueming Li while (fscanf(bond_file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) { 20692eb4d010SOphir Munk char tmp_str[IF_NAMESIZE + 32]; 20702eb4d010SOphir Munk struct rte_pci_addr pci_addr; 20712eb4d010SOphir Munk struct mlx5_switch_info info; 20722eb4d010SOphir Munk 20732eb4d010SOphir Munk /* Process slave interface names in the loop. */ 20742eb4d010SOphir Munk snprintf(tmp_str, sizeof(tmp_str), 20752eb4d010SOphir Munk "/sys/class/net/%s", ifname); 20764d567938SThomas Monjalon if (mlx5_get_pci_addr(tmp_str, &pci_addr)) { 20772eb4d010SOphir Munk DRV_LOG(WARNING, "can not get PCI address" 20782eb4d010SOphir Munk " for netdev \"%s\"", ifname); 20792eb4d010SOphir Munk continue; 20802eb4d010SOphir Munk } 20812eb4d010SOphir Munk /* Slave interface PCI address match found. */ 20822eb4d010SOphir Munk snprintf(tmp_str, sizeof(tmp_str), 20832eb4d010SOphir Munk "/sys/class/net/%s/phys_port_name", ifname); 20842eb4d010SOphir Munk file = fopen(tmp_str, "rb"); 20852eb4d010SOphir Munk if (!file) 20862eb4d010SOphir Munk break; 20872eb4d010SOphir Munk info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; 20882eb4d010SOphir Munk if (fscanf(file, "%32s", tmp_str) == 1) 20892eb4d010SOphir Munk mlx5_translate_port_name(tmp_str, &info); 2090f5f4c482SXueming Li fclose(file); 2091f5f4c482SXueming Li /* Only process PF ports. */ 2092f5f4c482SXueming Li if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_LEGACY && 2093f5f4c482SXueming Li info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK) 2094f5f4c482SXueming Li continue; 2095f5f4c482SXueming Li /* Check max bonding member. */ 2096f5f4c482SXueming Li if (info.port_name >= MLX5_BOND_MAX_PORTS) { 2097f5f4c482SXueming Li DRV_LOG(WARNING, "bonding index out of range, " 2098f5f4c482SXueming Li "please increase MLX5_BOND_MAX_PORTS: %s", 2099f5f4c482SXueming Li tmp_str); 21002eb4d010SOphir Munk break; 21012eb4d010SOphir Munk } 2102d31a8971SXueming Li /* Match PCI address, allows BDF0+pfx or BDFx+pfx. */ 2103f5f4c482SXueming Li if (pci_dev->domain == pci_addr.domain && 2104f5f4c482SXueming Li pci_dev->bus == pci_addr.bus && 2105f5f4c482SXueming Li pci_dev->devid == pci_addr.devid && 2106d31a8971SXueming Li ((pci_dev->function == 0 && 2107d31a8971SXueming Li pci_dev->function + owner == pci_addr.function) || 2108d31a8971SXueming Li (pci_dev->function == owner && 2109d31a8971SXueming Li pci_addr.function == owner))) 2110f5f4c482SXueming Li pf = info.port_name; 2111f5f4c482SXueming Li /* Get ifindex. */ 2112f5f4c482SXueming Li snprintf(tmp_str, sizeof(tmp_str), 2113f5f4c482SXueming Li "/sys/class/net/%s/ifindex", ifname); 2114f5f4c482SXueming Li file = fopen(tmp_str, "rb"); 2115f5f4c482SXueming Li if (!file) 2116f5f4c482SXueming Li break; 2117f5f4c482SXueming Li ret = fscanf(file, "%u", &ifindex); 21182eb4d010SOphir Munk fclose(file); 2119f5f4c482SXueming Li if (ret != 1) 2120f5f4c482SXueming Li break; 2121f5f4c482SXueming Li /* Save bonding info. 
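*/
/*
 * Editor's sketch (not upstream code): a worked instance of the slave
 * match above, which accepts both the BDF0+pfx and BDFx+pfx layouts.
 * Probing 0000:08:00.0 with owner index 1 accepts slave function 1
 * (function 0 + owner), and probing 0000:08:00.1 directly accepts it
 * as well (both functions equal the owner). The MLX5_OS_DOC_SKETCH
 * guard and the doc_* names are hypothetical.
 */
#ifdef MLX5_OS_DOC_SKETCH
	unsigned int doc_owner = 1, doc_probe_fn = 0, doc_slave_fn = 1;
	int doc_match = (doc_probe_fn == 0 &&
			 doc_probe_fn + doc_owner == doc_slave_fn) ||
			(doc_probe_fn == doc_owner &&
			 doc_slave_fn == doc_owner);
	RTE_SET_USED(doc_match); /* == 1 for the first layout */
#endif
/* Saving the bonding info: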
*/ 2122f5f4c482SXueming Li strncpy(bond_info->ports[info.port_name].ifname, ifname, 2123f5f4c482SXueming Li sizeof(bond_info->ports[0].ifname)); 2124f5f4c482SXueming Li bond_info->ports[info.port_name].pci_addr = pci_addr; 2125f5f4c482SXueming Li bond_info->ports[info.port_name].ifindex = ifindex; 2126f5f4c482SXueming Li bond_info->n_port++; 2127f5f4c482SXueming Li } 2128f5f4c482SXueming Li if (pf >= 0) { 2129f5f4c482SXueming Li /* Get bond interface info */ 2130f5f4c482SXueming Li ret = mlx5_sysfs_bond_info(ifindex, &bond_info->ifindex, 2131f5f4c482SXueming Li bond_info->ifname); 2132f5f4c482SXueming Li if (ret) 2133f5f4c482SXueming Li DRV_LOG(ERR, "unable to get bond info: %s", 2134f5f4c482SXueming Li strerror(rte_errno)); 2135f5f4c482SXueming Li else 2136f5f4c482SXueming Li DRV_LOG(INFO, "PF device %u, bond device %u(%s)", 2137f5f4c482SXueming Li ifindex, bond_info->ifindex, bond_info->ifname); 2138f5f4c482SXueming Li } 21392eb4d010SOphir Munk return pf; 21402eb4d010SOphir Munk } 21412eb4d010SOphir Munk 2142919488fbSXueming Li static void 2143919488fbSXueming Li mlx5_os_config_default(struct mlx5_dev_config *config) 2144919488fbSXueming Li { 2145919488fbSXueming Li memset(config, 0, sizeof(*config)); 2146919488fbSXueming Li config->mps = MLX5_ARG_UNSET; 2147919488fbSXueming Li config->dbnc = MLX5_ARG_UNSET; 2148919488fbSXueming Li config->rx_vec_en = 1; 2149919488fbSXueming Li config->txq_inline_max = MLX5_ARG_UNSET; 2150919488fbSXueming Li config->txq_inline_min = MLX5_ARG_UNSET; 2151919488fbSXueming Li config->txq_inline_mpw = MLX5_ARG_UNSET; 2152919488fbSXueming Li config->txqs_inline = MLX5_ARG_UNSET; 2153919488fbSXueming Li config->vf_nl_en = 1; 2154919488fbSXueming Li config->mr_ext_memseg_en = 1; 2155fec28ca0SDmitry Kozlyuk config->mr_mempool_reg_en = 1; 2156919488fbSXueming Li config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; 2157919488fbSXueming Li config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; 2158919488fbSXueming Li config->dv_esw_en = 1; 2159919488fbSXueming Li config->dv_flow_en = 1; 2160919488fbSXueming Li config->decap_en = 1; 2161919488fbSXueming Li config->log_hp_size = MLX5_ARG_UNSET; 216297c9b0aaSMichael Baum config->allow_duplicate_pattern = 1; 2163919488fbSXueming Li } 2164919488fbSXueming Li 21652eb4d010SOphir Munk /** 216608c2772fSXueming Li * Register a PCI device within bonding. 21672eb4d010SOphir Munk * 216808c2772fSXueming Li * This function spawns Ethernet devices out of a given PCI device and 216908c2772fSXueming Li * bonding owner PF index. 21702eb4d010SOphir Munk * 21712eb4d010SOphir Munk * @param[in] pci_dev 21722eb4d010SOphir Munk * PCI device information. 217308c2772fSXueming Li * @param[in] req_eth_da 217408c2772fSXueming Li * Requested ethdev device argument. 217508c2772fSXueming Li * @param[in] owner_id 217608c2772fSXueming Li * Requested owner PF port ID within bonding device, default to 0. 21772eb4d010SOphir Munk * 21782eb4d010SOphir Munk * @return 21792eb4d010SOphir Munk * 0 on success, a negative errno value otherwise and rte_errno is set. 21802eb4d010SOphir Munk */ 218108c2772fSXueming Li static int 218208c2772fSXueming Li mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev, 218308c2772fSXueming Li struct rte_eth_devargs *req_eth_da, 218408c2772fSXueming Li uint16_t owner_id) 21852eb4d010SOphir Munk { 21862eb4d010SOphir Munk struct ibv_device **ibv_list; 21872eb4d010SOphir Munk /* 21882eb4d010SOphir Munk * Number of found IB Devices matching with requested PCI BDF. 
21892eb4d010SOphir Munk * nd != 1 means there are multiple IB devices over the same 21902eb4d010SOphir Munk * PCI device and we have representors and master. 21912eb4d010SOphir Munk */ 21922eb4d010SOphir Munk unsigned int nd = 0; 21932eb4d010SOphir Munk /* 21942eb4d010SOphir Munk * Number of found IB device Ports. nd = 1 and np = 1..n means 21952eb4d010SOphir Munk * we have the single multiport IB device, and there may be 21962eb4d010SOphir Munk * representors attached to some of found ports. 21972eb4d010SOphir Munk */ 21982eb4d010SOphir Munk unsigned int np = 0; 21992eb4d010SOphir Munk /* 22002eb4d010SOphir Munk * Number of DPDK ethernet devices to Spawn - either over 22012eb4d010SOphir Munk * multiple IB devices or multiple ports of single IB device. 22022eb4d010SOphir Munk * Actually this is the number of iterations to spawn. 22032eb4d010SOphir Munk */ 22042eb4d010SOphir Munk unsigned int ns = 0; 22052eb4d010SOphir Munk /* 22062eb4d010SOphir Munk * Bonding device 22072eb4d010SOphir Munk * < 0 - no bonding device (single one) 22082eb4d010SOphir Munk * >= 0 - bonding device (value is slave PF index) 22092eb4d010SOphir Munk */ 22102eb4d010SOphir Munk int bd = -1; 22112eb4d010SOphir Munk struct mlx5_dev_spawn_data *list = NULL; 22122eb4d010SOphir Munk struct mlx5_dev_config dev_config; 2213d462a83cSMichael Baum unsigned int dev_config_vf; 221408c2772fSXueming Li struct rte_eth_devargs eth_da = *req_eth_da; 2215f926cce3SXueming Li struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */ 2216f5f4c482SXueming Li struct mlx5_bond_info bond_info; 2217f926cce3SXueming Li int ret = -1; 22182eb4d010SOphir Munk 22192eb4d010SOphir Munk errno = 0; 22202eb4d010SOphir Munk ibv_list = mlx5_glue->get_device_list(&ret); 22212eb4d010SOphir Munk if (!ibv_list) { 22222eb4d010SOphir Munk rte_errno = errno ? errno : ENOSYS; 22232eb4d010SOphir Munk DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 22242eb4d010SOphir Munk return -rte_errno; 22252eb4d010SOphir Munk } 22262eb4d010SOphir Munk /* 22272eb4d010SOphir Munk * First scan the list of all Infiniband devices to find 22282eb4d010SOphir Munk * matching ones, gathering into the list. 22292eb4d010SOphir Munk */ 22302eb4d010SOphir Munk struct ibv_device *ibv_match[ret + 1]; 22312eb4d010SOphir Munk int nl_route = mlx5_nl_init(NETLINK_ROUTE); 22322eb4d010SOphir Munk int nl_rdma = mlx5_nl_init(NETLINK_RDMA); 22332eb4d010SOphir Munk unsigned int i; 22342eb4d010SOphir Munk 22352eb4d010SOphir Munk while (ret-- > 0) { 22362eb4d010SOphir Munk struct rte_pci_addr pci_addr; 22372eb4d010SOphir Munk 22382eb4d010SOphir Munk DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); 22392eb4d010SOphir Munk bd = mlx5_device_bond_pci_match 2240f5f4c482SXueming Li (ibv_list[ret], &owner_pci, nl_rdma, owner_id, 2241f5f4c482SXueming Li &bond_info); 22422eb4d010SOphir Munk if (bd >= 0) { 22432eb4d010SOphir Munk /* 22442eb4d010SOphir Munk * Bonding device detected. Only one match is allowed, 22452eb4d010SOphir Munk * the bonding is supported over multi-port IB device, 22462eb4d010SOphir Munk * there should be no matches on representor PCI 22472eb4d010SOphir Munk * functions or non VF LAG bonding devices with 22482eb4d010SOphir Munk * specified address. 
			 */
			if (nd) {
				DRV_LOG(ERR,
					"multiple PCI match on bonding device"
					" \"%s\" found", ibv_list[ret]->name);
				rte_errno = ENOENT;
				ret = -rte_errno;
				goto exit;
			}
			/* Amend owner pci address if owner PF ID specified. */
			if (eth_da.nb_representor_ports)
				owner_pci.function += owner_id;
			DRV_LOG(INFO, "PCI information matches for"
				" slave %d bonding device \"%s\"",
				bd, ibv_list[ret]->name);
			ibv_match[nd++] = ibv_list[ret];
			break;
		} else {
			/* Bonding device not found. */
			if (mlx5_get_pci_addr(ibv_list[ret]->ibdev_path,
					      &pci_addr))
				continue;
			if (owner_pci.domain != pci_addr.domain ||
			    owner_pci.bus != pci_addr.bus ||
			    owner_pci.devid != pci_addr.devid ||
			    owner_pci.function != pci_addr.function)
				continue;
			DRV_LOG(INFO, "PCI information matches for device \"%s\"",
				ibv_list[ret]->name);
			ibv_match[nd++] = ibv_list[ret];
		}
	}
	ibv_match[nd] = NULL;
	if (!nd) {
		/* No device matches, just complain and bail out. */
		DRV_LOG(WARNING,
			"no Verbs device matches PCI device " PCI_PRI_FMT ","
			" are kernel drivers loaded?",
			owner_pci.domain, owner_pci.bus,
			owner_pci.devid, owner_pci.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
		goto exit;
	}
	if (nd == 1) {
		/*
		 * The found single matching device may have multiple ports.
		 * Each port may be a representor, so we have to check the
		 * port number and the representors' existence.
		 */
		if (nl_rdma >= 0)
			np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
		if (!np)
			DRV_LOG(WARNING, "cannot get IB device \"%s\""
				" ports number", ibv_match[0]->name);
		if (bd >= 0 && !np) {
			DRV_LOG(ERR, "cannot get ports"
				" for bonding device");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	}
	/*
	 * Now we can determine the maximal
	 * amount of devices to be spawned.
	 */
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(struct mlx5_dev_spawn_data) *
			   (np ? np : nd),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!list) {
		DRV_LOG(ERR, "spawn data array allocation failure");
		rte_errno = ENOMEM;
		ret = -rte_errno;
		goto exit;
	}
	if (bd >= 0 || np > 1) {
		/*
		 * Single IB device with multiple ports found,
		 * it may be E-Switch master device and representors.
		 * We have to perform identification through the ports.
		 */
		MLX5_ASSERT(nl_rdma >= 0);
		MLX5_ASSERT(ns == 0);
		MLX5_ASSERT(nd == 1);
		MLX5_ASSERT(np);
		for (i = 1; i <= np; ++i) {
			list[ns].bond_info = &bond_info;
			list[ns].max_port = np;
			list[ns].phys_port = i;
			list[ns].phys_dev = ibv_match[0];
			list[ns].eth_dev = NULL;
			list[ns].pci_dev = pci_dev;
			list[ns].pf_bond = bd;
			list[ns].ifindex = mlx5_nl_ifindex
				(nl_rdma,
				 mlx5_os_get_dev_device_name
						(list[ns].phys_dev), i);
			if (!list[ns].ifindex) {
				/*
				 * No network interface index found for the
				 * specified port, it means there is no
				 * representor on this port. It's OK,
				 * there can be disabled ports, for example
				 * if sriov_numvfs < sriov_totalvfs.
				 */
				continue;
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info
					(nl_route,
					 list[ns].ifindex,
					 &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
					(list[ns].ifindex,
					 &list[ns].info);
			}
			if (!ret && bd >= 0) {
				switch (list[ns].info.name_type) {
				case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
					if (np == 1) {
						/*
						 * Force standalone bonding
						 * device for ROCE LAG
						 * configurations.
23839f430dd7SViacheslav Ovsiienko */ 23849f430dd7SViacheslav Ovsiienko list[ns].info.master = 0; 23859f430dd7SViacheslav Ovsiienko list[ns].info.representor = 0; 23869f430dd7SViacheslav Ovsiienko } 23872eb4d010SOphir Munk if (list[ns].info.port_name == bd) 23882eb4d010SOphir Munk ns++; 23892eb4d010SOphir Munk break; 2390420bbdaeSViacheslav Ovsiienko case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: 2391420bbdaeSViacheslav Ovsiienko /* Fallthrough */ 23922eb4d010SOphir Munk case MLX5_PHYS_PORT_NAME_TYPE_PFVF: 2393cb95feefSXueming Li /* Fallthrough */ 2394cb95feefSXueming Li case MLX5_PHYS_PORT_NAME_TYPE_PFSF: 23952eb4d010SOphir Munk if (list[ns].info.pf_num == bd) 23962eb4d010SOphir Munk ns++; 23972eb4d010SOphir Munk break; 23982eb4d010SOphir Munk default: 23992eb4d010SOphir Munk break; 24002eb4d010SOphir Munk } 24012eb4d010SOphir Munk continue; 24022eb4d010SOphir Munk } 24032eb4d010SOphir Munk if (!ret && (list[ns].info.representor ^ 24042eb4d010SOphir Munk list[ns].info.master)) 24052eb4d010SOphir Munk ns++; 24062eb4d010SOphir Munk } 24072eb4d010SOphir Munk if (!ns) { 24082eb4d010SOphir Munk DRV_LOG(ERR, 24092eb4d010SOphir Munk "unable to recognize master/representors" 24102eb4d010SOphir Munk " on the IB device with multiple ports"); 24112eb4d010SOphir Munk rte_errno = ENOENT; 24122eb4d010SOphir Munk ret = -rte_errno; 24132eb4d010SOphir Munk goto exit; 24142eb4d010SOphir Munk } 24152eb4d010SOphir Munk } else { 24162eb4d010SOphir Munk /* 24172eb4d010SOphir Munk * The existence of several matching entries (nd > 1) means 24182eb4d010SOphir Munk * port representors have been instantiated. No existing Verbs 24192eb4d010SOphir Munk * call nor sysfs entries can tell them apart, this can only 24202eb4d010SOphir Munk * be done through Netlink calls assuming kernel drivers are 24212eb4d010SOphir Munk * recent enough to support them. 24222eb4d010SOphir Munk * 24232eb4d010SOphir Munk * In the event of identification failure through Netlink, 24242eb4d010SOphir Munk * try again through sysfs, then: 24252eb4d010SOphir Munk * 24262eb4d010SOphir Munk * 1. A single IB device matches (nd == 1) with single 24272eb4d010SOphir Munk * port (np=0/1) and is not a representor, assume 24282eb4d010SOphir Munk * no switch support. 24292eb4d010SOphir Munk * 24302eb4d010SOphir Munk * 2. Otherwise no safe assumptions can be made; 24312eb4d010SOphir Munk * complain louder and bail out. 24322eb4d010SOphir Munk */ 24332eb4d010SOphir Munk for (i = 0; i != nd; ++i) { 24342eb4d010SOphir Munk memset(&list[ns].info, 0, sizeof(list[ns].info)); 2435f5f4c482SXueming Li list[ns].bond_info = NULL; 24362eb4d010SOphir Munk list[ns].max_port = 1; 2437834a9019SOphir Munk list[ns].phys_port = 1; 2438834a9019SOphir Munk list[ns].phys_dev = ibv_match[i]; 24392eb4d010SOphir Munk list[ns].eth_dev = NULL; 24402eb4d010SOphir Munk list[ns].pci_dev = pci_dev; 24412eb4d010SOphir Munk list[ns].pf_bond = -1; 24422eb4d010SOphir Munk list[ns].ifindex = 0; 24432eb4d010SOphir Munk if (nl_rdma >= 0) 24442eb4d010SOphir Munk list[ns].ifindex = mlx5_nl_ifindex 2445834a9019SOphir Munk (nl_rdma, 2446834a9019SOphir Munk mlx5_os_get_dev_device_name 2447834a9019SOphir Munk (list[ns].phys_dev), 1); 24482eb4d010SOphir Munk if (!list[ns].ifindex) { 24492eb4d010SOphir Munk char ifname[IF_NAMESIZE]; 24502eb4d010SOphir Munk 24512eb4d010SOphir Munk /* 24522eb4d010SOphir Munk * Netlink failed, it may happen with old 24532eb4d010SOphir Munk * ib_core kernel driver (before 4.16). 
				 * We can assume there is an old driver
				 * because here we are processing single-port
				 * IB devices. Let's try sysfs to retrieve
				 * the ifindex. The method works for
				 * master device only.
				 */
				if (nd > 1) {
					/*
					 * Multiple devices found, assume
					 * representors, cannot distinguish
					 * master/representor and retrieve
					 * ifindex via sysfs.
					 */
					continue;
				}
				ret = mlx5_get_ifname_sysfs
					(ibv_match[i]->ibdev_path, ifname);
				if (!ret)
					list[ns].ifindex =
						if_nametoindex(ifname);
				if (!list[ns].ifindex) {
					/*
					 * No network interface index found
					 * for the specified device, it means
					 * the device is neither a representor
					 * nor a master.
					 */
					continue;
				}
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info
					(nl_route,
					 list[ns].ifindex,
					 &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
					(list[ns].ifindex,
					 &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master)) {
				ns++;
			} else if ((nd == 1) &&
				   !list[ns].info.representor &&
				   !list[ns].info.master) {
				/*
				 * Single IB device with
				 * one physical port and
				 * attached network device.
				 * Maybe SR-IOV is not enabled
				 * or there are no representors.
				 */
				DRV_LOG(INFO, "no E-Switch support detected");
				ns++;
				break;
			}
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the multiple IB devices");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
		/*
		 * New kernels may add the switch_id attribute for the case
		 * there is no E-Switch and we wrongly recognized the
		 * only device as master. Override this if there is the
		 * single device with single port and new device name
		 * format present.
		 */
		if (nd == 1 &&
		    list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
			list[0].info.master = 0;
			list[0].info.representor = 0;
		}
	}
	MLX5_ASSERT(ns);
	/*
	 * Sort list to probe devices in natural order for users' convenience
	 * (i.e. master first, then representors from lowest to highest ID).
	 */
	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
		dev_config_vf = 1;
		break;
	default:
		dev_config_vf = 0;
		break;
	}
	if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
		/* Set devargs default values. */
		if (eth_da.nb_mh_controllers == 0) {
			eth_da.nb_mh_controllers = 1;
			eth_da.mh_controllers[0] = 0;
		}
		if (eth_da.nb_ports == 0 && ns > 0) {
			if (list[0].pf_bond >= 0 && list[0].info.representor)
				DRV_LOG(WARNING, "Representor on Bonding device should use pf#vf# syntax: %s",
					pci_dev->device.devargs->args);
			eth_da.nb_ports = 1;
			eth_da.ports[0] = list[0].info.pf_num;
		}
		if (eth_da.nb_representor_ports == 0) {
			eth_da.nb_representor_ports = 1;
			eth_da.representor_ports[0] = 0;
		}
	}
	for (i = 0; i != ns; ++i) {
		uint32_t restore;

		/* Default configuration. */
		mlx5_os_config_default(&dev_config);
		dev_config.vf = dev_config_vf;
		list[i].numa_node = pci_dev->device.numa_node;
		list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
						 &list[i],
						 &dev_config,
						 &eth_da);
		if (!list[i].eth_dev) {
			if (rte_errno != EBUSY && rte_errno != EEXIST)
				break;
			/* Device is disabled or already spawned. Ignore it. */
			continue;
		}
		restore = list[i].eth_dev->data->dev_flags;
		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
		/*
		 * Each representor has a dedicated interrupt vector.
		 * rte_eth_copy_pci_info() assigns the PF interrupt handle to
		 * the representor eth_dev object because representor and PF
		 * share the same PCI address.
		 * Override the representor device with a dedicated
		 * interrupt handle here.
		 * The representor interrupt handle is released in
		 * mlx5_dev_stop().
		 */
		if (list[i].info.representor) {
			struct rte_intr_handle *intr_handle;

			intr_handle = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
						  sizeof(*intr_handle), 0,
						  SOCKET_ID_ANY);
			if (!intr_handle) {
				DRV_LOG(ERR,
					"port %u failed to allocate memory for interrupt handler. "
					"Rx interrupts will not be supported",
					i);
				rte_errno = ENOMEM;
				ret = -rte_errno;
				goto exit;
			}
			list[i].eth_dev->intr_handle = intr_handle;
		}
		/* Restore non-PCI flags cleared by the above call. */
		list[i].eth_dev->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(list[i].eth_dev);
	}
	if (i != ns) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			owner_pci.domain, owner_pci.bus,
			owner_pci.devid, owner_pci.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			if (!list[i].eth_dev)
				continue;
			mlx5_dev_close(list[i].eth_dev);
			/* mac_addrs must not be freed because it is part of dev_private. */
			list[i].eth_dev->data->mac_addrs = NULL;
			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
exit:
	/*
	 * Do the routine cleanup:
	 * - close opened Netlink sockets
	 * - free allocated spawn data array
	 * - free the Infiniband device list
	 */
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	if (list)
		mlx5_free(list);
	MLX5_ASSERT(ibv_list);
	mlx5_glue->free_device_list(ibv_list);
	return ret;
}

static int
mlx5_os_parse_eth_devargs(struct rte_device *dev,
			  struct rte_eth_devargs *eth_da)
{
	int ret = 0;

	if (dev->devargs == NULL)
		return 0;
	memset(eth_da, 0, sizeof(*eth_da));
	/* Parse representor information first from class argument. */
	if (dev->devargs->cls_str)
		ret = rte_eth_devargs_parse(dev->devargs->cls_str, eth_da);
	if (ret != 0) {
		DRV_LOG(ERR, "failed to parse device arguments: %s",
			dev->devargs->cls_str);
		return -rte_errno;
	}
	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) {
		/* Parse legacy device argument. */
		ret = rte_eth_devargs_parse(dev->devargs->args, eth_da);
		if (ret) {
			DRV_LOG(ERR, "failed to parse device arguments: %s",
				dev->devargs->args);
			return -rte_errno;
		}
	}
	return 0;
}
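
/*
 * Illustrative sketch (not part of the driver): what the parser above
 * receives in practice.  The class string form ("class=eth:...") is tried
 * first; the legacy form in dev->devargs->args is only consulted when the
 * class string named no representors.  The guard macro and helper name are
 * hypothetical; the two-argument rte_eth_devargs_parse() matches the calls
 * above.
 */
#ifdef MLX5_OS_DEVARGS_EXAMPLE /* illustration only, never compiled */
static void
example_parse_representor_devargs(void)
{
	struct rte_eth_devargs eth_da;

	memset(&eth_da, 0, sizeof(eth_da));
	/* "pf[0-1]vf[0-3]" requests VF representors 0-3 on PF 0 and PF 1. */
	if (rte_eth_devargs_parse("representor=pf[0-1]vf[0-3]", &eth_da) == 0)
		DRV_LOG(DEBUG, "parsed %u PF(s), %u representor port(s)",
			eth_da.nb_ports, eth_da.nb_representor_ports);
}
#endif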
/**
 * Callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given PCI device.
 *
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_os_pci_probe(struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_ports = 0 };
	int ret = 0;
	uint16_t p;

	ret = mlx5_os_parse_eth_devargs(&pci_dev->device, &eth_da);
	if (ret != 0)
		return ret;

	if (eth_da.nb_ports > 0) {
		/* Iterate all ports if the devargs PF is a range: "pf[0-1]vf[...]". */
		for (p = 0; p < eth_da.nb_ports; p++) {
			ret = mlx5_os_pci_probe_pf(pci_dev, &eth_da,
						   eth_da.ports[p]);
			if (ret)
				break;
		}
		if (ret) {
			DRV_LOG(ERR, "Probe of PCI device " PCI_PRI_FMT " "
				"aborted due to probing failure of PF %u",
				pci_dev->addr.domain, pci_dev->addr.bus,
				pci_dev->addr.devid, pci_dev->addr.function,
				eth_da.ports[p]);
			mlx5_net_remove(&pci_dev->device);
		}
	} else {
		ret = mlx5_os_pci_probe_pf(pci_dev, &eth_da, 0);
	}
	return ret;
}

/* Probe a single SF device on auxiliary bus, no representor support. */
static int
mlx5_os_auxiliary_probe(struct rte_device *dev)
{
	struct rte_eth_devargs eth_da = { .nb_ports = 0 };
	struct mlx5_dev_config config;
	struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
	struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
	struct rte_eth_dev *eth_dev;
	int ret = 0;

	/* Parse ethdev devargs. */
	ret = mlx5_os_parse_eth_devargs(dev, &eth_da);
	if (ret != 0)
		return ret;
	/* Set default config data. */
	mlx5_os_config_default(&config);
	config.sf = 1;
	/* Init spawn data. */
	spawn.max_port = 1;
	spawn.phys_port = 1;
	spawn.phys_dev = mlx5_os_get_ibv_dev(dev);
	if (spawn.phys_dev == NULL)
		return -rte_errno;
	ret = mlx5_auxiliary_get_ifindex(dev->name);
	if (ret < 0) {
		DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
		return ret;
	}
	spawn.ifindex = ret;
	spawn.numa_node = dev->numa_node;
	/* Spawn device. */
	eth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);
	if (eth_dev == NULL)
		return -rte_errno;
	/* Post create. */
	eth_dev->intr_handle = &adev->intr_handle;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
		eth_dev->data->numa_node = dev->numa_node;
	}
	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
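
/*
 * Illustrative sketch (not part of the driver): the "shared defaults, then
 * bus-specific override" pattern used by both probe paths above.  PCI
 * probing flips dev_config.vf per PCI device ID, while the auxiliary (SF)
 * path sets config.sf = 1; the guard macro and helper name below are
 * hypothetical.
 */
#ifdef MLX5_OS_CONFIG_EXAMPLE /* illustration only, never compiled */
static void
example_config_for_sf(struct mlx5_dev_config *config)
{
	mlx5_os_config_default(config);	/* Defaults common to every path. */
	config->sf = 1;			/* Auxiliary-bus (SF) override. */
}
#endif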
/**
 * Net class driver callback to probe a device.
 *
 * This function probes PCI bus device(s) or a single SF on the auxiliary bus.
 *
 * @param[in] dev
 *   Pointer to the generic device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_net_probe(struct rte_device *dev)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mlx5_pmd_socket_init();
	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		return -rte_errno;
	}
	if (mlx5_dev_is_pci(dev))
		return mlx5_os_pci_probe(RTE_DEV_TO_PCI(dev));
	else
		return mlx5_os_auxiliary_probe(dev);
}

static int
mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
{
	char *env;
	int value;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Get environment variable to store. */
	env = getenv(MLX5_SHUT_UP_BF);
	value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
	if (config->dbnc == MLX5_ARG_UNSET)
		setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
	else
		setenv(MLX5_SHUT_UP_BF,
		       config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
	return value;
}

static void
mlx5_restore_doorbell_mapping_env(int value)
{
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Restore the original environment variable state. */
	if (value == MLX5_ARG_UNSET)
		unsetenv(MLX5_SHUT_UP_BF);
	else
		setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
}
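
/*
 * Illustrative sketch (not part of the driver): the save/override/restore
 * environment pattern implemented above for MLX5_SHUT_UP_BF, shown with
 * plain POSIX calls.  rdma-core samples the variable once at device
 * creation, so the override only has to live across that single call.
 * The guard macro and helper name are hypothetical.
 */
#ifdef MLX5_OS_ENV_EXAMPLE /* illustration only, never compiled */
static void
example_env_override(const char *name, const char *tmp_value)
{
	const char *cur = getenv(name);
	char *old = cur ? strdup(cur) : NULL;	/* Copy before overwriting. */

	setenv(name, tmp_value, 1);
	/* ... call the code that samples the variable here ... */
	if (old) {
		setenv(name, old, 1);		/* Put the old value back. */
		free(old);
	} else {
		unsetenv(name);			/* It was not set before. */
	}
}
#endif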
28552eb4d010SOphir Munk */ 28562eb4d010SOphir Munk int 28572eb4d010SOphir Munk mlx5_os_get_pdn(void *pd, uint32_t *pdn) 28582eb4d010SOphir Munk { 28592eb4d010SOphir Munk #ifdef HAVE_IBV_FLOW_DV_SUPPORT 28602eb4d010SOphir Munk struct mlx5dv_obj obj; 28612eb4d010SOphir Munk struct mlx5dv_pd pd_info; 28622eb4d010SOphir Munk int ret = 0; 28632eb4d010SOphir Munk 28642eb4d010SOphir Munk obj.pd.in = pd; 28652eb4d010SOphir Munk obj.pd.out = &pd_info; 28662eb4d010SOphir Munk ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); 28672eb4d010SOphir Munk if (ret) { 28682eb4d010SOphir Munk DRV_LOG(DEBUG, "Fail to get PD object info"); 28692eb4d010SOphir Munk return ret; 28702eb4d010SOphir Munk } 28712eb4d010SOphir Munk *pdn = pd_info.pdn; 28722eb4d010SOphir Munk return 0; 28732eb4d010SOphir Munk #else 28742eb4d010SOphir Munk (void)pd; 28752eb4d010SOphir Munk (void)pdn; 28762eb4d010SOphir Munk return -ENOTSUP; 28772eb4d010SOphir Munk #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ 28782eb4d010SOphir Munk } 28792eb4d010SOphir Munk 28802eb4d010SOphir Munk /** 28812eb4d010SOphir Munk * Function API to open IB device. 28822eb4d010SOphir Munk * 28832eb4d010SOphir Munk * This function calls the Linux glue APIs to open a device. 28842eb4d010SOphir Munk * 28852eb4d010SOphir Munk * @param[in] spawn 28862eb4d010SOphir Munk * Pointer to the IB device attributes (name, port, etc). 28872eb4d010SOphir Munk * @param[out] config 28882eb4d010SOphir Munk * Pointer to device configuration structure. 28892eb4d010SOphir Munk * @param[out] sh 28902eb4d010SOphir Munk * Pointer to shared context structure. 28912eb4d010SOphir Munk * 28922eb4d010SOphir Munk * @return 28932eb4d010SOphir Munk * 0 on success, a positive error value otherwise. 28942eb4d010SOphir Munk */ 28952eb4d010SOphir Munk int 28962eb4d010SOphir Munk mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, 28972eb4d010SOphir Munk const struct mlx5_dev_config *config, 28982eb4d010SOphir Munk struct mlx5_dev_ctx_shared *sh) 28992eb4d010SOphir Munk { 29002eb4d010SOphir Munk int dbmap_env; 29012eb4d010SOphir Munk int err = 0; 2902d133f4cdSViacheslav Ovsiienko 2903d133f4cdSViacheslav Ovsiienko pthread_mutex_init(&sh->txpp.mutex, NULL); 29042eb4d010SOphir Munk /* 29052eb4d010SOphir Munk * Configure environment variable "MLX5_BF_SHUT_UP" 29062eb4d010SOphir Munk * before the device creation. The rdma_core library 29072eb4d010SOphir Munk * checks the variable at device creation and 29082eb4d010SOphir Munk * stores the result internally. 29092eb4d010SOphir Munk */ 29102eb4d010SOphir Munk dbmap_env = mlx5_config_doorbell_mapping_env(config); 29112eb4d010SOphir Munk /* Try to open IB device with DV first, then usual Verbs. */ 29122eb4d010SOphir Munk errno = 0; 2913834a9019SOphir Munk sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev); 29142eb4d010SOphir Munk if (sh->ctx) { 29152eb4d010SOphir Munk sh->devx = 1; 29162eb4d010SOphir Munk DRV_LOG(DEBUG, "DevX is supported"); 29172eb4d010SOphir Munk /* The device is created, no need for environment. */ 29182eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(dbmap_env); 29192eb4d010SOphir Munk } else { 29202eb4d010SOphir Munk /* The environment variable is still configured. */ 2921834a9019SOphir Munk sh->ctx = mlx5_glue->open_device(spawn->phys_dev); 29222eb4d010SOphir Munk err = errno ? errno : ENODEV; 29232eb4d010SOphir Munk /* 29242eb4d010SOphir Munk * The environment variable is not needed anymore, 29252eb4d010SOphir Munk * all device creation attempts are completed. 
29262eb4d010SOphir Munk */ 29272eb4d010SOphir Munk mlx5_restore_doorbell_mapping_env(dbmap_env); 29282eb4d010SOphir Munk if (!sh->ctx) 29292eb4d010SOphir Munk return err; 29302eb4d010SOphir Munk DRV_LOG(DEBUG, "DevX is NOT supported"); 29312eb4d010SOphir Munk err = 0; 29322eb4d010SOphir Munk } 293381c3b977SViacheslav Ovsiienko if (!err && sh->ctx) { 293481c3b977SViacheslav Ovsiienko /* Hint libmlx5 to use PMD allocator for data plane resources */ 293581c3b977SViacheslav Ovsiienko mlx5_glue->dv_set_context_attr(sh->ctx, 293681c3b977SViacheslav Ovsiienko MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 293781c3b977SViacheslav Ovsiienko (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){ 293881c3b977SViacheslav Ovsiienko .alloc = &mlx5_alloc_verbs_buf, 293981c3b977SViacheslav Ovsiienko .free = &mlx5_free_verbs_buf, 294081c3b977SViacheslav Ovsiienko .data = sh, 294181c3b977SViacheslav Ovsiienko })); 294281c3b977SViacheslav Ovsiienko } 29432eb4d010SOphir Munk return err; 29442eb4d010SOphir Munk } 29452eb4d010SOphir Munk 29462eb4d010SOphir Munk /** 29472eb4d010SOphir Munk * Install shared asynchronous device events handler. 29482eb4d010SOphir Munk * This function is implemented to support event sharing 29492eb4d010SOphir Munk * between multiple ports of single IB device. 29502eb4d010SOphir Munk * 29512eb4d010SOphir Munk * @param sh 29522eb4d010SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 29532eb4d010SOphir Munk */ 29542eb4d010SOphir Munk void 29552eb4d010SOphir Munk mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 29562eb4d010SOphir Munk { 29572eb4d010SOphir Munk int ret; 29582eb4d010SOphir Munk int flags; 29592eb4d010SOphir Munk 29602eb4d010SOphir Munk sh->intr_handle.fd = -1; 29612eb4d010SOphir Munk flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL); 29622eb4d010SOphir Munk ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd, 29632eb4d010SOphir Munk F_SETFL, flags | O_NONBLOCK); 29642eb4d010SOphir Munk if (ret) { 29652eb4d010SOphir Munk DRV_LOG(INFO, "failed to change file descriptor async event" 29662eb4d010SOphir Munk " queue"); 29672eb4d010SOphir Munk } else { 29682eb4d010SOphir Munk sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd; 29692eb4d010SOphir Munk sh->intr_handle.type = RTE_INTR_HANDLE_EXT; 29702eb4d010SOphir Munk if (rte_intr_callback_register(&sh->intr_handle, 29712eb4d010SOphir Munk mlx5_dev_interrupt_handler, sh)) { 29722eb4d010SOphir Munk DRV_LOG(INFO, "Fail to install the shared interrupt."); 29732eb4d010SOphir Munk sh->intr_handle.fd = -1; 29742eb4d010SOphir Munk } 29752eb4d010SOphir Munk } 29762eb4d010SOphir Munk if (sh->devx) { 29772eb4d010SOphir Munk #ifdef HAVE_IBV_DEVX_ASYNC 29782eb4d010SOphir Munk sh->intr_handle_devx.fd = -1; 297921b7c452SOphir Munk sh->devx_comp = 298021b7c452SOphir Munk (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx); 298121b7c452SOphir Munk struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp; 298221b7c452SOphir Munk if (!devx_comp) { 29832eb4d010SOphir Munk DRV_LOG(INFO, "failed to allocate devx_comp."); 29842eb4d010SOphir Munk return; 29852eb4d010SOphir Munk } 298621b7c452SOphir Munk flags = fcntl(devx_comp->fd, F_GETFL); 298721b7c452SOphir Munk ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK); 29882eb4d010SOphir Munk if (ret) { 29892eb4d010SOphir Munk DRV_LOG(INFO, "failed to change file descriptor" 29902eb4d010SOphir Munk " devx comp"); 29912eb4d010SOphir Munk return; 29922eb4d010SOphir Munk } 299321b7c452SOphir Munk sh->intr_handle_devx.fd = devx_comp->fd; 29942eb4d010SOphir Munk 
sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
		if (rte_intr_callback_register(&sh->intr_handle_devx,
					       mlx5_dev_interrupt_handler_devx,
					       sh)) {
			DRV_LOG(INFO, "Failed to install the devx shared"
				" interrupt.");
			sh->intr_handle_devx.fd = -1;
		}
#endif /* HAVE_IBV_DEVX_ASYNC */
	}
}

/**
 * Uninstall shared asynchronous device events handler.
 * This function is implemented to support event sharing
 * between multiple ports of single IB device.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->intr_handle.fd >= 0)
		mlx5_intr_callback_unregister(&sh->intr_handle,
					      mlx5_dev_interrupt_handler, sh);
#ifdef HAVE_IBV_DEVX_ASYNC
	if (sh->intr_handle_devx.fd >= 0)
		rte_intr_callback_unregister(&sh->intr_handle_devx,
				  mlx5_dev_interrupt_handler_devx, sh);
	if (sh->devx_comp)
		mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
#endif
}
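
/*
 * Illustrative sketch (not part of the driver): the F_GETFL/F_SETFL
 * sequence used twice in the installer above, factored into one helper
 * (the guard macro and helper name are hypothetical).  Unlike the inline
 * code, it also checks the F_GETFL result before OR-ing in O_NONBLOCK.
 */
#ifdef MLX5_OS_NONBLOCK_EXAMPLE /* illustration only, never compiled */
static int
example_set_fd_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return -errno;	/* Could not read the current flags. */
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
		return -errno;	/* Could not update the flags. */
	return 0;
}
#endif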
/**
 * Read statistics by a named counter.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 * @param[in] ctr_name
 *   Pointer to the name of the statistic counter to read.
 * @param[out] stat
 *   Pointer to read statistic value.
 * @return
 *   0 on success and stat is valid, 1 if failed to read the value,
 *   rte_errno is set.
 *
 */
int
mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
		      uint64_t *stat)
{
	int fd;

	if (priv->sh) {
		if (priv->q_counters != NULL &&
		    strcmp(ctr_name, "out_of_buffer") == 0)
			return mlx5_devx_cmd_queue_counter_query
					(priv->q_counters, 0, (uint32_t *)stat);
		MKSTR(path, "%s/ports/%d/hw_counters/%s",
		      priv->sh->ibdev_path,
		      priv->dev_port,
		      ctr_name);
		fd = open(path, O_RDONLY);
		/*
		 * In switchdev the file location is not per port
		 * but rather in <ibdev_path>/hw_counters/<file_name>.
		 */
		if (fd == -1) {
			MKSTR(path1, "%s/hw_counters/%s",
			      priv->sh->ibdev_path,
			      ctr_name);
			fd = open(path1, O_RDONLY);
		}
		if (fd != -1) {
			char buf[21] = {'\0'};
			ssize_t n = read(fd, buf, sizeof(buf));

			close(fd);
			if (n != -1) {
				*stat = strtoull(buf, NULL, 10);
				return 0;
			}
		}
	}
	*stat = 0;
	return 1;
}

/**
 * Set the reg_mr and dereg_mr callbacks.
 *
 * @param[out] reg_mr_cb
 *   Pointer to reg_mr func
 * @param[out] dereg_mr_cb
 *   Pointer to dereg_mr func
 *
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
		      mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = mlx5_mr_verbs_ops.reg_mr;
	*dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr;
}

/**
 * Remove a MAC address from device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int vf = priv->config.vf;

	if (vf)
		mlx5_nl_mac_addr_remove(priv->nl_socket_route,
					mlx5_ifindex(dev), priv->mac_own,
					&dev->data->mac_addrs[index], index);
}
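
/*
 * Illustrative sketch (not part of the driver): the open/read/strtoull
 * sequence from mlx5_os_read_dev_stat() above as a standalone helper for
 * one sysfs-style counter file (the guard macro and helper name are
 * hypothetical).  The 21-byte buffer holds a maximal 64-bit decimal value;
 * reading at most sizeof(buf) - 1 bytes keeps the terminating NUL intact.
 */
#ifdef MLX5_OS_SYSFS_EXAMPLE /* illustration only, never compiled */
static int
example_read_u64_file(const char *path, uint64_t *value)
{
	char buf[21] = {'\0'};
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd == -1)
		return -1;	/* Counter file is not present. */
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n == -1)
		return -1;	/* Read failed, value is unchanged. */
	*value = strtoull(buf, NULL, 10);
	return 0;
}
#endif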
/**
 * Adds a MAC address to the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int vf = priv->config.vf;
	int ret = 0;

	if (vf)
		ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
					   mlx5_ifindex(dev), priv->mac_own,
					   mac, index);
	return ret;
}

/**
 * Modify a VF MAC address.
 *
 * @param priv
 *   Pointer to device private data.
 * @param iface_idx
 *   Net device interface index.
 * @param mac_addr
 *   MAC address to modify into.
 * @param vf_index
 *   VF index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	return mlx5_nl_vf_mac_addr_modify
		(priv->nl_socket_route, iface_idx, mac_addr, vf_index);
}

/**
 * Set device promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_nl_promisc(priv->nl_socket_route,
			       mlx5_ifindex(dev), !!enable);
}
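
/*
 * Illustrative sketch (not part of the driver): how the two Netlink-backed
 * toggles around this point are reached from an application.  The generic
 * ethdev calls below resolve through the PMD dev_ops and, on Linux VF
 * setups, may end in mlx5_os_set_promisc()/mlx5_os_set_allmulti().  The
 * guard macro and helper name are hypothetical.
 */
#ifdef MLX5_OS_MODE_EXAMPLE /* illustration only, never compiled */
static void
example_toggle_rx_modes(uint16_t port_id)
{
	rte_eth_promiscuous_enable(port_id);	/* Accept all unicast. */
	rte_eth_allmulticast_enable(port_id);	/* Accept all multicast. */
}
#endif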
/**
 * Set device allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_nl_allmulti(priv->nl_socket_route,
				mlx5_ifindex(dev), !!enable);
}

/**
 * Flush device MAC addresses.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 */
void
mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
			       dev->data->mac_addrs,
			       MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
}
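
/*
 * Illustrative sketch (not part of the driver): driving the MAC helpers in
 * this file from the generic ethdev layer.  rte_eth_dev_mac_addr_add()
 * resolves to the PMD's mac_addr_add dev_op, which on a Linux VF reaches
 * the Netlink-backed mlx5_os_mac_addr_add() above.  The port ID, address,
 * guard macro, and helper name are hypothetical.
 */
#ifdef MLX5_OS_MAC_EXAMPLE /* illustration only, never compiled */
static int
example_add_secondary_mac(uint16_t port_id)
{
	struct rte_ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* Pool 0; the ethdev layer picks a free MAC index for the PMD. */
	return rte_eth_dev_mac_addr_add(port_id, &mac, 0);
}
#endif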