18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2771fa900SAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4771fa900SAdrien Mazarguil */ 5771fa900SAdrien Mazarguil 6771fa900SAdrien Mazarguil #include <stddef.h> 7771fa900SAdrien Mazarguil #include <unistd.h> 8771fa900SAdrien Mazarguil #include <string.h> 9771fa900SAdrien Mazarguil #include <stdint.h> 10771fa900SAdrien Mazarguil #include <stdlib.h> 11e72dd09bSNélio Laranjeiro #include <errno.h> 12771fa900SAdrien Mazarguil #include <net/if.h> 134a984153SXueming Li #include <sys/mman.h> 14ccdcba53SNélio Laranjeiro #include <linux/rtnetlink.h> 15771fa900SAdrien Mazarguil 16771fa900SAdrien Mazarguil #include <rte_malloc.h> 17ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 18fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 19771fa900SAdrien Mazarguil #include <rte_pci.h> 20c752998bSGaetan Rivet #include <rte_bus_pci.h> 21771fa900SAdrien Mazarguil #include <rte_common.h> 22e72dd09bSNélio Laranjeiro #include <rte_kvargs.h> 23e89c15b6SAdrien Mazarguil #include <rte_rwlock.h> 24e89c15b6SAdrien Mazarguil #include <rte_spinlock.h> 25f38c5457SAdrien Mazarguil #include <rte_string_fns.h> 26f15db67dSMatan Azrad #include <rte_alarm.h> 27771fa900SAdrien Mazarguil 287b4f1e6bSMatan Azrad #include <mlx5_glue.h> 297b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h> 3093e30982SMatan Azrad #include <mlx5_common.h> 31391b8bccSOphir Munk #include <mlx5_common_os.h> 32a4de9586SVu Pham #include <mlx5_common_mp.h> 3383c2047cSSuanming Mou #include <mlx5_malloc.h> 347b4f1e6bSMatan Azrad 357b4f1e6bSMatan Azrad #include "mlx5_defs.h" 36771fa900SAdrien Mazarguil #include "mlx5.h" 37771fa900SAdrien Mazarguil #include "mlx5_utils.h" 382e22920bSAdrien Mazarguil #include "mlx5_rxtx.h" 39771fa900SAdrien Mazarguil #include "mlx5_autoconf.h" 40974f1e7eSYongseok Koh #include "mlx5_mr.h" 4184c406e7SOri Kam #include "mlx5_flow.h" 42efa79e68SOri Kam #include "rte_pmd_mlx5.h" 
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored.*/
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to force doorbell register mapping
 * to non-cahed region eliminating the extra write memory barrier.
 */
#define MLX5_TX_DB_NC "tx_db_nc"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify skew in nanoseconds on Tx datapath,
 * it represents the time between SQ start WQE processing and
 * appearing actual packet data on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the lacp traffic of bonded device */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* The default memory allocator used in PMD. */
#define MLX5_SYS_MEM_EN "sys_mem_en"
/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/* List of all shared device contexts, protected by the mutex below. */
static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
						LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Per-type configuration templates for the indexed memory pools
 * created in mlx5_flow_ipool_create(). Array order must match the
 * MLX5_IPOOL_* enumeration (NOTE(review): assumed from usage - the
 * enum itself is declared elsewhere; verify against mlx5.h).
 */
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	{
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	{
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	{
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	{
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	{
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
#endif
	{
		.size = sizeof(struct mlx5_flow_meter),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	{
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	{
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	{
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It set in run time according to PCI function configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	{
		/* The only pool accessed concurrently: needs its lock. */
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
};


#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096

/**
 * Allocate ID pool structure.
 *
 * @param[in] max_id
 *   The maximum id can be allocated from the pool.
 *
 * @return
 *   Pointer to pool object, NULL value otherwise.
320830d2091SOri Kam */ 321830d2091SOri Kam struct mlx5_flow_id_pool * 32230a3687dSSuanming Mou mlx5_flow_id_pool_alloc(uint32_t max_id) 323830d2091SOri Kam { 324830d2091SOri Kam struct mlx5_flow_id_pool *pool; 325830d2091SOri Kam void *mem; 326830d2091SOri Kam 32783c2047cSSuanming Mou pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 32883c2047cSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 329830d2091SOri Kam if (!pool) { 330830d2091SOri Kam DRV_LOG(ERR, "can't allocate id pool"); 331830d2091SOri Kam rte_errno = ENOMEM; 332830d2091SOri Kam return NULL; 333830d2091SOri Kam } 33483c2047cSSuanming Mou mem = mlx5_malloc(MLX5_MEM_ZERO, 33583c2047cSSuanming Mou MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), 33683c2047cSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 337830d2091SOri Kam if (!mem) { 338830d2091SOri Kam DRV_LOG(ERR, "can't allocate mem for id pool"); 339830d2091SOri Kam rte_errno = ENOMEM; 340830d2091SOri Kam goto error; 341830d2091SOri Kam } 342830d2091SOri Kam pool->free_arr = mem; 343830d2091SOri Kam pool->curr = pool->free_arr; 344830d2091SOri Kam pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; 345830d2091SOri Kam pool->base_index = 0; 34630a3687dSSuanming Mou pool->max_id = max_id; 347830d2091SOri Kam return pool; 348830d2091SOri Kam error: 34983c2047cSSuanming Mou mlx5_free(pool); 350830d2091SOri Kam return NULL; 351830d2091SOri Kam } 352830d2091SOri Kam 353830d2091SOri Kam /** 354830d2091SOri Kam * Release ID pool structure. 355830d2091SOri Kam * 356830d2091SOri Kam * @param[in] pool 357830d2091SOri Kam * Pointer to flow id pool object to free. 358830d2091SOri Kam */ 359830d2091SOri Kam void 360830d2091SOri Kam mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) 361830d2091SOri Kam { 36283c2047cSSuanming Mou mlx5_free(pool->free_arr); 36383c2047cSSuanming Mou mlx5_free(pool); 364830d2091SOri Kam } 365830d2091SOri Kam 366830d2091SOri Kam /** 367830d2091SOri Kam * Generate ID. 
368830d2091SOri Kam * 369830d2091SOri Kam * @param[in] pool 370830d2091SOri Kam * Pointer to flow id pool. 371830d2091SOri Kam * @param[out] id 372830d2091SOri Kam * The generated ID. 373830d2091SOri Kam * 374830d2091SOri Kam * @return 375830d2091SOri Kam * 0 on success, error value otherwise. 376830d2091SOri Kam */ 377830d2091SOri Kam uint32_t 378830d2091SOri Kam mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) 379830d2091SOri Kam { 380830d2091SOri Kam if (pool->curr == pool->free_arr) { 38130a3687dSSuanming Mou if (pool->base_index == pool->max_id) { 382830d2091SOri Kam rte_errno = ENOMEM; 383830d2091SOri Kam DRV_LOG(ERR, "no free id"); 384830d2091SOri Kam return -rte_errno; 385830d2091SOri Kam } 386830d2091SOri Kam *id = ++pool->base_index; 387830d2091SOri Kam return 0; 388830d2091SOri Kam } 389830d2091SOri Kam *id = *(--pool->curr); 390830d2091SOri Kam return 0; 391830d2091SOri Kam } 392830d2091SOri Kam 393830d2091SOri Kam /** 394830d2091SOri Kam * Release ID. 395830d2091SOri Kam * 396830d2091SOri Kam * @param[in] pool 397830d2091SOri Kam * Pointer to flow id pool. 398830d2091SOri Kam * @param[out] id 399830d2091SOri Kam * The generated ID. 400830d2091SOri Kam * 401830d2091SOri Kam * @return 402830d2091SOri Kam * 0 on success, error value otherwise. 
 */
uint32_t
mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
{
	uint32_t size;
	uint32_t size2;
	void *mem;

	/*
	 * Free array is full - grow it by MLX5_ID_GENERATION_ARRAY_FACTOR
	 * before caching this ID for reuse.
	 */
	if (pool->curr == pool->last) {
		size = pool->curr - pool->free_arr;
		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
		/* Multiplication overflow is only caught in debug builds. */
		MLX5_ASSERT(size2 > size);
		mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
				  SOCKET_ID_ANY);
		if (!mem) {
			DRV_LOG(ERR, "can't allocate mem for id pool");
			rte_errno  = ENOMEM;
			return -rte_errno;
		}
		/* Carry the already cached IDs over to the new array. */
		memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
		mlx5_free(pool->free_arr);
		pool->free_arr = mem;
		pool->curr = pool->free_arr + size;
		pool->last = pool->free_arr + size2;
	}
	/* Push the released ID on the recycling stack. */
	*pool->curr = id;
	pool->curr++;
	return 0;
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	/* Reset the aging state of every port sharing this context. */
	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to initialize.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i;

	/* Start from a clean slate; every container gets empty lists. */
	memset(&sh->cmng, 0, sizeof(sh->cmng));
	TAILQ_INIT(&sh->cmng.flow_counters);
	for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
		sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
		sh->cmng.ccont[i].max_id = -1;
		sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
		rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
		TAILQ_INIT(&sh->cmng.ccont[i].counters);
		rte_spinlock_init(&sh->cmng.ccont[i].csl);
	}
}

/**
 * Destroy all the resources allocated for a counter
memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	/* Unlink, then release DevX mkey, umem registration and memory. */
	LIST_REMOVE(mng, next);
	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i;
	int j;
	int retries = 1024;

	/*
	 * Cancel any pending counter-query alarm; retry (bounded) while
	 * the callback is still executing (EINPROGRESS). NOTE(review):
	 * if all 1024 retries are exhausted the loop proceeds anyway.
	 */
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
		struct mlx5_flow_counter_pool *pool;
		/*
		 * NOTE(review): containers with index > 1 appear to hold
		 * batch-allocated counters - confirm against the ccont
		 * type enumeration.
		 */
		uint32_t batch = !!(i > 1);

		if (!sh->cmng.ccont[i].pools)
			continue;
		pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		while (pool) {
			/* Batch pools own a single min_dcs DevX object. */
			if (batch && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
					  (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				if (MLX5_POOL_GET_CNT(pool, j)->action)
					claim_zero
					 (mlx5_glue->destroy_flow_action
					  (MLX5_POOL_GET_CNT
					   (pool, j)->action));
				/* Non-batch counters own per-counter dcs. */
				if (!batch && MLX5_GET_POOL_CNT_EXT
				    (pool, j)->dcs)
					claim_zero(mlx5_devx_cmd_destroy
						  (MLX5_GET_POOL_CNT_EXT
						   (pool, j)->dcs));
			}
			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
			mlx5_free(pool);
			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		}
		mlx5_free(sh->cmng.ccont[i].pools);
	}
	/* Finally drain the statistics memory management list. */
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param[in] config
 *   Pointer to user dev config.
560014d1cbeSSuanming Mou */ 561014d1cbeSSuanming Mou static void 5626e88bc42SOphir Munk mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh, 5635c761238SGregory Etelson const struct mlx5_dev_config *config) 564014d1cbeSSuanming Mou { 565014d1cbeSSuanming Mou uint8_t i; 5665c761238SGregory Etelson struct mlx5_indexed_pool_config cfg; 567014d1cbeSSuanming Mou 568a1da6f62SSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) { 5695c761238SGregory Etelson cfg = mlx5_ipool_cfg[i]; 5705c761238SGregory Etelson switch (i) { 5715c761238SGregory Etelson default: 5725c761238SGregory Etelson break; 5735c761238SGregory Etelson /* 5745c761238SGregory Etelson * Set MLX5_IPOOL_MLX5_FLOW ipool size 5755c761238SGregory Etelson * according to PCI function flow configuration. 5765c761238SGregory Etelson */ 5775c761238SGregory Etelson case MLX5_IPOOL_MLX5_FLOW: 5785c761238SGregory Etelson cfg.size = config->dv_flow_en ? 5795c761238SGregory Etelson sizeof(struct mlx5_flow_handle) : 5805c761238SGregory Etelson MLX5_FLOW_HANDLE_VERBS_SIZE; 5815c761238SGregory Etelson break; 5825c761238SGregory Etelson } 583a1da6f62SSuanming Mou if (config->reclaim_mode) 5845c761238SGregory Etelson cfg.release_mem_en = 1; 5855c761238SGregory Etelson sh->ipool[i] = mlx5_ipool_create(&cfg); 586014d1cbeSSuanming Mou } 587a1da6f62SSuanming Mou } 588014d1cbeSSuanming Mou 589014d1cbeSSuanming Mou /** 590014d1cbeSSuanming Mou * Release the flow resources' indexed mempool. 591014d1cbeSSuanming Mou * 592014d1cbeSSuanming Mou * @param[in] sh 5936e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object. 
594014d1cbeSSuanming Mou */ 595014d1cbeSSuanming Mou static void 5966e88bc42SOphir Munk mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh) 597014d1cbeSSuanming Mou { 598014d1cbeSSuanming Mou uint8_t i; 599014d1cbeSSuanming Mou 600014d1cbeSSuanming Mou for (i = 0; i < MLX5_IPOOL_MAX; ++i) 601014d1cbeSSuanming Mou mlx5_ipool_destroy(sh->ipool[i]); 602014d1cbeSSuanming Mou } 603014d1cbeSSuanming Mou 604daa38a89SBing Zhao /* 605daa38a89SBing Zhao * Check if dynamic flex parser for eCPRI already exists. 606daa38a89SBing Zhao * 607daa38a89SBing Zhao * @param dev 608daa38a89SBing Zhao * Pointer to Ethernet device structure. 609daa38a89SBing Zhao * 610daa38a89SBing Zhao * @return 611daa38a89SBing Zhao * true on exists, false on not. 612daa38a89SBing Zhao */ 613daa38a89SBing Zhao bool 614daa38a89SBing Zhao mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev) 615daa38a89SBing Zhao { 616daa38a89SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 617daa38a89SBing Zhao struct mlx5_flex_parser_profiles *prf = 618daa38a89SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 619daa38a89SBing Zhao 620daa38a89SBing Zhao return !!prf->obj; 621daa38a89SBing Zhao } 622daa38a89SBing Zhao 623daa38a89SBing Zhao /* 624daa38a89SBing Zhao * Allocation of a flex parser for eCPRI. Once created, this parser related 625daa38a89SBing Zhao * resources will be held until the device is closed. 626daa38a89SBing Zhao * 627daa38a89SBing Zhao * @param dev 628daa38a89SBing Zhao * Pointer to Ethernet device structure. 629daa38a89SBing Zhao * 630daa38a89SBing Zhao * @return 631daa38a89SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 
632daa38a89SBing Zhao */ 633daa38a89SBing Zhao int 634daa38a89SBing Zhao mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev) 635daa38a89SBing Zhao { 636daa38a89SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 637daa38a89SBing Zhao struct mlx5_flex_parser_profiles *prf = 638daa38a89SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 6391c506404SBing Zhao struct mlx5_devx_graph_node_attr node = { 6401c506404SBing Zhao .modify_field_select = 0, 6411c506404SBing Zhao }; 6421c506404SBing Zhao uint32_t ids[8]; 6431c506404SBing Zhao int ret; 644daa38a89SBing Zhao 645d7c49561SBing Zhao if (!priv->config.hca_attr.parse_graph_flex_node) { 646d7c49561SBing Zhao DRV_LOG(ERR, "Dynamic flex parser is not supported " 647d7c49561SBing Zhao "for device %s.", priv->dev_data->name); 648d7c49561SBing Zhao return -ENOTSUP; 649d7c49561SBing Zhao } 6501c506404SBing Zhao node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED; 6511c506404SBing Zhao /* 8 bytes now: 4B common header + 4B message body header. */ 6521c506404SBing Zhao node.header_length_base_value = 0x8; 6531c506404SBing Zhao /* After MAC layer: Ether / VLAN. */ 6541c506404SBing Zhao node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC; 6551c506404SBing Zhao /* Type of compared condition should be 0xAEFE in the L2 layer. */ 6561c506404SBing Zhao node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI; 6571c506404SBing Zhao /* Sample #0: type in common header. */ 6581c506404SBing Zhao node.sample[0].flow_match_sample_en = 1; 6591c506404SBing Zhao /* Fixed offset. */ 6601c506404SBing Zhao node.sample[0].flow_match_sample_offset_mode = 0x0; 6611c506404SBing Zhao /* Only the 2nd byte will be used. */ 6621c506404SBing Zhao node.sample[0].flow_match_sample_field_base_offset = 0x0; 6631c506404SBing Zhao /* Sample #1: message payload. */ 6641c506404SBing Zhao node.sample[1].flow_match_sample_en = 1; 6651c506404SBing Zhao /* Fixed offset. 
*/ 6661c506404SBing Zhao node.sample[1].flow_match_sample_offset_mode = 0x0; 6671c506404SBing Zhao /* 6681c506404SBing Zhao * Only the first two bytes will be used right now, and its offset will 6691c506404SBing Zhao * start after the common header that with the length of a DW(u32). 6701c506404SBing Zhao */ 6711c506404SBing Zhao node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t); 6721c506404SBing Zhao prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node); 6731c506404SBing Zhao if (!prf->obj) { 6741c506404SBing Zhao DRV_LOG(ERR, "Failed to create flex parser node object."); 6751c506404SBing Zhao return (rte_errno == 0) ? -ENODEV : -rte_errno; 6761c506404SBing Zhao } 6771c506404SBing Zhao prf->num = 2; 6781c506404SBing Zhao ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num); 6791c506404SBing Zhao if (ret) { 6801c506404SBing Zhao DRV_LOG(ERR, "Failed to query sample IDs."); 6811c506404SBing Zhao return (rte_errno == 0) ? -ENODEV : -rte_errno; 6821c506404SBing Zhao } 6831c506404SBing Zhao prf->offset[0] = 0x0; 6841c506404SBing Zhao prf->offset[1] = sizeof(uint32_t); 6851c506404SBing Zhao prf->ids[0] = ids[0]; 6861c506404SBing Zhao prf->ids[1] = ids[1]; 687daa38a89SBing Zhao return 0; 688daa38a89SBing Zhao } 689daa38a89SBing Zhao 6901c506404SBing Zhao /* 6911c506404SBing Zhao * Destroy the flex parser node, including the parser itself, input / output 6921c506404SBing Zhao * arcs and DW samples. Resources could be reused then. 6931c506404SBing Zhao * 6941c506404SBing Zhao * @param dev 6951c506404SBing Zhao * Pointer to Ethernet device structure. 
6961c506404SBing Zhao */ 6971c506404SBing Zhao static void 6981c506404SBing Zhao mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev) 6991c506404SBing Zhao { 7001c506404SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 7011c506404SBing Zhao struct mlx5_flex_parser_profiles *prf = 7021c506404SBing Zhao &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0]; 7031c506404SBing Zhao 7041c506404SBing Zhao if (prf->obj) 7051c506404SBing Zhao mlx5_devx_cmd_destroy(prf->obj); 7061c506404SBing Zhao prf->obj = NULL; 7071c506404SBing Zhao } 7081c506404SBing Zhao 709014d1cbeSSuanming Mou /** 71091389890SOphir Munk * Allocate shared device context. If there is multiport device the 71117e19bc4SViacheslav Ovsiienko * master and representors will share this context, if there is single 71291389890SOphir Munk * port dedicated device, the context will be used by only given 71317e19bc4SViacheslav Ovsiienko * port due to unification. 71417e19bc4SViacheslav Ovsiienko * 71591389890SOphir Munk * Routine first searches the context for the specified device name, 71617e19bc4SViacheslav Ovsiienko * if found the shared context assumed and reference counter is incremented. 71717e19bc4SViacheslav Ovsiienko * If no context found the new one is created and initialized with specified 71891389890SOphir Munk * device context and parameters. 71917e19bc4SViacheslav Ovsiienko * 72017e19bc4SViacheslav Ovsiienko * @param[in] spawn 72191389890SOphir Munk * Pointer to the device attributes (name, port, etc). 7228409a285SViacheslav Ovsiienko * @param[in] config 7238409a285SViacheslav Ovsiienko * Pointer to device configuration structure. 72417e19bc4SViacheslav Ovsiienko * 72517e19bc4SViacheslav Ovsiienko * @return 7266e88bc42SOphir Munk * Pointer to mlx5_dev_ctx_shared object on success, 72717e19bc4SViacheslav Ovsiienko * otherwise NULL and rte_errno is set. 
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  const struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name,
			    mlx5_os_get_dev_device_name(spawn->phys_dev))) {
			/* Found: reuse and bump the reference counter. */
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	/* One mlx5_dev_shared_port trails the context per port. */
	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno  = ENOMEM;
		goto exit;
	}
	/*
	 * Failure is indicated by a NULL sh->ctx; err carries the value
	 * returned by mlx5_os_open_device(). NOTE(review): the error path
	 * asserts err > 0 — assumes the OS layer sets a positive errno.
	 */
	err = mlx5_os_open_device(spawn, config, sh);
	if (!sh->ctx)
		goto error;
	err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	/* sh was zeroed at allocation, so both names stay NUL-terminated. */
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to max unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
	}
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	/* DevX-only objects: TD, TIS and the Tx/Rx UARs. */
	if (sh->devx) {
		err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
		if (err) {
			DRV_LOG(ERR, "Fail to extract pdn from PD");
			goto error;
		}
		sh->td = mlx5_devx_cmd_create_td(sh->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			err = ENOMEM;
			goto error;
		}
		tis_attr.transport_domain = sh->td->id;
		sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
		if (!sh->tis) {
			DRV_LOG(ERR, "TIS allocation failure");
			err = ENOMEM;
			goto error;
		}
		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
		if (!sh->tx_uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR.");
			err = ENOMEM;
			goto error;
		}
		sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
		if (!sh->devx_rx_uar) {
			DRV_LOG(ERR, "Failed to allocate Rx DevX UAR.");
			err = ENOMEM;
			goto error;
		}
	}
	sh->flow_id_pool = mlx5_flow_id_pool_alloc
					((1 << HAIRPIN_FLOW_ID_BITS) - 1);
	if (!sh->flow_id_pool) {
		DRV_LOG(ERR, "can't create flow id pool");
		err = ENOMEM;
		goto error;
	}
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&sh->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&sh->uar_lock[i]);
#endif
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->share_cache.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 spawn->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
			      &sh->share_cache.dereg_mr_cb);
	mlx5_os_dev_shared_handler_install(sh);
	sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
	if (!sh->cnt_id_tbl) {
		err = rte_errno;
		goto error;
	}
	mlx5_flow_aging_init(sh);
	mlx5_flow_counters_mng_init(sh);
	mlx5_flow_ipool_create(sh, config);
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	return sh;
error:
	/*
	 * NOTE(review): txpp.mutex is presumably initialized inside
	 * mlx5_os_open_device() — confirm before reordering this teardown.
	 */
	pthread_mutex_destroy(&sh->txpp.mutex);
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	MLX5_ASSERT(sh);
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->devx_rx_uar)
		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	if (sh->flow_id_pool)
		mlx5_flow_id_pool_release(sh->flow_id_pool);
	mlx5_free(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */
	struct mlx5_dev_ctx_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
		if (lctx == sh)
			break;
	MLX5_ASSERT(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	MLX5_ASSERT(sh);
	MLX5_ASSERT(sh->refcnt);
	/* Secondary process should not free the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Still referenced by another port: only drop the counter. */
	if (--sh->refcnt)
		goto exit;
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Release created Memory Regions. */
	mlx5_mr_release_cache(&sh->share_cache);
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 **/
	mlx5_flow_counters_mng_close(sh);
	mlx5_flow_ipool_destroy(sh);
	mlx5_os_dev_shared_handler_uninstall(sh);
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	/*
	 * NOTE(review): the PD is deallocated before the TIS/TD objects here,
	 * which is the reverse of the allocation error path — confirm these
	 * DevX objects do not depend on the PD being alive.
	 */
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->devx_rx_uar)
		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	if (sh->flow_id_pool)
		mlx5_flow_id_pool_release(sh->flow_id_pool);
	pthread_mutex_destroy(&sh->txpp.mutex);
	mlx5_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}

/**
97554534725SMatan Azrad * Destroy table hash list and all the root entries per domain. 97654534725SMatan Azrad * 97754534725SMatan Azrad * @param[in] priv 97854534725SMatan Azrad * Pointer to the private device data structure. 97954534725SMatan Azrad */ 9802eb4d010SOphir Munk void 98154534725SMatan Azrad mlx5_free_table_hash_list(struct mlx5_priv *priv) 98254534725SMatan Azrad { 9836e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 98454534725SMatan Azrad struct mlx5_flow_tbl_data_entry *tbl_data; 98554534725SMatan Azrad union mlx5_flow_tbl_key table_key = { 98654534725SMatan Azrad { 98754534725SMatan Azrad .table_id = 0, 98854534725SMatan Azrad .reserved = 0, 98954534725SMatan Azrad .domain = 0, 99054534725SMatan Azrad .direction = 0, 99154534725SMatan Azrad } 99254534725SMatan Azrad }; 99354534725SMatan Azrad struct mlx5_hlist_entry *pos; 99454534725SMatan Azrad 99554534725SMatan Azrad if (!sh->flow_tbls) 99654534725SMatan Azrad return; 99754534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 99854534725SMatan Azrad if (pos) { 99954534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 100054534725SMatan Azrad entry); 10018e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 100254534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 100383c2047cSSuanming Mou mlx5_free(tbl_data); 100454534725SMatan Azrad } 100554534725SMatan Azrad table_key.direction = 1; 100654534725SMatan Azrad pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 100754534725SMatan Azrad if (pos) { 100854534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 100954534725SMatan Azrad entry); 10108e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 101154534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 101283c2047cSSuanming Mou mlx5_free(tbl_data); 101354534725SMatan Azrad } 101454534725SMatan Azrad table_key.direction = 0; 101554534725SMatan Azrad table_key.domain = 1; 101654534725SMatan Azrad pos = 
mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); 101754534725SMatan Azrad if (pos) { 101854534725SMatan Azrad tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, 101954534725SMatan Azrad entry); 10208e46d4e1SAlexander Kozyrev MLX5_ASSERT(tbl_data); 102154534725SMatan Azrad mlx5_hlist_remove(sh->flow_tbls, pos); 102283c2047cSSuanming Mou mlx5_free(tbl_data); 102354534725SMatan Azrad } 102454534725SMatan Azrad mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL); 102554534725SMatan Azrad } 102654534725SMatan Azrad 102754534725SMatan Azrad /** 102854534725SMatan Azrad * Initialize flow table hash list and create the root tables entry 102954534725SMatan Azrad * for each domain. 103054534725SMatan Azrad * 103154534725SMatan Azrad * @param[in] priv 103254534725SMatan Azrad * Pointer to the private device data structure. 103354534725SMatan Azrad * 103454534725SMatan Azrad * @return 103554534725SMatan Azrad * Zero on success, positive error code otherwise. 103654534725SMatan Azrad */ 10372eb4d010SOphir Munk int 103854534725SMatan Azrad mlx5_alloc_table_hash_list(struct mlx5_priv *priv) 103954534725SMatan Azrad { 10406e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 104154534725SMatan Azrad char s[MLX5_HLIST_NAMESIZE]; 104254534725SMatan Azrad int err = 0; 104354534725SMatan Azrad 10448e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 104554534725SMatan Azrad snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); 104654534725SMatan Azrad sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE); 104754534725SMatan Azrad if (!sh->flow_tbls) { 104863783b01SDavid Marchand DRV_LOG(ERR, "flow tables with hash creation failed."); 104954534725SMatan Azrad err = ENOMEM; 105054534725SMatan Azrad return err; 105154534725SMatan Azrad } 105254534725SMatan Azrad #ifndef HAVE_MLX5DV_DR 105354534725SMatan Azrad /* 105454534725SMatan Azrad * In case we have not DR support, the zero tables should be created 105554534725SMatan Azrad * because DV expect to see 
them even if they cannot be created by 105654534725SMatan Azrad * RDMA-CORE. 105754534725SMatan Azrad */ 105854534725SMatan Azrad union mlx5_flow_tbl_key table_key = { 105954534725SMatan Azrad { 106054534725SMatan Azrad .table_id = 0, 106154534725SMatan Azrad .reserved = 0, 106254534725SMatan Azrad .domain = 0, 106354534725SMatan Azrad .direction = 0, 106454534725SMatan Azrad } 106554534725SMatan Azrad }; 106683c2047cSSuanming Mou struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO, 106783c2047cSSuanming Mou sizeof(*tbl_data), 0, 106883c2047cSSuanming Mou SOCKET_ID_ANY); 106954534725SMatan Azrad 107054534725SMatan Azrad if (!tbl_data) { 107154534725SMatan Azrad err = ENOMEM; 107254534725SMatan Azrad goto error; 107354534725SMatan Azrad } 107454534725SMatan Azrad tbl_data->entry.key = table_key.v64; 107554534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 107654534725SMatan Azrad if (err) 107754534725SMatan Azrad goto error; 107854534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 107954534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 108054534725SMatan Azrad table_key.direction = 1; 108183c2047cSSuanming Mou tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0, 108283c2047cSSuanming Mou SOCKET_ID_ANY); 108354534725SMatan Azrad if (!tbl_data) { 108454534725SMatan Azrad err = ENOMEM; 108554534725SMatan Azrad goto error; 108654534725SMatan Azrad } 108754534725SMatan Azrad tbl_data->entry.key = table_key.v64; 108854534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 108954534725SMatan Azrad if (err) 109054534725SMatan Azrad goto error; 109154534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 109254534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 109354534725SMatan Azrad table_key.direction = 0; 109454534725SMatan Azrad table_key.domain = 1; 109583c2047cSSuanming Mou tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0, 109683c2047cSSuanming Mou 
SOCKET_ID_ANY); 109754534725SMatan Azrad if (!tbl_data) { 109854534725SMatan Azrad err = ENOMEM; 109954534725SMatan Azrad goto error; 110054534725SMatan Azrad } 110154534725SMatan Azrad tbl_data->entry.key = table_key.v64; 110254534725SMatan Azrad err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); 110354534725SMatan Azrad if (err) 110454534725SMatan Azrad goto error; 110554534725SMatan Azrad rte_atomic32_init(&tbl_data->tbl.refcnt); 110654534725SMatan Azrad rte_atomic32_inc(&tbl_data->tbl.refcnt); 110754534725SMatan Azrad return err; 110854534725SMatan Azrad error: 110954534725SMatan Azrad mlx5_free_table_hash_list(priv); 111054534725SMatan Azrad #endif /* HAVE_MLX5DV_DR */ 111154534725SMatan Azrad return err; 111254534725SMatan Azrad } 111354534725SMatan Azrad 111454534725SMatan Azrad /** 11154d803a72SOlga Shern * Retrieve integer value from environment variable. 11164d803a72SOlga Shern * 11174d803a72SOlga Shern * @param[in] name 11184d803a72SOlga Shern * Environment variable name. 11194d803a72SOlga Shern * 11204d803a72SOlga Shern * @return 11214d803a72SOlga Shern * Integer value, 0 if the variable is not set. 
11224d803a72SOlga Shern */ 11234d803a72SOlga Shern int 11244d803a72SOlga Shern mlx5_getenv_int(const char *name) 11254d803a72SOlga Shern { 11264d803a72SOlga Shern const char *val = getenv(name); 11274d803a72SOlga Shern 11284d803a72SOlga Shern if (val == NULL) 11294d803a72SOlga Shern return 0; 11304d803a72SOlga Shern return atoi(val); 11314d803a72SOlga Shern } 11324d803a72SOlga Shern 11334d803a72SOlga Shern /** 1134c9ba7523SRaslan Darawsheh * DPDK callback to add udp tunnel port 1135c9ba7523SRaslan Darawsheh * 1136c9ba7523SRaslan Darawsheh * @param[in] dev 1137c9ba7523SRaslan Darawsheh * A pointer to eth_dev 1138c9ba7523SRaslan Darawsheh * @param[in] udp_tunnel 1139c9ba7523SRaslan Darawsheh * A pointer to udp tunnel 1140c9ba7523SRaslan Darawsheh * 1141c9ba7523SRaslan Darawsheh * @return 1142c9ba7523SRaslan Darawsheh * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. 1143c9ba7523SRaslan Darawsheh */ 1144c9ba7523SRaslan Darawsheh int 1145c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, 1146c9ba7523SRaslan Darawsheh struct rte_eth_udp_tunnel *udp_tunnel) 1147c9ba7523SRaslan Darawsheh { 11488e46d4e1SAlexander Kozyrev MLX5_ASSERT(udp_tunnel != NULL); 1149c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && 1150c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4789) 1151c9ba7523SRaslan Darawsheh return 0; 1152c9ba7523SRaslan Darawsheh if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && 1153c9ba7523SRaslan Darawsheh udp_tunnel->udp_port == 4790) 1154c9ba7523SRaslan Darawsheh return 0; 1155c9ba7523SRaslan Darawsheh return -ENOTSUP; 1156c9ba7523SRaslan Darawsheh } 1157c9ba7523SRaslan Darawsheh 1158c9ba7523SRaslan Darawsheh /** 1159120dc4a7SYongseok Koh * Initialize process private data structure. 1160120dc4a7SYongseok Koh * 1161120dc4a7SYongseok Koh * @param dev 1162120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
1163120dc4a7SYongseok Koh * 1164120dc4a7SYongseok Koh * @return 1165120dc4a7SYongseok Koh * 0 on success, a negative errno value otherwise and rte_errno is set. 1166120dc4a7SYongseok Koh */ 1167120dc4a7SYongseok Koh int 1168120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev) 1169120dc4a7SYongseok Koh { 1170120dc4a7SYongseok Koh struct mlx5_priv *priv = dev->data->dev_private; 1171120dc4a7SYongseok Koh struct mlx5_proc_priv *ppriv; 1172120dc4a7SYongseok Koh size_t ppriv_size; 1173120dc4a7SYongseok Koh 1174120dc4a7SYongseok Koh /* 1175120dc4a7SYongseok Koh * UAR register table follows the process private structure. BlueFlame 1176120dc4a7SYongseok Koh * registers for Tx queues are stored in the table. 1177120dc4a7SYongseok Koh */ 1178120dc4a7SYongseok Koh ppriv_size = 1179120dc4a7SYongseok Koh sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); 11802175c4dcSSuanming Mou ppriv = mlx5_malloc(MLX5_MEM_RTE, ppriv_size, RTE_CACHE_LINE_SIZE, 11812175c4dcSSuanming Mou dev->device->numa_node); 1182120dc4a7SYongseok Koh if (!ppriv) { 1183120dc4a7SYongseok Koh rte_errno = ENOMEM; 1184120dc4a7SYongseok Koh return -rte_errno; 1185120dc4a7SYongseok Koh } 1186120dc4a7SYongseok Koh ppriv->uar_table_sz = ppriv_size; 1187120dc4a7SYongseok Koh dev->process_private = ppriv; 1188120dc4a7SYongseok Koh return 0; 1189120dc4a7SYongseok Koh } 1190120dc4a7SYongseok Koh 1191120dc4a7SYongseok Koh /** 1192120dc4a7SYongseok Koh * Un-initialize process private data structure. 1193120dc4a7SYongseok Koh * 1194120dc4a7SYongseok Koh * @param dev 1195120dc4a7SYongseok Koh * Pointer to Ethernet device structure. 
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	/* Idempotent: a second call finds NULL and returns immediately. */
	if (!dev->process_private)
		return;
	mlx5_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Check if process_private released. */
		if (!dev->process_private)
			return;
		mlx5_tx_uar_uninit_secondary(dev);
		mlx5_proc_priv_uninit(dev);
		rte_eth_dev_release_port(dev);
		return;
	}
	/* Nothing to release if the shared context is already gone. */
	if (!priv->sh)
		return;
	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ?
		mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
	/*
	 * If default mreg copy action is removed at the stop stage,
	 * the search will return none and nothing will be done anymore.
	 */
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	/*
	 * If all the flows are already flushed in the device stop stage,
	 * then this will return directly without any action.
	 */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_flow_meter_flush(dev, NULL);
	/* Free the intermediate buffers for flow creation. */
	mlx5_flow_free_intermediate(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
	/* Free the eCPRI flex parser resource. */
	mlx5_flex_parser_ecpri_release(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	if (priv->mreg_cp_tbl)
		mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
	mlx5_mprq_free_mp(dev);
	mlx5_os_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		mlx5_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		mlx5_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
				       dev->data->mac_addrs,
				       MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->vmwa_context)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
	/* The verify calls below only warn; nothing is forcibly freed. */
	ret = mlx5_hrxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	/*
	 * Free the shared context in last turn, because the cleanup
	 * routines above may use some shared fields, like
	 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
	 * ifindex if Netlink fails.
	 */
	mlx5_free_shared_dev_ctx(priv->sh);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		/* Free the switch domain only if no other port still uses it. */
		MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
			break;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data, a pointer to the struct mlx5_dev_config being filled in.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	/* Absolute value of 'tmp'; only used by the tx_pp zero check. */
	unsigned long mod;
	signed long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	/* Base 0: accepts decimal, octal (0...) and hex (0x...) values. */
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	mod = tmp >= 0 ? tmp : -tmp;
	/* Rx queue parameters. */
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
		config->mprq.stride_size_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	/* Tx inline parameters, including deprecated aliases. */
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_max", key);
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
		config->txq_inline_min = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
		/* Only the three known doorbell mapping modes are valid. */
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "invalid Tx doorbell "
				     "mapping parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_mpw", key);
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TX_PP, key) == 0) {
		/* Packet pacing granularity may be negative but not zero. */
		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	/* DV (Direct Verbs) flow engine parameters. */
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32) {
			DRV_LOG(ERR, "invalid extensive "
				     "metadata parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dv_xmeta_en = tmp;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
		config->max_dump_files_num = tmp;
	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
		config->lro.timeout = tmp;
	} else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
		/* Class argument is handled by the common layer; log only. */
		DRV_LOG(DEBUG, "class argument is %s.", val);
	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
		config->log_hp_size = tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
		config->sys_mem_en = !!tmp;
	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
		config->decap_en = !!tmp;
	} else {
		/* Unknown keys are rejected so typos do not pass silently. */
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1515e72dd09bSNélio Laranjeiro */ 15162eb4d010SOphir Munk int 15177fe24446SShahaf Shuler mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) 1518e72dd09bSNélio Laranjeiro { 1519e72dd09bSNélio Laranjeiro const char **params = (const char *[]){ 152099c12dccSNélio Laranjeiro MLX5_RXQ_CQE_COMP_EN, 1521bc91e8dbSYongseok Koh MLX5_RXQ_CQE_PAD_EN, 152278c7a16dSYongseok Koh MLX5_RXQ_PKT_PAD_EN, 15237d6bf6b8SYongseok Koh MLX5_RX_MPRQ_EN, 15247d6bf6b8SYongseok Koh MLX5_RX_MPRQ_LOG_STRIDE_NUM, 1525ecb16045SAlexander Kozyrev MLX5_RX_MPRQ_LOG_STRIDE_SIZE, 15267d6bf6b8SYongseok Koh MLX5_RX_MPRQ_MAX_MEMCPY_LEN, 15277d6bf6b8SYongseok Koh MLX5_RXQS_MIN_MPRQ, 15282a66cf37SYaacov Hazan MLX5_TXQ_INLINE, 1529505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MIN, 1530505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MAX, 1531505f1fe4SViacheslav Ovsiienko MLX5_TXQ_INLINE_MPW, 15322a66cf37SYaacov Hazan MLX5_TXQS_MIN_INLINE, 153309d8b416SYongseok Koh MLX5_TXQS_MAX_VEC, 1534230189d9SNélio Laranjeiro MLX5_TXQ_MPW_EN, 15356ce84bd8SYongseok Koh MLX5_TXQ_MPW_HDR_DSEG_EN, 15366ce84bd8SYongseok Koh MLX5_TXQ_MAX_INLINE_LEN, 15378409a285SViacheslav Ovsiienko MLX5_TX_DB_NC, 15388f848f32SViacheslav Ovsiienko MLX5_TX_PP, 15398f848f32SViacheslav Ovsiienko MLX5_TX_SKEW, 15405644d5b9SNelio Laranjeiro MLX5_TX_VEC_EN, 15415644d5b9SNelio Laranjeiro MLX5_RX_VEC_EN, 154278a54648SXueming Li MLX5_L3_VXLAN_EN, 1543db209cc3SNélio Laranjeiro MLX5_VF_NL_EN, 1544e2b4925eSOri Kam MLX5_DV_ESW_EN, 154551e72d38SOri Kam MLX5_DV_FLOW_EN, 15462d241515SViacheslav Ovsiienko MLX5_DV_XMETA_EN, 15470f0ae73aSShiri Kuzin MLX5_LACP_BY_USER, 1548dceb5029SYongseok Koh MLX5_MR_EXT_MEMSEG_EN, 15496de569f5SAdrien Mazarguil MLX5_REPRESENTOR, 1550066cfecdSMatan Azrad MLX5_MAX_DUMP_FILES_NUM, 155121bb6c7eSDekel Peled MLX5_LRO_TIMEOUT_USEC, 1552d768f324SMatan Azrad MLX5_CLASS_ARG_NAME, 15531ad9a3d0SBing Zhao MLX5_HP_BUF_SIZE, 1554a1da6f62SSuanming Mou MLX5_RECLAIM_MEM, 15555522da6bSSuanming Mou MLX5_SYS_MEM_EN, 
155650f95b23SSuanming Mou MLX5_DECAP_EN, 1557e72dd09bSNélio Laranjeiro NULL, 1558e72dd09bSNélio Laranjeiro }; 1559e72dd09bSNélio Laranjeiro struct rte_kvargs *kvlist; 1560e72dd09bSNélio Laranjeiro int ret = 0; 1561e72dd09bSNélio Laranjeiro int i; 1562e72dd09bSNélio Laranjeiro 1563e72dd09bSNélio Laranjeiro if (devargs == NULL) 1564e72dd09bSNélio Laranjeiro return 0; 1565e72dd09bSNélio Laranjeiro /* Following UGLY cast is done to pass checkpatch. */ 1566e72dd09bSNélio Laranjeiro kvlist = rte_kvargs_parse(devargs->args, params); 156715b0ea00SMatan Azrad if (kvlist == NULL) { 156815b0ea00SMatan Azrad rte_errno = EINVAL; 156915b0ea00SMatan Azrad return -rte_errno; 157015b0ea00SMatan Azrad } 1571e72dd09bSNélio Laranjeiro /* Process parameters. */ 1572e72dd09bSNélio Laranjeiro for (i = 0; (params[i] != NULL); ++i) { 1573e72dd09bSNélio Laranjeiro if (rte_kvargs_count(kvlist, params[i])) { 1574e72dd09bSNélio Laranjeiro ret = rte_kvargs_process(kvlist, params[i], 15757fe24446SShahaf Shuler mlx5_args_check, config); 1576a6d83b6aSNélio Laranjeiro if (ret) { 1577a6d83b6aSNélio Laranjeiro rte_errno = EINVAL; 1578a67323e4SShahaf Shuler rte_kvargs_free(kvlist); 1579a6d83b6aSNélio Laranjeiro return -rte_errno; 1580e72dd09bSNélio Laranjeiro } 1581e72dd09bSNélio Laranjeiro } 1582a67323e4SShahaf Shuler } 1583e72dd09bSNélio Laranjeiro rte_kvargs_free(kvlist); 1584e72dd09bSNélio Laranjeiro return 0; 1585e72dd09bSNélio Laranjeiro } 1586e72dd09bSNélio Laranjeiro 15877be600c8SYongseok Koh /** 158838b4b397SViacheslav Ovsiienko * Configures the minimal amount of data to inline into WQE 158938b4b397SViacheslav Ovsiienko * while sending packets. 
159038b4b397SViacheslav Ovsiienko * 159138b4b397SViacheslav Ovsiienko * - the txq_inline_min has the maximal priority, if this 159238b4b397SViacheslav Ovsiienko * key is specified in devargs 159338b4b397SViacheslav Ovsiienko * - if DevX is enabled the inline mode is queried from the 159438b4b397SViacheslav Ovsiienko * device (HCA attributes and NIC vport context if needed). 1595ee76bddcSThomas Monjalon * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx 159638b4b397SViacheslav Ovsiienko * and none (0 bytes) for other NICs 159738b4b397SViacheslav Ovsiienko * 159838b4b397SViacheslav Ovsiienko * @param spawn 159938b4b397SViacheslav Ovsiienko * Verbs device parameters (name, port, switch_info) to spawn. 160038b4b397SViacheslav Ovsiienko * @param config 160138b4b397SViacheslav Ovsiienko * Device configuration parameters. 160238b4b397SViacheslav Ovsiienko */ 16032eb4d010SOphir Munk void 160438b4b397SViacheslav Ovsiienko mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, 160538b4b397SViacheslav Ovsiienko struct mlx5_dev_config *config) 160638b4b397SViacheslav Ovsiienko { 160738b4b397SViacheslav Ovsiienko if (config->txq_inline_min != MLX5_ARG_UNSET) { 160838b4b397SViacheslav Ovsiienko /* Application defines size of inlined data explicitly. 
*/ 160938b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 161038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 161138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 161238b4b397SViacheslav Ovsiienko if (config->txq_inline_min < 161338b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2) { 161438b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, 161538b4b397SViacheslav Ovsiienko "txq_inline_mix aligned to minimal" 161638b4b397SViacheslav Ovsiienko " ConnectX-4 required value %d", 161738b4b397SViacheslav Ovsiienko (int)MLX5_INLINE_HSIZE_L2); 161838b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 161938b4b397SViacheslav Ovsiienko } 162038b4b397SViacheslav Ovsiienko break; 162138b4b397SViacheslav Ovsiienko } 162238b4b397SViacheslav Ovsiienko goto exit; 162338b4b397SViacheslav Ovsiienko } 162438b4b397SViacheslav Ovsiienko if (config->hca_attr.eth_net_offloads) { 162538b4b397SViacheslav Ovsiienko /* We have DevX enabled, inline mode queried successfully. */ 162638b4b397SViacheslav Ovsiienko switch (config->hca_attr.wqe_inline_mode) { 162738b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_L2: 162838b4b397SViacheslav Ovsiienko /* outer L2 header must be inlined. */ 162938b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 163038b4b397SViacheslav Ovsiienko goto exit; 163138b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 163238b4b397SViacheslav Ovsiienko /* No inline data are required by NIC. 
*/ 163338b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 163438b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 163538b4b397SViacheslav Ovsiienko config->hca_attr.wqe_vlan_insert; 163638b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); 163738b4b397SViacheslav Ovsiienko goto exit; 163838b4b397SViacheslav Ovsiienko case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 163938b4b397SViacheslav Ovsiienko /* inline mode is defined by NIC vport context. */ 164038b4b397SViacheslav Ovsiienko if (!config->hca_attr.eth_virt) 164138b4b397SViacheslav Ovsiienko break; 164238b4b397SViacheslav Ovsiienko switch (config->hca_attr.vport_inline_mode) { 164338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_NONE: 164438b4b397SViacheslav Ovsiienko config->txq_inline_min = 164538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_NONE; 164638b4b397SViacheslav Ovsiienko goto exit; 164738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_L2: 164838b4b397SViacheslav Ovsiienko config->txq_inline_min = 164938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L2; 165038b4b397SViacheslav Ovsiienko goto exit; 165138b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_IP: 165238b4b397SViacheslav Ovsiienko config->txq_inline_min = 165338b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L3; 165438b4b397SViacheslav Ovsiienko goto exit; 165538b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_TCP_UDP: 165638b4b397SViacheslav Ovsiienko config->txq_inline_min = 165738b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_L4; 165838b4b397SViacheslav Ovsiienko goto exit; 165938b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_L2: 166038b4b397SViacheslav Ovsiienko config->txq_inline_min = 166138b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L2; 166238b4b397SViacheslav Ovsiienko goto exit; 166338b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_IP: 166438b4b397SViacheslav Ovsiienko config->txq_inline_min = 166538b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L3; 
166638b4b397SViacheslav Ovsiienko goto exit; 166738b4b397SViacheslav Ovsiienko case MLX5_INLINE_MODE_INNER_TCP_UDP: 166838b4b397SViacheslav Ovsiienko config->txq_inline_min = 166938b4b397SViacheslav Ovsiienko MLX5_INLINE_HSIZE_INNER_L4; 167038b4b397SViacheslav Ovsiienko goto exit; 167138b4b397SViacheslav Ovsiienko } 167238b4b397SViacheslav Ovsiienko } 167338b4b397SViacheslav Ovsiienko } 167438b4b397SViacheslav Ovsiienko /* 167538b4b397SViacheslav Ovsiienko * We get here if we are unable to deduce 167638b4b397SViacheslav Ovsiienko * inline data size with DevX. Try PCI ID 167738b4b397SViacheslav Ovsiienko * to determine old NICs. 167838b4b397SViacheslav Ovsiienko */ 167938b4b397SViacheslav Ovsiienko switch (spawn->pci_dev->id.device_id) { 168038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4: 168138b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 168238b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: 168338b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1684614de6c8SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_L2; 168538b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 168638b4b397SViacheslav Ovsiienko break; 168738b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5: 168838b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 168938b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: 169038b4b397SViacheslav Ovsiienko case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 169138b4b397SViacheslav Ovsiienko /* 169238b4b397SViacheslav Ovsiienko * These NICs support VLAN insertion from WQE and 169338b4b397SViacheslav Ovsiienko * report the wqe_vlan_insert flag. But there is the bug 169438b4b397SViacheslav Ovsiienko * and PFC control may be broken, so disable feature. 
169538b4b397SViacheslav Ovsiienko */ 169638b4b397SViacheslav Ovsiienko config->hw_vlan_insert = 0; 169720215627SDavid Christensen config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 169838b4b397SViacheslav Ovsiienko break; 169938b4b397SViacheslav Ovsiienko default: 170038b4b397SViacheslav Ovsiienko config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; 170138b4b397SViacheslav Ovsiienko break; 170238b4b397SViacheslav Ovsiienko } 170338b4b397SViacheslav Ovsiienko exit: 170438b4b397SViacheslav Ovsiienko DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); 170538b4b397SViacheslav Ovsiienko } 170638b4b397SViacheslav Ovsiienko 170738b4b397SViacheslav Ovsiienko /** 170839139371SViacheslav Ovsiienko * Configures the metadata mask fields in the shared context. 170939139371SViacheslav Ovsiienko * 171039139371SViacheslav Ovsiienko * @param [in] dev 171139139371SViacheslav Ovsiienko * Pointer to Ethernet device. 171239139371SViacheslav Ovsiienko */ 17132eb4d010SOphir Munk void 171439139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev) 171539139371SViacheslav Ovsiienko { 171639139371SViacheslav Ovsiienko struct mlx5_priv *priv = dev->data->dev_private; 17176e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 171839139371SViacheslav Ovsiienko uint32_t meta, mark, reg_c0; 171939139371SViacheslav Ovsiienko 172039139371SViacheslav Ovsiienko reg_c0 = ~priv->vport_meta_mask; 172139139371SViacheslav Ovsiienko switch (priv->config.dv_xmeta_en) { 172239139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_LEGACY: 172339139371SViacheslav Ovsiienko meta = UINT32_MAX; 172439139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 172539139371SViacheslav Ovsiienko break; 172639139371SViacheslav Ovsiienko case MLX5_XMETA_MODE_META16: 172739139371SViacheslav Ovsiienko meta = reg_c0 >> rte_bsf32(reg_c0); 172839139371SViacheslav Ovsiienko mark = MLX5_FLOW_MARK_MASK; 172939139371SViacheslav Ovsiienko break; 173039139371SViacheslav Ovsiienko case 
MLX5_XMETA_MODE_META32: 173139139371SViacheslav Ovsiienko meta = UINT32_MAX; 173239139371SViacheslav Ovsiienko mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK; 173339139371SViacheslav Ovsiienko break; 173439139371SViacheslav Ovsiienko default: 173539139371SViacheslav Ovsiienko meta = 0; 173639139371SViacheslav Ovsiienko mark = 0; 17378e46d4e1SAlexander Kozyrev MLX5_ASSERT(false); 173839139371SViacheslav Ovsiienko break; 173939139371SViacheslav Ovsiienko } 174039139371SViacheslav Ovsiienko if (sh->dv_mark_mask && sh->dv_mark_mask != mark) 174139139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X", 174239139371SViacheslav Ovsiienko sh->dv_mark_mask, mark); 174339139371SViacheslav Ovsiienko else 174439139371SViacheslav Ovsiienko sh->dv_mark_mask = mark; 174539139371SViacheslav Ovsiienko if (sh->dv_meta_mask && sh->dv_meta_mask != meta) 174639139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X", 174739139371SViacheslav Ovsiienko sh->dv_meta_mask, meta); 174839139371SViacheslav Ovsiienko else 174939139371SViacheslav Ovsiienko sh->dv_meta_mask = meta; 175039139371SViacheslav Ovsiienko if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0) 175139139371SViacheslav Ovsiienko DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X", 175239139371SViacheslav Ovsiienko sh->dv_meta_mask, reg_c0); 175339139371SViacheslav Ovsiienko else 175439139371SViacheslav Ovsiienko sh->dv_regc0_mask = reg_c0; 175539139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en); 175639139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask); 175739139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask); 175839139371SViacheslav Ovsiienko DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask); 175939139371SViacheslav Ovsiienko } 176039139371SViacheslav Ovsiienko 1761efa79e68SOri Kam int 1762efa79e68SOri Kam 
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) 1763efa79e68SOri Kam { 1764efa79e68SOri Kam static const char *const dynf_names[] = { 1765efa79e68SOri Kam RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, 17668f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_METADATA_NAME, 17678f848f32SViacheslav Ovsiienko RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME 1768efa79e68SOri Kam }; 1769efa79e68SOri Kam unsigned int i; 1770efa79e68SOri Kam 1771efa79e68SOri Kam if (n < RTE_DIM(dynf_names)) 1772efa79e68SOri Kam return -ENOMEM; 1773efa79e68SOri Kam for (i = 0; i < RTE_DIM(dynf_names); i++) { 1774efa79e68SOri Kam if (names[i] == NULL) 1775efa79e68SOri Kam return -EINVAL; 1776efa79e68SOri Kam strcpy(names[i], dynf_names[i]); 1777efa79e68SOri Kam } 1778efa79e68SOri Kam return RTE_DIM(dynf_names); 1779efa79e68SOri Kam } 1780efa79e68SOri Kam 178121cae858SDekel Peled /** 17822eb4d010SOphir Munk * Comparison callback to sort device data. 178392d5dd48SViacheslav Ovsiienko * 17842eb4d010SOphir Munk * This is meant to be used with qsort(). 178592d5dd48SViacheslav Ovsiienko * 17862eb4d010SOphir Munk * @param a[in] 17872eb4d010SOphir Munk * Pointer to pointer to first data object. 17882eb4d010SOphir Munk * @param b[in] 17892eb4d010SOphir Munk * Pointer to pointer to second data object. 179092d5dd48SViacheslav Ovsiienko * 179192d5dd48SViacheslav Ovsiienko * @return 17922eb4d010SOphir Munk * 0 if both objects are equal, less than 0 if the first argument is less 17932eb4d010SOphir Munk * than the second, greater than 0 otherwise. 
179492d5dd48SViacheslav Ovsiienko */ 17952eb4d010SOphir Munk int 179692d5dd48SViacheslav Ovsiienko mlx5_dev_check_sibling_config(struct mlx5_priv *priv, 179792d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *config) 179892d5dd48SViacheslav Ovsiienko { 17996e88bc42SOphir Munk struct mlx5_dev_ctx_shared *sh = priv->sh; 180092d5dd48SViacheslav Ovsiienko struct mlx5_dev_config *sh_conf = NULL; 180192d5dd48SViacheslav Ovsiienko uint16_t port_id; 180292d5dd48SViacheslav Ovsiienko 18038e46d4e1SAlexander Kozyrev MLX5_ASSERT(sh); 180492d5dd48SViacheslav Ovsiienko /* Nothing to compare for the single/first device. */ 180592d5dd48SViacheslav Ovsiienko if (sh->refcnt == 1) 180692d5dd48SViacheslav Ovsiienko return 0; 180792d5dd48SViacheslav Ovsiienko /* Find the device with shared context. */ 1808fbc83412SViacheslav Ovsiienko MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { 180992d5dd48SViacheslav Ovsiienko struct mlx5_priv *opriv = 181092d5dd48SViacheslav Ovsiienko rte_eth_devices[port_id].data->dev_private; 181192d5dd48SViacheslav Ovsiienko 181292d5dd48SViacheslav Ovsiienko if (opriv && opriv != priv && opriv->sh == sh) { 181392d5dd48SViacheslav Ovsiienko sh_conf = &opriv->config; 181492d5dd48SViacheslav Ovsiienko break; 181592d5dd48SViacheslav Ovsiienko } 181692d5dd48SViacheslav Ovsiienko } 181792d5dd48SViacheslav Ovsiienko if (!sh_conf) 181892d5dd48SViacheslav Ovsiienko return 0; 181992d5dd48SViacheslav Ovsiienko if (sh_conf->dv_flow_en ^ config->dv_flow_en) { 182092d5dd48SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch" 182192d5dd48SViacheslav Ovsiienko " for shared %s context", sh->ibdev_name); 182292d5dd48SViacheslav Ovsiienko rte_errno = EINVAL; 182392d5dd48SViacheslav Ovsiienko return rte_errno; 182492d5dd48SViacheslav Ovsiienko } 18252d241515SViacheslav Ovsiienko if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) { 18262d241515SViacheslav Ovsiienko DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch" 18272d241515SViacheslav Ovsiienko " for 
shared %s context", sh->ibdev_name); 18282d241515SViacheslav Ovsiienko rte_errno = EINVAL; 18292d241515SViacheslav Ovsiienko return rte_errno; 18302d241515SViacheslav Ovsiienko } 183192d5dd48SViacheslav Ovsiienko return 0; 183292d5dd48SViacheslav Ovsiienko } 1833771fa900SAdrien Mazarguil 1834fbc83412SViacheslav Ovsiienko /** 1835fbc83412SViacheslav Ovsiienko * Look for the ethernet device belonging to mlx5 driver. 1836fbc83412SViacheslav Ovsiienko * 1837fbc83412SViacheslav Ovsiienko * @param[in] port_id 1838fbc83412SViacheslav Ovsiienko * port_id to start looking for device. 1839fbc83412SViacheslav Ovsiienko * @param[in] pci_dev 1840fbc83412SViacheslav Ovsiienko * Pointer to the hint PCI device. When device is being probed 1841fbc83412SViacheslav Ovsiienko * the its siblings (master and preceding representors might 18422eb4d010SOphir Munk * not have assigned driver yet (because the mlx5_os_pci_probe() 1843fbc83412SViacheslav Ovsiienko * is not completed yet, for this case match on hint PCI 1844fbc83412SViacheslav Ovsiienko * device may be used to detect sibling device. 1845fbc83412SViacheslav Ovsiienko * 1846fbc83412SViacheslav Ovsiienko * @return 1847fbc83412SViacheslav Ovsiienko * port_id of found device, RTE_MAX_ETHPORT if not found. 
1848fbc83412SViacheslav Ovsiienko */ 1849f7e95215SViacheslav Ovsiienko uint16_t 1850fbc83412SViacheslav Ovsiienko mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev) 1851f7e95215SViacheslav Ovsiienko { 1852f7e95215SViacheslav Ovsiienko while (port_id < RTE_MAX_ETHPORTS) { 1853f7e95215SViacheslav Ovsiienko struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1854f7e95215SViacheslav Ovsiienko 1855f7e95215SViacheslav Ovsiienko if (dev->state != RTE_ETH_DEV_UNUSED && 1856f7e95215SViacheslav Ovsiienko dev->device && 1857fbc83412SViacheslav Ovsiienko (dev->device == &pci_dev->device || 1858fbc83412SViacheslav Ovsiienko (dev->device->driver && 1859f7e95215SViacheslav Ovsiienko dev->device->driver->name && 1860fbc83412SViacheslav Ovsiienko !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME)))) 1861f7e95215SViacheslav Ovsiienko break; 1862f7e95215SViacheslav Ovsiienko port_id++; 1863f7e95215SViacheslav Ovsiienko } 1864f7e95215SViacheslav Ovsiienko if (port_id >= RTE_MAX_ETHPORTS) 1865f7e95215SViacheslav Ovsiienko return RTE_MAX_ETHPORTS; 1866f7e95215SViacheslav Ovsiienko return port_id; 1867f7e95215SViacheslav Ovsiienko } 1868f7e95215SViacheslav Ovsiienko 18693a820742SOphir Munk /** 18703a820742SOphir Munk * DPDK callback to remove a PCI device. 18713a820742SOphir Munk * 18723a820742SOphir Munk * This function removes all Ethernet devices belong to a given PCI device. 18733a820742SOphir Munk * 18743a820742SOphir Munk * @param[in] pci_dev 18753a820742SOphir Munk * Pointer to the PCI device. 18763a820742SOphir Munk * 18773a820742SOphir Munk * @return 18783a820742SOphir Munk * 0 on success, the function cannot fail. 
18793a820742SOphir Munk */ 18803a820742SOphir Munk static int 18813a820742SOphir Munk mlx5_pci_remove(struct rte_pci_device *pci_dev) 18823a820742SOphir Munk { 18833a820742SOphir Munk uint16_t port_id; 18843a820742SOphir Munk 18852786b7bfSSuanming Mou RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { 18862786b7bfSSuanming Mou /* 18872786b7bfSSuanming Mou * mlx5_dev_close() is not registered to secondary process, 18882786b7bfSSuanming Mou * call the close function explicitly for secondary process. 18892786b7bfSSuanming Mou */ 18902786b7bfSSuanming Mou if (rte_eal_process_type() == RTE_PROC_SECONDARY) 18912786b7bfSSuanming Mou mlx5_dev_close(&rte_eth_devices[port_id]); 18922786b7bfSSuanming Mou else 18933a820742SOphir Munk rte_eth_dev_close(port_id); 18942786b7bfSSuanming Mou } 18953a820742SOphir Munk return 0; 18963a820742SOphir Munk } 18973a820742SOphir Munk 1898771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = { 1899771fa900SAdrien Mazarguil { 19001d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 19011d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4) 1902771fa900SAdrien Mazarguil }, 1903771fa900SAdrien Mazarguil { 19041d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 19051d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) 1906771fa900SAdrien Mazarguil }, 1907771fa900SAdrien Mazarguil { 19081d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 19091d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) 1910771fa900SAdrien Mazarguil }, 1911771fa900SAdrien Mazarguil { 19121d1bc870SNélio Laranjeiro RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 19131d1bc870SNélio Laranjeiro PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) 1914771fa900SAdrien Mazarguil }, 1915771fa900SAdrien Mazarguil { 1916528a9fbeSYongseok Koh RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 1917528a9fbeSYongseok Koh PCI_DEVICE_ID_MELLANOX_CONNECTX5) 1918528a9fbeSYongseok Koh }, 1919528a9fbeSYongseok Koh { 1920528a9fbeSYongseok Koh 
		/* ConnectX-5 VF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{ /* ConnectX-5 Ex */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{ /* ConnectX-5 Ex VF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{ /* ConnectX-5 BF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{ /* ConnectX-5 BF VF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{ /* ConnectX-6 */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{ /* ConnectX-6 VF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{ /* ConnectX-6 Dx */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{ /* ConnectX-6 Dx VF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
	},
	{ /* ConnectX-6 Dx BF */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{ /* ConnectX-6 Lx */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{ /* Sentinel: vendor_id == 0 terminates the table. */
		.vendor_id = 0
	}
};

/* PCI driver descriptor; probe/remove are dispatched by the PCI bus. */
struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_os_pci_probe,
	.remove = mlx5_pci_remove,
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	.drv_flags = PCI_DRV_FLAGS,
};

/* Initialize driver log type. */
RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)

/**
 * Driver initialization routine.
 *
 * Runs as a constructor before main(); builds lookup tables and
 * registers the PCI driver.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/* Register only when the glue layer is available (mlx5_glue != NULL). */
	if (mlx5_glue)
		rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");