xref: /dpdk/drivers/net/mlx5/mlx5.c (revision 90967539d0d1afcfd5237ed85efdc430359a0e6b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"

#define MLX5_ETH_DRIVER_NAME mlx5_eth

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx. Deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify the skew in nanoseconds on the Tx datapath;
 * it represents the time between the start of SQ WQE processing and the
 * actual packet data appearing on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the LACP traffic of a bonding device. */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Device parameter to allow or prevent duplicate rule patterns. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"

/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"

/* Device parameter to create the FDB default rule in the PMD. */
#define MLX5_FDB_DEFAULT_RULE_EN "fdb_def_rule_en"

/* HW steering counter configuration. */
#define MLX5_HWS_CNT_SERVICE_CORE "service_core"

/* HW steering counter's query interval. */
#define MLX5_HWS_CNT_CYCLE_TIME "svc_cycle_time"

/* Device parameter to control representor matching in ingress/egress flows with HWS. */
#define MLX5_REPR_MATCHING_EN "repr_matching_en"

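/*
 * Illustrative note (example devargs composed here, not taken from the
 * sources): the keys above are parsed from the device argument string at
 * probe time, e.g. a hypothetical testpmd invocation could pass:
 *   -a 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,tx_pp=500,dv_flow_en=1
 * The set of keys actually honored depends on the device and build options.
 */
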
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

static LIST_HEAD(mlx5_dev_ctx_list, mlx5_dev_ctx_shared) dev_ctx_list = LIST_HEAD_INITIALIZER();
static LIST_HEAD(mlx5_phdev_list, mlx5_physical_device) phdev_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;

static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = (1 << 16),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		/*
		 * The MLX5_IPOOL_JUMP ipool entry size depends on the selected
		 * flow engine. When HW steering is enabled the mlx5_flow_group
		 * struct is used, otherwise the mlx5_flow_tbl_data_entry
		 * struct is used.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		/**
		 * The ipool index must grow continually from small to big for
		 * the meter index, so grow_trunk is not set in order to avoid
		 * meter indexes that do not increase continually.
		 */
		.size = sizeof(struct mlx5_legacy_flow_meter),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * The MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It is set at run time according to the PCI function
		 * configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 1 << 19,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
	[MLX5_IPOOL_MTR_POLICY] = {
		/**
		 * The ipool index must grow continually from small to big for
		 * the policy index, so grow_trunk is not set in order to avoid
		 * policy indexes that do not increase continually.
		 */
		.size = sizeof(struct mlx5_flow_meter_sub_policy),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_policy_ipool",
	},
};

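/*
 * Illustrative usage sketch (assuming the mlx5_ipool_*() helpers declared in
 * mlx5_utils.h): an indexed pool hands out a small index (with 0 treated as
 * the invalid value) instead of a pointer, keeping flow handles compact
 * while trunks grow on demand per the config above.
 *
 *	struct mlx5_indexed_pool_config cfg = mlx5_ipool_cfg[MLX5_IPOOL_TAG];
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	void *entry = mlx5_ipool_zmalloc(pool, &idx); // zeroed entry + index
 *	mlx5_ipool_free(pool, idx);                   // return it by index
 *	mlx5_ipool_destroy(pool);
 */
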
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024

#define MLX5_RXQ_ENH_CQE_COMP_MASK 0x80

/**
 * Decide whether the representor ID is an HPF (host PF) port on BF2.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if HPF, otherwise 0.
 */
bool
mlx5_is_hpf(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id);
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF &&
	       MLX5_REPRESENTOR_REPR(-1) == repr;
}

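/*
 * Illustrative note (layout inferred from the MLX5_REPRESENTOR_* accessors
 * above, an assumption rather than a restatement of the headers): the
 * representor ID packs a representor type and a representor index into one
 * value, so MLX5_REPRESENTOR_REPR(-1) yields the all-ones index, which is
 * how the host PF representor is recognized in mlx5_is_hpf().
 */
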
/**
 * Decide whether the representor ID is an SF port representor.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if SF port representor, otherwise 0.
 */
bool
mlx5_is_sf_repr(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
}

/**
 * Initialize the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->aso_age_mng)
		return 0;
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->aso_age_mng) {
		DRV_LOG(ERR, "aso_age_mng allocation failed.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT, 1);
	if (err) {
		mlx5_free(sh->aso_age_mng);
		return -1;
	}
	rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
	rte_spinlock_init(&sh->aso_age_mng->free_sl);
	LIST_INIT(&sh->aso_age_mng->free);
	return 0;
}

/**
 * Close and release all the resources of the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	mlx5_aso_flow_hit_queue_poll_stop(sh);
	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
	if (sh->aso_age_mng->pools) {
		struct mlx5_aso_age_pool *pool;

		for (i = 0; i < sh->aso_age_mng->next; ++i) {
			pool = sh->aso_age_mng->pools[i];
			claim_zero(mlx5_devx_cmd_destroy
						(pool->flow_hit_aso_obj));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
				if (pool->actions[j].dr_action)
					claim_zero
					    (mlx5_flow_os_destroy_flow_action
					      (pool->actions[j].dr_action));
			mlx5_free(pool);
		}
		mlx5_free(sh->aso_age_mng->pools);
	}
	mlx5_free(sh->aso_age_mng);
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	/*
	 * In HW steering, the aging information structure is initialized
	 * later, during the configure function.
	 */
	if (sh->config.dv_flow_en == 2)
		return;
	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Detect and configure the DV flow counter mode.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
	    !hca_attr->flow_counters_dump ||
	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			hca_attr->flow_counters_dump,
			hca_attr->flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->sws_cmng.counter_fallback = fallback;
	else if (fallback != sh->sws_cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has a different fallback mode "
			"from the others: %d.", PORT_ID(priv), fallback);
#endif
}

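/*
 * Illustrative note (my reading of the check above, to be verified against
 * the PRM rather than taken from the sources): flow_counter_bulk_alloc_bitmap
 * advertises the supported bulk allocation sizes bit by bit, and the 0x4 bit
 * is treated as the minimum bulk granularity that the non-fallback counter
 * management relies on.
 */
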
/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, otherwise negative errno value and rte_errno is set.
 */
static int
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	if (sh->config.dv_flow_en < 2) {
		void *pools;

		pools = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_flow_counter_pool *) *
				    MLX5_COUNTER_POOLS_MAX_NUM,
				    0, SOCKET_ID_ANY);
		if (!pools) {
			DRV_LOG(ERR,
				"Counter management allocation failed.");
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
		TAILQ_INIT(&sh->sws_cmng.flow_counters);
		sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
		sh->sws_cmng.max_id = -1;
		sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
		sh->sws_cmng.pools = pools;
		rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
		for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
			TAILQ_INIT(&sh->sws_cmng.counters[i]);
			rte_spinlock_init(&sh->sws_cmng.csl[i]);
		}
	} else {
		struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
		uint32_t fw_max_nb_cnts = attr->max_flow_counter;
		uint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;
		uint32_t max_nb_cnts = 0;

		for (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {
			int log_dcs_i = log_dcs - i;

			if (log_dcs_i < 0)
				break;
			if ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >
			    fw_max_nb_cnts)
				continue;
			max_nb_cnts |= RTE_BIT32(log_dcs_i);
			j++;
		}
		sh->hws_max_log_bulk_sz = log_dcs;
		sh->hws_max_nb_counters = max_nb_cnts;
	}
	return 0;
}

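/*
 * Worked example for the HWS sizing loop above (illustrative numbers, not
 * from the sources): with fw_max_nb_cnts = 0x600000 (6M), log2above() gives
 * 23 and log_dcs becomes 22; the loop then greedily ORs in 2^22 and 2^21
 * (4M + 2M = 6M) and skips every smaller bit that would exceed the firmware
 * limit, so at most MLX5_HWS_CNT_DCS_NUM pool sizes cover the largest
 * reachable counter total.
 */
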
/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	mlx5_os_wrapped_mkey_destroy(&mng->wm);
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->sws_cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->sws_cmng.n_valid;
		bool fallback = sh->sws_cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->sws_cmng.pools[i];
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
							       (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
						MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				if (fallback && cnt->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->sws_cmng.pools);
	}
	mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	}
	memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
}

/**
 * Initialize the ASO flow meters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->mtrmng) {
		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(*sh->mtrmng),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sh->mtrmng) {
			DRV_LOG(ERR,
			"meter management allocation failed.");
			rte_errno = ENOMEM;
			return -ENOMEM;
		}
		if (sh->meter_aso_en) {
			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
			rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
			LIST_INIT(&sh->mtrmng->pools_mng.meters);
		}
		sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
	}
	return 0;
}

/**
 * Close and release all the resources of
 * the ASO flow meter management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_mtr_pool *mtr_pool;
	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
	uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_aso_mtr *aso_mtr;
	int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */

	if (sh->meter_aso_en) {
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
		idx = mtrmng->pools_mng.n_valid;
		while (idx--) {
			mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
				aso_mtr = &mtr_pool->mtrs[i];
				if (aso_mtr->fm.meter_action_g)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action_g));
				if (aso_mtr->fm.meter_action_y)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action_y));
			}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
			claim_zero(mlx5_devx_cmd_destroy
						(mtr_pool->devx_obj));
			mtrmng->pools_mng.n_valid--;
			mlx5_free(mtr_pool);
		}
		mlx5_free(sh->mtrmng->pools_mng.pools);
	}
	mlx5_free(sh->mtrmng);
	sh->mtrmng = NULL;
}

/* Send FLOW_AGED event if needed. */
void
mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_age_info *age_info;
	uint32_t i;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
			continue;
		MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW);
		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) {
			MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER);
			rte_eth_dev_callback_process
				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
				RTE_ETH_EVENT_FLOW_AGED, NULL);
		}
	}
}

/*
 * Initialize the ASO connection tracking structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->ct_mng)
		return 0;
	sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng) +
				 sizeof(struct mlx5_aso_sq) * MLX5_ASO_CT_SQ_NUM,
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->ct_mng) {
		DRV_LOG(ERR, "ASO CT management allocation failed.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING, MLX5_ASO_CT_SQ_NUM);
	if (err) {
		mlx5_free(sh->ct_mng);
		/* rte_errno should be extracted from the failure. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rte_spinlock_init(&sh->ct_mng->ct_sl);
	rte_rwlock_init(&sh->ct_mng->resize_rwl);
	LIST_INIT(&sh->ct_mng->free_cts);
	return 0;
}

/*
 * Close and release all the resources of the
 * ASO connection tracking management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	struct mlx5_aso_ct_pool *ct_pool;
	struct mlx5_aso_ct_action *ct;
	uint32_t idx;
	uint32_t val;
	uint32_t cnt;
	int i;

	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
	idx = mng->next;
	while (idx--) {
		cnt = 0;
		ct_pool = mng->pools[idx];
		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
			ct = &ct_pool->actions[i];
			val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
						 rte_memory_order_relaxed);
			MLX5_ASSERT(val == 1);
			if (val > 1)
				cnt++;
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			if (ct->dr_action_orig)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_orig));
			if (ct->dr_action_rply)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_rply));
#endif
		}
		claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj));
		if (cnt) {
			DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u",
				cnt, idx);
		}
		mlx5_free(ct_pool);
		/* In case of failure. */
		mng->next--;
	}
	mlx5_free(mng->pools);
	mlx5_free(mng);
	/* Management structure must be cleared to 0s during allocation. */
	sh->ct_mng = NULL;
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;
	struct mlx5_indexed_pool_config cfg;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
		cfg = mlx5_ipool_cfg[i];
		switch (i) {
		default:
			break;
		/*
		 * Set the MLX5_IPOOL_MLX5_FLOW ipool size
		 * according to the PCI function flow configuration.
		 */
		case MLX5_IPOOL_MLX5_FLOW:
			cfg.size = sh->config.dv_flow_en ?
				RTE_ALIGN_MUL_CEIL(sizeof(struct mlx5_flow_handle), 8) :
				MLX5_FLOW_HANDLE_VERBS_SIZE;
			break;
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
		/* Set the MLX5_IPOOL_JUMP ipool entry size depending on the selected flow engine. */
		case MLX5_IPOOL_JUMP:
			cfg.size = sh->config.dv_flow_en == 2 ?
				sizeof(struct mlx5_flow_group) :
				sizeof(struct mlx5_flow_tbl_data_entry);
			break;
#endif
		}
		if (sh->config.reclaim_mode) {
			cfg.release_mem_en = 1;
			cfg.per_core_cache = 0;
		} else {
			cfg.release_mem_en = 0;
		}
		sh->ipool[i] = mlx5_ipool_create(&cfg);
	}
}

/**
 * Release the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
		mlx5_ipool_destroy(sh->ipool[i]);
	for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)
		if (sh->mdh_ipools[i])
			mlx5_ipool_destroy(sh->mdh_ipools[i]);
}

/*
 * Check if a dynamic flex parser for eCPRI already exists.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   True if it exists, false otherwise.
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	return !!prf->obj;
}

/*
 * Allocation of a flex parser for eCPRI. Once created, the parser-related
 * resources will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	uint32_t ids[8];
	int ret;

	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* The value of the compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now, and their offset
	 * starts after the common header, which has the length of a DW (u32).
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num, NULL);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		goto error;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	prf->ids[0] = ids[0];
	prf->ids[1] = ids[1];
	return 0;
error:
	mlx5_devx_cmd_destroy(prf->obj);
	return (rte_errno == 0) ? -ENODEV : -rte_errno;
}

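/*
 * Illustrative background (summarized from the public eCPRI specification,
 * not restated from these sources): the 4-byte eCPRI common header carries
 * version/flags, a one-byte message type and a two-byte payload size, which
 * is why sample #0 above reads the header at offset 0 while sample #1 reads
 * the message body one DW (4 bytes) later.
 */
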
/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

105600e57916SRongwei Liu /*
105700e57916SRongwei Liu  * Allocation of a flex parser for srh. Once refcnt is zero, the resources held
105800e57916SRongwei Liu  * by this parser will be freed.
105900e57916SRongwei Liu  * @param dev
106000e57916SRongwei Liu  *   Pointer to Ethernet device structure.
106100e57916SRongwei Liu  *
106200e57916SRongwei Liu  * @return
106300e57916SRongwei Liu  *   0 on success, a negative errno value otherwise and rte_errno is set.
106400e57916SRongwei Liu  */
106500e57916SRongwei Liu int
106600e57916SRongwei Liu mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev)
106700e57916SRongwei Liu {
106800e57916SRongwei Liu 	struct mlx5_devx_graph_node_attr node = {
106900e57916SRongwei Liu 		.modify_field_select = 0,
107000e57916SRongwei Liu 	};
1071813a1db2SRongwei Liu 	uint32_t i;
1072bc0a9303SRongwei Liu 	uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
107300e57916SRongwei Liu 	struct mlx5_priv *priv = dev->data->dev_private;
107400e57916SRongwei Liu 	struct mlx5_common_dev_config *config = &priv->sh->cdev->config;
107544b5d879SRongwei Liu 	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
1076bc0a9303SRongwei Liu 	void *fp = NULL, *ibv_ctx = priv->sh->cdev->ctx;
107700e57916SRongwei Liu 	int ret;
107800e57916SRongwei Liu 
107900e57916SRongwei Liu 	memset(ids, 0xff, sizeof(ids));
1080bc0a9303SRongwei Liu 	if (!config->hca_attr.parse_graph_flex_node ||
1081bc0a9303SRongwei Liu 	    !config->hca_attr.flex.query_match_sample_info) {
1082bc0a9303SRongwei Liu 		DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
108300e57916SRongwei Liu 		return -ENOTSUP;
108400e57916SRongwei Liu 	}
1085e12a0166STyler Retzlaff 	if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
1086e12a0166STyler Retzlaff 			rte_memory_order_relaxed) + 1 > 1)
108700e57916SRongwei Liu 		return 0;
1088bc0a9303SRongwei Liu 	priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
1089bc0a9303SRongwei Liu 			sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
1090bc0a9303SRongwei Liu 	if (!priv->sh->srh_flex_parser.flex.devx_fp)
1091bc0a9303SRongwei Liu 		return -ENOMEM;
109200e57916SRongwei Liu 	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
109300e57916SRongwei Liu 	/* Srv6 first two DW are not counted in. */
109400e57916SRongwei Liu 	node.header_length_base_value = 0x8;
109500e57916SRongwei Liu 	/* The unit is uint64_t. */
109600e57916SRongwei Liu 	node.header_length_field_shift = 0x3;
109700e57916SRongwei Liu 	/* Header length is the 2nd byte. */
109800e57916SRongwei Liu 	node.header_length_field_offset = 0x8;
109944b5d879SRongwei Liu 	if (attr->header_length_mask_width < 8)
110044b5d879SRongwei Liu 		node.header_length_field_offset += 8 - attr->header_length_mask_width;
110100e57916SRongwei Liu 	node.header_length_field_mask = 0xF;
110200e57916SRongwei Liu 	/* One byte next header protocol. */
110300e57916SRongwei Liu 	node.next_header_field_size = 0x8;
110400e57916SRongwei Liu 	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IP;
110500e57916SRongwei Liu 	node.in[0].compare_condition_value = IPPROTO_ROUTING;
1106813a1db2SRongwei Liu 	/* Final IPv6 address. */
1107813a1db2SRongwei Liu 	for (i = 0; i < MLX5_SRV6_SAMPLE_NUM && i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
1108813a1db2SRongwei Liu 		node.sample[i].flow_match_sample_en = 1;
1109813a1db2SRongwei Liu 		node.sample[i].flow_match_sample_offset_mode =
1110813a1db2SRongwei Liu 					MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
111100e57916SRongwei Liu 		/* First come, first served, no matter inner or outer. */
1112813a1db2SRongwei Liu 		node.sample[i].flow_match_sample_tunnel_mode =
1113813a1db2SRongwei Liu 					MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
1114813a1db2SRongwei Liu 		node.sample[i].flow_match_sample_field_base_offset =
1115813a1db2SRongwei Liu 					(i + 1) * sizeof(uint32_t); /* in bytes */
1116813a1db2SRongwei Liu 	}
1117813a1db2SRongwei Liu 	node.sample[0].flow_match_sample_field_base_offset = 0;
111800e57916SRongwei Liu 	node.out[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_TCP;
111900e57916SRongwei Liu 	node.out[0].compare_condition_value = IPPROTO_TCP;
112000e57916SRongwei Liu 	node.out[1].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_UDP;
112100e57916SRongwei Liu 	node.out[1].compare_condition_value = IPPROTO_UDP;
112200e57916SRongwei Liu 	node.out[2].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IPV6;
112300e57916SRongwei Liu 	node.out[2].compare_condition_value = IPPROTO_IPV6;
1124bc0a9303SRongwei Liu 	fp = mlx5_devx_cmd_create_flex_parser(ibv_ctx, &node);
1125bc0a9303SRongwei Liu 	if (!fp) {
112600e57916SRongwei Liu 		DRV_LOG(ERR, "Failed to create flex parser node object.");
1127bc0a9303SRongwei Liu 		goto error;
112800e57916SRongwei Liu 	}
1129bc0a9303SRongwei Liu 	priv->sh->srh_flex_parser.flex.devx_fp->devx_obj = fp;
1130813a1db2SRongwei Liu 	priv->sh->srh_flex_parser.flex.mapnum = MLX5_SRV6_SAMPLE_NUM;
1131813a1db2SRongwei Liu 	priv->sh->srh_flex_parser.flex.devx_fp->num_samples = MLX5_SRV6_SAMPLE_NUM;
1132bc0a9303SRongwei Liu 
1133bc0a9303SRongwei Liu 	ret = mlx5_devx_cmd_query_parse_samples(fp, ids, priv->sh->srh_flex_parser.flex.mapnum,
1134bc0a9303SRongwei Liu 						&priv->sh->srh_flex_parser.flex.devx_fp->anchor_id);
113500e57916SRongwei Liu 	if (ret) {
113600e57916SRongwei Liu 		DRV_LOG(ERR, "Failed to query sample IDs.");
1137bc0a9303SRongwei Liu 		goto error;
113800e57916SRongwei Liu 	}
1139813a1db2SRongwei Liu 	for (i = 0; i < MLX5_SRV6_SAMPLE_NUM && i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
1140813a1db2SRongwei Liu 		ret = mlx5_devx_cmd_match_sample_info_query(ibv_ctx, ids[i],
1141813a1db2SRongwei Liu 					&priv->sh->srh_flex_parser.flex.devx_fp->sample_info[i]);
1142bc0a9303SRongwei Liu 		if (ret) {
1143813a1db2SRongwei Liu 			DRV_LOG(ERR, "Failed to query sample id %u information.", ids[i]);
1144bc0a9303SRongwei Liu 			goto error;
1145bc0a9303SRongwei Liu 		}
1146813a1db2SRongwei Liu 	}
1147813a1db2SRongwei Liu 	for (i = 0; i < MLX5_SRV6_SAMPLE_NUM && i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
1148813a1db2SRongwei Liu 		priv->sh->srh_flex_parser.flex.devx_fp->sample_ids[i] = ids[i];
1149813a1db2SRongwei Liu 		priv->sh->srh_flex_parser.flex.map[i].width = sizeof(uint32_t) * CHAR_BIT;
1150813a1db2SRongwei Liu 		priv->sh->srh_flex_parser.flex.map[i].reg_id = i;
1151813a1db2SRongwei Liu 		priv->sh->srh_flex_parser.flex.map[i].shift =
1152813a1db2SRongwei Liu 						(i + 1) * sizeof(uint32_t) * CHAR_BIT;
1153813a1db2SRongwei Liu 	}
1154813a1db2SRongwei Liu 	priv->sh->srh_flex_parser.flex.map[0].shift = 0;
115500e57916SRongwei Liu 	return 0;
1156bc0a9303SRongwei Liu error:
1157bc0a9303SRongwei Liu 	if (fp)
1158bc0a9303SRongwei Liu 		mlx5_devx_cmd_destroy(fp);
1159bc0a9303SRongwei Liu 	if (priv->sh->srh_flex_parser.flex.devx_fp) {
1160bc0a9303SRongwei Liu 		mlx5_free(priv->sh->srh_flex_parser.flex.devx_fp);
1160bc0a9303SRongwei Liu 		priv->sh->srh_flex_parser.flex.devx_fp = NULL;
1160bc0a9303SRongwei Liu 	}
1161bc0a9303SRongwei Liu 	/* Roll back the reference taken above so a later retry can allocate. */
1161bc0a9303SRongwei Liu 	rte_atomic_fetch_sub_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
1161bc0a9303SRongwei Liu 			rte_memory_order_relaxed);
1161bc0a9303SRongwei Liu 	return (rte_errno == 0) ? -ENODEV : -rte_errno;
116200e57916SRongwei Liu }
116300e57916SRongwei Liu 
116400e57916SRongwei Liu /*
116500e57916SRongwei Liu  * Destroy the flex parser node, including the parser itself, input / output
116600e57916SRongwei Liu  * arcs and DW samples. The resources can then be reused.
116700e57916SRongwei Liu  *
116800e57916SRongwei Liu  * @param dev
116900e57916SRongwei Liu  *   Pointer to Ethernet device structure.
117000e57916SRongwei Liu  */
117100e57916SRongwei Liu void
117200e57916SRongwei Liu mlx5_free_srh_flex_parser(struct rte_eth_dev *dev)
117300e57916SRongwei Liu {
117400e57916SRongwei Liu 	struct mlx5_priv *priv = dev->data->dev_private;
117500e57916SRongwei Liu 	struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
117600e57916SRongwei Liu 
1177e12a0166STyler Retzlaff 	if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
117800e57916SRongwei Liu 		return;
1179bc0a9303SRongwei Liu 	mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
1180bc0a9303SRongwei Liu 	mlx5_free(fp->flex.devx_fp);
1181bc0a9303SRongwei Liu 	fp->flex.devx_fp = NULL;
118200e57916SRongwei Liu }
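
/*
 * A minimal usage sketch (hypothetical caller); the parser is reference
 * counted, so allocation and release must be balanced per device:
 *
 *	if (mlx5_alloc_srh_flex_parser(dev) < 0)
 *		return -rte_errno;
 *	... match on SRH via priv->sh->srh_flex_parser.flex ...
 *	mlx5_free_srh_flex_parser(dev);
 *
 * Only the first allocation creates the DevX parser object and only the
 * last release destroys it.
 */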
118300e57916SRongwei Liu 
1184d47fe9daSTal Shnaiderman uint32_t
1185d47fe9daSTal Shnaiderman mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
1186d47fe9daSTal Shnaiderman {
1187d47fe9daSTal Shnaiderman 	uint32_t sw_parsing_offloads = 0;
1188d47fe9daSTal Shnaiderman 
1189d47fe9daSTal Shnaiderman 	if (attr->swp) {
1190d47fe9daSTal Shnaiderman 		sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
1191d47fe9daSTal Shnaiderman 		if (attr->swp_csum)
1192d47fe9daSTal Shnaiderman 			sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;
1193d47fe9daSTal Shnaiderman 
1194d47fe9daSTal Shnaiderman 		if (attr->swp_lso)
1195d47fe9daSTal Shnaiderman 			sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
1196d47fe9daSTal Shnaiderman 	}
1197d47fe9daSTal Shnaiderman 	return sw_parsing_offloads;
1198d47fe9daSTal Shnaiderman }
1199d47fe9daSTal Shnaiderman 
12006a86ee2eSTal Shnaiderman uint32_t
12016a86ee2eSTal Shnaiderman mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
12026a86ee2eSTal Shnaiderman {
12036a86ee2eSTal Shnaiderman 	uint32_t tn_offloads = 0;
12046a86ee2eSTal Shnaiderman 
12056a86ee2eSTal Shnaiderman 	if (attr->tunnel_stateless_vxlan)
12066a86ee2eSTal Shnaiderman 		tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
12076a86ee2eSTal Shnaiderman 	if (attr->tunnel_stateless_gre)
12086a86ee2eSTal Shnaiderman 		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
12096a86ee2eSTal Shnaiderman 	if (attr->tunnel_stateless_geneve_rx)
12106a86ee2eSTal Shnaiderman 		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
12116a86ee2eSTal Shnaiderman 	return tn_offloads;
12126a86ee2eSTal Shnaiderman }
12136a86ee2eSTal Shnaiderman 
12145dfa003dSMichael Baum /* Fill all fields of UAR structure. */
1215a0bfe9d5SViacheslav Ovsiienko static int
12165dfa003dSMichael Baum mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
1217a0bfe9d5SViacheslav Ovsiienko {
12185dfa003dSMichael Baum 	int ret;
1219a0bfe9d5SViacheslav Ovsiienko 
12205dfa003dSMichael Baum 	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
12215dfa003dSMichael Baum 	if (ret) {
12225dfa003dSMichael Baum 		DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
12235dfa003dSMichael Baum 		return -rte_errno;
1224a0bfe9d5SViacheslav Ovsiienko 	}
12255dfa003dSMichael Baum 	MLX5_ASSERT(sh->tx_uar.obj);
12265dfa003dSMichael Baum 	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
12275dfa003dSMichael Baum 	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
12285dfa003dSMichael Baum 	if (ret) {
12295dfa003dSMichael Baum 		DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
12305dfa003dSMichael Baum 		mlx5_devx_uar_release(&sh->tx_uar);
12315dfa003dSMichael Baum 		return -rte_errno;
1232a0bfe9d5SViacheslav Ovsiienko 	}
12335dfa003dSMichael Baum 	MLX5_ASSERT(sh->rx_uar.obj);
12345dfa003dSMichael Baum 	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
12355dfa003dSMichael Baum 	return 0;
1236a0bfe9d5SViacheslav Ovsiienko }
12375dfa003dSMichael Baum 
12385dfa003dSMichael Baum static void
12395dfa003dSMichael Baum mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
12405dfa003dSMichael Baum {
12415dfa003dSMichael Baum 	mlx5_devx_uar_release(&sh->rx_uar);
12425dfa003dSMichael Baum 	mlx5_devx_uar_release(&sh->tx_uar);
1243a0bfe9d5SViacheslav Ovsiienko }
1244a0bfe9d5SViacheslav Ovsiienko 
1245014d1cbeSSuanming Mou /**
1246fc59a1ecSMichael Baum  * rte_mempool_walk() callback to unregister Rx mempools.
1247fc59a1ecSMichael Baum  * It is used when implicit mempool registration is disabled.
1248fec28ca0SDmitry Kozlyuk  *
1249fec28ca0SDmitry Kozlyuk  * @param mp
1250fec28ca0SDmitry Kozlyuk  *   The mempool being walked.
1251fec28ca0SDmitry Kozlyuk  * @param arg
1252fec28ca0SDmitry Kozlyuk  *   Pointer to the device shared context.
1253fec28ca0SDmitry Kozlyuk  */
1254fec28ca0SDmitry Kozlyuk static void
1255fc59a1ecSMichael Baum mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
1256fec28ca0SDmitry Kozlyuk {
1257fec28ca0SDmitry Kozlyuk 	struct mlx5_dev_ctx_shared *sh = arg;
1258fec28ca0SDmitry Kozlyuk 
1259fc59a1ecSMichael Baum 	mlx5_dev_mempool_unregister(sh->cdev, mp);
1260fec28ca0SDmitry Kozlyuk }
1261fec28ca0SDmitry Kozlyuk 
1262fec28ca0SDmitry Kozlyuk /**
1263fec28ca0SDmitry Kozlyuk  * Callback used when implicit mempool registration is disabled
1264fec28ca0SDmitry Kozlyuk  * in order to track Rx mempool destruction.
1265fec28ca0SDmitry Kozlyuk  *
1266fec28ca0SDmitry Kozlyuk  * @param event
1267fec28ca0SDmitry Kozlyuk  *   Mempool life cycle event.
1268fec28ca0SDmitry Kozlyuk  * @param mp
1269fec28ca0SDmitry Kozlyuk  *   An Rx mempool registered explicitly when the port is started.
1270fec28ca0SDmitry Kozlyuk  * @param arg
1271fec28ca0SDmitry Kozlyuk  *   Pointer to a device shared context.
1272fec28ca0SDmitry Kozlyuk  */
1273fec28ca0SDmitry Kozlyuk static void
1274fec28ca0SDmitry Kozlyuk mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
1275fec28ca0SDmitry Kozlyuk 					struct rte_mempool *mp, void *arg)
1276fec28ca0SDmitry Kozlyuk {
1277fec28ca0SDmitry Kozlyuk 	struct mlx5_dev_ctx_shared *sh = arg;
1278fec28ca0SDmitry Kozlyuk 
1279fec28ca0SDmitry Kozlyuk 	if (event == RTE_MEMPOOL_EVENT_DESTROY)
1280fc59a1ecSMichael Baum 		mlx5_dev_mempool_unregister(sh->cdev, mp);
1281fec28ca0SDmitry Kozlyuk }
1282fec28ca0SDmitry Kozlyuk 
1283fec28ca0SDmitry Kozlyuk int
1284fec28ca0SDmitry Kozlyuk mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
1285fec28ca0SDmitry Kozlyuk {
1286fec28ca0SDmitry Kozlyuk 	struct mlx5_priv *priv = dev->data->dev_private;
1287fec28ca0SDmitry Kozlyuk 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1288fec28ca0SDmitry Kozlyuk 	int ret;
1289fec28ca0SDmitry Kozlyuk 
1290fec28ca0SDmitry Kozlyuk 	/* Check if we only need to track Rx mempool destruction. */
129185209924SMichael Baum 	if (!sh->cdev->config.mr_mempool_reg_en) {
1292fec28ca0SDmitry Kozlyuk 		ret = rte_mempool_event_callback_register
1293fec28ca0SDmitry Kozlyuk 				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
1294fec28ca0SDmitry Kozlyuk 		return ret == 0 || rte_errno == EEXIST ? 0 : ret;
1295fec28ca0SDmitry Kozlyuk 	}
1296fc59a1ecSMichael Baum 	return mlx5_dev_mempool_subscribe(sh->cdev);
1297fec28ca0SDmitry Kozlyuk }
1298fec28ca0SDmitry Kozlyuk 
1299fec28ca0SDmitry Kozlyuk /**
1300a89f6433SRongwei Liu  * Set up multiple TISs with different affinities according to
1301a89f6433SRongwei Liu  * the number of bonding ports.
1302a89f6433SRongwei Liu  *
1303a89f6433SRongwei Liu  * @param sh
1304a89f6433SRongwei Liu  *   Pointer to the shared device context.
1305a89f6433SRongwei Liu  *
1306a89f6433SRongwei Liu  * @return
1307a89f6433SRongwei Liu  *   Zero on success, -1 otherwise.
1308a89f6433SRongwei Liu  */
1309a89f6433SRongwei Liu static int
1310a89f6433SRongwei Liu mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
1311a89f6433SRongwei Liu {
1312a89f6433SRongwei Liu 	struct mlx5_devx_lag_context lag_ctx = { 0 };
1313a89f6433SRongwei Liu 	struct mlx5_devx_tis_attr tis_attr = { 0 };
1314ce306af6SJiawei Wang 	int i;
1315a89f6433SRongwei Liu 
1316a89f6433SRongwei Liu 	tis_attr.transport_domain = sh->td->id;
1317a89f6433SRongwei Liu 	if (sh->bond.n_port) {
1318a89f6433SRongwei Liu 		if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
1319a89f6433SRongwei Liu 			sh->lag.tx_remap_affinity[0] =
1320a89f6433SRongwei Liu 				lag_ctx.tx_remap_affinity_1;
1321a89f6433SRongwei Liu 			sh->lag.tx_remap_affinity[1] =
1322a89f6433SRongwei Liu 				lag_ctx.tx_remap_affinity_2;
1323a89f6433SRongwei Liu 			sh->lag.affinity_mode = lag_ctx.port_select_mode;
1324a89f6433SRongwei Liu 		} else {
1325a89f6433SRongwei Liu 			DRV_LOG(ERR, "Failed to query lag affinity.");
1326a89f6433SRongwei Liu 			return -1;
1327a89f6433SRongwei Liu 		}
1328ce306af6SJiawei Wang 		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS)
1329ce306af6SJiawei Wang 			DRV_LOG(DEBUG, "LAG number of ports: %d, affinity_1 & 2: pf%d & %d.",
1330ce306af6SJiawei Wang 				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
1331ce306af6SJiawei Wang 				lag_ctx.tx_remap_affinity_2);
1332ce306af6SJiawei Wang 		else if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
1333ce306af6SJiawei Wang 			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
1334ce306af6SJiawei Wang 					sh->ibdev_name);
1335ce306af6SJiawei Wang 	}
1336ce306af6SJiawei Wang 	for (i = 0; i <= sh->bond.n_port; i++) {
1337ce306af6SJiawei Wang 		/*
1338ce306af6SJiawei Wang 		 * lag_tx_port_affinity: 0 means auto-selection, 1 selects PF1,
1338ce306af6SJiawei Wang 		 * 2 selects PF2, and so on.
1339ce306af6SJiawei Wang 		 * Each TIS binds to one PF by setting lag_tx_port_affinity (> 0).
1340ce306af6SJiawei Wang 		 * Once LAG is enabled, we create multiple TISs and bind each one
1341ce306af6SJiawei Wang 		 * to a different PF, so TIS[i+1] gets affinity i+1 and goes to
1342ce306af6SJiawei Wang 		 * PF i+1. TIS[0] is reserved for HW hash mode.
1343ce306af6SJiawei Wang 		 */
1344ce306af6SJiawei Wang 		tis_attr.lag_tx_port_affinity = i;
1345ce306af6SJiawei Wang 		sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
1346a89f6433SRongwei Liu 		if (!sh->tis[i]) {
1347ce306af6SJiawei Wang 			DRV_LOG(ERR, "Failed to create TIS %d/%d for [bonding] device"
1348a89f6433SRongwei Liu 				" %s.", i, sh->bond.n_port,
1349a89f6433SRongwei Liu 				sh->ibdev_name);
1350a89f6433SRongwei Liu 			return -1;
1351a89f6433SRongwei Liu 		}
1352a89f6433SRongwei Liu 	}
1353a89f6433SRongwei Liu 	return 0;
1354a89f6433SRongwei Liu }
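
/*
 * Example: for a two-port bond (sh->bond.n_port == 2) the loop above creates
 * three TISs: TIS[0] with affinity 0 (auto-selection, reserved for HW hash
 * mode), TIS[1] bound to PF1 and TIS[2] bound to PF2.
 */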
1355a89f6433SRongwei Liu 
1356a89f6433SRongwei Liu /**
1357a13ec19cSMichael Baum  * Verify and store value for a shared device argument.
1358a13ec19cSMichael Baum  *
1359a13ec19cSMichael Baum  * @param[in] key
1360a13ec19cSMichael Baum  *   Key argument to verify.
1361a13ec19cSMichael Baum  * @param[in] val
1362a13ec19cSMichael Baum  *   Value associated with key.
1363a13ec19cSMichael Baum  * @param opaque
1364a13ec19cSMichael Baum  *   User data.
1365a13ec19cSMichael Baum  *
1366a13ec19cSMichael Baum  * @return
1367a13ec19cSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
1368a13ec19cSMichael Baum  */
1369a13ec19cSMichael Baum static int
1370a13ec19cSMichael Baum mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
1371a13ec19cSMichael Baum {
1372a13ec19cSMichael Baum 	struct mlx5_sh_config *config = opaque;
1373a13ec19cSMichael Baum 	signed long tmp;
1374a13ec19cSMichael Baum 
1375a13ec19cSMichael Baum 	errno = 0;
1376a13ec19cSMichael Baum 	tmp = strtol(val, NULL, 0);
1377a13ec19cSMichael Baum 	if (errno) {
1378a13ec19cSMichael Baum 		rte_errno = errno;
1379a13ec19cSMichael Baum 		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
1380a13ec19cSMichael Baum 		return -rte_errno;
1381a13ec19cSMichael Baum 	}
1382a13ec19cSMichael Baum 	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
1383a13ec19cSMichael Baum 		/* Negative values are acceptable for some keys only. */
1384a13ec19cSMichael Baum 		rte_errno = EINVAL;
1385a13ec19cSMichael Baum 		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
1386a13ec19cSMichael Baum 		return -rte_errno;
1387a13ec19cSMichael Baum 	}
1388a13ec19cSMichael Baum 	if (strcmp(MLX5_TX_PP, key) == 0) {
1389a13ec19cSMichael Baum 		unsigned long mod = tmp >= 0 ? tmp : -tmp;
1390a13ec19cSMichael Baum 
1391a13ec19cSMichael Baum 		if (!mod) {
1392a13ec19cSMichael Baum 			DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
1393a13ec19cSMichael Baum 			rte_errno = EINVAL;
1394a13ec19cSMichael Baum 			return -rte_errno;
1395a13ec19cSMichael Baum 		}
1396a13ec19cSMichael Baum 		config->tx_pp = tmp;
1397a13ec19cSMichael Baum 	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
1398a13ec19cSMichael Baum 		config->tx_skew = tmp;
1399a13ec19cSMichael Baum 	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
1400a13ec19cSMichael Baum 		config->l3_vxlan_en = !!tmp;
1401a13ec19cSMichael Baum 	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
1402a13ec19cSMichael Baum 		config->vf_nl_en = !!tmp;
1403a13ec19cSMichael Baum 	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
1404a13ec19cSMichael Baum 		config->dv_esw_en = !!tmp;
1405a13ec19cSMichael Baum 	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
1406d84c3cf7SSuanming Mou 		if (tmp > 2) {
1407d84c3cf7SSuanming Mou 			DRV_LOG(ERR, "Invalid %s parameter.", key);
1408d84c3cf7SSuanming Mou 			rte_errno = EINVAL;
1409d84c3cf7SSuanming Mou 			return -rte_errno;
1410d84c3cf7SSuanming Mou 		}
1411d84c3cf7SSuanming Mou 		config->dv_flow_en = tmp;
1412a13ec19cSMichael Baum 	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
1413a13ec19cSMichael Baum 		if (tmp != MLX5_XMETA_MODE_LEGACY &&
1414a13ec19cSMichael Baum 		    tmp != MLX5_XMETA_MODE_META16 &&
1415a13ec19cSMichael Baum 		    tmp != MLX5_XMETA_MODE_META32 &&
1416ddb68e47SBing Zhao 		    tmp != MLX5_XMETA_MODE_MISS_INFO &&
1417ddb68e47SBing Zhao 		    tmp != MLX5_XMETA_MODE_META32_HWS) {
1418a13ec19cSMichael Baum 			DRV_LOG(ERR, "Invalid extensive metadata parameter.");
1419a13ec19cSMichael Baum 			rte_errno = EINVAL;
1420a13ec19cSMichael Baum 			return -rte_errno;
1421a13ec19cSMichael Baum 		}
1422a13ec19cSMichael Baum 		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
1423a13ec19cSMichael Baum 			config->dv_xmeta_en = tmp;
1424a13ec19cSMichael Baum 		else
1425a13ec19cSMichael Baum 			config->dv_miss_info = 1;
1426a13ec19cSMichael Baum 	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
1427a13ec19cSMichael Baum 		config->lacp_by_user = !!tmp;
1428a13ec19cSMichael Baum 	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
1429a13ec19cSMichael Baum 		if (tmp != MLX5_RCM_NONE &&
1430a13ec19cSMichael Baum 		    tmp != MLX5_RCM_LIGHT &&
1431a13ec19cSMichael Baum 		    tmp != MLX5_RCM_AGGR) {
1432a13ec19cSMichael Baum 			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
1433a13ec19cSMichael Baum 			rte_errno = EINVAL;
1434a13ec19cSMichael Baum 			return -rte_errno;
1435a13ec19cSMichael Baum 		}
1436a13ec19cSMichael Baum 		config->reclaim_mode = tmp;
1437a13ec19cSMichael Baum 	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
1438a13ec19cSMichael Baum 		config->decap_en = !!tmp;
1439a13ec19cSMichael Baum 	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
1440a13ec19cSMichael Baum 		config->allow_duplicate_pattern = !!tmp;
14411939eb6fSDariusz Sosnowski 	} else if (strcmp(MLX5_FDB_DEFAULT_RULE_EN, key) == 0) {
14421939eb6fSDariusz Sosnowski 		config->fdb_def_rule = !!tmp;
14434d368e1dSXiaoyu Min 	} else if (strcmp(MLX5_HWS_CNT_SERVICE_CORE, key) == 0) {
14444d368e1dSXiaoyu Min 		config->cnt_svc.service_core = tmp;
14454d368e1dSXiaoyu Min 	} else if (strcmp(MLX5_HWS_CNT_CYCLE_TIME, key) == 0) {
14464d368e1dSXiaoyu Min 		config->cnt_svc.cycle_time = tmp;
1447483181f7SDariusz Sosnowski 	} else if (strcmp(MLX5_REPR_MATCHING_EN, key) == 0) {
1448483181f7SDariusz Sosnowski 		config->repr_matching = !!tmp;
1449a13ec19cSMichael Baum 	}
1450a13ec19cSMichael Baum 	return 0;
1451a13ec19cSMichael Baum }
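
/*
 * Example devargs string handled above (the PCI address is illustrative):
 *
 *	-a 0000:08:00.0,dv_flow_en=2,dv_xmeta_en=1,tx_pp=500
 *
 * Each value is parsed with strtol(); negative values are accepted only for
 * "tx_pp" and "tx_skew".
 */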
1452a13ec19cSMichael Baum 
1453a13ec19cSMichael Baum /**
1454a13ec19cSMichael Baum  * Parse user device parameters and adjust them according to device
1455a13ec19cSMichael Baum  * capabilities.
1456a13ec19cSMichael Baum  *
1457a13ec19cSMichael Baum  * @param sh
1458a13ec19cSMichael Baum  *   Pointer to shared device context.
1459a729d2f0SMichael Baum  * @param mkvlist
1460a729d2f0SMichael Baum  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
1461a13ec19cSMichael Baum  * @param config
1462a13ec19cSMichael Baum  *   Pointer to shared device configuration structure.
1463a13ec19cSMichael Baum  *
1464a13ec19cSMichael Baum  * @return
1465a13ec19cSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
1466a13ec19cSMichael Baum  */
1467a13ec19cSMichael Baum static int
1468a13ec19cSMichael Baum mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
1469a729d2f0SMichael Baum 				struct mlx5_kvargs_ctrl *mkvlist,
1470a13ec19cSMichael Baum 				struct mlx5_sh_config *config)
1471a13ec19cSMichael Baum {
1472a729d2f0SMichael Baum 	const char **params = (const char *[]){
1473a729d2f0SMichael Baum 		MLX5_TX_PP,
1474a729d2f0SMichael Baum 		MLX5_TX_SKEW,
1475a729d2f0SMichael Baum 		MLX5_L3_VXLAN_EN,
1476a729d2f0SMichael Baum 		MLX5_VF_NL_EN,
1477a729d2f0SMichael Baum 		MLX5_DV_ESW_EN,
1478a729d2f0SMichael Baum 		MLX5_DV_FLOW_EN,
1479a729d2f0SMichael Baum 		MLX5_DV_XMETA_EN,
1480a729d2f0SMichael Baum 		MLX5_LACP_BY_USER,
1481a729d2f0SMichael Baum 		MLX5_RECLAIM_MEM,
1482a729d2f0SMichael Baum 		MLX5_DECAP_EN,
1483a729d2f0SMichael Baum 		MLX5_ALLOW_DUPLICATE_PATTERN,
14841939eb6fSDariusz Sosnowski 		MLX5_FDB_DEFAULT_RULE_EN,
14854d368e1dSXiaoyu Min 		MLX5_HWS_CNT_SERVICE_CORE,
14864d368e1dSXiaoyu Min 		MLX5_HWS_CNT_CYCLE_TIME,
1487483181f7SDariusz Sosnowski 		MLX5_REPR_MATCHING_EN,
1488a729d2f0SMichael Baum 		NULL,
1489a729d2f0SMichael Baum 	};
1490a13ec19cSMichael Baum 	int ret = 0;
1491a13ec19cSMichael Baum 
1492a13ec19cSMichael Baum 	/* Default configuration. */
1493a13ec19cSMichael Baum 	memset(config, 0, sizeof(*config));
1494a13ec19cSMichael Baum 	config->vf_nl_en = 1;
1495a13ec19cSMichael Baum 	config->dv_esw_en = 1;
1496a13ec19cSMichael Baum 	config->dv_flow_en = 1;
1497a13ec19cSMichael Baum 	config->decap_en = 1;
1498a13ec19cSMichael Baum 	config->allow_duplicate_pattern = 1;
14991939eb6fSDariusz Sosnowski 	config->fdb_def_rule = 1;
15004d368e1dSXiaoyu Min 	config->cnt_svc.cycle_time = MLX5_CNT_SVC_CYCLE_TIME_DEFAULT;
15014d368e1dSXiaoyu Min 	config->cnt_svc.service_core = rte_get_main_lcore();
1502483181f7SDariusz Sosnowski 	config->repr_matching = 1;
1503a729d2f0SMichael Baum 	if (mkvlist != NULL) {
1504a13ec19cSMichael Baum 		/* Process parameters. */
1505a729d2f0SMichael Baum 		ret = mlx5_kvargs_process(mkvlist, params,
1506a13ec19cSMichael Baum 					  mlx5_dev_args_check_handler, config);
1507a13ec19cSMichael Baum 		if (ret) {
1508a13ec19cSMichael Baum 			DRV_LOG(ERR, "Failed to process device arguments: %s",
1509a13ec19cSMichael Baum 				strerror(rte_errno));
1510a13ec19cSMichael Baum 			return -rte_errno;
1511a13ec19cSMichael Baum 		}
1512a13ec19cSMichael Baum 	}
1513a13ec19cSMichael Baum 	/* Adjust parameters according to device capabilities. */
1514a13ec19cSMichael Baum 	if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
1515a13ec19cSMichael Baum 		DRV_LOG(WARNING, "DV flow is not supported.");
1516a13ec19cSMichael Baum 		config->dv_flow_en = 0;
1517a13ec19cSMichael Baum 	}
1518a13ec19cSMichael Baum 	if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
1519a13ec19cSMichael Baum 		DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
1520a13ec19cSMichael Baum 		config->dv_esw_en = 0;
1521a13ec19cSMichael Baum 	}
152272d836b3SMichael Baum 	if (config->dv_esw_en && !config->dv_flow_en) {
152372d836b3SMichael Baum 		DRV_LOG(DEBUG,
152472d836b3SMichael Baum 			"E-Switch DV flow is supported only when DV flow is enabled.");
152572d836b3SMichael Baum 		config->dv_esw_en = 0;
152672d836b3SMichael Baum 	}
1527a13ec19cSMichael Baum 	if (config->dv_miss_info && config->dv_esw_en)
1528a13ec19cSMichael Baum 		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
1529a13ec19cSMichael Baum 	if (!config->dv_esw_en &&
1530a13ec19cSMichael Baum 	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1531a13ec19cSMichael Baum 		DRV_LOG(WARNING,
1532a13ec19cSMichael Baum 			"Metadata mode %u is not supported (no E-Switch).",
1533a13ec19cSMichael Baum 			config->dv_xmeta_en);
1534a13ec19cSMichael Baum 		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1535a13ec19cSMichael Baum 	}
1536483181f7SDariusz Sosnowski 	if (config->dv_flow_en != 2 && !config->repr_matching) {
1537483181f7SDariusz Sosnowski 		DRV_LOG(DEBUG, "Disabling representor matching is valid only "
1538483181f7SDariusz Sosnowski 			       "when HW Steering is enabled.");
1539483181f7SDariusz Sosnowski 		config->repr_matching = 1;
1540483181f7SDariusz Sosnowski 	}
1541a13ec19cSMichael Baum 	if (config->tx_pp && !sh->dev_cap.txpp_en) {
1542a13ec19cSMichael Baum 		DRV_LOG(ERR, "Packet pacing is not supported.");
1543a13ec19cSMichael Baum 		rte_errno = ENODEV;
1544a13ec19cSMichael Baum 		return -rte_errno;
1545a13ec19cSMichael Baum 	}
154695cbaaa1SViacheslav Ovsiienko 	if (!config->tx_pp && config->tx_skew &&
154795cbaaa1SViacheslav Ovsiienko 	    !sh->cdev->config.hca_attr.wait_on_time) {
1548a13ec19cSMichael Baum 		DRV_LOG(WARNING,
1549a13ec19cSMichael Baum 			"\"tx_skew\" has no effect without \"tx_pp\".");
1550a13ec19cSMichael Baum 	}
1551593f913aSMichael Baum 	/* Check for LRO support. */
1552593f913aSMichael Baum 	if (mlx5_devx_obj_ops_en(sh) && sh->cdev->config.hca_attr.lro_cap) {
1553593f913aSMichael Baum 		/* TBD check tunnel lro caps. */
1554593f913aSMichael Baum 		config->lro_allowed = 1;
1555593f913aSMichael Baum 		DRV_LOG(DEBUG, "LRO is allowed.");
1556593f913aSMichael Baum 		DRV_LOG(DEBUG,
1557593f913aSMichael Baum 			"LRO minimal size of TCP segment required for coalescing is %d bytes.",
1558593f913aSMichael Baum 			sh->cdev->config.hca_attr.lro_min_mss_size);
1559593f913aSMichael Baum 	}
1560a13ec19cSMichael Baum 	/*
1561a13ec19cSMichael Baum 	 * If HW has a bug working with tunnel packet decapsulation and scatter
1562a13ec19cSMichael Baum 	 * FCS, and decapsulation is needed, clear the hw_fcs_strip bit.
1563a13ec19cSMichael Baum 	 * Then the RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1564a13ec19cSMichael Baum 	 */
1565a13ec19cSMichael Baum 	if (sh->dev_cap.scatter_fcs_w_decap_disable && sh->config.decap_en)
1566a13ec19cSMichael Baum 		config->hw_fcs_strip = 0;
1567a13ec19cSMichael Baum 	else
1568a13ec19cSMichael Baum 		config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
1569a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1570a13ec19cSMichael Baum 		(config->hw_fcs_strip ? "" : "not "));
1571a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
1572a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
1573a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
1574a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
1575a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
1576a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
1577a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
1578a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
1579a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
1580a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
1581a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
1582a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
1583a13ec19cSMichael Baum 		config->allow_duplicate_pattern);
15841939eb6fSDariusz Sosnowski 	DRV_LOG(DEBUG, "\"fdb_def_rule_en\" is %u.", config->fdb_def_rule);
1585483181f7SDariusz Sosnowski 	DRV_LOG(DEBUG, "\"repr_matching_en\" is %u.", config->repr_matching);
1586a13ec19cSMichael Baum 	return 0;
1587a13ec19cSMichael Baum }
1588a13ec19cSMichael Baum 
1589a13ec19cSMichael Baum /**
1590e3032e9cSMichael Baum  * Configure realtime timestamp format.
1591e3032e9cSMichael Baum  *
1592e3032e9cSMichael Baum  * @param sh
1593e3032e9cSMichael Baum  *   Pointer to mlx5_dev_ctx_shared object.
1594e3032e9cSMichael Baum  * @param hca_attr
1595e3032e9cSMichael Baum  *   Pointer to DevX HCA capabilities structure.
1596e3032e9cSMichael Baum  */
1597e3032e9cSMichael Baum void
1598e3032e9cSMichael Baum mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
1599e3032e9cSMichael Baum 			 struct mlx5_hca_attr *hca_attr)
1600e3032e9cSMichael Baum {
1601e3032e9cSMichael Baum 	uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
1602e3032e9cSMichael Baum 	uint32_t reg[dw_cnt];
1603e3032e9cSMichael Baum 	int ret = ENOTSUP;
1604e3032e9cSMichael Baum 
1605e3032e9cSMichael Baum 	if (hca_attr->access_register_user)
1606e3032e9cSMichael Baum 		ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
1607e3032e9cSMichael Baum 						  MLX5_REGISTER_ID_MTUTC, 0,
1608e3032e9cSMichael Baum 						  reg, dw_cnt);
1609e3032e9cSMichael Baum 	if (!ret) {
1610e3032e9cSMichael Baum 		uint32_t ts_mode;
1611e3032e9cSMichael Baum 
1612e3032e9cSMichael Baum 		/* MTUTC register is read successfully. */
1613e3032e9cSMichael Baum 		ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
1614e3032e9cSMichael Baum 		if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
161587af0d1eSMichael Baum 			sh->dev_cap.rt_timestamp = 1;
1616e3032e9cSMichael Baum 	} else {
1617e3032e9cSMichael Baum 		/* Kernel does not support register reading. */
1618e3032e9cSMichael Baum 		if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
161987af0d1eSMichael Baum 			sh->dev_cap.rt_timestamp = 1;
1620e3032e9cSMichael Baum 	}
1621e3032e9cSMichael Baum }
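
/*
 * Note on the fallback above: NS_PER_S / MS_PER_S = 1000000000 / 1000 =
 * 1000000 kHz, so a device reporting exactly a 1 GHz free-running clock is
 * assumed to use the real-time timestamp format when the MTUTC register
 * cannot be read.
 */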
1622e3032e9cSMichael Baum 
162348041ccbSGregory Etelson static void
162448041ccbSGregory Etelson mlx5_init_hws_flow_tags_registers(struct mlx5_dev_ctx_shared *sh)
162548041ccbSGregory Etelson {
162648041ccbSGregory Etelson 	struct mlx5_dev_registers *reg = &sh->registers;
162748041ccbSGregory Etelson 	uint32_t meta_mode = sh->config.dv_xmeta_en;
1628414a0cb5SOri Kam 	uint16_t masks = (uint16_t)sh->cdev->config.hca_attr.set_reg_c;
1629414a0cb5SOri Kam 	uint16_t unset = 0;
163048041ccbSGregory Etelson 	uint32_t i, j;
163148041ccbSGregory Etelson 
163248041ccbSGregory Etelson 	/*
163348041ccbSGregory Etelson 	 * The capability is global for the common device but only used in net.
163448041ccbSGregory Etelson 	 * It is shared per E-Switch domain.
163548041ccbSGregory Etelson 	 */
163648041ccbSGregory Etelson 	if (reg->aso_reg != REG_NON)
163748041ccbSGregory Etelson 		unset |= 1 << mlx5_regc_index(reg->aso_reg);
163848041ccbSGregory Etelson 	unset |= 1 << mlx5_regc_index(REG_C_6);
163948041ccbSGregory Etelson 	if (sh->config.dv_esw_en)
164048041ccbSGregory Etelson 		unset |= 1 << mlx5_regc_index(REG_C_0);
164148041ccbSGregory Etelson 	if (meta_mode == MLX5_XMETA_MODE_META32_HWS)
164248041ccbSGregory Etelson 		unset |= 1 << mlx5_regc_index(REG_C_1);
164348041ccbSGregory Etelson 	masks &= ~unset;
164448041ccbSGregory Etelson 	for (i = 0, j = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) {
164548041ccbSGregory Etelson 		if (!!((1 << i) & masks))
164648041ccbSGregory Etelson 			reg->hw_avl_tags[j++] = mlx5_regc_value(i);
164748041ccbSGregory Etelson 	}
16487a26bfecSBing Zhao 	/*
16497a26bfecSBing Zhao 	 * Set the registers for NAT64 usage internally. REG_C_6 is always used.
16507a26bfecSBing Zhao 	 * The other 2 registers are fetched from right to left; at least 2
16517a26bfecSBing Zhao 	 * tag registers must be available.
16527a26bfecSBing Zhao 	 */
16537a26bfecSBing Zhao 	MLX5_ASSERT(j >= (MLX5_FLOW_NAT64_REGS_MAX - 1));
16547a26bfecSBing Zhao 	reg->nat64_regs[0] = REG_C_6;
16557a26bfecSBing Zhao 	reg->nat64_regs[1] = reg->hw_avl_tags[j - 2];
16567a26bfecSBing Zhao 	reg->nat64_regs[2] = reg->hw_avl_tags[j - 1];
165748041ccbSGregory Etelson }
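
/*
 * Worked example, assuming mlx5_regc_index(REG_C_n) == n: with
 * set_reg_c == 0xff, aso_reg == REG_C_3, dv_esw_en == 1 and dv_xmeta_en ==
 * MLX5_XMETA_MODE_META32_HWS, the unset mask covers C_0, C_1, C_3 and C_6,
 * leaving hw_avl_tags = {REG_C_2, REG_C_4, REG_C_5, REG_C_7} and
 * nat64_regs = {REG_C_6, REG_C_5, REG_C_7}.
 */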
165848041ccbSGregory Etelson 
165948041ccbSGregory Etelson static void
166048041ccbSGregory Etelson mlx5_init_aso_register(struct mlx5_dev_ctx_shared *sh)
166148041ccbSGregory Etelson {
166248041ccbSGregory Etelson #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT)
166348041ccbSGregory Etelson 	const struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
166448041ccbSGregory Etelson 	const struct mlx5_hca_qos_attr *qos =  &hca_attr->qos;
166548041ccbSGregory Etelson 	uint8_t reg_c_mask = qos->flow_meter_reg_c_ids & 0xfc;
166648041ccbSGregory Etelson 
166748041ccbSGregory Etelson 	if (!(qos->sup && qos->flow_meter_old && sh->config.dv_flow_en))
166848041ccbSGregory Etelson 		return;
166948041ccbSGregory Etelson 	/*
167048041ccbSGregory Etelson 	 * Meter needs two REG_C's for color match and pre-sfx
167148041ccbSGregory Etelson 	 * flow match. Here get the REG_C for color match.
167248041ccbSGregory Etelson 	 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
167348041ccbSGregory Etelson 	 */
167448041ccbSGregory Etelson 	if (rte_popcount32(reg_c_mask) > 0) {
167548041ccbSGregory Etelson 		/*
167648041ccbSGregory Etelson 		 * The meter color register is used by the
167748041ccbSGregory Etelson 		 * flow-hit feature as well.
167848041ccbSGregory Etelson 		 * The flow-hit feature must use REG_C_3,
167948041ccbSGregory Etelson 		 * so prefer REG_C_3 if it is available.
168048041ccbSGregory Etelson 		 */
168148041ccbSGregory Etelson 		if (reg_c_mask & (1 << mlx5_regc_index(REG_C_3)))
168248041ccbSGregory Etelson 			sh->registers.aso_reg = REG_C_3;
168348041ccbSGregory Etelson 		else
168448041ccbSGregory Etelson 			sh->registers.aso_reg =
168548041ccbSGregory Etelson 				mlx5_regc_value(ffs(reg_c_mask) - 1);
168648041ccbSGregory Etelson 	}
168748041ccbSGregory Etelson #else
168848041ccbSGregory Etelson 	RTE_SET_USED(sh);
168948041ccbSGregory Etelson #endif
169048041ccbSGregory Etelson }
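
/*
 * Example of the selection above: with flow_meter_reg_c_ids == 0xf4, masking
 * with 0xfc leaves bits 2 and 4..7; bit 3 (REG_C_3) is clear, so the lowest
 * remaining bit wins and aso_reg becomes REG_C_2 via ffs(reg_c_mask) - 1.
 */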
169148041ccbSGregory Etelson 
169248041ccbSGregory Etelson static void
169348041ccbSGregory Etelson mlx5_init_shared_dev_registers(struct mlx5_dev_ctx_shared *sh)
169448041ccbSGregory Etelson {
169548041ccbSGregory Etelson 	if (sh->cdev->config.devx)
169648041ccbSGregory Etelson 		mlx5_init_aso_register(sh);
169748041ccbSGregory Etelson 	if (sh->registers.aso_reg != REG_NON) {
169848041ccbSGregory Etelson 		DRV_LOG(DEBUG, "ASO register: REG_C%d",
169948041ccbSGregory Etelson 			mlx5_regc_index(sh->registers.aso_reg));
170048041ccbSGregory Etelson 	} else {
170148041ccbSGregory Etelson 		DRV_LOG(DEBUG, "ASO register: NONE");
170248041ccbSGregory Etelson 	}
17031beeb6d7SBing Zhao 	if (sh->config.dv_flow_en == 2)
170448041ccbSGregory Etelson 		mlx5_init_hws_flow_tags_registers(sh);
170548041ccbSGregory Etelson }
170648041ccbSGregory Etelson 
170792d3a05eSMichael Baum static struct mlx5_physical_device *
170892d3a05eSMichael Baum mlx5_get_physical_device(struct mlx5_common_device *cdev)
170992d3a05eSMichael Baum {
171092d3a05eSMichael Baum 	struct mlx5_physical_device *phdev;
171192d3a05eSMichael Baum 	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
171292d3a05eSMichael Baum 
171392d3a05eSMichael Baum 	/* Search for physical device by system_image_guid. */
171492d3a05eSMichael Baum 	LIST_FOREACH(phdev, &phdev_list, next) {
171592d3a05eSMichael Baum 		if (phdev->guid == attr->system_image_guid) {
171692d3a05eSMichael Baum 			phdev->refcnt++;
171792d3a05eSMichael Baum 			return phdev;
171892d3a05eSMichael Baum 		}
171992d3a05eSMichael Baum 	}
172092d3a05eSMichael Baum 	phdev = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
172192d3a05eSMichael Baum 			    sizeof(struct mlx5_physical_device),
172292d3a05eSMichael Baum 			    RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
172392d3a05eSMichael Baum 	if (!phdev) {
172492d3a05eSMichael Baum 		DRV_LOG(ERR, "Physical device allocation failure.");
172592d3a05eSMichael Baum 		rte_errno = ENOMEM;
172692d3a05eSMichael Baum 		return NULL;
172792d3a05eSMichael Baum 	}
172892d3a05eSMichael Baum 	phdev->guid = attr->system_image_guid;
172992d3a05eSMichael Baum 	phdev->refcnt = 1;
173092d3a05eSMichael Baum 	LIST_INSERT_HEAD(&phdev_list, phdev, next);
173192d3a05eSMichael Baum 	DRV_LOG(DEBUG, "Physical device is created, guid=%" PRIu64 ".",
173292d3a05eSMichael Baum 		phdev->guid);
173392d3a05eSMichael Baum 	return phdev;
173492d3a05eSMichael Baum }
173592d3a05eSMichael Baum 
1736f5177bdcSMichael Baum struct mlx5_physical_device *
1737f5177bdcSMichael Baum mlx5_get_locked_physical_device(struct mlx5_priv *priv)
1738f5177bdcSMichael Baum {
1739f5177bdcSMichael Baum 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
1740f5177bdcSMichael Baum 	return priv->sh->phdev;
1741f5177bdcSMichael Baum }
1742f5177bdcSMichael Baum 
1743f5177bdcSMichael Baum void
1744f5177bdcSMichael Baum mlx5_unlock_physical_device(void)
1745f5177bdcSMichael Baum {
1746f5177bdcSMichael Baum 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
1747f5177bdcSMichael Baum }
1748f5177bdcSMichael Baum 
174992d3a05eSMichael Baum static void
175092d3a05eSMichael Baum mlx5_physical_device_destroy(struct mlx5_physical_device *phdev)
175192d3a05eSMichael Baum {
175292d3a05eSMichael Baum #ifdef RTE_LIBRTE_MLX5_DEBUG
175392d3a05eSMichael Baum 	/* Check the object presence in the list. */
175492d3a05eSMichael Baum 	struct mlx5_physical_device *lphdev;
175592d3a05eSMichael Baum 
175692d3a05eSMichael Baum 	LIST_FOREACH(lphdev, &phdev_list, next)
175792d3a05eSMichael Baum 		if (lphdev == phdev)
175892d3a05eSMichael Baum 			break;
175992d3a05eSMichael Baum 	MLX5_ASSERT(lphdev);
176092d3a05eSMichael Baum 	if (lphdev != phdev) {
176192d3a05eSMichael Baum 		DRV_LOG(ERR, "Freeing non-existing physical device");
176292d3a05eSMichael Baum 		return;
176392d3a05eSMichael Baum 	}
176492d3a05eSMichael Baum #endif
176592d3a05eSMichael Baum 	MLX5_ASSERT(phdev);
176692d3a05eSMichael Baum 	MLX5_ASSERT(phdev->refcnt);
176792d3a05eSMichael Baum 	if (--phdev->refcnt)
176892d3a05eSMichael Baum 		return;
176992d3a05eSMichael Baum 	/* Remove physical device from the global device list. */
177092d3a05eSMichael Baum 	LIST_REMOVE(phdev, next);
177192d3a05eSMichael Baum 	mlx5_free(phdev);
177292d3a05eSMichael Baum }
177392d3a05eSMichael Baum 
1774e3032e9cSMichael Baum /**
177591389890SOphir Munk  * Allocate a shared device context. If there is a multiport device, the
177617e19bc4SViacheslav Ovsiienko  * master and representors will share this context; if there is a single
177791389890SOphir Munk  * port dedicated device, the context will be used by only the given
177817e19bc4SViacheslav Ovsiienko  * port due to unification.
177917e19bc4SViacheslav Ovsiienko  *
178091389890SOphir Munk  * The routine first searches the context list for the specified device name;
178117e19bc4SViacheslav Ovsiienko  * if found, the shared context is assumed and its reference counter is incremented.
178217e19bc4SViacheslav Ovsiienko  * If no context is found, a new one is created and initialized with the specified
178391389890SOphir Munk  * device context and parameters.
178417e19bc4SViacheslav Ovsiienko  *
178517e19bc4SViacheslav Ovsiienko  * @param[in] spawn
178691389890SOphir Munk  *   Pointer to the device attributes (name, port, etc).
1787a729d2f0SMichael Baum  * @param mkvlist
1788a729d2f0SMichael Baum  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
178917e19bc4SViacheslav Ovsiienko  *
179017e19bc4SViacheslav Ovsiienko  * @return
17916e88bc42SOphir Munk  *   Pointer to mlx5_dev_ctx_shared object on success,
179217e19bc4SViacheslav Ovsiienko  *   otherwise NULL and rte_errno is set.
179317e19bc4SViacheslav Ovsiienko  */
17942eb4d010SOphir Munk struct mlx5_dev_ctx_shared *
1795a729d2f0SMichael Baum mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
1796a729d2f0SMichael Baum 			  struct mlx5_kvargs_ctrl *mkvlist)
179717e19bc4SViacheslav Ovsiienko {
17986e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh;
179917e19bc4SViacheslav Ovsiienko 	int err = 0;
180053e5a82fSViacheslav Ovsiienko 	uint32_t i;
180117e19bc4SViacheslav Ovsiienko 
18028e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(spawn);
180317e19bc4SViacheslav Ovsiienko 	/* Secondary process should not create the shared context. */
18048e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
180591389890SOphir Munk 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
180617e19bc4SViacheslav Ovsiienko 	/* Search for IB context by device name. */
180792d3a05eSMichael Baum 	LIST_FOREACH(sh, &dev_ctx_list, next) {
1808ca1418ceSMichael Baum 		if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
180917e19bc4SViacheslav Ovsiienko 			sh->refcnt++;
181017e19bc4SViacheslav Ovsiienko 			goto exit;
181117e19bc4SViacheslav Ovsiienko 		}
181217e19bc4SViacheslav Ovsiienko 	}
1813ae4eb7dcSViacheslav Ovsiienko 	/* No device found, we have to create a new shared context. */
18148e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(spawn->max_port);
18152175c4dcSSuanming Mou 	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
18166e88bc42SOphir Munk 			 sizeof(struct mlx5_dev_ctx_shared) +
18176be4c57aSMichael Baum 			 spawn->max_port * sizeof(struct mlx5_dev_shared_port),
18182175c4dcSSuanming Mou 			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
181917e19bc4SViacheslav Ovsiienko 	if (!sh) {
18206be4c57aSMichael Baum 		DRV_LOG(ERR, "Shared context allocation failure.");
182117e19bc4SViacheslav Ovsiienko 		rte_errno = ENOMEM;
182217e19bc4SViacheslav Ovsiienko 		goto exit;
182317e19bc4SViacheslav Ovsiienko 	}
1824887183efSMichael Baum 	pthread_mutex_init(&sh->txpp.mutex, NULL);
18257af08c8fSMichael Baum 	sh->numa_node = spawn->cdev->dev->numa_node;
18267af08c8fSMichael Baum 	sh->cdev = spawn->cdev;
1827cf004fd3SMichael Baum 	sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
1828f5f4c482SXueming Li 	if (spawn->bond_info)
1829f5f4c482SXueming Li 		sh->bond = *spawn->bond_info;
183091d1cfafSMichael Baum 	err = mlx5_os_capabilities_prepare(sh);
183117e19bc4SViacheslav Ovsiienko 	if (err) {
183291d1cfafSMichael Baum 		DRV_LOG(ERR, "Failed to configure device capabilities.");
183317e19bc4SViacheslav Ovsiienko 		goto error;
183417e19bc4SViacheslav Ovsiienko 	}
1835a729d2f0SMichael Baum 	err = mlx5_shared_dev_ctx_args_config(sh, mkvlist, &sh->config);
1836a13ec19cSMichael Baum 	if (err) {
1837a13ec19cSMichael Baum 		DRV_LOG(ERR, "Failed to process device configure: %s",
1838a13ec19cSMichael Baum 			strerror(rte_errno));
1839a13ec19cSMichael Baum 		goto error;
1840a13ec19cSMichael Baum 	}
184117e19bc4SViacheslav Ovsiienko 	sh->refcnt = 1;
184217e19bc4SViacheslav Ovsiienko 	sh->max_port = spawn->max_port;
1843ca1418ceSMichael Baum 	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
1844f44b09f9SOphir Munk 		sizeof(sh->ibdev_name) - 1);
1845ca1418ceSMichael Baum 	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
1846f44b09f9SOphir Munk 		sizeof(sh->ibdev_path) - 1);
184792d3a05eSMichael Baum 	sh->phdev = mlx5_get_physical_device(sh->cdev);
184892d3a05eSMichael Baum 	if (!sh->phdev)
184992d3a05eSMichael Baum 		goto error;
185053e5a82fSViacheslav Ovsiienko 	/*
18516be4c57aSMichael Baum 	 * Setting port_id to the maximum disallowed value means there is no
18526be4c57aSMichael Baum 	 * interrupt subhandler installed for the given port index i.
185353e5a82fSViacheslav Ovsiienko 	 */
185423242063SMatan Azrad 	for (i = 0; i < sh->max_port; i++) {
185553e5a82fSViacheslav Ovsiienko 		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
185623242063SMatan Azrad 		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
185717f95513SDmitry Kozlyuk 		sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
185823242063SMatan Azrad 	}
18596dc0cbc6SMichael Baum 	if (sh->cdev->config.devx) {
1860ca1418ceSMichael Baum 		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
1861ae18a1aeSOri Kam 		if (!sh->td) {
1862ae18a1aeSOri Kam 			DRV_LOG(ERR, "TD allocation failure");
18636be4c57aSMichael Baum 			rte_errno = ENOMEM;
1864ae18a1aeSOri Kam 			goto error;
1865ae18a1aeSOri Kam 		}
1866a89f6433SRongwei Liu 		if (mlx5_setup_tis(sh)) {
1867ae18a1aeSOri Kam 			DRV_LOG(ERR, "TIS allocation failure");
18686be4c57aSMichael Baum 			rte_errno = ENOMEM;
1869ae18a1aeSOri Kam 			goto error;
1870ae18a1aeSOri Kam 		}
18715dfa003dSMichael Baum 		err = mlx5_rxtx_uars_prepare(sh);
1872a0bfe9d5SViacheslav Ovsiienko 		if (err)
1873fc4d4f73SViacheslav Ovsiienko 			goto error;
187424feb045SViacheslav Ovsiienko #ifndef RTE_ARCH_64
18755dfa003dSMichael Baum 	} else {
187624feb045SViacheslav Ovsiienko 		/* Initialize UAR access locks for 32bit implementations. */
187724feb045SViacheslav Ovsiienko 		rte_spinlock_init(&sh->uar_lock_cq);
187824feb045SViacheslav Ovsiienko 		for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
187924feb045SViacheslav Ovsiienko 			rte_spinlock_init(&sh->uar_lock[i]);
188024feb045SViacheslav Ovsiienko #endif
18815dfa003dSMichael Baum 	}
18822eb4d010SOphir Munk 	mlx5_os_dev_shared_handler_install(sh);
188392d3a05eSMichael Baum 	if (LIST_EMPTY(&dev_ctx_list)) {
18845d55a494STal Shnaiderman 		err = mlx5_flow_os_init_workspace_once();
18855d55a494STal Shnaiderman 		if (err)
18865d55a494STal Shnaiderman 			goto error;
18875d55a494STal Shnaiderman 	}
1888a94e89e4SMichael Baum 	err = mlx5_flow_counters_mng_init(sh);
1889a94e89e4SMichael Baum 	if (err) {
1890a94e89e4SMichael Baum 		DRV_LOG(ERR, "Failed to initialize counters management.");
1891a94e89e4SMichael Baum 		goto error;
1892a94e89e4SMichael Baum 	}
1893fa2d01c8SDong Zhou 	mlx5_flow_aging_init(sh);
1894a13ec19cSMichael Baum 	mlx5_flow_ipool_create(sh);
18950e3d0525SViacheslav Ovsiienko 	/* Add context to the global device list. */
189692d3a05eSMichael Baum 	LIST_INSERT_HEAD(&dev_ctx_list, sh, next);
1897f15f0c38SShiri Kuzin 	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
189848041ccbSGregory Etelson 	mlx5_init_shared_dev_registers(sh);
18996ac2104aSSuanming Mou 	/* Init counter pool list header and lock. */
19006ac2104aSSuanming Mou 	LIST_INIT(&sh->hws_cpool_list);
19016ac2104aSSuanming Mou 	rte_spinlock_init(&sh->cpool_lock);
190217e19bc4SViacheslav Ovsiienko exit:
190391389890SOphir Munk 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
190417e19bc4SViacheslav Ovsiienko 	return sh;
190517e19bc4SViacheslav Ovsiienko error:
19066be4c57aSMichael Baum 	err = rte_errno;
1907d133f4cdSViacheslav Ovsiienko 	pthread_mutex_destroy(&sh->txpp.mutex);
190891389890SOphir Munk 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
19098e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(sh);
19106be4c57aSMichael Baum 	mlx5_rxtx_uars_release(sh);
1911a89f6433SRongwei Liu 	i = 0;
1912a89f6433SRongwei Liu 	do {
1913a89f6433SRongwei Liu 		if (sh->tis[i])
1914a89f6433SRongwei Liu 			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
19150484c1d6SDariusz Sosnowski 	} while (++i <= (uint32_t)sh->bond.n_port);
19166be4c57aSMichael Baum 	if (sh->td)
19176be4c57aSMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
191892d3a05eSMichael Baum 	if (sh->phdev)
191992d3a05eSMichael Baum 		mlx5_physical_device_destroy(sh->phdev);
19202175c4dcSSuanming Mou 	mlx5_free(sh);
192117e19bc4SViacheslav Ovsiienko 	rte_errno = err;
192217e19bc4SViacheslav Ovsiienko 	return NULL;
192317e19bc4SViacheslav Ovsiienko }
192417e19bc4SViacheslav Ovsiienko 
192517e19bc4SViacheslav Ovsiienko /**
192625025da3SSpike Du  * Create LWM event_channel and interrupt handle for shared device
192725025da3SSpike Du  * context. All rxqs sharing the device context share the event_channel.
192825025da3SSpike Du  * A callback is registered in the interrupt thread to receive the LWM event.
192925025da3SSpike Du  *
193025025da3SSpike Du  * @param[in] priv
193125025da3SSpike Du  *   Pointer to mlx5_priv instance.
193225025da3SSpike Du  *
193325025da3SSpike Du  * @return
193425025da3SSpike Du  *   0 on success, negative with rte_errno set.
193525025da3SSpike Du  */
193625025da3SSpike Du int
193725025da3SSpike Du mlx5_lwm_setup(struct mlx5_priv *priv)
193825025da3SSpike Du {
193925025da3SSpike Du 	int fd_lwm;
194025025da3SSpike Du 
194125025da3SSpike Du 	pthread_mutex_init(&priv->sh->lwm_config_lock, NULL);
194225025da3SSpike Du 	priv->sh->devx_channel_lwm = mlx5_os_devx_create_event_channel
194325025da3SSpike Du 			(priv->sh->cdev->ctx,
194425025da3SSpike Du 			 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
194525025da3SSpike Du 	if (!priv->sh->devx_channel_lwm)
194625025da3SSpike Du 		goto err;
194725025da3SSpike Du 	fd_lwm = mlx5_os_get_devx_channel_fd(priv->sh->devx_channel_lwm);
194825025da3SSpike Du 	priv->sh->intr_handle_lwm = mlx5_os_interrupt_handler_create
194925025da3SSpike Du 		(RTE_INTR_INSTANCE_F_SHARED, true,
195025025da3SSpike Du 		 fd_lwm, mlx5_dev_interrupt_handler_lwm, priv);
195125025da3SSpike Du 	if (!priv->sh->intr_handle_lwm)
195225025da3SSpike Du 		goto err;
195325025da3SSpike Du 	return 0;
195425025da3SSpike Du err:
195525025da3SSpike Du 	if (priv->sh->devx_channel_lwm) {
195625025da3SSpike Du 		mlx5_os_devx_destroy_event_channel
195725025da3SSpike Du 			(priv->sh->devx_channel_lwm);
195825025da3SSpike Du 		priv->sh->devx_channel_lwm = NULL;
195925025da3SSpike Du 	}
196025025da3SSpike Du 	pthread_mutex_destroy(&priv->sh->lwm_config_lock);
196125025da3SSpike Du 	return -rte_errno;
196225025da3SSpike Du }
196325025da3SSpike Du 
196425025da3SSpike Du /**
196525025da3SSpike Du  * Destroy LWM event_channel and interrupt handle for shared device
196625025da3SSpike Du  * context before freeing this context. The interrupt handler is also
196725025da3SSpike Du  * unregistered.
196825025da3SSpike Du  *
196925025da3SSpike Du  * @param[in] sh
197025025da3SSpike Du  *   Pointer to shared device context.
197125025da3SSpike Du  */
197225025da3SSpike Du void
197325025da3SSpike Du mlx5_lwm_unset(struct mlx5_dev_ctx_shared *sh)
197425025da3SSpike Du {
197525025da3SSpike Du 	if (sh->intr_handle_lwm) {
197625025da3SSpike Du 		mlx5_os_interrupt_handler_destroy(sh->intr_handle_lwm,
197725025da3SSpike Du 			mlx5_dev_interrupt_handler_lwm, (void *)-1);
197825025da3SSpike Du 		sh->intr_handle_lwm = NULL;
197925025da3SSpike Du 	}
198025025da3SSpike Du 	if (sh->devx_channel_lwm) {
198125025da3SSpike Du 		mlx5_os_devx_destroy_event_channel
198225025da3SSpike Du 			(sh->devx_channel_lwm);
198325025da3SSpike Du 		sh->devx_channel_lwm = NULL;
198425025da3SSpike Du 	}
198525025da3SSpike Du 	pthread_mutex_destroy(&sh->lwm_config_lock);
198625025da3SSpike Du }
198725025da3SSpike Du 
198825025da3SSpike Du /**
198917e19bc4SViacheslav Ovsiienko  * Free shared IB device context. Decrement the reference counter and, if it
199017e19bc4SViacheslav Ovsiienko  * reaches zero, free all allocated resources and close handles.
199117e19bc4SViacheslav Ovsiienko  *
199217e19bc4SViacheslav Ovsiienko  * @param[in] sh
19936e88bc42SOphir Munk  *   Pointer to mlx5_dev_ctx_shared object to free
199417e19bc4SViacheslav Ovsiienko  */
19952eb4d010SOphir Munk void
199691389890SOphir Munk mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
199717e19bc4SViacheslav Ovsiienko {
1998fec28ca0SDmitry Kozlyuk 	int ret;
1999a89f6433SRongwei Liu 	int i = 0;
2000fec28ca0SDmitry Kozlyuk 
200191389890SOphir Munk 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
20020afacb04SAlexander Kozyrev #ifdef RTE_LIBRTE_MLX5_DEBUG
200317e19bc4SViacheslav Ovsiienko 	/* Check the object presence in the list. */
20046e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *lctx;
200517e19bc4SViacheslav Ovsiienko 
200692d3a05eSMichael Baum 	LIST_FOREACH(lctx, &dev_ctx_list, next)
200717e19bc4SViacheslav Ovsiienko 		if (lctx == sh)
200817e19bc4SViacheslav Ovsiienko 			break;
20098e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(lctx);
201017e19bc4SViacheslav Ovsiienko 	if (lctx != sh) {
201117e19bc4SViacheslav Ovsiienko 		DRV_LOG(ERR, "Freeing non-existing shared IB context");
201217e19bc4SViacheslav Ovsiienko 		goto exit;
201317e19bc4SViacheslav Ovsiienko 	}
201417e19bc4SViacheslav Ovsiienko #endif
20158e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(sh);
20168e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(sh->refcnt);
201717e19bc4SViacheslav Ovsiienko 	/* Secondary process should not free the shared context. */
20188e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
201917e19bc4SViacheslav Ovsiienko 	if (--sh->refcnt)
202017e19bc4SViacheslav Ovsiienko 		goto exit;
2021fec28ca0SDmitry Kozlyuk 	/* Stop watching for mempool events and unregister all mempools. */
2022fc59a1ecSMichael Baum 	if (!sh->cdev->config.mr_mempool_reg_en) {
2023fec28ca0SDmitry Kozlyuk 		ret = rte_mempool_event_callback_unregister
2024fec28ca0SDmitry Kozlyuk 				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
2025fec28ca0SDmitry Kozlyuk 		if (ret == 0)
2026fc59a1ecSMichael Baum 			rte_mempool_walk
2027fc59a1ecSMichael Baum 			     (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
2028fc59a1ecSMichael Baum 	}
20290e3d0525SViacheslav Ovsiienko 	/* Remove context from the global device list. */
203017e19bc4SViacheslav Ovsiienko 	LIST_REMOVE(sh, next);
2031ea823b2cSDmitry Kozlyuk 	/* Release resources on the last device removal. */
203292d3a05eSMichael Baum 	if (LIST_EMPTY(&dev_ctx_list)) {
2033ea823b2cSDmitry Kozlyuk 		mlx5_os_net_cleanup();
20345d55a494STal Shnaiderman 		mlx5_flow_os_release_workspace();
2035ea823b2cSDmitry Kozlyuk 	}
2036f4a08731SMichael Baum 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
20379086ac09SGregory Etelson 	if (sh->flex_parsers_dv) {
20389086ac09SGregory Etelson 		mlx5_list_destroy(sh->flex_parsers_dv);
20399086ac09SGregory Etelson 		sh->flex_parsers_dv = NULL;
20409086ac09SGregory Etelson 	}
204153e5a82fSViacheslav Ovsiienko 	/*
204253e5a82fSViacheslav Ovsiienko 	 * Ensure there is no async event handler installed.
204353e5a82fSViacheslav Ovsiienko 	 * Only the primary process handles async device events.
204453e5a82fSViacheslav Ovsiienko 	 */
20455382d28cSMatan Azrad 	mlx5_flow_counters_mng_close(sh);
2046ce12974cSMichael Baum 	if (sh->ct_mng)
2047ce12974cSMichael Baum 		mlx5_flow_aso_ct_mng_close(sh);
2048f935ed4bSDekel Peled 	if (sh->aso_age_mng) {
2049f935ed4bSDekel Peled 		mlx5_flow_aso_age_mng_close(sh);
2050f935ed4bSDekel Peled 		sh->aso_age_mng = NULL;
2051f935ed4bSDekel Peled 	}
205229efa63aSLi Zhang 	if (sh->mtrmng)
205329efa63aSLi Zhang 		mlx5_aso_flow_mtrs_mng_close(sh);
2054014d1cbeSSuanming Mou 	mlx5_flow_ipool_destroy(sh);
20552eb4d010SOphir Munk 	mlx5_os_dev_shared_handler_uninstall(sh);
20565dfa003dSMichael Baum 	mlx5_rxtx_uars_release(sh);
2057a89f6433SRongwei Liu 	do {
2058a89f6433SRongwei Liu 		if (sh->tis[i])
2059a89f6433SRongwei Liu 			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
20600484c1d6SDariusz Sosnowski 	} while (++i <= sh->bond.n_port);
2061ae18a1aeSOri Kam 	if (sh->td)
2062ae18a1aeSOri Kam 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
2063f15f0c38SShiri Kuzin 	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
2064d133f4cdSViacheslav Ovsiienko 	pthread_mutex_destroy(&sh->txpp.mutex);
206525025da3SSpike Du 	mlx5_lwm_unset(sh);
206692d3a05eSMichael Baum 	mlx5_physical_device_destroy(sh->phdev);
20672175c4dcSSuanming Mou 	mlx5_free(sh);
2068f4a08731SMichael Baum 	return;
206917e19bc4SViacheslav Ovsiienko exit:
207091389890SOphir Munk 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
207117e19bc4SViacheslav Ovsiienko }
207217e19bc4SViacheslav Ovsiienko 
2073771fa900SAdrien Mazarguil /**
2074afd7a625SXueming Li  * Destroy table hash list.
207554534725SMatan Azrad  *
207654534725SMatan Azrad  * @param[in] priv
207754534725SMatan Azrad  *   Pointer to the private device data structure.
207854534725SMatan Azrad  */
20792eb4d010SOphir Munk void
208054534725SMatan Azrad mlx5_free_table_hash_list(struct mlx5_priv *priv)
208154534725SMatan Azrad {
20826e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2083d1559d66SSuanming Mou 	struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
2084d1559d66SSuanming Mou 				   &sh->groups : &sh->flow_tbls;
2085d1559d66SSuanming Mou 	if (*tbls == NULL)
208654534725SMatan Azrad 		return;
2087d1559d66SSuanming Mou 	mlx5_hlist_destroy(*tbls);
2088d1559d66SSuanming Mou 	*tbls = NULL;
208954534725SMatan Azrad }
209054534725SMatan Azrad 
209122681deeSAlex Vesker #ifdef HAVE_MLX5_HWS_SUPPORT
2092d1559d66SSuanming Mou /**
2093d1559d66SSuanming Mou  * Allocate HW steering group hash list.
2094d1559d66SSuanming Mou  *
2095d1559d66SSuanming Mou  * @param[in] priv
2096d1559d66SSuanming Mou  *   Pointer to the private device data structure.
2097d1559d66SSuanming Mou  */
2098d1559d66SSuanming Mou static int
2099d1559d66SSuanming Mou mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
2100d1559d66SSuanming Mou {
2101d1559d66SSuanming Mou 	int err = 0;
2102d1559d66SSuanming Mou 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2103d1559d66SSuanming Mou 	char s[MLX5_NAME_SIZE];
2104d1559d66SSuanming Mou 
2105d1559d66SSuanming Mou 	MLX5_ASSERT(sh);
2106d1559d66SSuanming Mou 	snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
2107d1559d66SSuanming Mou 	sh->groups = mlx5_hlist_create
2108d1559d66SSuanming Mou 			(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
2109d1559d66SSuanming Mou 			 false, true, sh,
2110d1559d66SSuanming Mou 			 flow_hw_grp_create_cb,
2111d1559d66SSuanming Mou 			 flow_hw_grp_match_cb,
2112d1559d66SSuanming Mou 			 flow_hw_grp_remove_cb,
2113d1559d66SSuanming Mou 			 flow_hw_grp_clone_cb,
2114d1559d66SSuanming Mou 			 flow_hw_grp_clone_free_cb);
2115d1559d66SSuanming Mou 	if (!sh->groups) {
2116d1559d66SSuanming Mou 		DRV_LOG(ERR, "flow groups hash list creation failed.");
2117d1559d66SSuanming Mou 		err = ENOMEM;
2118d1559d66SSuanming Mou 	}
2119d1559d66SSuanming Mou 	return err;
2120d1559d66SSuanming Mou }
2121d1559d66SSuanming Mou #endif
2122d1559d66SSuanming Mou 
2123d1559d66SSuanming Mou 
212454534725SMatan Azrad /**
212554534725SMatan Azrad  * Initialize flow table hash list and create the root table entry
212654534725SMatan Azrad  * for each domain.
212754534725SMatan Azrad  *
212854534725SMatan Azrad  * @param[in] priv
212954534725SMatan Azrad  *   Pointer to the private device data structure.
213054534725SMatan Azrad  *
213154534725SMatan Azrad  * @return
213254534725SMatan Azrad  *   Zero on success, positive error code otherwise.
213354534725SMatan Azrad  */
21342eb4d010SOphir Munk int
2135afd7a625SXueming Li mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
213654534725SMatan Azrad {
2137afd7a625SXueming Li 	int err = 0;
2138d1559d66SSuanming Mou 
2139afd7a625SXueming Li 	/* Tables are only used in DV and DR modes. */
2140d9bad050SSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
21416e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2142961b6774SMatan Azrad 	char s[MLX5_NAME_SIZE];
214354534725SMatan Azrad 
2144d9bad050SSuanming Mou #ifdef HAVE_MLX5_HWS_SUPPORT
2145d1559d66SSuanming Mou 	if (priv->sh->config.dv_flow_en == 2)
2146d1559d66SSuanming Mou 		return mlx5_alloc_hw_group_hash_list(priv);
2147d9bad050SSuanming Mou #endif
21488e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(sh);
214954534725SMatan Azrad 	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
2150e69a5922SXueming Li 	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
2151961b6774SMatan Azrad 					  false, true, sh,
2152961b6774SMatan Azrad 					  flow_dv_tbl_create_cb,
2153f5b0aed2SSuanming Mou 					  flow_dv_tbl_match_cb,
2154961b6774SMatan Azrad 					  flow_dv_tbl_remove_cb,
2155961b6774SMatan Azrad 					  flow_dv_tbl_clone_cb,
2156961b6774SMatan Azrad 					  flow_dv_tbl_clone_free_cb);
215754534725SMatan Azrad 	if (!sh->flow_tbls) {
215863783b01SDavid Marchand 		DRV_LOG(ERR, "flow tables hash list creation failed.");
215954534725SMatan Azrad 		err = ENOMEM;
216054534725SMatan Azrad 		return err;
216154534725SMatan Azrad 	}
216254534725SMatan Azrad #ifndef HAVE_MLX5DV_DR
2163afd7a625SXueming Li 	struct rte_flow_error error;
2164afd7a625SXueming Li 	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
2165afd7a625SXueming Li 
216654534725SMatan Azrad 	/*
216754534725SMatan Azrad 	 * In case we do not have DR support, the zero tables should be
216854534725SMatan Azrad 	 * created because DV expects to see them even if they cannot be
216954534725SMatan Azrad 	 * created by RDMA-CORE.
217054534725SMatan Azrad 	 */
21712d2cef5dSLi Zhang 	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
21722d2cef5dSLi Zhang 		NULL, 0, 1, 0, &error) ||
21732d2cef5dSLi Zhang 	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
21742d2cef5dSLi Zhang 		NULL, 0, 1, 0, &error) ||
21752d2cef5dSLi Zhang 	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
21762d2cef5dSLi Zhang 		NULL, 0, 1, 0, &error)) {
217754534725SMatan Azrad 		err = ENOMEM;
217854534725SMatan Azrad 		goto error;
217954534725SMatan Azrad 	}
218054534725SMatan Azrad 	return err;
218154534725SMatan Azrad error:
218254534725SMatan Azrad 	mlx5_free_table_hash_list(priv);
218354534725SMatan Azrad #endif /* HAVE_MLX5DV_DR */
2184afd7a625SXueming Li #endif
218554534725SMatan Azrad 	return err;
218654534725SMatan Azrad }
218754534725SMatan Azrad 
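/*
 * Note on the mlx5_hlist_create() callback set used above: the create,
 * match, remove, clone and clone-free callbacks delegate the whole entry
 * life cycle to the hash list implementation, so both allocation paths
 * (HWS groups and SW steering tables) manage entries the same way.
 */
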
218854534725SMatan Azrad /**
21894d803a72SOlga Shern  * Retrieve integer value from environment variable.
21904d803a72SOlga Shern  *
21914d803a72SOlga Shern  * @param[in] name
21924d803a72SOlga Shern  *   Environment variable name.
21934d803a72SOlga Shern  *
21944d803a72SOlga Shern  * @return
21954d803a72SOlga Shern  *   Integer value, 0 if the variable is not set.
21964d803a72SOlga Shern  */
21974d803a72SOlga Shern int
21984d803a72SOlga Shern mlx5_getenv_int(const char *name)
21994d803a72SOlga Shern {
22004d803a72SOlga Shern 	const char *val = getenv(name);
22014d803a72SOlga Shern 
22024d803a72SOlga Shern 	if (val == NULL)
22034d803a72SOlga Shern 		return 0;
22044d803a72SOlga Shern 	return atoi(val);
22054d803a72SOlga Shern }
22064d803a72SOlga Shern 
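/*
 * Usage sketch (illustrative only): since unset and non-numeric variables
 * both yield 0, callers can use the result directly as a boolean knob.
 * The variable name below is hypothetical.
 *
 *	if (mlx5_getenv_int("MLX5_EXTRA_DEBUG") > 0)
 *		DRV_LOG(DEBUG, "extra debugging requested via environment");
 */
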
22074d803a72SOlga Shern /**
2208c9ba7523SRaslan Darawsheh  * DPDK callback to add a UDP tunnel port.
2209c9ba7523SRaslan Darawsheh  *
2210c9ba7523SRaslan Darawsheh  * @param[in] dev
2211c9ba7523SRaslan Darawsheh  *   A pointer to eth_dev.
2212c9ba7523SRaslan Darawsheh  * @param[in] udp_tunnel
2213c9ba7523SRaslan Darawsheh  *   A pointer to UDP tunnel.
2214c9ba7523SRaslan Darawsheh  *
2215c9ba7523SRaslan Darawsheh  * @return
2216c9ba7523SRaslan Darawsheh  *   0 on valid UDP ports and tunnels, -ENOTSUP otherwise.
2217c9ba7523SRaslan Darawsheh  */
2218c9ba7523SRaslan Darawsheh int
2219c9ba7523SRaslan Darawsheh mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
2220c9ba7523SRaslan Darawsheh 			 struct rte_eth_udp_tunnel *udp_tunnel)
2221c9ba7523SRaslan Darawsheh {
22228e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(udp_tunnel != NULL);
2223295968d1SFerruh Yigit 	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
2224c9ba7523SRaslan Darawsheh 	    udp_tunnel->udp_port == 4789)
2225c9ba7523SRaslan Darawsheh 		return 0;
2226295968d1SFerruh Yigit 	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
2227c9ba7523SRaslan Darawsheh 	    udp_tunnel->udp_port == 4790)
2228c9ba7523SRaslan Darawsheh 		return 0;
2229c9ba7523SRaslan Darawsheh 	return -ENOTSUP;
2230c9ba7523SRaslan Darawsheh }
2231c9ba7523SRaslan Darawsheh 
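/*
 * Illustrative call (values taken from the checks above): only the IANA
 * default ports are accepted, e.g. VXLAN on 4789:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *		.udp_port = 4789,
 *	};
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * returns 0 (as does VXLAN-GPE on 4790), while any other port or tunnel
 * type yields -ENOTSUP.
 */
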
2232c9ba7523SRaslan Darawsheh /**
2233120dc4a7SYongseok Koh  * Initialize process private data structure.
2234120dc4a7SYongseok Koh  *
2235120dc4a7SYongseok Koh  * @param dev
2236120dc4a7SYongseok Koh  *   Pointer to Ethernet device structure.
2237120dc4a7SYongseok Koh  *
2238120dc4a7SYongseok Koh  * @return
2239120dc4a7SYongseok Koh  *   0 on success, a negative errno value otherwise and rte_errno is set.
2240120dc4a7SYongseok Koh  */
2241120dc4a7SYongseok Koh int
2242120dc4a7SYongseok Koh mlx5_proc_priv_init(struct rte_eth_dev *dev)
2243120dc4a7SYongseok Koh {
2244120dc4a7SYongseok Koh 	struct mlx5_priv *priv = dev->data->dev_private;
224527918f0dSTim Martin 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2246120dc4a7SYongseok Koh 	struct mlx5_proc_priv *ppriv;
2247120dc4a7SYongseok Koh 	size_t ppriv_size;
2248120dc4a7SYongseok Koh 
22496dad8b3aSYunjian Wang 	mlx5_proc_priv_uninit(dev);
2250120dc4a7SYongseok Koh 	/*
2251120dc4a7SYongseok Koh 	 * UAR register table follows the process private structure. BlueFlame
2252120dc4a7SYongseok Koh 	 * registers for Tx queues are stored in the table.
2253120dc4a7SYongseok Koh 	 */
22545dfa003dSMichael Baum 	ppriv_size = sizeof(struct mlx5_proc_priv) +
22555dfa003dSMichael Baum 		     priv->txqs_n * sizeof(struct mlx5_uar_data);
225684a22cbcSSuanming Mou 	ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
225784a22cbcSSuanming Mou 			    RTE_CACHE_LINE_SIZE, dev->device->numa_node);
2258120dc4a7SYongseok Koh 	if (!ppriv) {
2259120dc4a7SYongseok Koh 		rte_errno = ENOMEM;
2260120dc4a7SYongseok Koh 		return -rte_errno;
2261120dc4a7SYongseok Koh 	}
226284a22cbcSSuanming Mou 	ppriv->uar_table_sz = priv->txqs_n;
2263120dc4a7SYongseok Koh 	dev->process_private = ppriv;
2264b6e9c33cSMichael Baum 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2265b6e9c33cSMichael Baum 		priv->sh->pppriv = ppriv;
226627918f0dSTim Martin 	/* Check and try to map HCA PCI BAR to allow reading real time. */
226727918f0dSTim Martin 	if (sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device))
226827918f0dSTim Martin 		mlx5_txpp_map_hca_bar(dev);
2269120dc4a7SYongseok Koh 	return 0;
2270120dc4a7SYongseok Koh }
2271120dc4a7SYongseok Koh 
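/*
 * Layout sketch of the allocation in mlx5_proc_priv_init() (per process):
 *
 *	+-----------------------+-----------------------------------------+
 *	| struct mlx5_proc_priv | struct mlx5_uar_data uar_table[txqs_n]  |
 *	+-----------------------+-----------------------------------------+
 *
 * The per-Tx-queue UAR/BlueFlame registers directly follow the process
 * private structure in a single contiguous allocation.
 */
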
2272120dc4a7SYongseok Koh /**
2273120dc4a7SYongseok Koh  * Un-initialize process private data structure.
2274120dc4a7SYongseok Koh  *
2275120dc4a7SYongseok Koh  * @param dev
2276120dc4a7SYongseok Koh  *   Pointer to Ethernet device structure.
2277120dc4a7SYongseok Koh  */
22782b36c30bSSuanming Mou void
2279120dc4a7SYongseok Koh mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
2280120dc4a7SYongseok Koh {
22819b31fc90SViacheslav Ovsiienko 	struct mlx5_proc_priv *ppriv = dev->process_private;
22829b31fc90SViacheslav Ovsiienko 
22839b31fc90SViacheslav Ovsiienko 	if (!ppriv)
2284120dc4a7SYongseok Koh 		return;
22859b31fc90SViacheslav Ovsiienko 	if (ppriv->hca_bar)
22869b31fc90SViacheslav Ovsiienko 		mlx5_txpp_unmap_hca_bar(dev);
22872175c4dcSSuanming Mou 	mlx5_free(dev->process_private);
2288120dc4a7SYongseok Koh 	dev->process_private = NULL;
2289120dc4a7SYongseok Koh }
2290120dc4a7SYongseok Koh 
2291120dc4a7SYongseok Koh /**
2292771fa900SAdrien Mazarguil  * DPDK callback to close the device.
2293771fa900SAdrien Mazarguil  *
2294771fa900SAdrien Mazarguil  * Destroy all queues and objects, free memory.
2295771fa900SAdrien Mazarguil  *
2296771fa900SAdrien Mazarguil  * @param dev
2297771fa900SAdrien Mazarguil  *   Pointer to Ethernet device structure.
2298771fa900SAdrien Mazarguil  */
2299b142387bSThomas Monjalon int
2300771fa900SAdrien Mazarguil mlx5_dev_close(struct rte_eth_dev *dev)
2301771fa900SAdrien Mazarguil {
2302dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
230351a04badSMichael Baum 	struct mlx5_dev_ctx_shared *sh;
23042e22920bSAdrien Mazarguil 	unsigned int i;
23056af6b973SNélio Laranjeiro 	int ret;
2306771fa900SAdrien Mazarguil 
23072786b7bfSSuanming Mou 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
230851a04badSMichael Baum 		if (!priv)
230951a04badSMichael Baum 			DRV_LOG(WARNING, "primary process is already closed");
23102786b7bfSSuanming Mou 		/* Check if process_private released. */
23112786b7bfSSuanming Mou 		if (!dev->process_private)
2312b142387bSThomas Monjalon 			return 0;
23132786b7bfSSuanming Mou 		mlx5_tx_uar_uninit_secondary(dev);
23142786b7bfSSuanming Mou 		mlx5_proc_priv_uninit(dev);
23152786b7bfSSuanming Mou 		rte_eth_dev_release_port(dev);
2316b142387bSThomas Monjalon 		return 0;
23172786b7bfSSuanming Mou 	}
231851a04badSMichael Baum 	sh = priv->sh;
2319f5177bdcSMichael Baum 	if (!sh)
2320b142387bSThomas Monjalon 		return 0;
232130ff1d25SViacheslav Ovsiienko 	if (priv->shared_refcnt) {
232230ff1d25SViacheslav Ovsiienko 		DRV_LOG(ERR, "port %u is shared host in use (%u)",
232330ff1d25SViacheslav Ovsiienko 			dev->data->port_id, priv->shared_refcnt);
232430ff1d25SViacheslav Ovsiienko 		rte_errno = EBUSY;
232530ff1d25SViacheslav Ovsiienko 		return -EBUSY;
232630ff1d25SViacheslav Ovsiienko 	}
2327f5177bdcSMichael Baum #ifdef HAVE_MLX5_HWS_SUPPORT
2328f5177bdcSMichael Baum 	/* Check if shared GENEVE options created on context being closed. */
2329f5177bdcSMichael Baum 	ret = mlx5_geneve_tlv_options_check_busy(priv);
2330f5177bdcSMichael Baum 	if (ret) {
2331f5177bdcSMichael Baum 		DRV_LOG(ERR, "port %u maintains shared GENEVE TLV options",
2332f5177bdcSMichael Baum 			dev->data->port_id);
2333f5177bdcSMichael Baum 		return ret;
2334f5177bdcSMichael Baum 	}
2335f5177bdcSMichael Baum #endif
2336a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
233751a04badSMichael Baum 		dev->data->port_id, sh->ibdev_name);
23388db7e3b6SBing Zhao 	/*
23398db7e3b6SBing Zhao 	 * If the default mreg copy action was removed at the stop stage,
23408db7e3b6SBing Zhao 	 * the search will find nothing and no further action will be taken.
23418db7e3b6SBing Zhao 	 */
23428db7e3b6SBing Zhao 	mlx5_flow_stop_default(dev);
2343af4f09f2SNélio Laranjeiro 	mlx5_traffic_disable(dev);
23448db7e3b6SBing Zhao 	/*
23458db7e3b6SBing Zhao 	 * If all the flows were already flushed in the device stop stage,
23468db7e3b6SBing Zhao 	 * then this returns immediately without any further action.
23478db7e3b6SBing Zhao 	 */
2348b4edeaf3SSuanming Mou 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
23494b61b877SBing Zhao 	mlx5_action_handle_flush(dev);
235002e76468SSuanming Mou 	mlx5_flow_meter_flush(dev, NULL);
23512e22920bSAdrien Mazarguil 	/* Prevent crashes when queues are still in use. */
2352a41f593fSFerruh Yigit 	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
2353a41f593fSFerruh Yigit 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
23542aac5b5dSYongseok Koh 	rte_wmb();
23552aac5b5dSYongseok Koh 	/* Disable datapath on secondary process. */
23562e86c4e5SOphir Munk 	mlx5_mp_os_req_stop_rxtx(dev);
23571c506404SBing Zhao 	/* Free the eCPRI flex parser resource. */
23581c506404SBing Zhao 	mlx5_flex_parser_ecpri_release(dev);
2359db25cadcSViacheslav Ovsiienko 	mlx5_flex_item_port_cleanup(dev);
23603564e928SGregory Etelson 	mlx5_indirect_list_handles_release(dev);
236122681deeSAlex Vesker #ifdef HAVE_MLX5_HWS_SUPPORT
23621939eb6fSDariusz Sosnowski 	flow_hw_destroy_vport_action(dev);
2363d068681bSMaayan Kashani 	/* The DR context will be closed after mlx5_os_free_shared_dr(). */
2364b401400dSSuanming Mou 	flow_hw_resource_release(dev);
23655bd0e3e6SDariusz Sosnowski 	flow_hw_clear_port_info(dev);
2366f5177bdcSMichael Baum 	if (priv->tlv_options != NULL) {
2367f5177bdcSMichael Baum 		/* Free the GENEVE TLV parser resource. */
2368f5177bdcSMichael Baum 		claim_zero(mlx5_geneve_tlv_options_destroy(priv->tlv_options, sh->phdev));
2369f5177bdcSMichael Baum 		priv->tlv_options = NULL;
2370f5177bdcSMichael Baum 	}
2371ae67e3c4SGregory Etelson 	if (priv->ptype_rss_groups) {
2372ae67e3c4SGregory Etelson 		mlx5_ipool_destroy(priv->ptype_rss_groups);
2373ae67e3c4SGregory Etelson 		priv->ptype_rss_groups = NULL;
2374ae67e3c4SGregory Etelson 	}
2375b401400dSSuanming Mou #endif
23765cf0707fSXueming Li 	if (priv->rxq_privs != NULL) {
23772e22920bSAdrien Mazarguil 		/* XXX race condition if mlx5_rx_burst() is still running. */
237820698c9fSOphir Munk 		rte_delay_us_sleep(1000);
2379a1366b1aSNélio Laranjeiro 		for (i = 0; (i != priv->rxqs_n); ++i)
2380af4f09f2SNélio Laranjeiro 			mlx5_rxq_release(dev, i);
23812e22920bSAdrien Mazarguil 		priv->rxqs_n = 0;
23824cda06c3SXueming Li 		mlx5_free(priv->rxq_privs);
23834cda06c3SXueming Li 		priv->rxq_privs = NULL;
23844cda06c3SXueming Li 	}
2385b805b7c4SPengfei Sun 	if (priv->txqs != NULL && dev->data->tx_queues != NULL) {
23862e22920bSAdrien Mazarguil 		/* XXX race condition if mlx5_tx_burst() is still running. */
238720698c9fSOphir Munk 		rte_delay_us_sleep(1000);
23886e78005aSNélio Laranjeiro 		for (i = 0; (i != priv->txqs_n); ++i)
2389af4f09f2SNélio Laranjeiro 			mlx5_txq_release(dev, i);
23902e22920bSAdrien Mazarguil 		priv->txqs_n = 0;
23912e22920bSAdrien Mazarguil 		priv->txqs = NULL;
23922e22920bSAdrien Mazarguil 	}
2393120dc4a7SYongseok Koh 	mlx5_proc_priv_uninit(dev);
239422a3761bSBing Zhao 	if (priv->drop_queue.hrxq)
239522a3761bSBing Zhao 		mlx5_drop_action_destroy(dev);
2396e6988afdSMatan Azrad 	if (priv->q_counters) {
2397e6988afdSMatan Azrad 		mlx5_devx_cmd_destroy(priv->q_counters);
2398e6988afdSMatan Azrad 		priv->q_counters = NULL;
2399e6988afdSMatan Azrad 	}
2400cd00dce6SShani Peretz 	if (priv->q_counters_hairpin) {
2401cd00dce6SShani Peretz 		mlx5_devx_cmd_destroy(priv->q_counters_hairpin);
2402cd00dce6SShani Peretz 		priv->q_counters_hairpin = NULL;
2403cd00dce6SShani Peretz 	}
24047d6bf6b8SYongseok Koh 	mlx5_mprq_free_mp(dev);
24052eb4d010SOphir Munk 	mlx5_os_free_shared_dr(priv);
2406d068681bSMaayan Kashani #ifdef HAVE_MLX5_HWS_SUPPORT
2407d068681bSMaayan Kashani 	if (priv->dr_ctx) {
2408d068681bSMaayan Kashani 		claim_zero(mlx5dr_context_close(priv->dr_ctx));
2409d068681bSMaayan Kashani 		priv->dr_ctx = NULL;
2410d068681bSMaayan Kashani 	}
2411d068681bSMaayan Kashani #endif
241229c1d8bbSNélio Laranjeiro 	if (priv->rss_conf.rss_key != NULL)
241383c2047cSSuanming Mou 		mlx5_free(priv->rss_conf.rss_key);
2414634efbc2SNelio Laranjeiro 	if (priv->reta_idx != NULL)
241583c2047cSSuanming Mou 		mlx5_free(priv->reta_idx);
241651a04badSMichael Baum 	if (sh->dev_cap.vf)
2417f00f6562SOphir Munk 		mlx5_os_mac_addr_flush(dev);
241826c08b97SAdrien Mazarguil 	if (priv->nl_socket_route >= 0)
241926c08b97SAdrien Mazarguil 		close(priv->nl_socket_route);
242026c08b97SAdrien Mazarguil 	if (priv->nl_socket_rdma >= 0)
242126c08b97SAdrien Mazarguil 		close(priv->nl_socket_rdma);
2422dfedf3e3SViacheslav Ovsiienko 	if (priv->vmwa_context)
2423dfedf3e3SViacheslav Ovsiienko 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
242423820a79SDekel Peled 	ret = mlx5_hrxq_verify(dev);
2425f5479b68SNélio Laranjeiro 	if (ret)
2426a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
24270f99970bSNélio Laranjeiro 			dev->data->port_id);
242815c80a12SDekel Peled 	ret = mlx5_ind_table_obj_verify(dev);
24294c7a0f5fSNélio Laranjeiro 	if (ret)
2430a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u some indirection table still remain",
24310f99970bSNélio Laranjeiro 			dev->data->port_id);
243293403560SDekel Peled 	ret = mlx5_rxq_obj_verify(dev);
243309cb5b58SNélio Laranjeiro 	if (ret)
243493403560SDekel Peled 		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
24350f99970bSNélio Laranjeiro 			dev->data->port_id);
2436311b17e6SMichael Baum 	ret = mlx5_ext_rxq_verify(dev);
2437311b17e6SMichael Baum 	if (ret)
2438311b17e6SMichael Baum 		DRV_LOG(WARNING, "Port %u some external RxQ still remain.",
2439311b17e6SMichael Baum 			dev->data->port_id);
2440af4f09f2SNélio Laranjeiro 	ret = mlx5_rxq_verify(dev);
2441a1366b1aSNélio Laranjeiro 	if (ret)
2442a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u some Rx queues still remain",
24430f99970bSNélio Laranjeiro 			dev->data->port_id);
2444894c4a8eSOri Kam 	ret = mlx5_txq_obj_verify(dev);
2445faf2667fSNélio Laranjeiro 	if (ret)
2446a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
24470f99970bSNélio Laranjeiro 			dev->data->port_id);
24481944fbc3SSuanming Mou 	ret = mlx5_ext_txq_verify(dev);
24491944fbc3SSuanming Mou 	if (ret)
24501944fbc3SSuanming Mou 		DRV_LOG(WARNING, "Port %u some external TxQ still remain.",
24511944fbc3SSuanming Mou 			dev->data->port_id);
2452af4f09f2SNélio Laranjeiro 	ret = mlx5_txq_verify(dev);
24536e78005aSNélio Laranjeiro 	if (ret)
2454a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u some Tx queues still remain",
24550f99970bSNélio Laranjeiro 			dev->data->port_id);
2456af4f09f2SNélio Laranjeiro 	ret = mlx5_flow_verify(dev);
24576af6b973SNélio Laranjeiro 	if (ret)
2458a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u some flows still remain",
2459a170a30dSNélio Laranjeiro 			dev->data->port_id);
2460679f46c7SMatan Azrad 	if (priv->hrxqs)
2461679f46c7SMatan Azrad 		mlx5_list_destroy(priv->hrxqs);
246280f872eeSMichael Baum 	mlx5_free(priv->ext_rxqs);
24631944fbc3SSuanming Mou 	mlx5_free(priv->ext_txqs);
246451a04badSMichael Baum 	sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
2465ef4ece4dSMichael Baum 	/*
2466ef4ece4dSMichael Baum 	 * The interrupt handler port id must be reset before priv is reset
2467ef4ece4dSMichael Baum 	 * since 'mlx5_dev_interrupt_nl_cb' uses priv.
2468ef4ece4dSMichael Baum 	 */
2469ef4ece4dSMichael Baum 	rte_io_wmb();
2470772dc0ebSSuanming Mou 	/*
2471772dc0ebSSuanming Mou 	 * Free the shared context last, because the cleanup
2472772dc0ebSSuanming Mou 	 * routines above may still use some shared fields, e.g.
24737be78d02SJosh Soref 	 * mlx5_os_mac_addr_flush() uses ibdev_path to retrieve the
2474772dc0ebSSuanming Mou 	 * ifindex if Netlink fails.
2475772dc0ebSSuanming Mou 	 */
247651a04badSMichael Baum 	mlx5_free_shared_dev_ctx(sh);
24772b730263SAdrien Mazarguil 	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
24782b730263SAdrien Mazarguil 		unsigned int c = 0;
2479d874a4eeSThomas Monjalon 		uint16_t port_id;
24802b730263SAdrien Mazarguil 
248156bb3c84SXueming Li 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
2482dbeba4cfSThomas Monjalon 			struct mlx5_priv *opriv =
2483d874a4eeSThomas Monjalon 				rte_eth_devices[port_id].data->dev_private;
24842b730263SAdrien Mazarguil 
24852b730263SAdrien Mazarguil 			if (!opriv ||
24862b730263SAdrien Mazarguil 			    opriv->domain_id != priv->domain_id ||
2487d874a4eeSThomas Monjalon 			    &rte_eth_devices[port_id] == dev)
24882b730263SAdrien Mazarguil 				continue;
24892b730263SAdrien Mazarguil 			++c;
2490f7e95215SViacheslav Ovsiienko 			break;
24912b730263SAdrien Mazarguil 		}
24922b730263SAdrien Mazarguil 		if (!c)
24932b730263SAdrien Mazarguil 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
24942b730263SAdrien Mazarguil 	}
2495771fa900SAdrien Mazarguil 	memset(priv, 0, sizeof(*priv));
24962b730263SAdrien Mazarguil 	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
249742603bbdSOphir Munk 	/*
249842603bbdSOphir Munk 	 * Reset mac_addrs to NULL such that it is not freed as part of
249942603bbdSOphir Munk 	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
250042603bbdSOphir Munk 	 * it is freed when dev_private is freed.
250142603bbdSOphir Munk 	 */
250242603bbdSOphir Munk 	dev->data->mac_addrs = NULL;
2503b142387bSThomas Monjalon 	return 0;
2504771fa900SAdrien Mazarguil }
2505771fa900SAdrien Mazarguil 
2506b012b4ceSOphir Munk const struct eth_dev_ops mlx5_dev_ops = {
2507b012b4ceSOphir Munk 	.dev_configure = mlx5_dev_configure,
2508b012b4ceSOphir Munk 	.dev_start = mlx5_dev_start,
2509b012b4ceSOphir Munk 	.dev_stop = mlx5_dev_stop,
2510b012b4ceSOphir Munk 	.dev_set_link_down = mlx5_set_link_down,
2511b012b4ceSOphir Munk 	.dev_set_link_up = mlx5_set_link_up,
2512b012b4ceSOphir Munk 	.dev_close = mlx5_dev_close,
2513b012b4ceSOphir Munk 	.promiscuous_enable = mlx5_promiscuous_enable,
2514b012b4ceSOphir Munk 	.promiscuous_disable = mlx5_promiscuous_disable,
2515b012b4ceSOphir Munk 	.allmulticast_enable = mlx5_allmulticast_enable,
2516b012b4ceSOphir Munk 	.allmulticast_disable = mlx5_allmulticast_disable,
2517b012b4ceSOphir Munk 	.link_update = mlx5_link_update,
2518b012b4ceSOphir Munk 	.stats_get = mlx5_stats_get,
2519b012b4ceSOphir Munk 	.stats_reset = mlx5_stats_reset,
2520b012b4ceSOphir Munk 	.xstats_get = mlx5_xstats_get,
2521b012b4ceSOphir Munk 	.xstats_reset = mlx5_xstats_reset,
2522b012b4ceSOphir Munk 	.xstats_get_names = mlx5_xstats_get_names,
2523b012b4ceSOphir Munk 	.fw_version_get = mlx5_fw_version_get,
2524b012b4ceSOphir Munk 	.dev_infos_get = mlx5_dev_infos_get,
2525cb95feefSXueming Li 	.representor_info_get = mlx5_representor_info_get,
2526b012b4ceSOphir Munk 	.read_clock = mlx5_txpp_read_clock,
2527b012b4ceSOphir Munk 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2528b012b4ceSOphir Munk 	.vlan_filter_set = mlx5_vlan_filter_set,
2529b012b4ceSOphir Munk 	.rx_queue_setup = mlx5_rx_queue_setup,
25305c9f3294SSpike Du 	.rx_queue_avail_thresh_set = mlx5_rx_queue_lwm_set,
25315c9f3294SSpike Du 	.rx_queue_avail_thresh_query = mlx5_rx_queue_lwm_query,
2532b012b4ceSOphir Munk 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2533b012b4ceSOphir Munk 	.tx_queue_setup = mlx5_tx_queue_setup,
2534b012b4ceSOphir Munk 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2535b012b4ceSOphir Munk 	.rx_queue_release = mlx5_rx_queue_release,
2536b012b4ceSOphir Munk 	.tx_queue_release = mlx5_tx_queue_release,
2537b012b4ceSOphir Munk 	.rx_queue_start = mlx5_rx_queue_start,
2538b012b4ceSOphir Munk 	.rx_queue_stop = mlx5_rx_queue_stop,
2539b012b4ceSOphir Munk 	.tx_queue_start = mlx5_tx_queue_start,
2540b012b4ceSOphir Munk 	.tx_queue_stop = mlx5_tx_queue_stop,
2541b012b4ceSOphir Munk 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2542b012b4ceSOphir Munk 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2543b012b4ceSOphir Munk 	.mac_addr_remove = mlx5_mac_addr_remove,
2544b012b4ceSOphir Munk 	.mac_addr_add = mlx5_mac_addr_add,
2545b012b4ceSOphir Munk 	.mac_addr_set = mlx5_mac_addr_set,
2546b012b4ceSOphir Munk 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2547b012b4ceSOphir Munk 	.mtu_set = mlx5_dev_set_mtu,
2548b012b4ceSOphir Munk 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2549b012b4ceSOphir Munk 	.vlan_offload_set = mlx5_vlan_offload_set,
2550b012b4ceSOphir Munk 	.reta_update = mlx5_dev_rss_reta_update,
2551b012b4ceSOphir Munk 	.reta_query = mlx5_dev_rss_reta_query,
2552b012b4ceSOphir Munk 	.rss_hash_update = mlx5_rss_hash_update,
2553b012b4ceSOphir Munk 	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
2554fb7ad441SThomas Monjalon 	.flow_ops_get = mlx5_flow_ops_get,
2555b012b4ceSOphir Munk 	.rxq_info_get = mlx5_rxq_info_get,
2556b012b4ceSOphir Munk 	.txq_info_get = mlx5_txq_info_get,
2557b012b4ceSOphir Munk 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2558b012b4ceSOphir Munk 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2559b012b4ceSOphir Munk 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2560b012b4ceSOphir Munk 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2561b012b4ceSOphir Munk 	.is_removed = mlx5_is_removed,
2562b012b4ceSOphir Munk 	.udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
2563b012b4ceSOphir Munk 	.get_module_info = mlx5_get_module_info,
2564b012b4ceSOphir Munk 	.get_module_eeprom = mlx5_get_module_eeprom,
2565b012b4ceSOphir Munk 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2566b012b4ceSOphir Munk 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2567b012b4ceSOphir Munk 	.hairpin_bind = mlx5_hairpin_bind,
2568b012b4ceSOphir Munk 	.hairpin_unbind = mlx5_hairpin_unbind,
2569b012b4ceSOphir Munk 	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2570b012b4ceSOphir Munk 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2571b012b4ceSOphir Munk 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2572b012b4ceSOphir Munk 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2573a8f0df6bSAlexander Kozyrev 	.get_monitor_addr = mlx5_get_monitor_addr,
2574ce306af6SJiawei Wang 	.count_aggr_ports = mlx5_count_aggr_ports,
2575ce306af6SJiawei Wang 	.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,
2576fca8cba4SDavid Marchand 	.rx_metadata_negotiate = mlx5_flow_rx_metadata_negotiate,
25776a3446cfSDariusz Sosnowski 	.get_restore_flags = mlx5_get_restore_flags,
2578b012b4ceSOphir Munk };
2579b012b4ceSOphir Munk 
2580b012b4ceSOphir Munk /* Available operations from secondary process. */
2581b012b4ceSOphir Munk const struct eth_dev_ops mlx5_dev_sec_ops = {
2582b012b4ceSOphir Munk 	.stats_get = mlx5_stats_get,
2583b012b4ceSOphir Munk 	.stats_reset = mlx5_stats_reset,
2584b012b4ceSOphir Munk 	.xstats_get = mlx5_xstats_get,
2585b012b4ceSOphir Munk 	.xstats_reset = mlx5_xstats_reset,
2586b012b4ceSOphir Munk 	.xstats_get_names = mlx5_xstats_get_names,
2587b012b4ceSOphir Munk 	.fw_version_get = mlx5_fw_version_get,
2588b012b4ceSOphir Munk 	.dev_infos_get = mlx5_dev_infos_get,
258992d16c83SXueming Li 	.representor_info_get = mlx5_representor_info_get,
2590b012b4ceSOphir Munk 	.read_clock = mlx5_txpp_read_clock,
2591b012b4ceSOphir Munk 	.rx_queue_start = mlx5_rx_queue_start,
2592b012b4ceSOphir Munk 	.rx_queue_stop = mlx5_rx_queue_stop,
2593b012b4ceSOphir Munk 	.tx_queue_start = mlx5_tx_queue_start,
2594b012b4ceSOphir Munk 	.tx_queue_stop = mlx5_tx_queue_stop,
2595b012b4ceSOphir Munk 	.rxq_info_get = mlx5_rxq_info_get,
2596b012b4ceSOphir Munk 	.txq_info_get = mlx5_txq_info_get,
2597b012b4ceSOphir Munk 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2598b012b4ceSOphir Munk 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2599b012b4ceSOphir Munk 	.get_module_info = mlx5_get_module_info,
2600b012b4ceSOphir Munk 	.get_module_eeprom = mlx5_get_module_eeprom,
2601ce306af6SJiawei Wang 	.count_aggr_ports = mlx5_count_aggr_ports,
2602ce306af6SJiawei Wang 	.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,
2603fca8cba4SDavid Marchand 	.rx_metadata_negotiate = mlx5_flow_rx_metadata_negotiate,
2604b012b4ceSOphir Munk };
2605b012b4ceSOphir Munk 
2606b012b4ceSOphir Munk /* Available operations in flow isolated mode. */
2607b012b4ceSOphir Munk const struct eth_dev_ops mlx5_dev_ops_isolate = {
2608b012b4ceSOphir Munk 	.dev_configure = mlx5_dev_configure,
2609b012b4ceSOphir Munk 	.dev_start = mlx5_dev_start,
2610b012b4ceSOphir Munk 	.dev_stop = mlx5_dev_stop,
2611b012b4ceSOphir Munk 	.dev_set_link_down = mlx5_set_link_down,
2612b012b4ceSOphir Munk 	.dev_set_link_up = mlx5_set_link_up,
2613b012b4ceSOphir Munk 	.dev_close = mlx5_dev_close,
2614b012b4ceSOphir Munk 	.promiscuous_enable = mlx5_promiscuous_enable,
2615b012b4ceSOphir Munk 	.promiscuous_disable = mlx5_promiscuous_disable,
2616b012b4ceSOphir Munk 	.allmulticast_enable = mlx5_allmulticast_enable,
2617b012b4ceSOphir Munk 	.allmulticast_disable = mlx5_allmulticast_disable,
2618b012b4ceSOphir Munk 	.link_update = mlx5_link_update,
2619b012b4ceSOphir Munk 	.stats_get = mlx5_stats_get,
2620b012b4ceSOphir Munk 	.stats_reset = mlx5_stats_reset,
2621b012b4ceSOphir Munk 	.xstats_get = mlx5_xstats_get,
2622b012b4ceSOphir Munk 	.xstats_reset = mlx5_xstats_reset,
2623b012b4ceSOphir Munk 	.xstats_get_names = mlx5_xstats_get_names,
2624b012b4ceSOphir Munk 	.fw_version_get = mlx5_fw_version_get,
2625b012b4ceSOphir Munk 	.dev_infos_get = mlx5_dev_infos_get,
262692d16c83SXueming Li 	.representor_info_get = mlx5_representor_info_get,
2627b012b4ceSOphir Munk 	.read_clock = mlx5_txpp_read_clock,
2628b012b4ceSOphir Munk 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2629b012b4ceSOphir Munk 	.vlan_filter_set = mlx5_vlan_filter_set,
2630b012b4ceSOphir Munk 	.rx_queue_setup = mlx5_rx_queue_setup,
2631b012b4ceSOphir Munk 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2632b012b4ceSOphir Munk 	.tx_queue_setup = mlx5_tx_queue_setup,
2633b012b4ceSOphir Munk 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2634b012b4ceSOphir Munk 	.rx_queue_release = mlx5_rx_queue_release,
2635b012b4ceSOphir Munk 	.tx_queue_release = mlx5_tx_queue_release,
2636b012b4ceSOphir Munk 	.rx_queue_start = mlx5_rx_queue_start,
2637b012b4ceSOphir Munk 	.rx_queue_stop = mlx5_rx_queue_stop,
2638b012b4ceSOphir Munk 	.tx_queue_start = mlx5_tx_queue_start,
2639b012b4ceSOphir Munk 	.tx_queue_stop = mlx5_tx_queue_stop,
2640b012b4ceSOphir Munk 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2641b012b4ceSOphir Munk 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2642b012b4ceSOphir Munk 	.mac_addr_remove = mlx5_mac_addr_remove,
2643b012b4ceSOphir Munk 	.mac_addr_add = mlx5_mac_addr_add,
2644b012b4ceSOphir Munk 	.mac_addr_set = mlx5_mac_addr_set,
2645b012b4ceSOphir Munk 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2646b012b4ceSOphir Munk 	.mtu_set = mlx5_dev_set_mtu,
2647b012b4ceSOphir Munk 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2648b012b4ceSOphir Munk 	.vlan_offload_set = mlx5_vlan_offload_set,
2649fb7ad441SThomas Monjalon 	.flow_ops_get = mlx5_flow_ops_get,
2650b012b4ceSOphir Munk 	.rxq_info_get = mlx5_rxq_info_get,
2651b012b4ceSOphir Munk 	.txq_info_get = mlx5_txq_info_get,
2652b012b4ceSOphir Munk 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2653b012b4ceSOphir Munk 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2654b012b4ceSOphir Munk 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2655b012b4ceSOphir Munk 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2656b012b4ceSOphir Munk 	.is_removed = mlx5_is_removed,
2657b012b4ceSOphir Munk 	.get_module_info = mlx5_get_module_info,
2658b012b4ceSOphir Munk 	.get_module_eeprom = mlx5_get_module_eeprom,
2659b012b4ceSOphir Munk 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2660b012b4ceSOphir Munk 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2661b012b4ceSOphir Munk 	.hairpin_bind = mlx5_hairpin_bind,
2662b012b4ceSOphir Munk 	.hairpin_unbind = mlx5_hairpin_unbind,
2663b012b4ceSOphir Munk 	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2664b012b4ceSOphir Munk 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2665b012b4ceSOphir Munk 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2666b012b4ceSOphir Munk 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2667a8f0df6bSAlexander Kozyrev 	.get_monitor_addr = mlx5_get_monitor_addr,
2668ce306af6SJiawei Wang 	.count_aggr_ports = mlx5_count_aggr_ports,
2669ce306af6SJiawei Wang 	.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,
26706a3446cfSDariusz Sosnowski 	.get_restore_flags = mlx5_get_restore_flags,
2671b012b4ceSOphir Munk };
2672b012b4ceSOphir Munk 
2673e72dd09bSNélio Laranjeiro /**
2674e72dd09bSNélio Laranjeiro  * Verify and store value for device argument.
2675e72dd09bSNélio Laranjeiro  *
2676e72dd09bSNélio Laranjeiro  * @param[in] key
2677e72dd09bSNélio Laranjeiro  *   Key argument to verify.
2678e72dd09bSNélio Laranjeiro  * @param[in] val
2679e72dd09bSNélio Laranjeiro  *   Value associated with key.
2680e72dd09bSNélio Laranjeiro  * @param opaque
2681e72dd09bSNélio Laranjeiro  *   User data.
2682e72dd09bSNélio Laranjeiro  *
2683e72dd09bSNélio Laranjeiro  * @return
2684a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
2685e72dd09bSNélio Laranjeiro  */
2686e72dd09bSNélio Laranjeiro static int
268745a6df80SMichael Baum mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
2688e72dd09bSNélio Laranjeiro {
268945a6df80SMichael Baum 	struct mlx5_port_config *config = opaque;
26908f848f32SViacheslav Ovsiienko 	signed long tmp;
2691e72dd09bSNélio Laranjeiro 
26926de569f5SAdrien Mazarguil 	/* No-op, port representors are processed in mlx5_dev_spawn(). */
2693a729d2f0SMichael Baum 	if (!strcmp(MLX5_REPRESENTOR, key))
26946de569f5SAdrien Mazarguil 		return 0;
269599c12dccSNélio Laranjeiro 	errno = 0;
26968f848f32SViacheslav Ovsiienko 	tmp = strtol(val, NULL, 0);
269799c12dccSNélio Laranjeiro 	if (errno) {
2698a6d83b6aSNélio Laranjeiro 		rte_errno = errno;
2699a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
2700a6d83b6aSNélio Laranjeiro 		return -rte_errno;
270199c12dccSNélio Laranjeiro 	}
2702a13ec19cSMichael Baum 	if (tmp < 0) {
27038f848f32SViacheslav Ovsiienko 		/* None of the keys handled here accepts a negative value. */
27048f848f32SViacheslav Ovsiienko 		rte_errno = EINVAL;
27058f848f32SViacheslav Ovsiienko 		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
27068f848f32SViacheslav Ovsiienko 		return -rte_errno;
27078f848f32SViacheslav Ovsiienko 	}
270899c12dccSNélio Laranjeiro 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
270999532fb1SAlexander Kozyrev 		if ((tmp & ~MLX5_RXQ_ENH_CQE_COMP_MASK) >
271099532fb1SAlexander Kozyrev 		    MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
271154c2d46bSAlexander Kozyrev 			DRV_LOG(ERR, "invalid CQE compression "
271254c2d46bSAlexander Kozyrev 				     "format parameter");
271354c2d46bSAlexander Kozyrev 			rte_errno = EINVAL;
271454c2d46bSAlexander Kozyrev 			return -rte_errno;
271554c2d46bSAlexander Kozyrev 		}
27167fe24446SShahaf Shuler 		config->cqe_comp = !!tmp;
271799532fb1SAlexander Kozyrev 		config->cqe_comp_fmt = tmp & ~MLX5_RXQ_ENH_CQE_COMP_MASK;
271899532fb1SAlexander Kozyrev 		config->enh_cqe_comp = !!(tmp & MLX5_RXQ_ENH_CQE_COMP_MASK);
271978c7a16dSYongseok Koh 	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
272078c7a16dSYongseok Koh 		config->hw_padding = !!tmp;
27217d6bf6b8SYongseok Koh 	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
27227d6bf6b8SYongseok Koh 		config->mprq.enabled = !!tmp;
27237d6bf6b8SYongseok Koh 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
27240947ed38SMichael Baum 		config->mprq.log_stride_num = tmp;
2725ecb16045SAlexander Kozyrev 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
27260947ed38SMichael Baum 		config->mprq.log_stride_size = tmp;
27277d6bf6b8SYongseok Koh 	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
27287d6bf6b8SYongseok Koh 		config->mprq.max_memcpy_len = tmp;
27297d6bf6b8SYongseok Koh 	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
27307d6bf6b8SYongseok Koh 		config->mprq.min_rxqs_num = tmp;
27312a66cf37SYaacov Hazan 	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
2732505f1fe4SViacheslav Ovsiienko 		DRV_LOG(WARNING, "%s: deprecated parameter,"
2733505f1fe4SViacheslav Ovsiienko 				 " converted to txq_inline_max", key);
2734505f1fe4SViacheslav Ovsiienko 		config->txq_inline_max = tmp;
2735505f1fe4SViacheslav Ovsiienko 	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
2736505f1fe4SViacheslav Ovsiienko 		config->txq_inline_max = tmp;
2737505f1fe4SViacheslav Ovsiienko 	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
2738505f1fe4SViacheslav Ovsiienko 		config->txq_inline_min = tmp;
2739505f1fe4SViacheslav Ovsiienko 	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
2740505f1fe4SViacheslav Ovsiienko 		config->txq_inline_mpw = tmp;
27412a66cf37SYaacov Hazan 	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
27427fe24446SShahaf Shuler 		config->txqs_inline = tmp;
274309d8b416SYongseok Koh 	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
2744a6bd4911SViacheslav Ovsiienko 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2745230189d9SNélio Laranjeiro 	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
2746f9de8718SShahaf Shuler 		config->mps = !!tmp;
27476ce84bd8SYongseok Koh 	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
2748a6bd4911SViacheslav Ovsiienko 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
27496ce84bd8SYongseok Koh 	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
2750505f1fe4SViacheslav Ovsiienko 		DRV_LOG(WARNING, "%s: deprecated parameter,"
2751505f1fe4SViacheslav Ovsiienko 				 " converted to txq_inline_mpw", key);
2752505f1fe4SViacheslav Ovsiienko 		config->txq_inline_mpw = tmp;
27535644d5b9SNelio Laranjeiro 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
2754a6bd4911SViacheslav Ovsiienko 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
27555644d5b9SNelio Laranjeiro 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
27567fe24446SShahaf Shuler 		config->rx_vec_en = !!tmp;
2757066cfecdSMatan Azrad 	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
2758066cfecdSMatan Azrad 		config->max_dump_files_num = tmp;
275921bb6c7eSDekel Peled 	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
276087af0d1eSMichael Baum 		config->lro_timeout = tmp;
27611ad9a3d0SBing Zhao 	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
27621ad9a3d0SBing Zhao 		config->log_hp_size = tmp;
2763febcac7bSBing Zhao 	} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
2764ce78c518SBing Zhao 		config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
2765ce78c518SBing Zhao 		config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
2766e72dd09bSNélio Laranjeiro 	}
276799c12dccSNélio Laranjeiro 	return 0;
276899c12dccSNélio Laranjeiro }
2769e72dd09bSNélio Laranjeiro 
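/*
 * Devargs sketch (illustrative PCI address and values): the keys handled
 * above arrive as comma-separated "key=value" pairs appended to the device
 * specification, e.g.
 *
 *	dpdk-testpmd -a 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=256
 *
 * Each pair is fed to this handler, which validates the value and stores
 * it in the struct mlx5_port_config instance passed as @p opaque.
 */
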
2770e72dd09bSNélio Laranjeiro /**
277145a6df80SMichael Baum  * Parse user port parameters and adjust them according to device capabilities.
2772e72dd09bSNélio Laranjeiro  *
277345a6df80SMichael Baum  * @param priv
277445a6df80SMichael Baum  *   Pointer to shared device context.
2775a729d2f0SMichael Baum  * @param mkvlist
2776a729d2f0SMichael Baum  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
277745a6df80SMichael Baum  * @param config
277845a6df80SMichael Baum  *   Pointer to port configuration structure.
2779e72dd09bSNélio Laranjeiro  *
2780e72dd09bSNélio Laranjeiro  * @return
2781a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
2782e72dd09bSNélio Laranjeiro  */
27832eb4d010SOphir Munk int
2784a729d2f0SMichael Baum mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
278545a6df80SMichael Baum 		      struct mlx5_port_config *config)
2786e72dd09bSNélio Laranjeiro {
278745a6df80SMichael Baum 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
278845a6df80SMichael Baum 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
278945a6df80SMichael Baum 	bool devx = priv->sh->cdev->config.devx;
2790a729d2f0SMichael Baum 	const char **params = (const char *[]){
2791a729d2f0SMichael Baum 		MLX5_RXQ_CQE_COMP_EN,
2792a729d2f0SMichael Baum 		MLX5_RXQ_PKT_PAD_EN,
2793a729d2f0SMichael Baum 		MLX5_RX_MPRQ_EN,
2794a729d2f0SMichael Baum 		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2795a729d2f0SMichael Baum 		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2796a729d2f0SMichael Baum 		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2797a729d2f0SMichael Baum 		MLX5_RXQS_MIN_MPRQ,
2798a729d2f0SMichael Baum 		MLX5_TXQ_INLINE,
2799a729d2f0SMichael Baum 		MLX5_TXQ_INLINE_MIN,
2800a729d2f0SMichael Baum 		MLX5_TXQ_INLINE_MAX,
2801a729d2f0SMichael Baum 		MLX5_TXQ_INLINE_MPW,
2802a729d2f0SMichael Baum 		MLX5_TXQS_MIN_INLINE,
2803a729d2f0SMichael Baum 		MLX5_TXQS_MAX_VEC,
2804a729d2f0SMichael Baum 		MLX5_TXQ_MPW_EN,
2805a729d2f0SMichael Baum 		MLX5_TXQ_MPW_HDR_DSEG_EN,
2806a729d2f0SMichael Baum 		MLX5_TXQ_MAX_INLINE_LEN,
2807a729d2f0SMichael Baum 		MLX5_TX_VEC_EN,
2808a729d2f0SMichael Baum 		MLX5_RX_VEC_EN,
2809a729d2f0SMichael Baum 		MLX5_REPRESENTOR,
2810a729d2f0SMichael Baum 		MLX5_MAX_DUMP_FILES_NUM,
2811a729d2f0SMichael Baum 		MLX5_LRO_TIMEOUT_USEC,
2812a729d2f0SMichael Baum 		MLX5_HP_BUF_SIZE,
2813a729d2f0SMichael Baum 		MLX5_DELAY_DROP,
2814a729d2f0SMichael Baum 		NULL,
2815a729d2f0SMichael Baum 	};
2816e72dd09bSNélio Laranjeiro 	int ret = 0;
2817e72dd09bSNélio Laranjeiro 
281845a6df80SMichael Baum 	/* Default configuration. */
281945a6df80SMichael Baum 	memset(config, 0, sizeof(*config));
282045a6df80SMichael Baum 	config->mps = MLX5_ARG_UNSET;
282145a6df80SMichael Baum 	config->cqe_comp = 1;
282245a6df80SMichael Baum 	config->rx_vec_en = 1;
282345a6df80SMichael Baum 	config->txq_inline_max = MLX5_ARG_UNSET;
282445a6df80SMichael Baum 	config->txq_inline_min = MLX5_ARG_UNSET;
282545a6df80SMichael Baum 	config->txq_inline_mpw = MLX5_ARG_UNSET;
282645a6df80SMichael Baum 	config->txqs_inline = MLX5_ARG_UNSET;
282745a6df80SMichael Baum 	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
282845a6df80SMichael Baum 	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
282945a6df80SMichael Baum 	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
2830fdee0f1bSAlexander Kozyrev 	config->mprq.log_stride_size = MLX5_ARG_UNSET;
283145a6df80SMichael Baum 	config->log_hp_size = MLX5_ARG_UNSET;
283245a6df80SMichael Baum 	config->std_delay_drop = 0;
283345a6df80SMichael Baum 	config->hp_delay_drop = 0;
2834a729d2f0SMichael Baum 	if (mkvlist != NULL) {
2835e72dd09bSNélio Laranjeiro 		/* Process parameters. */
2836a729d2f0SMichael Baum 		ret = mlx5_kvargs_process(mkvlist, params,
283745a6df80SMichael Baum 					  mlx5_port_args_check_handler, config);
283845a6df80SMichael Baum 		if (ret) {
283945a6df80SMichael Baum 			DRV_LOG(ERR, "Failed to process port arguments: %s",
284045a6df80SMichael Baum 				strerror(rte_errno));
284145a6df80SMichael Baum 			return -rte_errno;
284245a6df80SMichael Baum 		}
284345a6df80SMichael Baum 	}
284445a6df80SMichael Baum 	/* Adjust parameters according to device capabilities. */
284545a6df80SMichael Baum 	if (config->hw_padding && !dev_cap->hw_padding) {
284645a6df80SMichael Baum 		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
284745a6df80SMichael Baum 		config->hw_padding = 0;
284845a6df80SMichael Baum 	} else if (config->hw_padding) {
284945a6df80SMichael Baum 		DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
285045a6df80SMichael Baum 	}
285145a6df80SMichael Baum 	/*
285245a6df80SMichael Baum 	 * Legacy MPW is disabled by default, while Enhanced MPW is enabled
285345a6df80SMichael Baum 	 * by default when the device supports it.
285445a6df80SMichael Baum 	 */
285545a6df80SMichael Baum 	if (config->mps == MLX5_ARG_UNSET)
285645a6df80SMichael Baum 		config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
285745a6df80SMichael Baum 			      MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
285845a6df80SMichael Baum 	else
285945a6df80SMichael Baum 		config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
286045a6df80SMichael Baum 	DRV_LOG(INFO, "%sMPS is %s",
286145a6df80SMichael Baum 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
286245a6df80SMichael Baum 		config->mps == MLX5_MPW ? "legacy " : "",
286345a6df80SMichael Baum 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
2864593f913aSMichael Baum 	if (priv->sh->config.lro_allowed) {
286545a6df80SMichael Baum 		/*
286645a6df80SMichael Baum 		 * If LRO timeout is not configured by application,
286745a6df80SMichael Baum 		 * use the minimal supported value.
286845a6df80SMichael Baum 		 */
286945a6df80SMichael Baum 		if (!config->lro_timeout)
287045a6df80SMichael Baum 			config->lro_timeout =
287145a6df80SMichael Baum 				       hca_attr->lro_timer_supported_periods[0];
287245a6df80SMichael Baum 		DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
287345a6df80SMichael Baum 			config->lro_timeout);
287445a6df80SMichael Baum 	}
287545a6df80SMichael Baum 	if (config->cqe_comp && !dev_cap->cqe_comp) {
287645a6df80SMichael Baum 		DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
287745a6df80SMichael Baum 		config->cqe_comp = 0;
287845a6df80SMichael Baum 	}
287945a6df80SMichael Baum 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
288045a6df80SMichael Baum 	    (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
288145a6df80SMichael Baum 		DRV_LOG(WARNING,
288245a6df80SMichael Baum 			"Flow Tag CQE compression format isn't supported.");
288345a6df80SMichael Baum 		config->cqe_comp = 0;
288445a6df80SMichael Baum 	}
288545a6df80SMichael Baum 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
288645a6df80SMichael Baum 	    (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
288745a6df80SMichael Baum 		DRV_LOG(WARNING,
288845a6df80SMichael Baum 			"L3/L4 Header CQE compression format isn't supported.");
288945a6df80SMichael Baum 		config->cqe_comp = 0;
289045a6df80SMichael Baum 	}
289199532fb1SAlexander Kozyrev 	if (config->enh_cqe_comp && !hca_attr->enhanced_cqe_compression) {
289299532fb1SAlexander Kozyrev 		DRV_LOG(WARNING,
289399532fb1SAlexander Kozyrev 			"Enhanced CQE compression isn't supported.");
289499532fb1SAlexander Kozyrev 		config->enh_cqe_comp = 0;
289599532fb1SAlexander Kozyrev 	}
289699532fb1SAlexander Kozyrev 	DRV_LOG(DEBUG, "%sRx CQE compression is %ssupported.",
289799532fb1SAlexander Kozyrev 		config->enh_cqe_comp ? "Enhanced " : "",
289845a6df80SMichael Baum 		config->cqe_comp ? "" : "not ");
289945a6df80SMichael Baum 	if ((config->std_delay_drop || config->hp_delay_drop) &&
290045a6df80SMichael Baum 	    !dev_cap->rq_delay_drop_en) {
290145a6df80SMichael Baum 		config->std_delay_drop = 0;
290245a6df80SMichael Baum 		config->hp_delay_drop = 0;
290345a6df80SMichael Baum 		DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
290445a6df80SMichael Baum 			priv->dev_port);
290545a6df80SMichael Baum 	}
290645a6df80SMichael Baum 	if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
290745a6df80SMichael Baum 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
290845a6df80SMichael Baum 		config->mprq.enabled = 0;
290945a6df80SMichael Baum 	}
291045a6df80SMichael Baum 	if (config->max_dump_files_num == 0)
291145a6df80SMichael Baum 		config->max_dump_files_num = 128;
291245a6df80SMichael Baum 	/* Detect minimal data bytes to inline. */
291345a6df80SMichael Baum 	mlx5_set_min_inline(priv);
291445a6df80SMichael Baum 	DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
291545a6df80SMichael Baum 		config->hw_vlan_insert ? "" : "not ");
291645a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
291745a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
291845a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
291999532fb1SAlexander Kozyrev 	DRV_LOG(DEBUG, "\"enh_cqe_comp\" is %u.", config->enh_cqe_comp);
292045a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
292145a6df80SMichael Baum 	DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
292245a6df80SMichael Baum 		config->std_delay_drop);
292345a6df80SMichael Baum 	DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
292445a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
292545a6df80SMichael Baum 		config->max_dump_files_num);
292645a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
292745a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
292845a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
292945a6df80SMichael Baum 		config->mprq.log_stride_num);
293045a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
293145a6df80SMichael Baum 		config->mprq.log_stride_size);
293245a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
293345a6df80SMichael Baum 		config->mprq.max_memcpy_len);
293445a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
293545a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
293645a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
293745a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
293845a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
293945a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
294045a6df80SMichael Baum 	DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
294145a6df80SMichael Baum 	return 0;
2942e72dd09bSNélio Laranjeiro }
2943e72dd09bSNélio Laranjeiro 
29447be600c8SYongseok Koh /**
2945a729d2f0SMichael Baum  * Print the key for device argument.
2946a729d2f0SMichael Baum  *
2947a729d2f0SMichael Baum  * It is a "dummy" handler whose whole purpose is to enable using the
2948a729d2f0SMichael Baum  * mlx5_kvargs_process() function, which marks devargs as used.
2949a729d2f0SMichael Baum  *
2950a729d2f0SMichael Baum  * @param key
2951a729d2f0SMichael Baum  *   Key argument.
2952a729d2f0SMichael Baum  * @param val
2953a729d2f0SMichael Baum  *   Value associated with key, unused.
2954a729d2f0SMichael Baum  * @param opaque
2955a729d2f0SMichael Baum  *   Unused, can be NULL.
2956a729d2f0SMichael Baum  *
2957a729d2f0SMichael Baum  * @return
2958a729d2f0SMichael Baum  *   0 on success, function cannot fail.
2959a729d2f0SMichael Baum  */
2960a729d2f0SMichael Baum static int
2961a729d2f0SMichael Baum mlx5_dummy_handler(const char *key, const char *val, void *opaque)
2962a729d2f0SMichael Baum {
2963a729d2f0SMichael Baum 	DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
2964a729d2f0SMichael Baum 	RTE_SET_USED(opaque);
2965a729d2f0SMichael Baum 	RTE_SET_USED(val);
2966a729d2f0SMichael Baum 	return 0;
2967a729d2f0SMichael Baum }
2968a729d2f0SMichael Baum 
2969a729d2f0SMichael Baum /**
2970a729d2f0SMichael Baum  * Set requested devargs as used when device is already spawned.
2971a729d2f0SMichael Baum  *
2972a729d2f0SMichael Baum  * It is necessary since it is valid to probe an existing device again;
2973a729d2f0SMichael Baum  * if its devargs are not marked as used, mlx5_kvargs_validate() will fail.
2974a729d2f0SMichael Baum  *
2975a729d2f0SMichael Baum  * @param name
2976a729d2f0SMichael Baum  *   Name of the existing device.
2977a729d2f0SMichael Baum  * @param port_id
2978a729d2f0SMichael Baum  *   Port identifier of the device.
2979a729d2f0SMichael Baum  * @param mkvlist
2980a729d2f0SMichael Baum  *   Pointer to mlx5 kvargs control to sign as used.
2981a729d2f0SMichael Baum  */
2982a729d2f0SMichael Baum void
2983a729d2f0SMichael Baum mlx5_port_args_set_used(const char *name, uint16_t port_id,
2984a729d2f0SMichael Baum 			struct mlx5_kvargs_ctrl *mkvlist)
2985a729d2f0SMichael Baum {
2986a729d2f0SMichael Baum 	const char **params = (const char *[]){
2987a729d2f0SMichael Baum 		MLX5_RXQ_CQE_COMP_EN,
2988a729d2f0SMichael Baum 		MLX5_RXQ_PKT_PAD_EN,
2989a729d2f0SMichael Baum 		MLX5_RX_MPRQ_EN,
2990a729d2f0SMichael Baum 		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2991a729d2f0SMichael Baum 		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2992a729d2f0SMichael Baum 		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2993a729d2f0SMichael Baum 		MLX5_RXQS_MIN_MPRQ,
2994a729d2f0SMichael Baum 		MLX5_TXQ_INLINE,
2995a729d2f0SMichael Baum 		MLX5_TXQ_INLINE_MIN,
2996a729d2f0SMichael Baum 		MLX5_TXQ_INLINE_MAX,
2997a729d2f0SMichael Baum 		MLX5_TXQ_INLINE_MPW,
2998a729d2f0SMichael Baum 		MLX5_TXQS_MIN_INLINE,
2999a729d2f0SMichael Baum 		MLX5_TXQS_MAX_VEC,
3000a729d2f0SMichael Baum 		MLX5_TXQ_MPW_EN,
3001a729d2f0SMichael Baum 		MLX5_TXQ_MPW_HDR_DSEG_EN,
3002a729d2f0SMichael Baum 		MLX5_TXQ_MAX_INLINE_LEN,
3003a729d2f0SMichael Baum 		MLX5_TX_VEC_EN,
3004a729d2f0SMichael Baum 		MLX5_RX_VEC_EN,
3005a729d2f0SMichael Baum 		MLX5_REPRESENTOR,
3006a729d2f0SMichael Baum 		MLX5_MAX_DUMP_FILES_NUM,
3007a729d2f0SMichael Baum 		MLX5_LRO_TIMEOUT_USEC,
3008a729d2f0SMichael Baum 		MLX5_HP_BUF_SIZE,
3009a729d2f0SMichael Baum 		MLX5_DELAY_DROP,
3010a729d2f0SMichael Baum 		NULL,
3011a729d2f0SMichael Baum 	};
3012a729d2f0SMichael Baum 
3013a729d2f0SMichael Baum 	/* Secondary process should not handle devargs. */
3014a729d2f0SMichael Baum 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3015a729d2f0SMichael Baum 		return;
3016a729d2f0SMichael Baum 	MLX5_ASSERT(mkvlist != NULL);
3017a729d2f0SMichael Baum 	DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
3018a729d2f0SMichael Baum 		"already exists, set devargs as used:", name, port_id);
3019a729d2f0SMichael Baum 	/* This function cannot fail with this handler. */
3020a729d2f0SMichael Baum 	mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
3021a729d2f0SMichael Baum }
3022a729d2f0SMichael Baum 
3023a729d2f0SMichael Baum /**
3024a13ec19cSMichael Baum  * Check sibling device configurations when probing again.
3025a13ec19cSMichael Baum  *
3026a13ec19cSMichael Baum  * Sibling devices sharing an Infiniband device context should have
3027a13ec19cSMichael Baum  * compatible configurations. This regards representors and bonding devices.
3028a13ec19cSMichael Baum  *
3029a13ec19cSMichael Baum  * @param cdev
3030a13ec19cSMichael Baum  *   Pointer to mlx5 device structure.
3031a729d2f0SMichael Baum  * @param mkvlist
3032a729d2f0SMichael Baum  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
3033a13ec19cSMichael Baum  *
3034a13ec19cSMichael Baum  * @return
3035a13ec19cSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
3036a13ec19cSMichael Baum  */
3037a13ec19cSMichael Baum int
3038a729d2f0SMichael Baum mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
3039a729d2f0SMichael Baum 			       struct mlx5_kvargs_ctrl *mkvlist)
3040a13ec19cSMichael Baum {
3041a13ec19cSMichael Baum 	struct mlx5_dev_ctx_shared *sh = NULL;
3042a13ec19cSMichael Baum 	struct mlx5_sh_config *config;
3043a13ec19cSMichael Baum 	int ret;
3044a13ec19cSMichael Baum 
3045a13ec19cSMichael Baum 	/* Secondary process should not handle devargs. */
3046a13ec19cSMichael Baum 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3047a13ec19cSMichael Baum 		return 0;
3048a13ec19cSMichael Baum 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
3049a13ec19cSMichael Baum 	/* Search for IB context by common device pointer. */
305092d3a05eSMichael Baum 	LIST_FOREACH(sh, &dev_ctx_list, next)
3051a13ec19cSMichael Baum 		if (sh->cdev == cdev)
3052a13ec19cSMichael Baum 			break;
3053a13ec19cSMichael Baum 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
3054a13ec19cSMichael Baum 	/* There is no sh for this device -> it isn't a probe again. */
3055a13ec19cSMichael Baum 	if (sh == NULL)
3056a13ec19cSMichael Baum 		return 0;
3057a13ec19cSMichael Baum 	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
3058a13ec19cSMichael Baum 			     sizeof(struct mlx5_sh_config),
3059a13ec19cSMichael Baum 			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
3060a13ec19cSMichael Baum 	if (config == NULL) {
3061a13ec19cSMichael Baum 		rte_errno = ENOMEM;
3062a13ec19cSMichael Baum 		return -rte_errno;
3063a13ec19cSMichael Baum 	}
3064a13ec19cSMichael Baum 	/*
3065a13ec19cSMichael Baum 	 * Creates a temporary IB context configure structure according to new
3066a13ec19cSMichael Baum 	 * devargs attached in probing again.
3067a13ec19cSMichael Baum 	 */
3068a729d2f0SMichael Baum 	ret = mlx5_shared_dev_ctx_args_config(sh, mkvlist, config);
3069a13ec19cSMichael Baum 	if (ret) {
3070a13ec19cSMichael Baum 		DRV_LOG(ERR, "Failed to process device configure: %s",
3071a13ec19cSMichael Baum 			strerror(rte_errno));
3072a13ec19cSMichael Baum 		mlx5_free(config);
3073a13ec19cSMichael Baum 		return ret;
3074a13ec19cSMichael Baum 	}
3075a13ec19cSMichael Baum 	/*
3076a13ec19cSMichael Baum 	 * Check that the temporary structure matches the existing
3077a13ec19cSMichael Baum 	 * IB context configuration.
3078a13ec19cSMichael Baum 	 */
3079a13ec19cSMichael Baum 	if (sh->config.dv_flow_en ^ config->dv_flow_en) {
3080a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"dv_flow_en\" "
3081a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3082a13ec19cSMichael Baum 			sh->ibdev_name);
3083a13ec19cSMichael Baum 		goto error;
3084a13ec19cSMichael Baum 	}
3085a13ec19cSMichael Baum 	if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) ||
3086a13ec19cSMichael Baum 	    (sh->config.dv_miss_info ^ config->dv_miss_info)) {
3087a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"dv_xmeta_en\" "
3088a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3089a13ec19cSMichael Baum 			sh->ibdev_name);
3090a13ec19cSMichael Baum 		goto error;
3091a13ec19cSMichael Baum 	}
3092a13ec19cSMichael Baum 	if (sh->config.dv_esw_en ^ config->dv_esw_en) {
3093a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"dv_esw_en\" "
3094a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3095a13ec19cSMichael Baum 			sh->ibdev_name);
3096a13ec19cSMichael Baum 		goto error;
3097a13ec19cSMichael Baum 	}
3098a13ec19cSMichael Baum 	if (sh->config.reclaim_mode ^ config->reclaim_mode) {
3099a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"reclaim_mode\" "
3100a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3101a13ec19cSMichael Baum 			sh->ibdev_name);
3102a13ec19cSMichael Baum 		goto error;
3103a13ec19cSMichael Baum 	}
3104a13ec19cSMichael Baum 	if (sh->config.allow_duplicate_pattern ^
3105a13ec19cSMichael Baum 	    config->allow_duplicate_pattern) {
3106a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"allow_duplicate_pattern\" "
3107a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3108a13ec19cSMichael Baum 			sh->ibdev_name);
3109a13ec19cSMichael Baum 		goto error;
3110a13ec19cSMichael Baum 	}
31111939eb6fSDariusz Sosnowski 	if (sh->config.fdb_def_rule ^ config->fdb_def_rule) {
31121939eb6fSDariusz Sosnowski 		DRV_LOG(ERR, "\"fdb_def_rule_en\" configuration mismatch for shared %s context.",
31131939eb6fSDariusz Sosnowski 			sh->ibdev_name);
31141939eb6fSDariusz Sosnowski 		goto error;
31151939eb6fSDariusz Sosnowski 	}
3116a13ec19cSMichael Baum 	if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
3117a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"l3_vxlan_en\" "
3118a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3119a13ec19cSMichael Baum 			sh->ibdev_name);
3120a13ec19cSMichael Baum 		goto error;
3121a13ec19cSMichael Baum 	}
3122a13ec19cSMichael Baum 	if (sh->config.decap_en ^ config->decap_en) {
3123a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"decap_en\" "
3124a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3125a13ec19cSMichael Baum 			sh->ibdev_name);
3126a13ec19cSMichael Baum 		goto error;
3127a13ec19cSMichael Baum 	}
3128a13ec19cSMichael Baum 	if (sh->config.lacp_by_user ^ config->lacp_by_user) {
3129a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"lacp_by_user\" "
3130a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3131a13ec19cSMichael Baum 			sh->ibdev_name);
3132a13ec19cSMichael Baum 		goto error;
3133a13ec19cSMichael Baum 	}
3134a13ec19cSMichael Baum 	if (sh->config.tx_pp ^ config->tx_pp) {
3135a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"tx_pp\" "
3136a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3137a13ec19cSMichael Baum 			sh->ibdev_name);
3138a13ec19cSMichael Baum 		goto error;
3139a13ec19cSMichael Baum 	}
3140a13ec19cSMichael Baum 	if (sh->config.tx_skew ^ config->tx_skew) {
3141a13ec19cSMichael Baum 		DRV_LOG(ERR, "\"tx_skew\" "
3142a13ec19cSMichael Baum 			"configuration mismatch for shared %s context.",
3143a13ec19cSMichael Baum 			sh->ibdev_name);
3144a13ec19cSMichael Baum 		goto error;
3145a13ec19cSMichael Baum 	}
3146a13ec19cSMichael Baum 	mlx5_free(config);
3147a13ec19cSMichael Baum 	return 0;
3148a13ec19cSMichael Baum error:
3149a13ec19cSMichael Baum 	mlx5_free(config);
3150a13ec19cSMichael Baum 	rte_errno = EINVAL;
3151a13ec19cSMichael Baum 	return -rte_errno;
3152a13ec19cSMichael Baum }
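
/*
 * Illustrative note (not part of the driver): the XOR tests above flag any
 * difference between the stored and the freshly parsed values, and work for
 * boolean and integer fields alike, e.g.:
 *
 *	(0 ^ 1)   != 0	- boolean mismatch (tx_pp enabled vs. disabled)
 *	(20 ^ 20) == 0	- integer match (the same tx_skew value)
 *	(20 ^ 24) != 0	- integer mismatch (different tx_skew values)
 */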
3153a13ec19cSMichael Baum 
3154a13ec19cSMichael Baum /**
315538b4b397SViacheslav Ovsiienko  * Configures the minimal amount of data to inline into a WQE
315638b4b397SViacheslav Ovsiienko  * while sending packets.
315738b4b397SViacheslav Ovsiienko  *
315838b4b397SViacheslav Ovsiienko  * - the txq_inline_min devarg has the highest priority, if this
315938b4b397SViacheslav Ovsiienko  *   key is specified in devargs;
316038b4b397SViacheslav Ovsiienko  * - if DevX is enabled, the inline mode is queried from the
316138b4b397SViacheslav Ovsiienko  *   device (HCA attributes and NIC vport context if needed);
3162ee76bddcSThomas Monjalon  * - otherwise, L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
316338b4b397SViacheslav Ovsiienko  *   and none (0 bytes) for other NICs.
316438b4b397SViacheslav Ovsiienko  *
316545a6df80SMichael Baum  * @param priv
316645a6df80SMichael Baum  *   Pointer to the private device data structure.
316738b4b397SViacheslav Ovsiienko  */
31682eb4d010SOphir Munk void
316945a6df80SMichael Baum mlx5_set_min_inline(struct mlx5_priv *priv)
317038b4b397SViacheslav Ovsiienko {
317145a6df80SMichael Baum 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
317245a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
317353820561SMichael Baum 
317438b4b397SViacheslav Ovsiienko 	if (config->txq_inline_min != MLX5_ARG_UNSET) {
317538b4b397SViacheslav Ovsiienko 		/* Application defines size of inlined data explicitly. */
317645a6df80SMichael Baum 		if (priv->pci_dev != NULL) {
317745a6df80SMichael Baum 			switch (priv->pci_dev->id.device_id) {
317838b4b397SViacheslav Ovsiienko 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
317938b4b397SViacheslav Ovsiienko 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
318038b4b397SViacheslav Ovsiienko 				if (config->txq_inline_min <
318138b4b397SViacheslav Ovsiienko 					       (int)MLX5_INLINE_HSIZE_L2) {
318238b4b397SViacheslav Ovsiienko 					DRV_LOG(DEBUG,
318356bb3c84SXueming Li 						"txq_inline_min aligned to minimal ConnectX-4 required value %d",
318438b4b397SViacheslav Ovsiienko 						(int)MLX5_INLINE_HSIZE_L2);
318556bb3c84SXueming Li 					config->txq_inline_min =
318656bb3c84SXueming Li 							MLX5_INLINE_HSIZE_L2;
318738b4b397SViacheslav Ovsiienko 				}
318838b4b397SViacheslav Ovsiienko 				break;
318938b4b397SViacheslav Ovsiienko 			}
319056bb3c84SXueming Li 		}
319138b4b397SViacheslav Ovsiienko 		goto exit;
319238b4b397SViacheslav Ovsiienko 	}
319353820561SMichael Baum 	if (hca_attr->eth_net_offloads) {
319438b4b397SViacheslav Ovsiienko 		/* We have DevX enabled, inline mode queried successfully. */
319553820561SMichael Baum 		switch (hca_attr->wqe_inline_mode) {
319638b4b397SViacheslav Ovsiienko 		case MLX5_CAP_INLINE_MODE_L2:
319738b4b397SViacheslav Ovsiienko 			/* outer L2 header must be inlined. */
319838b4b397SViacheslav Ovsiienko 			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
319938b4b397SViacheslav Ovsiienko 			goto exit;
320038b4b397SViacheslav Ovsiienko 		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
320138b4b397SViacheslav Ovsiienko 			/* No inline data are required by the NIC. */
320238b4b397SViacheslav Ovsiienko 			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
320338b4b397SViacheslav Ovsiienko 			config->hw_vlan_insert =
320453820561SMichael Baum 				hca_attr->wqe_vlan_insert;
320538b4b397SViacheslav Ovsiienko 			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
320638b4b397SViacheslav Ovsiienko 			goto exit;
320738b4b397SViacheslav Ovsiienko 		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
320838b4b397SViacheslav Ovsiienko 			/* inline mode is defined by NIC vport context. */
320953820561SMichael Baum 			if (!hca_attr->eth_virt)
321038b4b397SViacheslav Ovsiienko 				break;
321153820561SMichael Baum 			switch (hca_attr->vport_inline_mode) {
321238b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_NONE:
321338b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
321438b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_NONE;
321538b4b397SViacheslav Ovsiienko 				goto exit;
321638b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_L2:
321738b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
321838b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_L2;
321938b4b397SViacheslav Ovsiienko 				goto exit;
322038b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_IP:
322138b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
322238b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_L3;
322338b4b397SViacheslav Ovsiienko 				goto exit;
322438b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_TCP_UDP:
322538b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
322638b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_L4;
322738b4b397SViacheslav Ovsiienko 				goto exit;
322838b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_INNER_L2:
322938b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
323038b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_INNER_L2;
323138b4b397SViacheslav Ovsiienko 				goto exit;
323238b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_INNER_IP:
323338b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
323438b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_INNER_L3;
323538b4b397SViacheslav Ovsiienko 				goto exit;
323638b4b397SViacheslav Ovsiienko 			case MLX5_INLINE_MODE_INNER_TCP_UDP:
323738b4b397SViacheslav Ovsiienko 				config->txq_inline_min =
323838b4b397SViacheslav Ovsiienko 					MLX5_INLINE_HSIZE_INNER_L4;
323938b4b397SViacheslav Ovsiienko 				goto exit;
324038b4b397SViacheslav Ovsiienko 			}
324138b4b397SViacheslav Ovsiienko 		}
324238b4b397SViacheslav Ovsiienko 	}
324345a6df80SMichael Baum 	if (priv->pci_dev == NULL) {
324456bb3c84SXueming Li 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
324556bb3c84SXueming Li 		goto exit;
324656bb3c84SXueming Li 	}
324738b4b397SViacheslav Ovsiienko 	/*
324838b4b397SViacheslav Ovsiienko 	 * We get here if we are unable to deduce
324938b4b397SViacheslav Ovsiienko 	 * the inline data size with DevX. Try the
325038b4b397SViacheslav Ovsiienko 	 * PCI ID to recognize older NICs.
325138b4b397SViacheslav Ovsiienko 	 */
325245a6df80SMichael Baum 	switch (priv->pci_dev->id.device_id) {
325338b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
325438b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
325538b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
325638b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
3257614de6c8SViacheslav Ovsiienko 		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
325838b4b397SViacheslav Ovsiienko 		config->hw_vlan_insert = 0;
325938b4b397SViacheslav Ovsiienko 		break;
326038b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
326138b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
326238b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
326338b4b397SViacheslav Ovsiienko 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
326438b4b397SViacheslav Ovsiienko 		/*
326538b4b397SViacheslav Ovsiienko 		 * These NICs support VLAN insertion from WQE and
326638b4b397SViacheslav Ovsiienko 		 * report the wqe_vlan_insert flag. But there is a bug that
326738b4b397SViacheslav Ovsiienko 		 * may break PFC control, so the feature is disabled.
326838b4b397SViacheslav Ovsiienko 		 */
326938b4b397SViacheslav Ovsiienko 		config->hw_vlan_insert = 0;
327020215627SDavid Christensen 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
327138b4b397SViacheslav Ovsiienko 		break;
327238b4b397SViacheslav Ovsiienko 	default:
327338b4b397SViacheslav Ovsiienko 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
327438b4b397SViacheslav Ovsiienko 		break;
327538b4b397SViacheslav Ovsiienko 	}
327638b4b397SViacheslav Ovsiienko exit:
327738b4b397SViacheslav Ovsiienko 	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
327838b4b397SViacheslav Ovsiienko }
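
/*
 * Illustrative usage (the PCI address below is a placeholder): forcing the
 * minimal inline size from devargs, which takes priority over both the DevX
 * query and the PCI ID defaults above. 18 bytes corresponds to
 * MLX5_INLINE_HSIZE_L2, i.e. the outer L2 header is inlined.
 *
 *	dpdk-testpmd -a 0000:08:00.0,txq_inline_min=18 -- -i
 */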
327938b4b397SViacheslav Ovsiienko 
328038b4b397SViacheslav Ovsiienko /**
328139139371SViacheslav Ovsiienko  * Configures the metadata mask fields in the shared context.
328239139371SViacheslav Ovsiienko  *
328339139371SViacheslav Ovsiienko  * @param [in] dev
328439139371SViacheslav Ovsiienko  *   Pointer to Ethernet device.
328539139371SViacheslav Ovsiienko  */
32862eb4d010SOphir Munk void
328739139371SViacheslav Ovsiienko mlx5_set_metadata_mask(struct rte_eth_dev *dev)
328839139371SViacheslav Ovsiienko {
328939139371SViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
32906e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh = priv->sh;
329139139371SViacheslav Ovsiienko 	uint32_t meta, mark, reg_c0;
329239139371SViacheslav Ovsiienko 
329339139371SViacheslav Ovsiienko 	reg_c0 = ~priv->vport_meta_mask;
3294a13ec19cSMichael Baum 	switch (sh->config.dv_xmeta_en) {
329539139371SViacheslav Ovsiienko 	case MLX5_XMETA_MODE_LEGACY:
329639139371SViacheslav Ovsiienko 		meta = UINT32_MAX;
329739139371SViacheslav Ovsiienko 		mark = MLX5_FLOW_MARK_MASK;
329839139371SViacheslav Ovsiienko 		break;
329939139371SViacheslav Ovsiienko 	case MLX5_XMETA_MODE_META16:
330039139371SViacheslav Ovsiienko 		meta = reg_c0 >> rte_bsf32(reg_c0);
330139139371SViacheslav Ovsiienko 		mark = MLX5_FLOW_MARK_MASK;
330239139371SViacheslav Ovsiienko 		break;
330339139371SViacheslav Ovsiienko 	case MLX5_XMETA_MODE_META32:
330439139371SViacheslav Ovsiienko 		meta = UINT32_MAX;
330539139371SViacheslav Ovsiienko 		mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
330639139371SViacheslav Ovsiienko 		break;
3307ddb68e47SBing Zhao 	case MLX5_XMETA_MODE_META32_HWS:
3308ddb68e47SBing Zhao 		meta = UINT32_MAX;
3309821a6a5cSBing Zhao 		mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
3310ddb68e47SBing Zhao 		break;
331139139371SViacheslav Ovsiienko 	default:
331239139371SViacheslav Ovsiienko 		meta = 0;
331339139371SViacheslav Ovsiienko 		mark = 0;
33148e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(false);
331539139371SViacheslav Ovsiienko 		break;
331639139371SViacheslav Ovsiienko 	}
331739139371SViacheslav Ovsiienko 	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
33187be78d02SJosh Soref 		DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
331939139371SViacheslav Ovsiienko 				 sh->dv_mark_mask, mark);
332039139371SViacheslav Ovsiienko 	else
332139139371SViacheslav Ovsiienko 		sh->dv_mark_mask = mark;
332239139371SViacheslav Ovsiienko 	if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
33237be78d02SJosh Soref 		DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
332439139371SViacheslav Ovsiienko 				 sh->dv_meta_mask, meta);
332539139371SViacheslav Ovsiienko 	else
332639139371SViacheslav Ovsiienko 		sh->dv_meta_mask = meta;
332739139371SViacheslav Ovsiienko 	if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
33287be78d02SJosh Soref 		DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
332939139371SViacheslav Ovsiienko 				 sh->dv_regc0_mask, reg_c0);
333039139371SViacheslav Ovsiienko 	else
333139139371SViacheslav Ovsiienko 		sh->dv_regc0_mask = reg_c0;
3332a13ec19cSMichael Baum 	DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
333339139371SViacheslav Ovsiienko 	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
333439139371SViacheslav Ovsiienko 	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
333539139371SViacheslav Ovsiienko 	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
333639139371SViacheslav Ovsiienko }
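
/*
 * Worked example (illustrative values): with a vport metadata mask of
 * 0x0000ffff, reg_c0 = ~0x0000ffff = 0xffff0000 and rte_bsf32(reg_c0) = 16,
 * so in META16 mode meta = 0xffff0000 >> 16 = 0x0000ffff, leaving 16 bits
 * of the REG_C_0 register available for the META item.
 */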
333739139371SViacheslav Ovsiienko 
3338efa79e68SOri Kam int
3339efa79e68SOri Kam rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
3340efa79e68SOri Kam {
3341efa79e68SOri Kam 	static const char *const dynf_names[] = {
3342efa79e68SOri Kam 		RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
33438f848f32SViacheslav Ovsiienko 		RTE_MBUF_DYNFLAG_METADATA_NAME,
33448f848f32SViacheslav Ovsiienko 		RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
3345efa79e68SOri Kam 	};
3346efa79e68SOri Kam 	unsigned int i;
3347efa79e68SOri Kam 
3348efa79e68SOri Kam 	if (n < RTE_DIM(dynf_names))
3349efa79e68SOri Kam 		return -ENOMEM;
3350efa79e68SOri Kam 	for (i = 0; i < RTE_DIM(dynf_names); i++) {
3351efa79e68SOri Kam 		if (names[i] == NULL)
3352efa79e68SOri Kam 			return -EINVAL;
3353efa79e68SOri Kam 		strcpy(names[i], dynf_names[i]);
3354efa79e68SOri Kam 	}
3355efa79e68SOri Kam 	return RTE_DIM(dynf_names);
3356efa79e68SOri Kam }
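
/*
 * Illustrative caller sketch (the buffer count matches the current flag
 * list; RTE_MBUF_DYN_NAMESIZE is the standard mbuf dynamic field/flag name
 * size from rte_mbuf_dyn.h):
 *
 *	char buf[3][RTE_MBUF_DYN_NAMESIZE];
 *	char *names[3] = { buf[0], buf[1], buf[2] };
 *	int n = rte_pmd_mlx5_get_dyn_flag_names(names, 3);
 *
 * A negative value means a too-small array (-ENOMEM) or a NULL name buffer
 * (-EINVAL); otherwise n is the number of names filled in.
 */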
3357efa79e68SOri Kam 
335821cae858SDekel Peled /**
3359fbc83412SViacheslav Ovsiienko  * Look for the Ethernet device belonging to the mlx5 driver.
3360fbc83412SViacheslav Ovsiienko  *
3361fbc83412SViacheslav Ovsiienko  * @param[in] port_id
3362fbc83412SViacheslav Ovsiienko  *   port_id to start looking for device.
336356bb3c84SXueming Li  * @param[in] odev
336456bb3c84SXueming Li  *   Pointer to the hint device. When a device is being probed,
3365fbc83412SViacheslav Ovsiienko  *   its siblings (the master and preceding representors) might
33662eb4d010SOphir Munk  *   not have an assigned driver yet (because mlx5_os_pci_probe()
336756bb3c84SXueming Li  *   is not completed yet); in this case a match on the hint
3368fbc83412SViacheslav Ovsiienko  *   device may be used to detect a sibling device.
3369fbc83412SViacheslav Ovsiienko  *
3370fbc83412SViacheslav Ovsiienko  * @return
3371fbc83412SViacheslav Ovsiienko  *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
3372fbc83412SViacheslav Ovsiienko  */
3373f7e95215SViacheslav Ovsiienko uint16_t
337456bb3c84SXueming Li mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
3375f7e95215SViacheslav Ovsiienko {
3376f7e95215SViacheslav Ovsiienko 	while (port_id < RTE_MAX_ETHPORTS) {
3377f7e95215SViacheslav Ovsiienko 		struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3378f7e95215SViacheslav Ovsiienko 
3379f7e95215SViacheslav Ovsiienko 		if (dev->state != RTE_ETH_DEV_UNUSED &&
3380f7e95215SViacheslav Ovsiienko 		    dev->device &&
338156bb3c84SXueming Li 		    (dev->device == odev ||
3382fbc83412SViacheslav Ovsiienko 		     (dev->device->driver &&
3383f7e95215SViacheslav Ovsiienko 		     dev->device->driver->name &&
3384919488fbSXueming Li 		     ((strcmp(dev->device->driver->name,
3385919488fbSXueming Li 			      MLX5_PCI_DRIVER_NAME) == 0) ||
3386919488fbSXueming Li 		      (strcmp(dev->device->driver->name,
3387919488fbSXueming Li 			      MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
3388f7e95215SViacheslav Ovsiienko 			break;
3389f7e95215SViacheslav Ovsiienko 		port_id++;
3390f7e95215SViacheslav Ovsiienko 	}
3391f7e95215SViacheslav Ovsiienko 	if (port_id >= RTE_MAX_ETHPORTS)
3392f7e95215SViacheslav Ovsiienko 		return RTE_MAX_ETHPORTS;
3393f7e95215SViacheslav Ovsiienko 	return port_id;
3394f7e95215SViacheslav Ovsiienko }
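
/*
 * Illustrative iteration sketch (assumed caller pattern; do_something() is
 * a placeholder): walking all mlx5 ports related to a given device by
 * chaining the lookups:
 *
 *	uint16_t p;
 *
 *	for (p = mlx5_eth_find_next(0, odev); p < RTE_MAX_ETHPORTS;
 *	     p = mlx5_eth_find_next(p + 1, odev))
 *		do_something(&rte_eth_devices[p]);
 */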
3395f7e95215SViacheslav Ovsiienko 
33963a820742SOphir Munk /**
3397a7f34989SXueming Li  * Callback to remove a device.
33983a820742SOphir Munk  *
3399a7f34989SXueming Li  * This function removes all Ethernet devices belonging to a given device.
34003a820742SOphir Munk  *
34017af08c8fSMichael Baum  * @param[in] cdev
3402a7f34989SXueming Li  *   Pointer to the generic device.
34033a820742SOphir Munk  *
34043a820742SOphir Munk  * @return
34053a820742SOphir Munk  *   0 on success, -EIO if any port failed to close.
34063a820742SOphir Munk  */
34076856efa5SMichael Baum int
34087af08c8fSMichael Baum mlx5_net_remove(struct mlx5_common_device *cdev)
34093a820742SOphir Munk {
34103a820742SOphir Munk 	uint16_t port_id;
34118a5a0aadSThomas Monjalon 	int ret = 0;
34123a820742SOphir Munk 
34137af08c8fSMichael Baum 	RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
34142786b7bfSSuanming Mou 		/*
34152786b7bfSSuanming Mou 		 * mlx5_dev_close() is not registered to secondary process,
34162786b7bfSSuanming Mou 		 * call the close function explicitly for secondary process.
34172786b7bfSSuanming Mou 		 */
34182786b7bfSSuanming Mou 		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
34198a5a0aadSThomas Monjalon 			ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
34202786b7bfSSuanming Mou 		else
34218a5a0aadSThomas Monjalon 			ret |= rte_eth_dev_close(port_id);
34222786b7bfSSuanming Mou 	}
34238a5a0aadSThomas Monjalon 	return ret == 0 ? 0 : -EIO;
34243a820742SOphir Munk }
34253a820742SOphir Munk 
3426771fa900SAdrien Mazarguil static const struct rte_pci_id mlx5_pci_id_map[] = {
3427771fa900SAdrien Mazarguil 	{
34281d1bc870SNélio Laranjeiro 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34291d1bc870SNélio Laranjeiro 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
3430771fa900SAdrien Mazarguil 	},
3431771fa900SAdrien Mazarguil 	{
34321d1bc870SNélio Laranjeiro 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34331d1bc870SNélio Laranjeiro 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
3434771fa900SAdrien Mazarguil 	},
3435771fa900SAdrien Mazarguil 	{
34361d1bc870SNélio Laranjeiro 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34371d1bc870SNélio Laranjeiro 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
3438771fa900SAdrien Mazarguil 	},
3439771fa900SAdrien Mazarguil 	{
34401d1bc870SNélio Laranjeiro 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34411d1bc870SNélio Laranjeiro 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
3442771fa900SAdrien Mazarguil 	},
3443771fa900SAdrien Mazarguil 	{
3444528a9fbeSYongseok Koh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3445528a9fbeSYongseok Koh 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
3446528a9fbeSYongseok Koh 	},
3447528a9fbeSYongseok Koh 	{
3448528a9fbeSYongseok Koh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3449528a9fbeSYongseok Koh 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
3450528a9fbeSYongseok Koh 	},
3451528a9fbeSYongseok Koh 	{
3452528a9fbeSYongseok Koh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3453528a9fbeSYongseok Koh 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
3454528a9fbeSYongseok Koh 	},
3455528a9fbeSYongseok Koh 	{
3456528a9fbeSYongseok Koh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3457528a9fbeSYongseok Koh 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
3458528a9fbeSYongseok Koh 	},
3459528a9fbeSYongseok Koh 	{
3460dd3331c6SShahaf Shuler 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34610a9fff95SRaslan Darawsheh 			       PCI_DEVICE_ID_MELLANOX_BLUEFIELD)
3462dd3331c6SShahaf Shuler 	},
3463dd3331c6SShahaf Shuler 	{
3464c322c0e5SOri Kam 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34650a9fff95SRaslan Darawsheh 			       PCI_DEVICE_ID_MELLANOX_BLUEFIELDVF)
3466c322c0e5SOri Kam 	},
3467c322c0e5SOri Kam 	{
3468f0354d84SWisam Jaddo 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3469f0354d84SWisam Jaddo 				PCI_DEVICE_ID_MELLANOX_CONNECTX6)
3470f0354d84SWisam Jaddo 	},
3471f0354d84SWisam Jaddo 	{
3472f0354d84SWisam Jaddo 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3473f0354d84SWisam Jaddo 				PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
3474f0354d84SWisam Jaddo 	},
3475f0354d84SWisam Jaddo 	{
34765fc66630SRaslan Darawsheh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34775fc66630SRaslan Darawsheh 				PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
34785fc66630SRaslan Darawsheh 	},
34795fc66630SRaslan Darawsheh 	{
34805fc66630SRaslan Darawsheh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34813ea12cadSRaslan Darawsheh 				PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
34825fc66630SRaslan Darawsheh 	},
34835fc66630SRaslan Darawsheh 	{
348458b4a2b1SRaslan Darawsheh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34850a9fff95SRaslan Darawsheh 				PCI_DEVICE_ID_MELLANOX_BLUEFIELD2)
348658b4a2b1SRaslan Darawsheh 	},
348758b4a2b1SRaslan Darawsheh 	{
348828c9a7d7SAli Alnubani 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
348928c9a7d7SAli Alnubani 				PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
349028c9a7d7SAli Alnubani 	},
349128c9a7d7SAli Alnubani 	{
34926ca37b06SRaslan Darawsheh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34936ca37b06SRaslan Darawsheh 				PCI_DEVICE_ID_MELLANOX_CONNECTX7)
34946ca37b06SRaslan Darawsheh 	},
34956ca37b06SRaslan Darawsheh 	{
34966ca37b06SRaslan Darawsheh 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
34970a9fff95SRaslan Darawsheh 				PCI_DEVICE_ID_MELLANOX_BLUEFIELD3)
34986ca37b06SRaslan Darawsheh 	},
34996ca37b06SRaslan Darawsheh 	{
3500771fa900SAdrien Mazarguil 		.vendor_id = 0
3501771fa900SAdrien Mazarguil 	}
3502771fa900SAdrien Mazarguil };
3503771fa900SAdrien Mazarguil 
3504a7f34989SXueming Li static struct mlx5_class_driver mlx5_net_driver = {
3505a7f34989SXueming Li 	.drv_class = MLX5_CLASS_ETH,
3506a7f34989SXueming Li 	.name = RTE_STR(MLX5_ETH_DRIVER_NAME),
3507771fa900SAdrien Mazarguil 	.id_table = mlx5_pci_id_map,
3508a7f34989SXueming Li 	.probe = mlx5_os_net_probe,
3509a7f34989SXueming Li 	.remove = mlx5_net_remove,
3510a7f34989SXueming Li 	.probe_again = 1,
3511a7f34989SXueming Li 	.intr_lsc = 1,
3512a7f34989SXueming Li 	.intr_rmv = 1,
3513771fa900SAdrien Mazarguil };
3514771fa900SAdrien Mazarguil 
35159c99878aSJerin Jacob /* Initialize driver log type. */
3516eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)
35179c99878aSJerin Jacob 
3518771fa900SAdrien Mazarguil /**
3519771fa900SAdrien Mazarguil  * Driver initialization routine.
3520771fa900SAdrien Mazarguil  */
3521f8e99896SThomas Monjalon RTE_INIT(rte_mlx5_pmd_init)
3522771fa900SAdrien Mazarguil {
3523ef65067cSTal Shnaiderman 	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
352482088001SParav Pandit 	mlx5_common_init();
35255f8ba81cSXueming Li 	/* Build the static tables for Verbs conversion. */
3526ea16068cSYongseok Koh 	mlx5_set_ptype_table();
35275f8ba81cSXueming Li 	mlx5_set_cksum_table();
35285f8ba81cSXueming Li 	mlx5_set_swp_types_table();
35297b4f1e6bSMatan Azrad 	if (mlx5_glue)
3530a7f34989SXueming Li 		mlx5_class_driver_register(&mlx5_net_driver);
3531771fa900SAdrien Mazarguil }
3532771fa900SAdrien Mazarguil 
3533a7f34989SXueming Li RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__);
3534a7f34989SXueming Li RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map);
3535a7f34989SXueming Li RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");
3536