xref: /dpdk/drivers/net/mlx5/mlx5.h (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
2771fa900SAdrien Mazarguil  * Copyright 2015 6WIND S.A.
35feecc57SShahaf Shuler  * Copyright 2015 Mellanox Technologies, Ltd
4771fa900SAdrien Mazarguil  */
5771fa900SAdrien Mazarguil 
6771fa900SAdrien Mazarguil #ifndef RTE_PMD_MLX5_H_
7771fa900SAdrien Mazarguil #define RTE_PMD_MLX5_H_
8771fa900SAdrien Mazarguil 
9771fa900SAdrien Mazarguil #include <stddef.h>
10028669bcSAnatoly Burakov #include <stdbool.h>
11771fa900SAdrien Mazarguil #include <stdint.h>
12771fa900SAdrien Mazarguil #include <limits.h>
131b37f5d8SNélio Laranjeiro #include <sys/queue.h>
14771fa900SAdrien Mazarguil 
155f08883aSGaetan Rivet #include <rte_pci.h>
16771fa900SAdrien Mazarguil #include <rte_ether.h>
17df96fd0dSBruce Richardson #include <ethdev_driver.h>
18974f1e7eSYongseok Koh #include <rte_rwlock.h>
19198a3c33SNelio Laranjeiro #include <rte_interrupts.h>
20a48deadaSOr Ami #include <rte_errno.h>
210d356350SNélio Laranjeiro #include <rte_flow.h>
22e6100c7bSLi Zhang #include <rte_mtr.h>
23771fa900SAdrien Mazarguil 
247b4f1e6bSMatan Azrad #include <mlx5_glue.h>
257b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h>
267b4f1e6bSMatan Azrad #include <mlx5_prm.h>
27a4de9586SVu Pham #include <mlx5_common_mp.h>
28b8dc6b0eSVu Pham #include <mlx5_common_mr.h>
29a7787bb0SMichael Baum #include <mlx5_common_devx.h>
30a77bedf2SMichael Baum #include <mlx5_common_defs.h>
317b4f1e6bSMatan Azrad 
327b4f1e6bSMatan Azrad #include "mlx5_defs.h"
33771fa900SAdrien Mazarguil #include "mlx5_utils.h"
3410f3581dSOphir Munk #include "mlx5_os.h"
35771fa900SAdrien Mazarguil #include "mlx5_autoconf.h"
362eece379SRongwei Liu #include "rte_pmd_mlx5.h"
37b401400dSSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
3822681deeSAlex Vesker #ifndef RTE_EXEC_ENV_WINDOWS
3922681deeSAlex Vesker #define HAVE_MLX5_HWS_SUPPORT 1
4022681deeSAlex Vesker #else
4122681deeSAlex Vesker #define __be64 uint64_t
4222681deeSAlex Vesker #endif
4322681deeSAlex Vesker #include "hws/mlx5dr.h"
44b401400dSSuanming Mou #endif
4518726355SXueming Li 
4618726355SXueming Li #define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)
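/*
 * Usage sketch (illustrative, not part of the driver): fetching the shared
 * device context from an rte_eth_dev handle.
 *
 *	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
 */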
4718726355SXueming Li 
48463170a7SSuanming Mou #define MLX5_HW_INV_QUEUE UINT32_MAX
49463170a7SSuanming Mou 
504f3d8d0eSMatan Azrad /*
5115896eafSGregory Etelson  * The default ipool threshold value indicates which per_core_cache
5215896eafSGregory Etelson  * value to set.
5315896eafSGregory Etelson  */
5415896eafSGregory Etelson #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
5515896eafSGregory Etelson /* The default min local cache size. */
5615896eafSGregory Etelson #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
5715896eafSGregory Etelson 
5815896eafSGregory Etelson /*
594f3d8d0eSMatan Azrad  * Number of modification commands.
604f3d8d0eSMatan Azrad  * The maximal number of actions supported by FW is a constant: it is 16
614f3d8d0eSMatan Azrad  * in the latest releases, and limited to 8 in some older ones.
624f3d8d0eSMatan Azrad  * Since there is no interface to query this capacity, the maximal value
634f3d8d0eSMatan Azrad  * is used to let the PMD create the flow; the validation is done in the
644f3d8d0eSMatan Azrad  * lower driver layer or in FW. A failure is returned if the maximal
654f3d8d0eSMatan Azrad  * supported number of actions on the root table is exceeded.
664f3d8d0eSMatan Azrad  * On non-root tables, there is no limitation, but 32 is enough right now.
674f3d8d0eSMatan Azrad  */
684f3d8d0eSMatan Azrad #define MLX5_MAX_MODIFY_NUM			32
694f3d8d0eSMatan Azrad #define MLX5_ROOT_TBL_MODIFY_NUM		16
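/*
 * Minimal sketch (illustrative helper, not a driver API): picking the modify
 * header command budget depending on whether the rule lands on the root
 * table, per the comment above.
 */
static inline unsigned int
mlx5_modify_cmd_budget_sketch(bool root_table)
{
	return root_table ? MLX5_ROOT_TBL_MODIFY_NUM : MLX5_MAX_MODIFY_NUM;
}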
704f3d8d0eSMatan Azrad 
71db25cadcSViacheslav Ovsiienko /* Maximal number of flex items created on the port. */
7216d8f37bSViacheslav Ovsiienko #define MLX5_PORT_FLEX_ITEM_NUM			8
73db25cadcSViacheslav Ovsiienko 
74b293e8e4SViacheslav Ovsiienko /* Maximal number of fields/field parts to map into sample registers. */
75b293e8e4SViacheslav Ovsiienko #define MLX5_FLEX_ITEM_MAPPING_NUM		32
76b293e8e4SViacheslav Ovsiienko 
77014d1cbeSSuanming Mou enum mlx5_ipool_index {
78f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
79014d1cbeSSuanming Mou 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
808acf8ac9SSuanming Mou 	MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
815f114269SSuanming Mou 	MLX5_IPOOL_TAG, /* Pool for tag resource. */
82f3faf9eaSSuanming Mou 	MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
83d1559d66SSuanming Mou 	MLX5_IPOOL_JUMP, /* Pool for SWS jump resource. */
84d1559d66SSuanming Mou 	/* Pool for HWS group. Jump action will be created internally. */
85d1559d66SSuanming Mou 	MLX5_IPOOL_HW_GRP = MLX5_IPOOL_JUMP,
86b4c0ddbfSJiawei Wang 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
8700c10c22SJiawei Wang 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
889cac7dedSGregory Etelson 	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
899cac7dedSGregory Etelson 	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
90b88341caSSuanming Mou #endif
918638e2b0SSuanming Mou 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
9290e6053aSSuanming Mou 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
93772dc0ebSSuanming Mou 	MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
94b88341caSSuanming Mou 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
95ab612adcSSuanming Mou 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
964ae8825cSXueming Li 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
974a42ac1fSMatan Azrad 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
98afb4aa4fSLi Zhang 	MLX5_IPOOL_MTR_POLICY, /* Pool for meter policy resource. */
99014d1cbeSSuanming Mou 	MLX5_IPOOL_MAX,
100014d1cbeSSuanming Mou };
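/*
 * Minimal sketch (illustrative; assumes the shared context keeps an array of
 * indexed pools keyed by this enum): MLX5_IPOOL_MAX allows iterating them
 * all, e.g. for teardown.
 *
 *	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
 *		... release the i-th indexed pool ...
 */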
101014d1cbeSSuanming Mou 
102a1da6f62SSuanming Mou /*
103a1da6f62SSuanming Mou  * Three memory reclaim modes are supported:
104a1da6f62SSuanming Mou  * 0 (none) means no memory reclaim.
105a1da6f62SSuanming Mou  * 1 (light) means PMD level reclaim only.
106a1da6f62SSuanming Mou  * 2 (aggressive) means both PMD and rdma-core level reclaim.
107a1da6f62SSuanming Mou  */
108a1da6f62SSuanming Mou enum mlx5_reclaim_mem_mode {
109a1da6f62SSuanming Mou 	MLX5_RCM_NONE, /* Don't reclaim memory. */
110a1da6f62SSuanming Mou 	MLX5_RCM_LIGHT, /* Reclaim PMD level. */
111a1da6f62SSuanming Mou 	MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
112a1da6f62SSuanming Mou };
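/*
 * Minimal sketch (illustrative): the modes are ordered, so "reclaim at
 * rdma-core level too" can be tested as (mode == MLX5_RCM_AGGR), and
 * "any reclaim" as (mode != MLX5_RCM_NONE).
 */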
113a1da6f62SSuanming Mou 
114b4edeaf3SSuanming Mou /* The type of flow. */
115b4edeaf3SSuanming Mou enum mlx5_flow_type {
116b4edeaf3SSuanming Mou 	MLX5_FLOW_TYPE_CTL, /* Control flow. */
117b4edeaf3SSuanming Mou 	MLX5_FLOW_TYPE_GEN, /* General flow. */
118b4edeaf3SSuanming Mou 	MLX5_FLOW_TYPE_MCP, /* MCP flow. */
119b4edeaf3SSuanming Mou 	MLX5_FLOW_TYPE_MAXI,
120b4edeaf3SSuanming Mou };
121b4edeaf3SSuanming Mou 
122febcac7bSBing Zhao /* The mode of delay drop for Rx queues. */
123febcac7bSBing Zhao enum mlx5_delay_drop_mode {
124febcac7bSBing Zhao 	MLX5_DELAY_DROP_NONE = 0, /* All disabled. */
125febcac7bSBing Zhao 	MLX5_DELAY_DROP_STANDARD = RTE_BIT32(0), /* Enabled for standard queues. */
126febcac7bSBing Zhao 	MLX5_DELAY_DROP_HAIRPIN = RTE_BIT32(1), /* Enabled for hairpin queues. */
127febcac7bSBing Zhao };
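/*
 * Minimal sketch (illustrative helper, not a driver API; assumes "mode" holds
 * mlx5_delay_drop_mode bits): both flags may be set at once.
 */
static inline bool
mlx5_delay_drop_on_hairpin_sketch(uint32_t mode)
{
	return (mode & MLX5_DELAY_DROP_HAIRPIN) != 0;
}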
128febcac7bSBing Zhao 
129d1559d66SSuanming Mou /* The HWS action type root/non-root. */
130d1559d66SSuanming Mou enum mlx5_hw_action_flag_type {
131d1559d66SSuanming Mou 	MLX5_HW_ACTION_FLAG_ROOT, /* Root action. */
132d1559d66SSuanming Mou 	MLX5_HW_ACTION_FLAG_NONE_ROOT, /* Non-root action. */
133d1559d66SSuanming Mou 	MLX5_HW_ACTION_FLAG_MAX, /* Maximum action flag. */
134d1559d66SSuanming Mou };
135d1559d66SSuanming Mou 
136e78e5408SMatan Azrad /* Hlist and list callback context. */
137e1592b6cSSuanming Mou struct mlx5_flow_cb_ctx {
138e1592b6cSSuanming Mou 	struct rte_eth_dev *dev;
139e1592b6cSSuanming Mou 	struct rte_flow_error *error;
140e1592b6cSSuanming Mou 	void *data;
141961b6774SMatan Azrad 	void *data2;
142e1592b6cSSuanming Mou };
143e1592b6cSSuanming Mou 
1444cbeba6fSSuanming Mou struct flow_hw_port_info {
1454cbeba6fSSuanming Mou 	uint32_t regc_mask;
1464cbeba6fSSuanming Mou 	uint32_t regc_value;
1474cbeba6fSSuanming Mou 	uint32_t is_wire:1;
1484cbeba6fSSuanming Mou 	uint32_t direction:2;
1494cbeba6fSSuanming Mou };
1504cbeba6fSSuanming Mou 
1514cbeba6fSSuanming Mou enum mlx5_vport_direction {
1524cbeba6fSSuanming Mou 	MLX5_VPORT_DIRECTION_ANY = 0,
1534cbeba6fSSuanming Mou 	MLX5_VPORT_DIRECTION_NORTH,
1544cbeba6fSSuanming Mou 	MLX5_VPORT_DIRECTION_SOUTH,
1554cbeba6fSSuanming Mou };
1564cbeba6fSSuanming Mou 
15791d1cfafSMichael Baum /* Device capabilities structure, which does not change at any stage. */
15891d1cfafSMichael Baum struct mlx5_dev_cap {
15991d1cfafSMichael Baum 	int max_cq; /* Maximum number of supported CQs. */
16091d1cfafSMichael Baum 	int max_qp; /* Maximum number of supported QPs. */
16191d1cfafSMichael Baum 	int max_qp_wr; /* Maximum number of outstanding WR on any WQ. */
162e85f623eSOphir Munk 	int max_sge;
16391d1cfafSMichael Baum 	/* Maximum number of scatter/gather entries per WR on the SQ and RQ
16491d1cfafSMichael Baum 	 * of a QP, for non RDMA Read operations.
16591d1cfafSMichael Baum 	 */
16687af0d1eSMichael Baum 	int mps; /* Multi-packet send supported mode. */
16787af0d1eSMichael Baum 	uint32_t vf:1; /* This is a VF. */
16887af0d1eSMichael Baum 	uint32_t sf:1; /* This is a SF. */
16987af0d1eSMichael Baum 	uint32_t txpp_en:1; /* Tx packet pacing is supported. */
17087af0d1eSMichael Baum 	uint32_t mpls_en:1; /* MPLS over GRE/UDP is supported. */
17187af0d1eSMichael Baum 	uint32_t cqe_comp:1; /* CQE compression is supported. */
17287af0d1eSMichael Baum 	uint32_t hw_csum:1; /* Checksum offload is supported. */
17387af0d1eSMichael Baum 	uint32_t hw_padding:1; /* End alignment padding is supported. */
17487af0d1eSMichael Baum 	uint32_t dest_tir:1; /* Whether advanced DR API is available. */
17587af0d1eSMichael Baum 	uint32_t dv_esw_en:1; /* E-Switch DV flow is supported. */
17687af0d1eSMichael Baum 	uint32_t dv_flow_en:1; /* DV flow is supported. */
17787af0d1eSMichael Baum 	uint32_t swp:3; /* Tx generic tunnel checksum and TSO offload. */
17887af0d1eSMichael Baum 	uint32_t hw_vlan_strip:1; /* VLAN stripping is supported. */
17987af0d1eSMichael Baum 	uint32_t scatter_fcs_w_decap_disable:1;
18087af0d1eSMichael Baum 	/* HW has a bug working with tunnel packet decap and scatter FCS. */
18187af0d1eSMichael Baum 	uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
18287af0d1eSMichael Baum 	uint32_t rt_timestamp:1; /* Realtime timestamp format. */
18387af0d1eSMichael Baum 	uint32_t rq_delay_drop_en:1; /* Enable RxQ delay drop. */
18487af0d1eSMichael Baum 	uint32_t tunnel_en:3;
18587af0d1eSMichael Baum 	/* Whether tunnel stateless offloads are supported. */
18687af0d1eSMichael Baum 	uint32_t ind_table_max_size;
18791d1cfafSMichael Baum 	/* Maximum receive WQ indirection table size. */
18887af0d1eSMichael Baum 	uint32_t tso:1; /* Whether TSO is supported. */
18987af0d1eSMichael Baum 	uint32_t tso_max_payload_sz; /* Maximum TCP payload for TSO. */
19087af0d1eSMichael Baum 	struct {
19187af0d1eSMichael Baum 		uint32_t enabled:1; /* Whether MPRQ is enabled. */
19287af0d1eSMichael Baum 		uint32_t log_min_stride_size; /* Log min size of a stride. */
19387af0d1eSMichael Baum 		uint32_t log_max_stride_size; /* Log max size of a stride. */
19487af0d1eSMichael Baum 		uint32_t log_min_stride_num; /* Log min num of strides. */
19587af0d1eSMichael Baum 		uint32_t log_max_stride_num; /* Log max num of strides. */
19687af0d1eSMichael Baum 		uint32_t log_min_stride_wqe_size;
19787af0d1eSMichael Baum 		/* Log min WQE size, (size of single stride)*(num of strides).*/
19887af0d1eSMichael Baum 	} mprq; /* Capability for Multi-Packet RQ. */
19991d1cfafSMichael Baum 	char fw_ver[64]; /* Firmware version of this device. */
2004cbeba6fSSuanming Mou 	struct flow_hw_port_info esw_info; /* E-switch manager reg_c0. */
201e85f623eSOphir Munk };
202e85f623eSOphir Munk 
20311c73de9SDariusz Sosnowski #define MLX5_MPESW_PORT_INVALID (-1)
20411c73de9SDariusz Sosnowski 
2052eb4d010SOphir Munk /** Data associated with devices to spawn. */
2062eb4d010SOphir Munk struct mlx5_dev_spawn_data {
2072eb4d010SOphir Munk 	uint32_t ifindex; /**< Network interface index. */
208834a9019SOphir Munk 	uint32_t max_port; /**< Device maximal port index. */
209834a9019SOphir Munk 	uint32_t phys_port; /**< Device physical port index. */
2102eb4d010SOphir Munk 	int pf_bond; /**< Bonding device PF index; < 0 means no bonding. */
21111c73de9SDariusz Sosnowski 	int mpesw_port; /**< MPESW uplink index. Valid if mpesw_owner_port >= 0. */
2122eb4d010SOphir Munk 	struct mlx5_switch_info info; /**< Switch information. */
213887183efSMichael Baum 	const char *phys_dev_name; /**< Name of physical device. */
2142eb4d010SOphir Munk 	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
2152eb4d010SOphir Munk 	struct rte_pci_device *pci_dev; /**< Backend PCI device. */
2167af08c8fSMichael Baum 	struct mlx5_common_device *cdev; /**< Backend common device. */
217f5f4c482SXueming Li 	struct mlx5_bond_info *bond_info;
2182eb4d010SOphir Munk };
2192eb4d010SOphir Munk 
22011c73de9SDariusz Sosnowski /**
22111c73de9SDariusz Sosnowski  * Check if the port requested to be probed is MPESW physical device
22211c73de9SDariusz Sosnowski  * or a representor port.
22311c73de9SDariusz Sosnowski  *
22411c73de9SDariusz Sosnowski  * @param spawn
22511c73de9SDariusz Sosnowski  *   Parameters of the probed port.
22611c73de9SDariusz Sosnowski  *
22711c73de9SDariusz Sosnowski  * @return
22811c73de9SDariusz Sosnowski  *   True if the probed port is a physical device or representor in MPESW setup.
22911c73de9SDariusz Sosnowski  *   False otherwise, or if MPESW was not configured.
23011c73de9SDariusz Sosnowski  */
23111c73de9SDariusz Sosnowski static inline bool
23211c73de9SDariusz Sosnowski mlx5_is_probed_port_on_mpesw_device(struct mlx5_dev_spawn_data *spawn)
23311c73de9SDariusz Sosnowski {
23411c73de9SDariusz Sosnowski 	return spawn->mpesw_port >= 0;
23511c73de9SDariusz Sosnowski }
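/*
 * Usage sketch (hypothetical caller):
 *
 *	if (mlx5_is_probed_port_on_mpesw_device(spawn))
 *		... apply MPESW-specific probing logic ...
 */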
23611c73de9SDariusz Sosnowski 
237bd0a9315SHaifei Luo /** Data associated with socket messages. */
238*e7750639SAndre Muezerie struct __rte_packed_begin mlx5_flow_dump_req {
239bd0a9315SHaifei Luo 	uint32_t port_id; /**< There are plans in DPDK to extend port_id. */
240bd0a9315SHaifei Luo 	uint64_t flow_id;
241*e7750639SAndre Muezerie } __rte_packed_end;
242bd0a9315SHaifei Luo 
243bd0a9315SHaifei Luo struct mlx5_flow_dump_ack {
244bd0a9315SHaifei Luo 	int rc; /**< Return code. */
245bd0a9315SHaifei Luo };
246bd0a9315SHaifei Luo 
2476e88bc42SOphir Munk LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
248974f1e7eSYongseok Koh 
2497be600c8SYongseok Koh /* Shared data between primary and secondary processes. */
250974f1e7eSYongseok Koh struct mlx5_shared_data {
2517be600c8SYongseok Koh 	rte_spinlock_t lock;
2527be600c8SYongseok Koh 	/* Global spinlock for primary and secondary processes. */
2537be600c8SYongseok Koh 	int init_done; /* Whether primary has done initialization. */
2547be600c8SYongseok Koh 	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
255974f1e7eSYongseok Koh };
256974f1e7eSYongseok Koh 
2577be600c8SYongseok Koh /* Per-process data structure, not visible to other processes. */
2587be600c8SYongseok Koh struct mlx5_local_data {
2597be600c8SYongseok Koh 	int init_done; /* Whether a secondary has done initialization. */
2607be600c8SYongseok Koh };
2617be600c8SYongseok Koh 
262974f1e7eSYongseok Koh extern struct mlx5_shared_data *mlx5_shared_data;
2632eb4d010SOphir Munk 
2642eb4d010SOphir Munk /* Dev ops structs */
265b012b4ceSOphir Munk extern const struct eth_dev_ops mlx5_dev_ops;
266b012b4ceSOphir Munk extern const struct eth_dev_ops mlx5_dev_sec_ops;
267b012b4ceSOphir Munk extern const struct eth_dev_ops mlx5_dev_ops_isolate;
268974f1e7eSYongseok Koh 
2691a611fdaSShahaf Shuler struct mlx5_counter_ctrl {
2701a611fdaSShahaf Shuler 	/* Name of the counter. */
2711a611fdaSShahaf Shuler 	char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
2721a611fdaSShahaf Shuler 	/* Name of the counter on the device table. */
2731a611fdaSShahaf Shuler 	char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
27473bf9235SOphir Munk 	uint32_t dev:1; /**< Nonzero for dev counters. */
2751a611fdaSShahaf Shuler };
2761a611fdaSShahaf Shuler 
277a4193ae3SShahaf Shuler struct mlx5_xstats_ctrl {
278a4193ae3SShahaf Shuler 	/* Number of device stats. */
279a4193ae3SShahaf Shuler 	uint16_t stats_n;
280a687c3e6SBing Zhao 	/* Number of device stats, for the 2nd port in bond. */
281a687c3e6SBing Zhao 	uint16_t stats_n_2nd;
2821a611fdaSShahaf Shuler 	/* Number of device stats identified by PMD. */
2831a611fdaSShahaf Shuler 	uint16_t mlx5_stats_n;
2841be61fe1SBing Zhao 	/* First device counters index. */
2851be61fe1SBing Zhao 	uint16_t dev_cnt_start;
286a4193ae3SShahaf Shuler 	/* Index in the device counters table. */
287a4193ae3SShahaf Shuler 	uint16_t dev_table_idx[MLX5_MAX_XSTATS];
288a687c3e6SBing Zhao 	/* Index in the output table. */
289a687c3e6SBing Zhao 	uint16_t xstats_o_idx[MLX5_MAX_XSTATS];
290a4193ae3SShahaf Shuler 	uint64_t base[MLX5_MAX_XSTATS];
291c5193a0bSJiawei Wang 	uint64_t xstats[MLX5_MAX_XSTATS];
292c5193a0bSJiawei Wang 	uint64_t hw_stats[MLX5_MAX_XSTATS];
2931a611fdaSShahaf Shuler 	struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
294a687c3e6SBing Zhao 	/* Index in the device counters table, for the 2nd port in bond. */
295a687c3e6SBing Zhao 	uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS];
296a687c3e6SBing Zhao 	/* Index in the output table, for the 2nd port in bond. */
297a687c3e6SBing Zhao 	uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS];
298a4193ae3SShahaf Shuler };
299a4193ae3SShahaf Shuler 
3001be61fe1SBing Zhao /* xstats array size. */
3011be61fe1SBing Zhao extern const unsigned int xstats_n;
3021be61fe1SBing Zhao 
303ce9494d7STom Barbette struct mlx5_stats_ctrl {
304ce9494d7STom Barbette 	/* Base for imissed counter. */
305ce9494d7STom Barbette 	uint64_t imissed_base;
306c5193a0bSJiawei Wang 	uint64_t imissed;
307ce9494d7STom Barbette };
308ce9494d7STom Barbette 
3093d491dd6SDekel Peled /* The maximal size of a coalesced LRO segment is set in chunks of 256 bytes. */
3103d491dd6SDekel Peled #define MLX5_LRO_SEG_CHUNK_SIZE	256u
3113d491dd6SDekel Peled 
3121c7e57f9SDekel Peled /* Maximal size of aggregated LRO packet. */
3133d491dd6SDekel Peled #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
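/* E.g. the resulting cap is UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE = 255 * 256 = 65280 bytes. */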
3141c7e57f9SDekel Peled 
3159f209b59SViacheslav Ovsiienko /* Maximal number of segments to split. */
3169f209b59SViacheslav Ovsiienko #define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
3179f209b59SViacheslav Ovsiienko 
3187fe24446SShahaf Shuler /*
31945a6df80SMichael Baum  * Port configuration structure.
32045a6df80SMichael Baum  * User device parameters can disable features.
32145a6df80SMichael Baum  * This structure contains all port-oriented configurations coming from
32245a6df80SMichael Baum  * devargs. When probing again, the devargs do not have to be compatible
32345a6df80SMichael Baum  * with the primary devargs. It is updated for each port in the spawn function.
3247fe24446SShahaf Shuler  */
32545a6df80SMichael Baum struct mlx5_port_config {
32638b4b397SViacheslav Ovsiienko 	unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
3277fe24446SShahaf Shuler 	unsigned int hw_padding:1; /* End alignment padding is supported. */
3287fe24446SShahaf Shuler 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
32999532fb1SAlexander Kozyrev 	unsigned int enh_cqe_comp:1; /* Enhanced CQE compression is enabled. */
33054c2d46bSAlexander Kozyrev 	unsigned int cqe_comp_fmt:3; /* CQE compression format. */
3317fe24446SShahaf Shuler 	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
332febcac7bSBing Zhao 	unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
333febcac7bSBing Zhao 	unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
3347d6bf6b8SYongseok Koh 	struct {
3357d6bf6b8SYongseok Koh 		unsigned int enabled:1; /* Whether MPRQ is enabled. */
3360947ed38SMichael Baum 		unsigned int log_stride_num; /* Log number of strides. */
3370947ed38SMichael Baum 		unsigned int log_stride_size; /* Log size of a stride. */
3387d6bf6b8SYongseok Koh 		unsigned int max_memcpy_len;
3397d6bf6b8SYongseok Koh 		/* Maximum packet size to memcpy Rx packets. */
3407d6bf6b8SYongseok Koh 		unsigned int min_rxqs_num;
3417d6bf6b8SYongseok Koh 		/* Rx queue count threshold to enable MPRQ. */
3427d6bf6b8SYongseok Koh 	} mprq; /* Configurations for Multi-Packet RQ. */
343f9de8718SShahaf Shuler 	int mps; /* Multi-packet send supported mode. */
344066cfecdSMatan Azrad 	unsigned int max_dump_files_num; /* Maximum dump files per queue. */
3451ad9a3d0SBing Zhao 	unsigned int log_hp_size; /* Single hairpin queue data size in total. */
34687af0d1eSMichael Baum 	unsigned int lro_timeout; /* LRO user configuration. */
3477fe24446SShahaf Shuler 	int txqs_inline; /* Queue number threshold for inlining. */
348505f1fe4SViacheslav Ovsiienko 	int txq_inline_min; /* Minimal amount of data bytes to inline. */
349505f1fe4SViacheslav Ovsiienko 	int txq_inline_max; /* Max packet size for inlining with SEND. */
350505f1fe4SViacheslav Ovsiienko 	int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
351a13ec19cSMichael Baum };
352a13ec19cSMichael Baum 
353a13ec19cSMichael Baum /*
354a13ec19cSMichael Baum  * Shared context device configuration structure.
355a13ec19cSMichael Baum  * User device parameters can disable features.
356a13ec19cSMichael Baum  * This structure is updated once per device in the mlx5_alloc_shared_dev_ctx()
357a13ec19cSMichael Baum  * function and cannot change even when probing again.
358a13ec19cSMichael Baum  */
359a13ec19cSMichael Baum struct mlx5_sh_config {
3608f848f32SViacheslav Ovsiienko 	int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
3618f848f32SViacheslav Ovsiienko 	int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
362a13ec19cSMichael Baum 	uint32_t reclaim_mode:2; /* Memory reclaim mode. */
363a13ec19cSMichael Baum 	uint32_t dv_esw_en:1; /* Enable E-Switch DV flow. */
364d84c3cf7SSuanming Mou 	/* Enable DV flow. 1 means SW steering, 2 means HW steering. */
365ddb68e47SBing Zhao 	uint32_t dv_flow_en:2;
366ddb68e47SBing Zhao 	uint32_t dv_xmeta_en:3; /* Enable extensive flow metadata. */
367a13ec19cSMichael Baum 	uint32_t dv_miss_info:1; /* Restore packet after partial hw miss. */
368a13ec19cSMichael Baum 	uint32_t l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
369a13ec19cSMichael Baum 	uint32_t vf_nl_en:1; /* Enable Netlink requests in VF mode. */
370a13ec19cSMichael Baum 	uint32_t lacp_by_user:1; /* Enable user to manage LACP traffic. */
371a13ec19cSMichael Baum 	uint32_t decap_en:1; /* Whether decap will be used or not. */
372a13ec19cSMichael Baum 	uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
373a13ec19cSMichael Baum 	uint32_t allow_duplicate_pattern:1;
379a13ec19cSMichael Baum 	/* Allow/prevent the duplicate rules pattern. */
374593f913aSMichael Baum 	uint32_t lro_allowed:1; /* Whether LRO is allowed. */
3754d368e1dSXiaoyu Min 	struct {
3764d368e1dSXiaoyu Min 		uint16_t service_core;
3774d368e1dSXiaoyu Min 		uint32_t cycle_time; /* Query cycle time in milliseconds. */
3784d368e1dSXiaoyu Min 	} cnt_svc; /* Configuration for HW steering's counter service. */
3801939eb6fSDariusz Sosnowski 	uint32_t fdb_def_rule:1; /* Create FDB default jump rule */
381483181f7SDariusz Sosnowski 	uint32_t repr_matching:1; /* Enable implicit vport matching in HWS FDB. */
3827fe24446SShahaf Shuler };
3837fe24446SShahaf Shuler 
384dfedf3e3SViacheslav Ovsiienko /* Structure for VF VLAN workaround. */
385dfedf3e3SViacheslav Ovsiienko struct mlx5_vf_vlan {
386dfedf3e3SViacheslav Ovsiienko 	uint32_t tag:12;
387dfedf3e3SViacheslav Ovsiienko 	uint32_t created:1;
388dfedf3e3SViacheslav Ovsiienko };
389dfedf3e3SViacheslav Ovsiienko 
39078be8852SNelio Laranjeiro /* Flow drop context necessary due to Verbs API. */
39178be8852SNelio Laranjeiro struct mlx5_drop {
39278be8852SNelio Laranjeiro 	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
3935ceb3a02SXueming Li 	struct mlx5_rxq_priv *rxq; /* Rx queue. */
39478be8852SNelio Laranjeiro };
39578be8852SNelio Laranjeiro 
39623233fd6SBing Zhao /* Loopback dummy queue resources required due to Verbs API. */
39723233fd6SBing Zhao struct mlx5_lb_ctx {
39823233fd6SBing Zhao 	struct ibv_qp *qp; /* QP object. */
39923233fd6SBing Zhao 	void *ibv_cq; /* Completion queue. */
400e12a0166STyler Retzlaff 	RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
40123233fd6SBing Zhao };
40223233fd6SBing Zhao 
4031944fbc3SSuanming Mou /* External queue descriptor. */
4041944fbc3SSuanming Mou struct mlx5_external_q {
4051944fbc3SSuanming Mou 	uint32_t hw_id; /* Queue index in the Hardware. */
4061944fbc3SSuanming Mou 	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
4071944fbc3SSuanming Mou };
4081944fbc3SSuanming Mou 
409b401400dSSuanming Mou /* HW steering queue job descriptor type. */
410ed0c7474SGregory Etelson enum mlx5_hw_job_type {
411b401400dSSuanming Mou 	MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
412b401400dSSuanming Mou 	MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
413ed0c7474SGregory Etelson 	MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
414ed0c7474SGregory Etelson 	MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
41515896eafSGregory Etelson 	MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, /* Flow update and query job type. */
416654ebd8cSGregory Etelson 	MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE, /* Non-optimized flow create job type. */
417654ebd8cSGregory Etelson 	MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY, /* Non-optimized flow destroy job type. */
418654ebd8cSGregory Etelson 	MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE, /* Move flow after table resize. */
419b401400dSSuanming Mou };
420b401400dSSuanming Mou 
42159155721SGregory Etelson enum mlx5_hw_indirect_type {
42259155721SGregory Etelson 	MLX5_HW_INDIRECT_TYPE_LEGACY,
42359155721SGregory Etelson 	MLX5_HW_INDIRECT_TYPE_LIST
42459155721SGregory Etelson };
42559155721SGregory Etelson 
4261939eb6fSDariusz Sosnowski #define MLX5_HW_MAX_ITEMS (16)
4271939eb6fSDariusz Sosnowski 
428d1357665SDariusz Sosnowski #define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
429d1357665SDariusz Sosnowski #define MLX5_PUSH_MAX_LEN 128
430d1357665SDariusz Sosnowski #define MLX5_ENCAP_MAX_LEN 132
431d1357665SDariusz Sosnowski 
432d1357665SDariusz Sosnowski /** Container for flow action data constructed during flow rule creation. */
433d1357665SDariusz Sosnowski struct mlx5_flow_hw_action_params {
434d1357665SDariusz Sosnowski 	/** Array of constructed modify header commands. */
435d1357665SDariusz Sosnowski 	struct mlx5_modification_cmd mhdr_cmd[MLX5_MHDR_MAX_CMD];
436d1357665SDariusz Sosnowski 	/** Constructed encap/decap data buffer. */
437d1357665SDariusz Sosnowski 	uint8_t encap_data[MLX5_ENCAP_MAX_LEN];
438d1357665SDariusz Sosnowski 	/** Constructed IPv6 routing data buffer. */
439d1357665SDariusz Sosnowski 	uint8_t ipv6_push_data[MLX5_PUSH_MAX_LEN];
440d1357665SDariusz Sosnowski };
441d1357665SDariusz Sosnowski 
442d1357665SDariusz Sosnowski /** Container for dynamically generated flow items used during flow rule creation. */
443d1357665SDariusz Sosnowski struct mlx5_flow_hw_pattern_params {
444d1357665SDariusz Sosnowski 	/** Array of dynamically generated flow items. */
445d1357665SDariusz Sosnowski 	struct rte_flow_item items[MLX5_HW_MAX_ITEMS];
446d1357665SDariusz Sosnowski 	/** Temporary REPRESENTED_PORT item generated by PMD. */
447d1357665SDariusz Sosnowski 	struct rte_flow_item_ethdev port_spec;
448d1357665SDariusz Sosnowski 	/** Temporary TAG item generated by PMD. */
449d1357665SDariusz Sosnowski 	struct rte_flow_item_tag tag_spec;
450d1357665SDariusz Sosnowski };
451d1357665SDariusz Sosnowski 
452b401400dSSuanming Mou /* HW steering flow management job descriptor. */
453b401400dSSuanming Mou struct mlx5_hw_q_job {
454b401400dSSuanming Mou 	uint32_t type; /* Job type. */
45559155721SGregory Etelson 	uint32_t indirect_type; /* Indirect action type (enum mlx5_hw_indirect_type). */
456478ba4bbSSuanming Mou 	const void *action; /* Indirect action attached to the job. */
457b401400dSSuanming Mou 	void *user_data; /* Job user data. */
458478ba4bbSSuanming Mou 	struct {
459d065ecc4SGregory Etelson 		/* User memory for query output */
460d065ecc4SGregory Etelson 		void *user;
461d065ecc4SGregory Etelson 		/* Data extracted from hardware */
462d065ecc4SGregory Etelson 		void *hw;
46357fd15faSDariusz Sosnowski 	} query;
464b401400dSSuanming Mou };
465b401400dSSuanming Mou 
466b401400dSSuanming Mou /* HW steering job descriptor LIFO pool. */
46727595cd8STyler Retzlaff struct __rte_cache_aligned mlx5_hw_q {
468b401400dSSuanming Mou 	uint32_t job_idx; /* Free job index. */
4697cfb022bSDariusz Sosnowski 	uint32_t size; /* Job LIFO queue size. */
4707cfb022bSDariusz Sosnowski 	uint32_t ongoing_flow_ops; /* Number of ongoing flow operations. */
471b401400dSSuanming Mou 	struct mlx5_hw_q_job **job; /* LIFO header. */
472478ba4bbSSuanming Mou 	struct rte_ring *indir_cq; /* Indirect action SW completion queue. */
473478ba4bbSSuanming Mou 	struct rte_ring *indir_iq; /* Indirect action SW in progress queue. */
474654ebd8cSGregory Etelson 	struct rte_ring *flow_transfer_pending; /* Flows pending move after table resize. */
475654ebd8cSGregory Etelson 	struct rte_ring *flow_transfer_completed; /* Flows whose move completed. */
476d1357665SDariusz Sosnowski 	/* Action's ARGUMENT resource buffer for rule creation. */
477d1357665SDariusz Sosnowski 	struct mlx5_flow_hw_action_params ap;
478d1357665SDariusz Sosnowski 	/* Holds spec value for any implicitly added item. */
479d1357665SDariusz Sosnowski 	struct mlx5_flow_hw_pattern_params pp;
48027595cd8STyler Retzlaff };
481b401400dSSuanming Mou 
48224865366SAlexander Kozyrev 
483a94e89e4SMichael Baum #define MLX5_COUNTER_POOLS_MAX_NUM (1 << 15)
4845382d28cSMatan Azrad #define MLX5_COUNTERS_PER_POOL 512
485f15db67dSMatan Azrad #define MLX5_MAX_PENDING_QUERIES 4
486a94e89e4SMichael Baum #define MLX5_CNT_MR_ALLOC_BULK 64
487df051a3eSSuanming Mou #define MLX5_CNT_SHARED_OFFSET 0x80000000
488df051a3eSSuanming Mou #define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \
489df051a3eSSuanming Mou 			   MLX5_CNT_BATCH_OFFSET)
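/*
 * Reading note (illustrative): IS_BATCH_CNT() masks away the shared bit
 * (MLX5_CNT_SHARED_OFFSET) and then checks whether the remaining index falls
 * at or above MLX5_CNT_BATCH_OFFSET, i.e. into the batch counter range.
 */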
490cfbdc3f9SSuanming Mou #define MLX5_CNT_SIZE (sizeof(struct mlx5_flow_counter))
491cfbdc3f9SSuanming Mou #define MLX5_AGE_SIZE (sizeof(struct mlx5_age_param))
492994829e6SSuanming Mou 
4938d93c830SDong Zhou #define MLX5_CNT_LEN(pool) \
494cfbdc3f9SSuanming Mou 	(MLX5_CNT_SIZE + \
4952b5b1aebSSuanming Mou 	((pool)->is_aged ? MLX5_AGE_SIZE : 0))
4968d93c830SDong Zhou #define MLX5_POOL_GET_CNT(pool, index) \
4978d93c830SDong Zhou 	((struct mlx5_flow_counter *) \
4988d93c830SDong Zhou 	((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool))))
4998d93c830SDong Zhou #define MLX5_CNT_ARRAY_IDX(pool, cnt) \
5008d93c830SDong Zhou 	((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \
5018d93c830SDong Zhou 	MLX5_CNT_LEN(pool)))
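/*
 * Layout note (illustrative): counters are laid out right after the pool
 * header, each MLX5_CNT_LEN(pool) bytes apart; for an aged pool that is
 * MLX5_CNT_SIZE + MLX5_AGE_SIZE per counter.
 */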
5022f5122dfSViacheslav Ovsiienko #define MLX5_TS_MASK_SECS 8ull
5032f5122dfSViacheslav Ovsiienko /* Timestamp wrapping in seconds, must be a power of 2. */
5042f5122dfSViacheslav Ovsiienko 
505c3d3b140SSuanming Mou /*
506c3d3b140SSuanming Mou  * The pool index and the offset of the counter in the pool array make up
507c3d3b140SSuanming Mou  * the counter index. 1 is added so that a counter from pool 0 at offset 0
508c3d3b140SSuanming Mou  * does not get index 0, since 0 currently means an invalid counter index.
510c3d3b140SSuanming Mou  */
511c3d3b140SSuanming Mou #define MLX5_MAKE_CNT_IDX(pi, offset) \
512c3d3b140SSuanming Mou 	((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
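/*
 * Worked example (illustrative): pool index 2, offset 7 yields
 * 2 * 512 + 7 + 1 = 1032; index 0 is never produced.
 */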
513fa2d01c8SDong Zhou #define MLX5_CNT_TO_AGE(cnt) \
514fa2d01c8SDong Zhou 	((struct mlx5_age_param *)((cnt) + 1))
515b1cc2266SSuanming Mou /*
516b1cc2266SSuanming Mou  * The maximum single counter index is 0x800000, as MLX5_CNT_BATCH_OFFSET
517b1cc2266SSuanming Mou  * defines. With a pool size of 512, the pool index can never reach
518b1cc2266SSuanming Mou  * INT16_MAX, so UINT16_MAX is a safe invalid marker.
519b1cc2266SSuanming Mou  */
520b1cc2266SSuanming Mou #define POOL_IDX_INVALID UINT16_MAX
5215382d28cSMatan Azrad 
522d5a7d04cSDekel Peled /* Age status. */
523fa2d01c8SDong Zhou enum {
524fa2d01c8SDong Zhou 	AGE_FREE, /* Initialized state. */
525fa2d01c8SDong Zhou 	AGE_CANDIDATE, /* Counter assigned to flows. */
526fa2d01c8SDong Zhou 	AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
527fa2d01c8SDong Zhou };
528fa2d01c8SDong Zhou 
5296b7c717eSSuanming Mou enum mlx5_counter_type {
5306b7c717eSSuanming Mou 	MLX5_COUNTER_TYPE_ORIGIN,
5316b7c717eSSuanming Mou 	MLX5_COUNTER_TYPE_AGE,
5326b7c717eSSuanming Mou 	MLX5_COUNTER_TYPE_MAX,
5336b7c717eSSuanming Mou };
5346b7c717eSSuanming Mou 
535fa2d01c8SDong Zhou /* Counter age parameter. */
536fa2d01c8SDong Zhou struct mlx5_age_param {
537e12a0166STyler Retzlaff 	RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
538fa2d01c8SDong Zhou 	uint16_t port_id; /**< Port id of the counter. */
539d5a7d04cSDekel Peled 	uint32_t timeout:24; /**< Aging timeout in seconds. */
540e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) sec_since_last_hit;
541d5a7d04cSDekel Peled 	/**< Time in seconds since last hit (atomically accessed). */
542fa2d01c8SDong Zhou 	void *context; /**< Flow counter age context. */
543fa2d01c8SDong Zhou };
544fa2d01c8SDong Zhou 
5455382d28cSMatan Azrad struct flow_counter_stats {
5465382d28cSMatan Azrad 	uint64_t hits;
5475382d28cSMatan Azrad 	uint64_t bytes;
5485382d28cSMatan Azrad };
5495382d28cSMatan Azrad 
550df051a3eSSuanming Mou /* Shared counter information. */
551df051a3eSSuanming Mou struct mlx5_flow_counter_shared {
552f3191849SMichael Baum 	union {
553e12a0166STyler Retzlaff 		RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
554f3191849SMichael Baum 		uint32_t id; /* User counter ID for legacy sharing. */
555f3191849SMichael Baum 	};
556df051a3eSSuanming Mou };
557df051a3eSSuanming Mou 
558df051a3eSSuanming Mou struct mlx5_flow_counter_pool;
559826b8a87SSuanming Mou /* Generic counters information. */
5605382d28cSMatan Azrad struct mlx5_flow_counter {
561df051a3eSSuanming Mou 	union {
562df051a3eSSuanming Mou 		/*
563df051a3eSSuanming Mou 		 * User-defined counter shared info is only used while
564df051a3eSSuanming Mou 		 * the counter is active. Aging counter sharing is not
565df051a3eSSuanming Mou 		 * supported, so an active shared counter is never chained
566df051a3eSSuanming Mou 		 * to the aging list. For a shared counter, the TAILQ entry
567df051a3eSSuanming Mou 		 * memory is used only once the counter is released; at that
568df051a3eSSuanming Mou 		 * time, the shared info memory is not used anymore.
5692b5b1aebSSuanming Mou 		 *
5702b5b1aebSSuanming Mou 		 * Similarly for the non-batch counter dcs: since it does
5712b5b1aebSSuanming Mou 		 * not support aging, the entry memory is unused while the
5722b5b1aebSSuanming Mou 		 * counter is allocated. As the bytes memory is used only
5732b5b1aebSSuanming Mou 		 * when the counter is allocated, and the entry memory only
5742b5b1aebSSuanming Mou 		 * when the counter is free, the dcs pointer can be saved in
5752b5b1aebSSuanming Mou 		 * these two different places at different stages. This
5762b5b1aebSSuanming Mou 		 * eliminates the need for an individual counter extend
5772b5b1aebSSuanming Mou 		 * struct.
578df051a3eSSuanming Mou 		 */
5795382d28cSMatan Azrad 		TAILQ_ENTRY(mlx5_flow_counter) next;
5805382d28cSMatan Azrad 		/**< Pointer to the next flow counter structure. */
5812b5b1aebSSuanming Mou 		struct {
582df051a3eSSuanming Mou 			struct mlx5_flow_counter_shared shared_info;
583df051a3eSSuanming Mou 			/**< Shared counter information. */
5842b5b1aebSSuanming Mou 			void *dcs_when_active;
5852b5b1aebSSuanming Mou 			/*
5862b5b1aebSSuanming Mou 			 * For non-batch mode, the dcs is saved
5872b5b1aebSSuanming Mou 			 * here while the counter is active.
5882b5b1aebSSuanming Mou 			 */
5892b5b1aebSSuanming Mou 		};
590df051a3eSSuanming Mou 	};
591f15db67dSMatan Azrad 	union {
5925382d28cSMatan Azrad 		uint64_t hits; /**< Reset value of hits packets. */
593ac79183dSSuanming Mou 		struct mlx5_flow_counter_pool *pool; /**< Counter pool. */
594f15db67dSMatan Azrad 	};
595df051a3eSSuanming Mou 	union {
5962b5b1aebSSuanming Mou 		uint64_t bytes; /**< Reset value of bytes. */
5972b5b1aebSSuanming Mou 		void *dcs_when_free;
5982b5b1aebSSuanming Mou 		/*
5992b5b1aebSSuanming Mou 		 * For non-batch mode, the dcs will be saved here
6002b5b1aebSSuanming Mou 		 * when the counter is free.
6012b5b1aebSSuanming Mou 		 */
602826b8a87SSuanming Mou 	};
6032b5b1aebSSuanming Mou 	void *action; /**< Pointer to the dv action. */
604826b8a87SSuanming Mou };
605826b8a87SSuanming Mou 
6065382d28cSMatan Azrad TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
6075382d28cSMatan Azrad 
608826b8a87SSuanming Mou /* Generic counter pool structure - query is in pool resolution. */
6095382d28cSMatan Azrad struct mlx5_flow_counter_pool {
6105382d28cSMatan Azrad 	TAILQ_ENTRY(mlx5_flow_counter_pool) next;
611ac79183dSSuanming Mou 	struct mlx5_counters counters[2]; /* Free counter list. */
6125382d28cSMatan Azrad 	struct mlx5_devx_obj *min_dcs;
613f15db67dSMatan Azrad 	/* The devx object of the minimum counter ID. */
614d5a7d04cSDekel Peled 	uint64_t time_of_last_age_check;
615d5a7d04cSDekel Peled 	/* System time (from rte_rdtsc()) read in the last aging check. */
6162b5b1aebSSuanming Mou 	uint32_t index:30; /* Pool index in container. */
6172b5b1aebSSuanming Mou 	uint32_t is_aged:1; /* Pool with aging counter. */
618ac79183dSSuanming Mou 	volatile uint32_t query_gen:1; /* Query round. */
619f15db67dSMatan Azrad 	rte_spinlock_t sl; /* The pool lock. */
6203aa27915SSuanming Mou 	rte_spinlock_t csl; /* The pool counter free list lock. */
621f15db67dSMatan Azrad 	struct mlx5_counter_stats_raw *raw;
6223aa27915SSuanming Mou 	struct mlx5_counter_stats_raw *raw_hw;
6233aa27915SSuanming Mou 	/* The raw on HW working. */
6245382d28cSMatan Azrad };
6255382d28cSMatan Azrad 
6265382d28cSMatan Azrad /* Memory management structure for group of counter statistics raws. */
6275382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng {
6285382d28cSMatan Azrad 	LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
6295382d28cSMatan Azrad 	struct mlx5_counter_stats_raw *raws;
6308451e165SMichael Baum 	struct mlx5_pmd_wrapped_mr wm;
6315382d28cSMatan Azrad };
6325382d28cSMatan Azrad 
6335382d28cSMatan Azrad /* Raw memory structure for the counter statistics values of a pool. */
6345382d28cSMatan Azrad struct mlx5_counter_stats_raw {
6355382d28cSMatan Azrad 	LIST_ENTRY(mlx5_counter_stats_raw) next;
6365382d28cSMatan Azrad 	struct mlx5_counter_stats_mem_mng *mem_mng;
6375382d28cSMatan Azrad 	volatile struct flow_counter_stats *data;
6385382d28cSMatan Azrad };
6395382d28cSMatan Azrad 
6405382d28cSMatan Azrad TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
6415382d28cSMatan Azrad 
642994829e6SSuanming Mou /* Counter global management structure. */
643994829e6SSuanming Mou struct mlx5_flow_counter_mng {
644e12a0166STyler Retzlaff 	volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
645b1cc2266SSuanming Mou 	uint16_t last_pool_idx; /* Last used pool index */
646b1cc2266SSuanming Mou 	int min_id; /* The minimum counter ID in the pools. */
647b1cc2266SSuanming Mou 	int max_id; /* The maximum counter ID in the pools. */
6483aa27915SSuanming Mou 	rte_spinlock_t pool_update_sl; /* The pool update lock. */
649994829e6SSuanming Mou 	rte_spinlock_t csl[MLX5_COUNTER_TYPE_MAX];
650994829e6SSuanming Mou 	/* The counter free list lock. */
6516b7c717eSSuanming Mou 	struct mlx5_counters counters[MLX5_COUNTER_TYPE_MAX];
6526b7c717eSSuanming Mou 	/* Free counter list. */
6535382d28cSMatan Azrad 	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
6545af61440SMatan Azrad 	struct mlx5_counter_stats_mem_mng *mem_mng;
6555382d28cSMatan Azrad 	/* Hold the memory management for the next allocated pools raws. */
6565382d28cSMatan Azrad 	struct mlx5_counters flow_counters; /* Legacy flow counter list. */
657f15db67dSMatan Azrad 	uint8_t pending_queries;
658f15db67dSMatan Azrad 	uint16_t pool_index;
659f15db67dSMatan Azrad 	uint8_t query_thread_on;
6602b5b1aebSSuanming Mou 	bool counter_fallback; /* Use counter fallback management. */
6615382d28cSMatan Azrad 	LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
662f15db67dSMatan Azrad 	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
6635382d28cSMatan Azrad };
6645af61440SMatan Azrad 
665f935ed4bSDekel Peled /* ASO structures. */
666f935ed4bSDekel Peled #define MLX5_ASO_QUEUE_LOG_DESC 10
667f935ed4bSDekel Peled 
668f935ed4bSDekel Peled struct mlx5_aso_cq {
669f935ed4bSDekel Peled 	uint16_t log_desc_n;
670f935ed4bSDekel Peled 	uint32_t cq_ci:24;
671c7d41d98SMichael Baum 	struct mlx5_devx_cq cq_obj;
672f935ed4bSDekel Peled 	uint64_t errors;
673f935ed4bSDekel Peled };
674f935ed4bSDekel Peled 
675f935ed4bSDekel Peled struct mlx5_aso_sq_elem {
67629efa63aSLi Zhang 	union {
67729efa63aSLi Zhang 		struct {
678f935ed4bSDekel Peled 			struct mlx5_aso_age_pool *pool;
679f935ed4bSDekel Peled 			uint16_t burst_size;
680f935ed4bSDekel Peled 		};
68129efa63aSLi Zhang 		struct mlx5_aso_mtr *mtr;
682cf756556SBing Zhao 		struct {
683ebaf1b31SBing Zhao 			struct mlx5_aso_ct_action *ct;
684cf756556SBing Zhao 			char *query_data;
685cf756556SBing Zhao 		};
686478ba4bbSSuanming Mou 		void *user_data;
68715896eafSGregory Etelson 		struct mlx5_quota *quota_obj;
68829efa63aSLi Zhang 	};
68929efa63aSLi Zhang };
690f935ed4bSDekel Peled 
691f935ed4bSDekel Peled struct mlx5_aso_sq {
692f935ed4bSDekel Peled 	uint16_t log_desc_n;
693cfd2037cSLi Zhang 	rte_spinlock_t sqsl;
694f935ed4bSDekel Peled 	struct mlx5_aso_cq cq;
695389ab7f5SMichael Baum 	struct mlx5_devx_sq sq_obj;
696cd414f81SMichael Baum 	struct mlx5_pmd_mr mr;
697478ba4bbSSuanming Mou 	volatile struct mlx5_aso_wqe *db;
698f935ed4bSDekel Peled 	uint16_t pi;
699478ba4bbSSuanming Mou 	uint16_t db_pi;
700105d2149SDekel Peled 	uint32_t head;
701105d2149SDekel Peled 	uint32_t tail;
702f935ed4bSDekel Peled 	uint32_t sqn;
703f935ed4bSDekel Peled 	struct mlx5_aso_sq_elem elts[1 << MLX5_ASO_QUEUE_LOG_DESC];
704f935ed4bSDekel Peled 	uint16_t next; /* Pool index of the next pool to query. */
705f935ed4bSDekel Peled };
706f935ed4bSDekel Peled 
707f935ed4bSDekel Peled struct mlx5_aso_age_action {
708f935ed4bSDekel Peled 	LIST_ENTRY(mlx5_aso_age_action) next;
709f935ed4bSDekel Peled 	void *dr_action;
710e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) refcnt;
711f935ed4bSDekel Peled 	/* Following fields relevant only when action is active. */
712f935ed4bSDekel Peled 	uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
713f935ed4bSDekel Peled 	struct mlx5_age_param age_params;
714f935ed4bSDekel Peled };
715f935ed4bSDekel Peled 
716f935ed4bSDekel Peled #define MLX5_ASO_AGE_ACTIONS_PER_POOL 512
717a94e89e4SMichael Baum #define MLX5_ASO_AGE_CONTAINER_RESIZE 64
718f935ed4bSDekel Peled 
719f935ed4bSDekel Peled struct mlx5_aso_age_pool {
720f935ed4bSDekel Peled 	struct mlx5_devx_obj *flow_hit_aso_obj;
721f935ed4bSDekel Peled 	uint16_t index; /* Pool index in pools array. */
722f935ed4bSDekel Peled 	uint64_t time_of_last_age_check; /* In seconds. */
723f935ed4bSDekel Peled 	struct mlx5_aso_age_action actions[MLX5_ASO_AGE_ACTIONS_PER_POOL];
724f935ed4bSDekel Peled };
725f935ed4bSDekel Peled 
726f935ed4bSDekel Peled LIST_HEAD(aso_age_list, mlx5_aso_age_action);
727f935ed4bSDekel Peled 
728f935ed4bSDekel Peled struct mlx5_aso_age_mng {
729f935ed4bSDekel Peled 	struct mlx5_aso_age_pool **pools;
730f935ed4bSDekel Peled 	uint16_t n; /* Total number of pools. */
731f935ed4bSDekel Peled 	uint16_t next; /* Number of pools in use, index of next free pool. */
7327cf2d15aSJiawei Wang 	rte_rwlock_t resize_rwl; /* Lock for resize objects. */
733f935ed4bSDekel Peled 	rte_spinlock_t free_sl; /* Lock for free list access. */
734f935ed4bSDekel Peled 	struct aso_age_list free; /* Free age actions list - ready to use. */
735f935ed4bSDekel Peled 	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
736f935ed4bSDekel Peled };
737f935ed4bSDekel Peled 
738f15f0c38SShiri Kuzin /* Management structure for the GENEVE TLV option. */
739f15f0c38SShiri Kuzin struct mlx5_geneve_tlv_option_resource {
740f15f0c38SShiri Kuzin 	struct mlx5_devx_obj *obj; /* Pointer to the GENEVE TLV opt object. */
741f15f0c38SShiri Kuzin 	rte_be16_t option_class; /* GENEVE TLV opt class. */
742f15f0c38SShiri Kuzin 	uint8_t option_type; /* GENEVE TLV opt type. */
743f15f0c38SShiri Kuzin 	uint8_t length; /* GENEVE TLV opt length. */
744e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) refcnt; /* GENEVE TLV object reference counter. */
745f15f0c38SShiri Kuzin };
746f15f0c38SShiri Kuzin 
747f15f0c38SShiri Kuzin 
748fa2d01c8SDong Zhou #define MLX5_AGE_EVENT_NEW		1
749fa2d01c8SDong Zhou #define MLX5_AGE_TRIGGER		2
750fa2d01c8SDong Zhou #define MLX5_AGE_SET(age_info, BIT) \
751fa2d01c8SDong Zhou 	((age_info)->flags |= (1 << (BIT)))
752447d4d79SMichael Baum #define MLX5_AGE_UNSET(age_info, BIT) \
753447d4d79SMichael Baum 	((age_info)->flags &= ~(1 << (BIT)))
754fa2d01c8SDong Zhou #define MLX5_AGE_GET(age_info, BIT) \
755fa2d01c8SDong Zhou 	((age_info)->flags & (1 << (BIT)))
756fa2d01c8SDong Zhou #define GET_PORT_AGE_INFO(priv) \
75791389890SOphir Munk 	(&((priv)->sh->port[(priv)->dev_port - 1].age_info))
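/*
 * Usage sketch (illustrative): flag a new aging event on a port and test it
 * later.
 *
 *	struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
 *
 *	MLX5_AGE_SET(info, MLX5_AGE_EVENT_NEW);
 *	if (MLX5_AGE_GET(info, MLX5_AGE_EVENT_NEW))
 *		... handle the newly aged flows ...
 */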
758d5a7d04cSDekel Peled /* Current time in seconds. */
759d5a7d04cSDekel Peled #define MLX5_CURR_TIME_SEC	(rte_rdtsc() / rte_get_tsc_hz())
7605382d28cSMatan Azrad 
76104a4de75SMichael Baum /*
76204a4de75SMichael Baum  * HW steering queue oriented AGE info.
76304a4de75SMichael Baum  * It contains an array of rings, one for each HWS queue.
76404a4de75SMichael Baum  */
76504a4de75SMichael Baum struct mlx5_hws_q_age_info {
76604a4de75SMichael Baum 	uint16_t nb_rings; /* Number of aged-out ring lists. */
76704a4de75SMichael Baum 	struct rte_ring *aged_lists[]; /* Aged-out lists. */
76804a4de75SMichael Baum };
76904a4de75SMichael Baum 
77004a4de75SMichael Baum /*
77104a4de75SMichael Baum  * HW steering AGE info.
77204a4de75SMichael Baum  * It has a ring list containing all aged out flow rules.
77304a4de75SMichael Baum  */
77404a4de75SMichael Baum struct mlx5_hws_age_info {
77504a4de75SMichael Baum 	struct rte_ring *aged_list; /* Aged out lists. */
77604a4de75SMichael Baum };
77704a4de75SMichael Baum 
778fa2d01c8SDong Zhou /* Per-port aging information. */
779fa2d01c8SDong Zhou struct mlx5_age_info {
780d5a7d04cSDekel Peled 	uint8_t flags; /* Indicate if is new event or need to be triggered. */
78104a4de75SMichael Baum 	union {
78204a4de75SMichael Baum 		/* SW/FW steering AGE info. */
78304a4de75SMichael Baum 		struct {
78404a4de75SMichael Baum 			struct mlx5_counters aged_counters;
78504a4de75SMichael Baum 			/* Aged counter list. */
78604a4de75SMichael Baum 			struct aso_age_list aged_aso;
78704a4de75SMichael Baum 			/* Aged ASO actions list. */
788f935ed4bSDekel Peled 			rte_spinlock_t aged_sl; /* Aged flow list lock. */
789fa2d01c8SDong Zhou 		};
79004a4de75SMichael Baum 		struct {
79104a4de75SMichael Baum 			struct mlx5_indexed_pool *ages_ipool;
79204a4de75SMichael Baum 			union {
79304a4de75SMichael Baum 				struct mlx5_hws_age_info hw_age;
79404a4de75SMichael Baum 				/* HW steering AGE info. */
79504a4de75SMichael Baum 				struct mlx5_hws_q_age_info *hw_q_age;
79604a4de75SMichael Baum 				/* HW steering queue oriented AGE info. */
79704a4de75SMichael Baum 			};
79804a4de75SMichael Baum 		};
79904a4de75SMichael Baum 	};
80004a4de75SMichael Baum };
8015af61440SMatan Azrad 
80217e19bc4SViacheslav Ovsiienko /* Per port data of shared IB device. */
80391389890SOphir Munk struct mlx5_dev_shared_port {
80417e19bc4SViacheslav Ovsiienko 	uint32_t ih_port_id;
80523242063SMatan Azrad 	uint32_t devx_ih_port_id;
80617f95513SDmitry Kozlyuk 	uint32_t nl_ih_port_id;
80717e19bc4SViacheslav Ovsiienko 	/*
80817e19bc4SViacheslav Ovsiienko 	 * Interrupt handler port_id. Used by shared interrupt
80917e19bc4SViacheslav Ovsiienko 	 * handler to find the corresponding rte_eth device
81017e19bc4SViacheslav Ovsiienko 	 * by IB port index. If value is equal or greater
81117e19bc4SViacheslav Ovsiienko 	 * RTE_MAX_ETHPORTS it means there is no subhandler
81217e19bc4SViacheslav Ovsiienko 	 * installed for specified IB port index.
81317e19bc4SViacheslav Ovsiienko 	 */
814fa2d01c8SDong Zhou 	struct mlx5_age_info age_info;
815fa2d01c8SDong Zhou 	/* Per-port aging information. */
81617e19bc4SViacheslav Ovsiienko };
81717e19bc4SViacheslav Ovsiienko 
818afb4aa4fSLi Zhang /*
819afb4aa4fSLi Zhang  * Max number of actions per DV flow.
820afb4aa4fSLi Zhang  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
821afb4aa4fSLi Zhang  * in rdma-core file providers/mlx5/verbs.c.
822afb4aa4fSLi Zhang  */
823afb4aa4fSLi Zhang #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
824afb4aa4fSLi Zhang 
825e6100c7bSLi Zhang /* ASO flow meter structures */
826e6100c7bSLi Zhang /* Modify this value if enum rte_mtr_color changes. */
827e6100c7bSLi Zhang #define RTE_MTR_DROPPED RTE_COLORS
828363db9b0SBing Zhao /* Yellow is now supported. */
829363db9b0SBing Zhao #define MLX5_MTR_RTE_COLORS (RTE_COLOR_YELLOW + 1)
830afb4aa4fSLi Zhang /* table_id 22 bits in mlx5_flow_tbl_key so limit policy number. */
831afb4aa4fSLi Zhang #define MLX5_MAX_SUB_POLICY_TBL_NUM 0x3FFFFF
832afb4aa4fSLi Zhang #define MLX5_INVALID_POLICY_ID UINT32_MAX
833afb4aa4fSLi Zhang /* Suffix table_id on MLX5_FLOW_TABLE_LEVEL_METER. */
834afb4aa4fSLi Zhang #define MLX5_MTR_TABLE_ID_SUFFIX 1
835afb4aa4fSLi Zhang /* Drop table_id on MLX5_FLOW_TABLE_LEVEL_METER. */
836afb4aa4fSLi Zhang #define MLX5_MTR_TABLE_ID_DROP 2
837363db9b0SBing Zhao /* Priority of the meter policy matcher. */
838363db9b0SBing Zhao #define MLX5_MTR_POLICY_MATCHER_PRIO 0
8390888c011SRongwei Liu /* Green & yellow color valid for now. */
8400888c011SRongwei Liu #define MLX5_MTR_POLICY_MODE_ALL 0
8414b7bf3ffSBing Zhao /* Default policy. */
8424b7bf3ffSBing Zhao #define MLX5_MTR_POLICY_MODE_DEF 1
8434b7bf3ffSBing Zhao /* Only green color valid. */
8444b7bf3ffSBing Zhao #define MLX5_MTR_POLICY_MODE_OG 2
8454b7bf3ffSBing Zhao /* Only yellow color valid. */
8464b7bf3ffSBing Zhao #define MLX5_MTR_POLICY_MODE_OY 3
847afb4aa4fSLi Zhang 
848bae983b8SMaayan Kashani /* Max number of meters. */
849bae983b8SMaayan Kashani #define MLX5_MTR_MAX(priv) (mlx5_flow_mtr_max_get(priv))
850e1c83d29SMaayan Kashani /* Max number of meters allocated in non template mode. */
851bae983b8SMaayan Kashani #define MLX5_MTR_NT_MAX(priv) (MLX5_MTR_MAX(priv) >> 1)
852bae983b8SMaayan Kashani /* Max number of connection tracking. */
853bae983b8SMaayan Kashani #define MLX5_CT_MAX(priv) (1 << (priv)->sh->cdev->config.hca_attr.log_max_conn_track_offload)
854bae983b8SMaayan Kashani /* Max number of connection tracking allocated in non template mode. */
855bae983b8SMaayan Kashani #define MLX5_CT_NT_MAX(priv) (MLX5_CT_MAX(priv) >> 1)
856bae983b8SMaayan Kashani /* Max number of counters. */
857bae983b8SMaayan Kashani #define MLX5_CNT_MAX(priv) ((priv)->sh->hws_max_nb_counters)
858bae983b8SMaayan Kashani /* Max number of counters allocated in non template mode. */
859bae983b8SMaayan Kashani #define MLX5_CNT_NT_MAX(priv) (MLX5_CNT_MAX(priv) >> 1)
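/*
 * Worked example (illustrative): with (priv)->sh->hws_max_nb_counters equal
 * to 1 << 20, MLX5_CNT_MAX(priv) is 1048576 and non template mode may use at
 * most MLX5_CNT_NT_MAX(priv) = 524288 of them.
 */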
860e1c83d29SMaayan Kashani 
861afb4aa4fSLi Zhang enum mlx5_meter_domain {
862afb4aa4fSLi Zhang 	MLX5_MTR_DOMAIN_INGRESS,
863afb4aa4fSLi Zhang 	MLX5_MTR_DOMAIN_EGRESS,
864afb4aa4fSLi Zhang 	MLX5_MTR_DOMAIN_TRANSFER,
865afb4aa4fSLi Zhang 	MLX5_MTR_DOMAIN_MAX,
866afb4aa4fSLi Zhang };
867afb4aa4fSLi Zhang #define MLX5_MTR_DOMAIN_INGRESS_BIT  (1 << MLX5_MTR_DOMAIN_INGRESS)
868afb4aa4fSLi Zhang #define MLX5_MTR_DOMAIN_EGRESS_BIT   (1 << MLX5_MTR_DOMAIN_EGRESS)
869afb4aa4fSLi Zhang #define MLX5_MTR_DOMAIN_TRANSFER_BIT (1 << MLX5_MTR_DOMAIN_TRANSFER)
870afb4aa4fSLi Zhang #define MLX5_MTR_ALL_DOMAIN_BIT      (MLX5_MTR_DOMAIN_INGRESS_BIT | \
871afb4aa4fSLi Zhang 					MLX5_MTR_DOMAIN_EGRESS_BIT | \
872afb4aa4fSLi Zhang 					MLX5_MTR_DOMAIN_TRANSFER_BIT)
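/*
 * Example (illustrative): a policy valid on ingress and transfer only would
 * carry MLX5_MTR_DOMAIN_INGRESS_BIT | MLX5_MTR_DOMAIN_TRANSFER_BIT.
 */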
873afb4aa4fSLi Zhang 
8748e5c9feaSShun Hao /* The color tag rule structure. */
8758e5c9feaSShun Hao struct mlx5_sub_policy_color_rule {
8768e5c9feaSShun Hao 	void *rule;
8778e5c9feaSShun Hao 	/* The color rule. */
8788e5c9feaSShun Hao 	struct mlx5_flow_dv_matcher *matcher;
8798e5c9feaSShun Hao 	/* The color matcher. */
8808e5c9feaSShun Hao 	TAILQ_ENTRY(mlx5_sub_policy_color_rule) next_port;
8818e5c9feaSShun Hao 	/**< Pointer to the next color rule structure. */
8828e5c9feaSShun Hao 	int32_t src_port;
8838e5c9feaSShun Hao 	/* The src port this rule is applied on. */
8848e5c9feaSShun Hao };
8858e5c9feaSShun Hao 
8868e5c9feaSShun Hao TAILQ_HEAD(mlx5_sub_policy_color_rules, mlx5_sub_policy_color_rule);
8878e5c9feaSShun Hao 
888afb4aa4fSLi Zhang /*
889afb4aa4fSLi Zhang  * Meter sub-policy structure.
890afb4aa4fSLi Zhang  * Each RSS TIR in a meter policy needs its own sub-policy resource.
891afb4aa4fSLi Zhang  */
892afb4aa4fSLi Zhang struct mlx5_flow_meter_sub_policy {
893afb4aa4fSLi Zhang 	uint32_t main_policy_id:1;
894afb4aa4fSLi Zhang 	/* Main policy id is same as this sub_policy id. */
895afb4aa4fSLi Zhang 	uint32_t idx:31;
896afb4aa4fSLi Zhang 	/* Index to sub_policy ipool entity. */
897afb4aa4fSLi Zhang 	void *main_policy;
898afb4aa4fSLi Zhang 	/* Point to struct mlx5_flow_meter_policy. */
899afb4aa4fSLi Zhang 	struct mlx5_flow_tbl_resource *tbl_rsc;
900afb4aa4fSLi Zhang 	/* The sub-policy table resource. */
901afb4aa4fSLi Zhang 	uint32_t rix_hrxq[MLX5_MTR_RTE_COLORS];
902afb4aa4fSLi Zhang 	/* Index to TIR resource. */
903afb4aa4fSLi Zhang 	struct mlx5_flow_tbl_resource *jump_tbl[MLX5_MTR_RTE_COLORS];
904afb4aa4fSLi Zhang 	/* Meter jump/drop table. */
9058e5c9feaSShun Hao 	struct mlx5_sub_policy_color_rules color_rules[RTE_COLORS];
9068e5c9feaSShun Hao 	/* List for the color rules. */
907afb4aa4fSLi Zhang };
908afb4aa4fSLi Zhang 
909afb4aa4fSLi Zhang struct mlx5_meter_policy_acts {
910afb4aa4fSLi Zhang 	uint8_t actions_n;
911afb4aa4fSLi Zhang 	/* Number of actions. */
912afb4aa4fSLi Zhang 	void *dv_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
913afb4aa4fSLi Zhang 	/* Action list. */
914afb4aa4fSLi Zhang };
915afb4aa4fSLi Zhang 
916afb4aa4fSLi Zhang struct mlx5_meter_policy_action_container {
917afb4aa4fSLi Zhang 	uint32_t rix_mark;
918afb4aa4fSLi Zhang 	/* Index to the mark action. */
919afb4aa4fSLi Zhang 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
920afb4aa4fSLi Zhang 	/* Pointer to modify header resource in cache. */
921afb4aa4fSLi Zhang 	uint8_t fate_action;
922afb4aa4fSLi Zhang 	/* Fate action type. */
923afb4aa4fSLi Zhang 	union {
924afb4aa4fSLi Zhang 		struct rte_flow_action *rss;
925afb4aa4fSLi Zhang 		/* Rss action configuration. */
926afb4aa4fSLi Zhang 		uint32_t rix_port_id_action;
927afb4aa4fSLi Zhang 		/* Index to port ID action resource. */
928afb4aa4fSLi Zhang 		void *dr_jump_action[MLX5_MTR_DOMAIN_MAX];
929afb4aa4fSLi Zhang 		/* Jump/drop action per color. */
930ec962badSLi Zhang 		uint16_t queue;
931ec962badSLi Zhang 		/* Queue action configuration. */
93250cc92ddSShun Hao 		struct {
93350cc92ddSShun Hao 			uint32_t next_mtr_id;
93450cc92ddSShun Hao 			/* The next meter id. */
93550cc92ddSShun Hao 			void *next_sub_policy;
93650cc92ddSShun Hao 			/* Next meter's sub-policy. */
93750cc92ddSShun Hao 		};
938afb4aa4fSLi Zhang 	};
939afb4aa4fSLi Zhang };
940afb4aa4fSLi Zhang 
941afb4aa4fSLi Zhang /* Flow meter policy parameter structure. */
942afb4aa4fSLi Zhang struct mlx5_flow_meter_policy {
943afb4aa4fSLi Zhang 	uint32_t is_rss:1;
944afb4aa4fSLi Zhang 	/* Is RSS policy table. */
945afb4aa4fSLi Zhang 	uint32_t ingress:1;
946afb4aa4fSLi Zhang 	/* Rule applies to ingress domain. */
947afb4aa4fSLi Zhang 	uint32_t egress:1;
948afb4aa4fSLi Zhang 	/* Rule applies to egress domain. */
949afb4aa4fSLi Zhang 	uint32_t transfer:1;
950afb4aa4fSLi Zhang 	/* Rule applies to transfer domain. */
951ec962badSLi Zhang 	uint32_t is_queue:1;
952ec962badSLi Zhang 	/* Is queue action in policy table. */
95350cc92ddSShun Hao 	uint32_t is_hierarchy:1;
95450cc92ddSShun Hao 	/* Is meter action in policy table. */
9558330a5fbSShun Hao 	uint32_t match_port:1;
9568330a5fbSShun Hao 	/* If policy flows match src port. */
9578330a5fbSShun Hao 	uint32_t hierarchy_match_port:1;
9588330a5fbSShun Hao 	/* Whether any meter in the hierarchy contains a policy flow matching the src port. */
95924865366SAlexander Kozyrev 	uint32_t skip_r:1;
96024865366SAlexander Kozyrev 	/* If red color policy is skipped. */
9614d648fadSBing Zhao 	uint32_t skip_y:1;
9624d648fadSBing Zhao 	/* If yellow color policy is skipped. */
9634d648fadSBing Zhao 	uint32_t skip_g:1;
9644d648fadSBing Zhao 	/* If green color policy is skipped. */
9659267617bSShun Hao 	uint32_t mark:1;
9669267617bSShun Hao 	/* If policy contains mark action. */
96724865366SAlexander Kozyrev 	uint32_t initialized:1;
96824865366SAlexander Kozyrev 	/* Initialized. */
96924865366SAlexander Kozyrev 	uint16_t group;
97024865366SAlexander Kozyrev 	/* The group. */
971afb4aa4fSLi Zhang 	rte_spinlock_t sl;
972e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) ref_cnt;
973afb4aa4fSLi Zhang 	/* Use count. */
97424865366SAlexander Kozyrev 	struct rte_flow_pattern_template *hws_item_templ;
97524865366SAlexander Kozyrev 	/* Hardware steering item templates. */
97624865366SAlexander Kozyrev 	struct rte_flow_actions_template *hws_act_templ[MLX5_MTR_DOMAIN_MAX];
97724865366SAlexander Kozyrev 	/* Hardware steering action templates. */
97824865366SAlexander Kozyrev 	struct rte_flow_template_table *hws_flow_table[MLX5_MTR_DOMAIN_MAX];
97924865366SAlexander Kozyrev 	/* Hardware steering tables. */
98024865366SAlexander Kozyrev 	struct rte_flow *hws_flow_rule[MLX5_MTR_DOMAIN_MAX][RTE_COLORS];
98124865366SAlexander Kozyrev 	/* Hardware steering rules. */
982afb4aa4fSLi Zhang 	struct mlx5_meter_policy_action_container act_cnt[MLX5_MTR_RTE_COLORS];
983afb4aa4fSLi Zhang 	/* Policy actions container. */
984afb4aa4fSLi Zhang 	void *dr_drop_action[MLX5_MTR_DOMAIN_MAX];
985afb4aa4fSLi Zhang 	/* Drop action for red color. */
986afb4aa4fSLi Zhang 	uint16_t sub_policy_num;
987afb4aa4fSLi Zhang 	/* Sub-policy table count per domain, 3 bits each. */
988afb4aa4fSLi Zhang 	struct mlx5_flow_meter_sub_policy **sub_policys[MLX5_MTR_DOMAIN_MAX];
989afb4aa4fSLi Zhang 	/* Sub-policy table array; must be at the end of the struct. */
990afb4aa4fSLi Zhang };
991afb4aa4fSLi Zhang 
992afb4aa4fSLi Zhang /* The maximum number of sub-policies relates to struct mlx5_rss_hash_fields[]. */
993afb4aa4fSLi Zhang #define MLX5_MTR_RSS_MAX_SUB_POLICY 7
994afb4aa4fSLi Zhang #define MLX5_MTR_SUB_POLICY_NUM_SHIFT  3
995afb4aa4fSLi Zhang #define MLX5_MTR_SUB_POLICY_NUM_MASK  0x7
996afb4aa4fSLi Zhang #define MLX5_MTRS_DEFAULT_RULE_PRIORITY 0xFFFF
99750cc92ddSShun Hao #define MLX5_MTR_CHAIN_MAX_NUM 8
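
/*
 * Illustrative decode helper (a sketch, not part of the upstream API;
 * the function name is hypothetical): each meter domain stores its
 * sub-policy table count in 3 bits of the packed sub_policy_num field.
 */
static inline uint32_t
mlx5_mtr_sub_policy_num_of(uint32_t sub_policy_num, uint32_t domain)
{
	return (sub_policy_num >> (domain * MLX5_MTR_SUB_POLICY_NUM_SHIFT)) &
	       MLX5_MTR_SUB_POLICY_NUM_MASK;
}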
998afb4aa4fSLi Zhang 
999afb4aa4fSLi Zhang /* Flow meter default policy parameter structure.
1000afb4aa4fSLi Zhang  * Policy index 0 is reserved for the default policy table.
1001afb4aa4fSLi Zhang  * Action per color as below:
1002afb4aa4fSLi Zhang  * green - do nothing, yellow - do nothing, red - drop
1003afb4aa4fSLi Zhang  */
1004afb4aa4fSLi Zhang struct mlx5_flow_meter_def_policy {
1005afb4aa4fSLi Zhang 	struct mlx5_flow_meter_sub_policy sub_policy;
1006afb4aa4fSLi Zhang 	/* Policy rules jump to other tables. */
1007afb4aa4fSLi Zhang 	void *dr_jump_action[RTE_COLORS];
1008afb4aa4fSLi Zhang 	/* Jump action per color. */
1009afb4aa4fSLi Zhang };
1010e6100c7bSLi Zhang 
1011e6100c7bSLi Zhang /* Meter parameter structure. */
1012e6100c7bSLi Zhang struct mlx5_flow_meter_info {
101344432018SLi Zhang 	uint32_t meter_id;
101444432018SLi Zhang 	/**< Meter id. */
101544432018SLi Zhang 	uint32_t policy_id;
101644432018SLi Zhang 	/* Policy id, the first sub_policy idx. */
1017e6100c7bSLi Zhang 	struct mlx5_flow_meter_profile *profile;
1018e6100c7bSLi Zhang 	/**< Meter profile parameters. */
1019e6100c7bSLi Zhang 	rte_spinlock_t sl; /**< Meter action spinlock. */
1020e6100c7bSLi Zhang 	/** Set of stats counters to be enabled.
1021e6100c7bSLi Zhang 	 * @see enum rte_mtr_stats_type
1022e6100c7bSLi Zhang 	 */
1023e6100c7bSLi Zhang 	uint32_t bytes_dropped:1;
1024e6100c7bSLi Zhang 	/** Set bytes dropped stats to be enabled. */
1025e6100c7bSLi Zhang 	uint32_t pkts_dropped:1;
1026e6100c7bSLi Zhang 	/** Set packets dropped stats to be enabled. */
1027e6100c7bSLi Zhang 	uint32_t active_state:1;
1028e6100c7bSLi Zhang 	/**< Meter hw active state. */
1029e6100c7bSLi Zhang 	uint32_t shared:1;
1030e6100c7bSLi Zhang 	/**< Meter shared or not. */
1031e6100c7bSLi Zhang 	uint32_t is_enable:1;
1032e6100c7bSLi Zhang 	/**< Meter disable/enable state. */
1033e6100c7bSLi Zhang 	uint32_t ingress:1;
1034e6100c7bSLi Zhang 	/**< Rule applies to ingress traffic. */
1035e6100c7bSLi Zhang 	uint32_t egress:1;
1036e6100c7bSLi Zhang 	/**
1037e6100c7bSLi Zhang 	 * Instead of simply matching the properties of traffic as it would
1038e6100c7bSLi Zhang 	 * appear on a given DPDK port ID, enabling this attribute transfers
1039e6100c7bSLi Zhang 	 * a flow rule to the lowest possible level of any device endpoints
1040e6100c7bSLi Zhang 	 * found in the pattern.
1041e6100c7bSLi Zhang 	 *
1042e6100c7bSLi Zhang 	 * When supported, this effectively enables an application to
1043e6100c7bSLi Zhang 	 * re-route traffic not necessarily intended for it (e.g. coming
1044e6100c7bSLi Zhang 	 * from or addressed to different physical ports, VFs or
1045e6100c7bSLi Zhang 	 * applications) at the device level.
1046e6100c7bSLi Zhang 	 *
1047e6100c7bSLi Zhang 	 * It complements the behavior of some pattern items such as
10485e3779b7SIvan Malov 	 * RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT and is meaningless without them.
1049e6100c7bSLi Zhang 	 *
1050e6100c7bSLi Zhang 	 * When transferring flow rules, ingress and egress attributes keep
1051e6100c7bSLi Zhang 	 * their original meaning, as if processing traffic emitted or
1052e6100c7bSLi Zhang 	 * received by the application.
1053e6100c7bSLi Zhang 	 */
1054e6100c7bSLi Zhang 	uint32_t transfer:1;
105544432018SLi Zhang 	uint32_t def_policy:1;
105744432018SLi Zhang 	/* Meter points to default policy. */
105624865366SAlexander Kozyrev 	uint32_t initialized:1;
10586b838de3SShun Hao 	uint32_t color_aware:1;
10596b838de3SShun Hao 	/* Meter is color aware mode. */
106044432018SLi Zhang 	void *drop_rule[MLX5_MTR_DOMAIN_MAX];
106144432018SLi Zhang 	/* Meter drop rule in drop table. */
10625f0d54f3SLi Zhang 	uint32_t drop_cnt;
10635f0d54f3SLi Zhang 	/**< Color counter for drop. */
1064e6100c7bSLi Zhang 	uint32_t ref_cnt;
1065e6100c7bSLi Zhang 	/**< Use count. */
1066e6100c7bSLi Zhang 	struct mlx5_indexed_pool *flow_ipool;
1067e6100c7bSLi Zhang 	/**< Index pool for flow id. */
1068bf62fb76SShun Hao 	void *meter_action_g;
1069c99b4f8bSLi Zhang 	/**< Flow meter action. */
1070bf62fb76SShun Hao 	void *meter_action_y;
1071bf62fb76SShun Hao 	/**< Flow meter action for yellow init_color. */
107224865366SAlexander Kozyrev 	uint32_t meter_offset;
107324865366SAlexander Kozyrev 	/**< Flow meter offset. */
107424865366SAlexander Kozyrev 	uint16_t group;
107524865366SAlexander Kozyrev 	/**< Flow meter group. */
1076e6100c7bSLi Zhang };
1077e6100c7bSLi Zhang 
107844432018SLi Zhang /* PPS (packets per second) mapped to BPS (bytes per second).
107944432018SLi Zhang  * HW treats each packet as 128 bytes in PPS mode.
108044432018SLi Zhang  */
108144432018SLi Zhang #define MLX5_MTRS_PPS_MAP_BPS_SHIFT 7
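
/*
 * Example (illustrative): a rate given in packets per second is mapped
 * to the bytes-per-second value programmed to HW as
 *
 *	rate_bps = rate_pps << MLX5_MTRS_PPS_MAP_BPS_SHIFT;
 *
 * i.e. every packet is accounted as 2^7 = 128 bytes.
 */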
108244432018SLi Zhang 
1083e6100c7bSLi Zhang /* RFC2697 parameter structure. */
1084e6100c7bSLi Zhang struct mlx5_flow_meter_srtcm_rfc2697_prm {
1085e6100c7bSLi Zhang 	rte_be32_t cbs_cir;
1086e6100c7bSLi Zhang 	/*
1087e6100c7bSLi Zhang 	 * bits 24-28: cbs_exponent, bits 16-23: cbs_mantissa,
1088e6100c7bSLi Zhang 	 * bits 8-12: cir_exponent, bits 0-7: cir_mantissa.
1089e6100c7bSLi Zhang 	 */
1090e6100c7bSLi Zhang 	rte_be32_t ebs_eir;
1091e6100c7bSLi Zhang 	/*
1092e6100c7bSLi Zhang 	 * bits 24-28: ebs_exponent, bits 16-23: ebs_mantissa,
1093e6100c7bSLi Zhang 	 * bits 8-12: eir_exponent, bits 0-7: eir_mantissa.
1094e6100c7bSLi Zhang 	 */
1095e6100c7bSLi Zhang };
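
/*
 * Packing sketch for the bit layout above (illustrative only; the helper
 * name is hypothetical and the exponent/mantissa values are assumed to
 * be range-checked by the caller):
 */
static inline rte_be32_t
mlx5_srtcm_pack_cbs_cir(uint32_t cbs_exp, uint32_t cbs_man,
			uint32_t cir_exp, uint32_t cir_man)
{
	return rte_cpu_to_be_32((cbs_exp << 24) | (cbs_man << 16) |
				(cir_exp << 8) | cir_man);
}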
1096e6100c7bSLi Zhang 
1097e6100c7bSLi Zhang /* Flow meter profile structure. */
1098e6100c7bSLi Zhang struct mlx5_flow_meter_profile {
1099e6100c7bSLi Zhang 	TAILQ_ENTRY(mlx5_flow_meter_profile) next;
1100e6100c7bSLi Zhang 	/**< Pointer to the next flow meter structure. */
1101e6100c7bSLi Zhang 	uint32_t id; /**< Profile id. */
1102e6100c7bSLi Zhang 	struct rte_mtr_meter_profile profile; /**< Profile detail. */
1103e6100c7bSLi Zhang 	union {
1104e6100c7bSLi Zhang 		struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
1105e6100c7bSLi Zhang 		/**< srtcm_rfc2697 struct. */
1106e6100c7bSLi Zhang 	};
1107e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
11084d648fadSBing Zhao 	uint32_t g_support:1; /**< If G color will be generated. */
11094d648fadSBing Zhao 	uint32_t y_support:1; /**< If Y color will be generated. */
111024865366SAlexander Kozyrev 	uint32_t initialized:1; /**< Initialized. */
1111e6100c7bSLi Zhang };
1112e6100c7bSLi Zhang 
1113e6100c7bSLi Zhang /* 2 meters in each ASO cache line */
1114e6100c7bSLi Zhang #define MLX5_MTRS_CONTAINER_RESIZE 64
1115e6100c7bSLi Zhang /*
1116e6100c7bSLi Zhang  * The pool index and the offset of the meter in the pool array make up
1117e6100c7bSLi Zhang  * the meter index. 1 is added so that the meter from pool 0 at offset 0
1118e6100c7bSLi Zhang  * does not get index 0, since 0 currently means an invalid meter index.
1120e6100c7bSLi Zhang  */
1121e6100c7bSLi Zhang #define MLX5_MAKE_MTR_IDX(pi, offset) \
1122e6100c7bSLi Zhang 		((pi) * MLX5_ASO_MTRS_PER_POOL + (offset) + 1)
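
/*
 * Example (illustrative): the inverse mapping recovers the meter
 * location from its index:
 *
 *	pool_idx = (mtr_idx - 1) / MLX5_ASO_MTRS_PER_POOL;
 *	offset   = (mtr_idx - 1) % MLX5_ASO_MTRS_PER_POOL;
 */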
1123e6100c7bSLi Zhang 
1124e6100c7bSLi Zhang /* ASO flow meter state. */
1125e6100c7bSLi Zhang enum mlx5_aso_mtr_state {
1126e6100c7bSLi Zhang 	ASO_METER_FREE, /* In free list. */
1127e6100c7bSLi Zhang 	ASO_METER_WAIT, /* ACCESS_ASO WQE in progress. */
1128478ba4bbSSuanming Mou 	ASO_METER_WAIT_ASYNC, /* CQE will be handled by async pull. */
1129e6100c7bSLi Zhang 	ASO_METER_READY, /* CQE received. */
1130e6100c7bSLi Zhang };
1131e6100c7bSLi Zhang 
113224865366SAlexander Kozyrev /* ASO flow meter type. */
113324865366SAlexander Kozyrev enum mlx5_aso_mtr_type {
113424865366SAlexander Kozyrev 	ASO_METER_INDIRECT,
113524865366SAlexander Kozyrev 	ASO_METER_DIRECT,
113624865366SAlexander Kozyrev };
113724865366SAlexander Kozyrev 
1138e6100c7bSLi Zhang /* Generic aso_flow_meter information. */
1139e6100c7bSLi Zhang struct mlx5_aso_mtr {
114048fbb0e9SAlexander Kozyrev 	union {
1141e6100c7bSLi Zhang 		LIST_ENTRY(mlx5_aso_mtr) next;
114248fbb0e9SAlexander Kozyrev 		struct mlx5_aso_mtr_pool *pool;
114348fbb0e9SAlexander Kozyrev 	};
114424865366SAlexander Kozyrev 	enum mlx5_aso_mtr_type type;
1145e6100c7bSLi Zhang 	struct mlx5_flow_meter_info fm;
1146e6100c7bSLi Zhang 	/**< Flow meter information. */
1147e12a0166STyler Retzlaff 	RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
114824865366SAlexander Kozyrev 	uint32_t offset;
114948fbb0e9SAlexander Kozyrev 	enum rte_color init_color;
1150e6100c7bSLi Zhang };
1151e6100c7bSLi Zhang 
1152e6100c7bSLi Zhang /* Generic aso_flow_meter pool structure. */
1153e6100c7bSLi Zhang struct mlx5_aso_mtr_pool {
1154e6100c7bSLi Zhang 	struct mlx5_aso_mtr mtrs[MLX5_ASO_MTRS_PER_POOL];
1155e6100c7bSLi Zhang 	/* Must be the first member in the pool. */
1156e6100c7bSLi Zhang 	struct mlx5_devx_obj *devx_obj;
1157e6100c7bSLi Zhang 	/* The devx object of the minimum aso flow meter ID. */
115848fbb0e9SAlexander Kozyrev 	struct mlx5dr_action *action; /* HWS action. */
115948fbb0e9SAlexander Kozyrev 	struct mlx5_indexed_pool *idx_pool; /* HWS index pool. */
1160e6100c7bSLi Zhang 	uint32_t index; /* Pool index in management structure. */
116148fbb0e9SAlexander Kozyrev 	uint32_t nb_sq; /* Number of ASO SQ. */
116248fbb0e9SAlexander Kozyrev 	struct mlx5_aso_sq *sq; /* ASO SQs. */
1163e6100c7bSLi Zhang };
1164e6100c7bSLi Zhang 
1165e6100c7bSLi Zhang LIST_HEAD(aso_meter_list, mlx5_aso_mtr);
1166e6100c7bSLi Zhang /* Pools management structure for ASO flow meter pools. */
1167e6100c7bSLi Zhang struct mlx5_aso_mtr_pools_mng {
1168e6100c7bSLi Zhang 	volatile uint16_t n_valid; /* Number of valid pools. */
1169e6100c7bSLi Zhang 	uint16_t n; /* Number of pools. */
1170e6100c7bSLi Zhang 	rte_spinlock_t mtrsl; /* The ASO flow meter free list lock. */
11717797b0feSJiawei Wang 	rte_rwlock_t resize_mtrwl; /* Lock for resize objects. */
1172e6100c7bSLi Zhang 	struct aso_meter_list meters; /* Free ASO flow meter list. */
1173e6100c7bSLi Zhang 	struct mlx5_aso_sq sq; /* SQ used by ASO flow meter. */
1174e6100c7bSLi Zhang 	struct mlx5_aso_mtr_pool **pools; /* ASO flow meter pool array. */
1175e6100c7bSLi Zhang };
1176e6100c7bSLi Zhang 
117724865366SAlexander Kozyrev /* Bulk management structure for ASO flow meter. */
117824865366SAlexander Kozyrev struct mlx5_mtr_bulk {
117924865366SAlexander Kozyrev 	uint32_t size; /* Number of ASO objects. */
118024865366SAlexander Kozyrev 	struct mlx5dr_action *action; /* HWS action */
118124865366SAlexander Kozyrev 	struct mlx5_devx_obj *devx_obj; /* DEVX object. */
118224865366SAlexander Kozyrev 	struct mlx5_aso_mtr *aso; /* Array of ASO objects. */
118324865366SAlexander Kozyrev };
118424865366SAlexander Kozyrev 
1185afb4aa4fSLi Zhang /* Meter management structure for global flow meter resource. */
1186afb4aa4fSLi Zhang struct mlx5_flow_mtr_mng {
1187afb4aa4fSLi Zhang 	struct mlx5_aso_mtr_pools_mng pools_mng;
1188afb4aa4fSLi Zhang 	/* Pools management structure for ASO flow meter pools. */
1189afb4aa4fSLi Zhang 	struct mlx5_flow_meter_def_policy *def_policy[MLX5_MTR_DOMAIN_MAX];
1190afb4aa4fSLi Zhang 	/* Default policy table. */
1191afb4aa4fSLi Zhang 	uint32_t def_policy_id;
1192afb4aa4fSLi Zhang 	/* Default policy id. */
1193e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
1194afb4aa4fSLi Zhang 	/** def_policy meter use count. */
1195afb4aa4fSLi Zhang 	struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
1196afb4aa4fSLi Zhang 	/* Meter drop table. */
119744432018SLi Zhang 	struct mlx5_flow_dv_matcher *
119844432018SLi Zhang 			drop_matcher[MLX5_MTR_DOMAIN_MAX][MLX5_REG_BITS];
1199afb4aa4fSLi Zhang 	/* Matcher meter in drop table. */
1200afb4aa4fSLi Zhang 	struct mlx5_flow_dv_matcher *def_matcher[MLX5_MTR_DOMAIN_MAX];
1201afb4aa4fSLi Zhang 	/* Default matcher in drop table. */
1202afb4aa4fSLi Zhang 	void *def_rule[MLX5_MTR_DOMAIN_MAX];
1203afb4aa4fSLi Zhang 	/* Default rule in drop table. */
120444432018SLi Zhang 	uint8_t max_mtr_bits;
120544432018SLi Zhang 	/* Indicate how many bits are used by meter id at the most. */
120644432018SLi Zhang 	uint8_t max_mtr_flow_bits;
120744432018SLi Zhang 	/* Indicate how many bits are used by meter flow id at the most. */
1208afb4aa4fSLi Zhang };
1209afb4aa4fSLi Zhang 
1210860897d2SBing Zhao /* Table key of the hash organization. */
1211860897d2SBing Zhao union mlx5_flow_tbl_key {
1212860897d2SBing Zhao 	struct {
1213860897d2SBing Zhao 		/* Table ID should be at the lowest address. */
12142d2cef5dSLi Zhang 		uint32_t level;	/**< Level of the table. */
12152d2cef5dSLi Zhang 		uint32_t id:22;	/**< ID of the table. */
12162d2cef5dSLi Zhang 		uint32_t dummy:1;	/**< Dummy table for DV API. */
12172d2cef5dSLi Zhang 		uint32_t is_fdb:1;	/**< 1 - FDB, 0 - NIC TX/RX. */
12182d2cef5dSLi Zhang 		uint32_t is_egress:1;	/**< 1 - egress, 0 - ingress. */
12192d2cef5dSLi Zhang 		uint32_t reserved:7;	/**< must be zero for comparison. */
1220860897d2SBing Zhao 	};
1221860897d2SBing Zhao 	uint64_t v64;			/**< Full 64-bit value of the key. */
1222860897d2SBing Zhao };
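
/*
 * Usage sketch (illustrative): the bit-fields are filled individually
 * while the v64 overlay is the value actually hashed and compared:
 *
 *	union mlx5_flow_tbl_key key = {
 *		.level = level,
 *		.id = id,
 *		.dummy = 0,
 *		.is_fdb = !!fdb,
 *		.is_egress = !!egress,
 *		.reserved = 0,
 *	};
 *
 *	then look up or insert with key.v64 as the 64-bit hash key.
 */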
1223860897d2SBing Zhao 
122479e35d0dSViacheslav Ovsiienko /* Table structure. */
122579e35d0dSViacheslav Ovsiienko struct mlx5_flow_tbl_resource {
122679e35d0dSViacheslav Ovsiienko 	void *obj; /**< Pointer to DR table object. */
122779e35d0dSViacheslav Ovsiienko };
122879e35d0dSViacheslav Ovsiienko 
1229b67b4ecbSDekel Peled #define MLX5_MAX_TABLES UINT16_MAX
12303c84f34eSOri Kam #define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
12315e61bcddSViacheslav Ovsiienko /* Reserve the last two tables for metadata register copy. */
12325e61bcddSViacheslav Ovsiienko #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
1233dd3c774fSViacheslav Ovsiienko #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
1234dd3c774fSViacheslav Ovsiienko /* Tables for metering splits should be added here. */
1235afb4aa4fSLi Zhang #define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 3)
1236afb4aa4fSLi Zhang #define MLX5_FLOW_TABLE_LEVEL_POLICY (MLX5_MAX_TABLES - 4)
1237afb4aa4fSLi Zhang #define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_POLICY
123824865366SAlexander Kozyrev #define MLX5_FLOW_TABLE_HWS_POLICY (MLX5_MAX_TABLES - 10)
1239b67b4ecbSDekel Peled #define MLX5_MAX_TABLES_FDB UINT16_MAX
1240ae67e3c4SGregory Etelson #define MLX5_FLOW_TABLE_PTYPE_RSS_NUM 1024
1241ae67e3c4SGregory Etelson #define MLX5_FLOW_TABLE_PTYPE_RSS_LAST (MLX5_MAX_TABLES - 11)
1242ae67e3c4SGregory Etelson #define MLX5_FLOW_TABLE_PTYPE_RSS_BASE \
1243ae67e3c4SGregory Etelson (1 + MLX5_FLOW_TABLE_PTYPE_RSS_LAST - MLX5_FLOW_TABLE_PTYPE_RSS_NUM)
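
/*
 * Example (illustrative): the ptype RSS groups occupy one contiguous
 * window of MLX5_FLOW_TABLE_PTYPE_RSS_NUM (1024) table levels ending at
 * MLX5_FLOW_TABLE_PTYPE_RSS_LAST, so group index i in [0, 1023] maps to
 * table level MLX5_FLOW_TABLE_PTYPE_RSS_BASE + i.
 */
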
1244b4c0ddbfSJiawei Wang #define MLX5_FLOW_TABLE_FACTOR 10
124579e35d0dSViacheslav Ovsiienko 
1246d85c7b5eSOri Kam /* ID generation structure. */
1247d85c7b5eSOri Kam struct mlx5_flow_id_pool {
1248d85c7b5eSOri Kam 	uint32_t *free_arr; /**< Pointer to the array of free values. */
1249d85c7b5eSOri Kam 	uint32_t base_index;
1250d85c7b5eSOri Kam 	/**< The next index that can be used without any free elements. */
1251d85c7b5eSOri Kam 	uint32_t *curr; /**< Pointer to the index to pop. */
12527be78d02SJosh Soref 	uint32_t *last; /**< Pointer to the last element in the empty array. */
125330a3687dSSuanming Mou 	uint32_t max_id; /**< Maximum id that can be allocated from the pool. */
1254d85c7b5eSOri Kam };
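
/*
 * Allocation sketch (illustrative, not the upstream implementation):
 * fresh IDs come from base_index until max_id is reached, while released
 * IDs are pushed back onto free_arr and popped first:
 *
 *	if (pool->curr == pool->free_arr)
 *		id = pool->base_index++;	(fails once max_id is hit)
 *	else
 *		id = *(--pool->curr);
 */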
1255d85c7b5eSOri Kam 
1256d133f4cdSViacheslav Ovsiienko /* Tx pacing queue structure - for Clock and Rearm queues. */
1257d133f4cdSViacheslav Ovsiienko struct mlx5_txpp_wq {
1258d133f4cdSViacheslav Ovsiienko 	/* Completion Queue related data. */
1259a7787bb0SMichael Baum 	struct mlx5_devx_cq cq_obj;
1260d133f4cdSViacheslav Ovsiienko 	uint32_t cq_ci:24;
1261d133f4cdSViacheslav Ovsiienko 	uint32_t arm_sn:2;
1262d133f4cdSViacheslav Ovsiienko 	/* Send Queue related data. */
126371011bd5SMichael Baum 	struct mlx5_devx_sq sq_obj;
1264d133f4cdSViacheslav Ovsiienko 	uint16_t sq_size; /* Number of WQEs in the queue. */
1265d133f4cdSViacheslav Ovsiienko 	uint16_t sq_ci; /* Next WQE to execute. */
1266d133f4cdSViacheslav Ovsiienko };
1267d133f4cdSViacheslav Ovsiienko 
126877522be0SViacheslav Ovsiienko /* Tx packet pacing internal timestamp. */
126977522be0SViacheslav Ovsiienko struct mlx5_txpp_ts {
1270e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) ci_ts;
1271e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) ts;
127277522be0SViacheslav Ovsiienko };
127377522be0SViacheslav Ovsiienko 
1274d133f4cdSViacheslav Ovsiienko /* Tx packet pacing structure. */
1275d133f4cdSViacheslav Ovsiienko struct mlx5_dev_txpp {
1276d133f4cdSViacheslav Ovsiienko 	pthread_mutex_t mutex; /* Pacing create/destroy mutex. */
1277d133f4cdSViacheslav Ovsiienko 	uint32_t refcnt; /* Pacing reference counter. */
1278d133f4cdSViacheslav Ovsiienko 	uint32_t freq; /* Timestamp frequency, Hz. */
1279d133f4cdSViacheslav Ovsiienko 	uint32_t tick; /* Completion tick duration in nanoseconds. */
1280d133f4cdSViacheslav Ovsiienko 	uint32_t test; /* Packet pacing test mode. */
1281d133f4cdSViacheslav Ovsiienko 	int32_t skew; /* Scheduling skew. */
1282d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle; /* Periodic interrupt. */
12831f66ac5bSOphir Munk 	void *echan; /* Event Channel. */
1284d133f4cdSViacheslav Ovsiienko 	struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
1285551c94c8SViacheslav Ovsiienko 	struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
12861f66ac5bSOphir Munk 	void *pp; /* Packet pacing context. */
1287aef1e20eSViacheslav Ovsiienko 	uint16_t pp_id; /* Packet pacing context index. */
128877522be0SViacheslav Ovsiienko 	uint16_t ts_n; /* Number of captured timestamps. */
12897be78d02SJosh Soref 	uint16_t ts_p; /* Current position in the statistics timestamp window. */
129077522be0SViacheslav Ovsiienko 	struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
129177522be0SViacheslav Ovsiienko 	struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
129277522be0SViacheslav Ovsiienko 	uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
129377522be0SViacheslav Ovsiienko 	/* Statistics counters. */
1294e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
1295e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
1296e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
1297e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
1298e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
1299e12a0166STyler Retzlaff 	RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
1300d133f4cdSViacheslav Ovsiienko };
1301d133f4cdSViacheslav Ovsiienko 
1302575740d1SViacheslav Ovsiienko /* Sample ID information of eCPRI flex parser structure. */
1303575740d1SViacheslav Ovsiienko struct mlx5_ecpri_parser_profile {
1304daa38a89SBing Zhao 	uint32_t num;		/* Actual number of samples. */
1305daa38a89SBing Zhao 	uint32_t ids[8];	/* Sample IDs for this profile. */
1306daa38a89SBing Zhao 	uint8_t offset[8];	/* Bytes offset of each parser. */
1307daa38a89SBing Zhao 	void *obj;		/* Flex parser node object. */
1308daa38a89SBing Zhao };
1309daa38a89SBing Zhao 
1310f5f4c482SXueming Li /* Max member ports per bonding device. */
1311f5f4c482SXueming Li #define MLX5_BOND_MAX_PORTS 2
1312f5f4c482SXueming Li 
1313f5f4c482SXueming Li /* Bonding device information. */
1314f5f4c482SXueming Li struct mlx5_bond_info {
1315f5f4c482SXueming Li 	int n_port; /* Number of bond member ports. */
1316f5f4c482SXueming Li 	uint32_t ifindex;
1317f5f4c482SXueming Li 	char ifname[MLX5_NAMESIZE + 1];
1318f5f4c482SXueming Li 	struct {
1319f5f4c482SXueming Li 		char ifname[MLX5_NAMESIZE + 1];
1320f5f4c482SXueming Li 		uint32_t ifindex;
1321f5f4c482SXueming Li 		struct rte_pci_addr pci_addr;
1322f5f4c482SXueming Li 	} ports[MLX5_BOND_MAX_PORTS];
1323f5f4c482SXueming Li };
1324f5f4c482SXueming Li 
1325ee9e5fadSBing Zhao /* Number of connection tracking objects per pool: must be a power of 2. */
1326ee9e5fadSBing Zhao #define MLX5_ASO_CT_ACTIONS_PER_POOL 64
1327ee9e5fadSBing Zhao 
13282db75e8bSBing Zhao /* Generate incremental and unique CT index from pool and offset. */
13292db75e8bSBing Zhao #define MLX5_MAKE_CT_IDX(pool, offset) \
13302db75e8bSBing Zhao 	((pool) * MLX5_ASO_CT_ACTIONS_PER_POOL + (offset) + 1)
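
/*
 * Example (illustrative): the inverse mapping of MLX5_MAKE_CT_IDX; as
 * MLX5_ASO_CT_ACTIONS_PER_POOL is a power of 2 the division and modulo
 * reduce to shift and mask operations:
 *
 *	pool_idx = (ct_idx - 1) / MLX5_ASO_CT_ACTIONS_PER_POOL;
 *	offset   = (ct_idx - 1) % MLX5_ASO_CT_ACTIONS_PER_POOL;
 */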
13312db75e8bSBing Zhao 
1332ee9e5fadSBing Zhao /* ASO Conntrack state. */
1333ee9e5fadSBing Zhao enum mlx5_aso_ct_state {
1334ee9e5fadSBing Zhao 	ASO_CONNTRACK_FREE, /* Inactive, in the free list. */
1335ee9e5fadSBing Zhao 	ASO_CONNTRACK_WAIT, /* WQE sent in the SQ. */
1336478ba4bbSSuanming Mou 	ASO_CONNTRACK_WAIT_ASYNC, /* CQE will be handled by async pull. */
1337ee9e5fadSBing Zhao 	ASO_CONNTRACK_READY, /* CQE received w/o error. */
1338ee9e5fadSBing Zhao 	ASO_CONNTRACK_QUERY, /* WQE for query sent. */
1339ee9e5fadSBing Zhao 	ASO_CONNTRACK_MAX, /* Guard. */
1340ee9e5fadSBing Zhao };
1341ee9e5fadSBing Zhao 
1342ee9e5fadSBing Zhao /* Generic ASO connection tracking structure. */
1343ee9e5fadSBing Zhao struct mlx5_aso_ct_action {
1344463170a7SSuanming Mou 	union {
1345478ba4bbSSuanming Mou 		/* SWS mode struct. */
1346478ba4bbSSuanming Mou 		struct {
1347463170a7SSuanming Mou 			/* Pointer to the next ASO CT. Used only in SWS. */
1348478ba4bbSSuanming Mou 			LIST_ENTRY(mlx5_aso_ct_action) next;
1349463170a7SSuanming Mou 		};
1350478ba4bbSSuanming Mou 		/* HWS mode struct. */
1351478ba4bbSSuanming Mou 		struct {
1352478ba4bbSSuanming Mou 			/* Pointer to action pool. Used only in HWS. */
1353478ba4bbSSuanming Mou 			struct mlx5_aso_ct_pool *pool;
1354478ba4bbSSuanming Mou 		};
1355478ba4bbSSuanming Mou 	};
1356478ba4bbSSuanming Mou 	/* General action object for original dir. */
1357478ba4bbSSuanming Mou 	void *dr_action_orig;
1358478ba4bbSSuanming Mou 	/* General action object for reply dir. */
1359478ba4bbSSuanming Mou 	void *dr_action_rply;
1360e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
136104aa2716SDariusz Sosnowski 	uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
1362ee9e5fadSBing Zhao 	uint16_t peer; /* The only peer port index that could also use this CT. */
1363e12a0166STyler Retzlaff 	RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
1364ee9e5fadSBing Zhao 	bool is_original; /* The direction of the DR action to be used. */
1365ee9e5fadSBing Zhao };
1366ee9e5fadSBing Zhao 
1367ebaf1b31SBing Zhao /* CT action object state update. */
1368ebaf1b31SBing Zhao #define MLX5_ASO_CT_UPDATE_STATE(c, s) \
1369e12a0166STyler Retzlaff 	rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
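
/*
 * Usage sketch (illustrative): the macro pairs with a relaxed atomic
 * load on the reader side, e.g. marking an object and polling it:
 *
 *	MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);
 *	...
 *	if (rte_atomic_load_explicit(&ct->state,
 *				     rte_memory_order_relaxed) ==
 *	    ASO_CONNTRACK_READY)
 *		...
 */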
1370ebaf1b31SBing Zhao 
1371463170a7SSuanming Mou #ifdef PEDANTIC
1372463170a7SSuanming Mou #pragma GCC diagnostic ignored "-Wpedantic"
1373463170a7SSuanming Mou #endif
1374463170a7SSuanming Mou 
1375ee9e5fadSBing Zhao /* ASO connection tracking software pool definition. */
1376ee9e5fadSBing Zhao struct mlx5_aso_ct_pool {
1377ee9e5fadSBing Zhao 	uint16_t index; /* Pool index in pools array. */
1378463170a7SSuanming Mou 	/* Free ASO CT index in the pool. Used by HWS. */
1379463170a7SSuanming Mou 	struct mlx5_indexed_pool *cts;
1380ee9e5fadSBing Zhao 	struct mlx5_devx_obj *devx_obj;
1381463170a7SSuanming Mou 	union {
1382463170a7SSuanming Mou 		void *dummy_action;
1383463170a7SSuanming Mou 		/* Dummy action to increase the reference count in the driver. */
1384463170a7SSuanming Mou 		struct mlx5dr_action *dr_action;
1385463170a7SSuanming Mou 		/* HWS action. */
1386463170a7SSuanming Mou 	};
1387463170a7SSuanming Mou 	struct mlx5_aso_sq *sq; /* Async ASO SQ. */
1388463170a7SSuanming Mou 	struct mlx5_aso_sq *shared_sq; /* Shared ASO SQ. */
13890fc536d5SStephen Hemminger 	struct mlx5_aso_ct_action actions[];
1390ee9e5fadSBing Zhao 	/* CT action structures bulk. */
1391ee9e5fadSBing Zhao };
1392ee9e5fadSBing Zhao 
1393ee9e5fadSBing Zhao LIST_HEAD(aso_ct_list, mlx5_aso_ct_action);
1394ee9e5fadSBing Zhao 
1395463170a7SSuanming Mou #define MLX5_ASO_CT_SQ_NUM 16
1396463170a7SSuanming Mou 
1397ee9e5fadSBing Zhao /* Pools management structure for ASO connection tracking pools. */
1398ee9e5fadSBing Zhao struct mlx5_aso_ct_pools_mng {
1399ee9e5fadSBing Zhao 	struct mlx5_aso_ct_pool **pools;
1400ee9e5fadSBing Zhao 	uint16_t n; /* Total number of pools. */
1401ee9e5fadSBing Zhao 	uint16_t next; /* Number of pools in use, index of next free pool. */
1402463170a7SSuanming Mou 	uint32_t nb_sq; /* Number of ASO SQ. */
1403ee9e5fadSBing Zhao 	rte_spinlock_t ct_sl; /* The ASO CT free list lock. */
1404ee9e5fadSBing Zhao 	rte_rwlock_t resize_rwl; /* The ASO CT pool resize lock. */
1405ee9e5fadSBing Zhao 	struct aso_ct_list free_cts; /* Free ASO CT objects list. */
14060fc536d5SStephen Hemminger 	struct mlx5_aso_sq aso_sqs[]; /* ASO queue objects. */
1407ee9e5fadSBing Zhao };
1408ee9e5fadSBing Zhao 
1409463170a7SSuanming Mou #ifdef PEDANTIC
1410463170a7SSuanming Mou #pragma GCC diagnostic error "-Wpedantic"
1411463170a7SSuanming Mou #endif
1412463170a7SSuanming Mou 
1413a89f6433SRongwei Liu /* LAG attr. */
1414a89f6433SRongwei Liu struct mlx5_lag {
1415a89f6433SRongwei Liu 	uint8_t tx_remap_affinity[16]; /* The PF port number of each affinity slot. */
1416a89f6433SRongwei Liu 	uint8_t affinity_mode; /* TIS- or hash-based affinity. */
1417a89f6433SRongwei Liu };
1418a89f6433SRongwei Liu 
14199086ac09SGregory Etelson /* DevX flex parser context. */
14209086ac09SGregory Etelson struct mlx5_flex_parser_devx {
14219086ac09SGregory Etelson 	struct mlx5_list_entry entry;  /* List element at the beginning. */
14229086ac09SGregory Etelson 	uint32_t num_samples;
1423f1324a17SRongwei Liu 	uint8_t anchor_id;
14249086ac09SGregory Etelson 	void *devx_obj;
14259086ac09SGregory Etelson 	struct mlx5_devx_graph_node_attr devx_conf;
1426bc0a9303SRongwei Liu 	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
1427bc0a9303SRongwei Liu 	struct mlx5_devx_match_sample_info_query_attr sample_info[MLX5_GRAPH_NODE_SAMPLE_NUM];
14289086ac09SGregory Etelson };
14299086ac09SGregory Etelson 
14307be78d02SJosh Soref /* Pattern field descriptor - how to translate flex pattern into samples. */
1431b293e8e4SViacheslav Ovsiienko __extension__
1432b293e8e4SViacheslav Ovsiienko struct mlx5_flex_pattern_field {
1433b293e8e4SViacheslav Ovsiienko 	uint16_t width:6;
1434b293e8e4SViacheslav Ovsiienko 	uint16_t shift:5;
1435b293e8e4SViacheslav Ovsiienko 	uint16_t reg_id:5;
1436b293e8e4SViacheslav Ovsiienko };
1437813a1db2SRongwei Liu 
1438b293e8e4SViacheslav Ovsiienko #define MLX5_INVALID_SAMPLE_REG_ID 0x1F
1439b293e8e4SViacheslav Ovsiienko 
1440db25cadcSViacheslav Ovsiienko /* Port flex item context. */
1441db25cadcSViacheslav Ovsiienko struct mlx5_flex_item {
1442db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
1443e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
1444b293e8e4SViacheslav Ovsiienko 	enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
1445b293e8e4SViacheslav Ovsiienko 	uint32_t mapnum; /* Number of pattern translation entries. */
1446b293e8e4SViacheslav Ovsiienko 	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
1447db25cadcSViacheslav Ovsiienko };
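
/*
 * Translation sketch (illustrative): walking the map of a flex item;
 * entries whose reg_id is MLX5_INVALID_SAMPLE_REG_ID consume pattern
 * bits without matching on any sample register:
 *
 *	for (i = 0; i < flex->mapnum; i++) {
 *		const struct mlx5_flex_pattern_field *f = &flex->map[i];
 *
 *		if (f->reg_id == MLX5_INVALID_SAMPLE_REG_ID)
 *			continue;
 *		match f->width bits, shifted by f->shift, in sample
 *		register f->reg_id;
 *	}
 */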
1448db25cadcSViacheslav Ovsiienko 
1449813a1db2SRongwei Liu /*
1450813a1db2SRongwei Liu  * Sample an IPv6 address and the first dword of SRv6 header.
1451813a1db2SRongwei Liu  * Then it is 16 + 4 = 20 bytes which is 5 dwords.
1452813a1db2SRongwei Liu  */
1453813a1db2SRongwei Liu #define MLX5_SRV6_SAMPLE_NUM 5
1454bc0a9303SRongwei Liu /* Mlx5 internal flex parser profile structure. */
1455bc0a9303SRongwei Liu struct mlx5_internal_flex_parser_profile {
1456e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) refcnt;
1457bc0a9303SRongwei Liu 	struct mlx5_flex_item flex; /* Hold map info for modify field. */
1458bc0a9303SRongwei Liu };
1459bc0a9303SRongwei Liu 
1460f31a141eSMichael Savisko struct mlx5_send_to_kernel_action {
1461f31a141eSMichael Savisko 	void *action;
1462f31a141eSMichael Savisko 	void *tbl;
1463f31a141eSMichael Savisko };
1464f31a141eSMichael Savisko 
14654d368e1dSXiaoyu Min #define HWS_CNT_ASO_SQ_NUM 4
14664d368e1dSXiaoyu Min 
14674d368e1dSXiaoyu Min struct mlx5_hws_aso_mng {
14684d368e1dSXiaoyu Min 	uint16_t sq_num;
14694d368e1dSXiaoyu Min 	struct mlx5_aso_sq sqs[HWS_CNT_ASO_SQ_NUM];
14704d368e1dSXiaoyu Min };
14714d368e1dSXiaoyu Min 
14724d368e1dSXiaoyu Min struct mlx5_hws_cnt_svc_mng {
14734d368e1dSXiaoyu Min 	uint32_t refcnt;
14744d368e1dSXiaoyu Min 	uint32_t service_core;
14754d368e1dSXiaoyu Min 	uint32_t query_interval;
1476a7ba40b2SThomas Monjalon 	rte_thread_t service_thread;
14774d368e1dSXiaoyu Min 	uint8_t svc_running;
147827595cd8STyler Retzlaff 	alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_aso_mng aso_mng;
14794d368e1dSXiaoyu Min };
14804d368e1dSXiaoyu Min 
1481414a0cb5SOri Kam #define MLX5_FLOW_HW_TAGS_MAX 12
14827a26bfecSBing Zhao #define MLX5_FLOW_NAT64_REGS_MAX 3
148304e740e6SGregory Etelson 
148404e740e6SGregory Etelson struct mlx5_dev_registers {
14855e9f9a28SGregory Etelson 	enum modify_reg aso_reg;
148604e740e6SGregory Etelson 	enum modify_reg hw_avl_tags[MLX5_FLOW_HW_TAGS_MAX];
14877a26bfecSBing Zhao 	enum modify_reg nat64_regs[MLX5_FLOW_NAT64_REGS_MAX];
148804e740e6SGregory Etelson };
148904e740e6SGregory Etelson 
149048041ccbSGregory Etelson #if defined(HAVE_MLX5DV_DR) && \
149148041ccbSGregory Etelson 	(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
149248041ccbSGregory Etelson 	 defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
149348041ccbSGregory Etelson #define HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT
149448041ccbSGregory Etelson #endif
149548041ccbSGregory Etelson 
1496f5177bdcSMichael Baum struct mlx5_geneve_tlv_options;
1497f5177bdcSMichael Baum 
1498edc80bbfSGavin Li enum mlx5_ipv6_tc_support {
1499edc80bbfSGavin Li 	MLX5_IPV6_TC_UNKNOWN = 0,
1500edc80bbfSGavin Li 	MLX5_IPV6_TC_FALLBACK,
1501edc80bbfSGavin Li 	MLX5_IPV6_TC_OK,
1502edc80bbfSGavin Li };
1503edc80bbfSGavin Li 
1504edc80bbfSGavin Li struct mlx5_common_nic_config {
1505edc80bbfSGavin Li 	enum mlx5_ipv6_tc_support ipv6_tc_fallback;
1506edc80bbfSGavin Li 	/* Whether ipv6 traffic class should use old value. */
1507edc80bbfSGavin Li };
1508edc80bbfSGavin Li 
150992d3a05eSMichael Baum /**
151092d3a05eSMichael Baum  * Physical device structure.
151192d3a05eSMichael Baum  * This device is created once per NIC to manage resources shared by all ports
151292d3a05eSMichael Baum  * under the same physical device.
151392d3a05eSMichael Baum  */
151492d3a05eSMichael Baum struct mlx5_physical_device {
151592d3a05eSMichael Baum 	LIST_ENTRY(mlx5_physical_device) next;
151692d3a05eSMichael Baum 	struct mlx5_dev_ctx_shared *sh; /* Created on shared device context. */
151792d3a05eSMichael Baum 	uint64_t guid; /* System image GUID, the unique ID of the physical device. */
1518f5177bdcSMichael Baum 	struct mlx5_geneve_tlv_options *tlv_options;
1519edc80bbfSGavin Li 	struct mlx5_common_nic_config config;
152092d3a05eSMichael Baum 	uint32_t refcnt;
152192d3a05eSMichael Baum };
152292d3a05eSMichael Baum 
152317e19bc4SViacheslav Ovsiienko /*
152417e19bc4SViacheslav Ovsiienko  * Shared InfiniBand device context for Master/Representors
152517e19bc4SViacheslav Ovsiienko  * which belong to the same IB device with multiple IB ports.
152617e19bc4SViacheslav Ovsiienko  */
15276e88bc42SOphir Munk struct mlx5_dev_ctx_shared {
15286e88bc42SOphir Munk 	LIST_ENTRY(mlx5_dev_ctx_shared) next;
152917e19bc4SViacheslav Ovsiienko 	uint32_t refcnt;
1530cf004fd3SMichael Baum 	uint32_t esw_mode:1; /* Whether E-Switch mode is enabled. */
1531f935ed4bSDekel Peled 	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
153296f85ec4SDong Zhou 	uint32_t steering_format_version:4;
153396f85ec4SDong Zhou 	/* Indicates the device steering logic format. */
1534e6100c7bSLi Zhang 	uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
1535ee9e5fadSBing Zhao 	uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
1536630a587bSRongwei Liu 	uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
15375c4d4917SSean Zhang 	uint32_t tunnel_header_2_3:1; /* tunnel_header_2_3 is supported. */
1538630a587bSRongwei Liu 	uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */
1539c1f0cdaeSDariusz Sosnowski 	uint32_t dr_root_drop_action_en:1; /* DR drop action is usable on root tables. */
15403c4338a4SJiawei Wang 	uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */
15413c4338a4SJiawei Wang 	uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */
15423c4338a4SJiawei Wang 	uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */
15431939eb6fSDariusz Sosnowski 	uint32_t shared_mark_enabled:1;
15441939eb6fSDariusz Sosnowski 	/* If mark action is enabled on Rxqs (shared E-Switch domain). */
154576895c7dSJiawei Wang 	uint32_t lag_rx_port_affinity_en:1;
154676895c7dSJiawei Wang 	/* lag_rx_port_affinity is supported. */
154704a4de75SMichael Baum 	uint32_t hws_max_log_bulk_sz:5;
154804a4de75SMichael Baum 	/* Log of the minimal number of HWS counters created (hard-coded). */
154904a4de75SMichael Baum 	uint32_t hws_max_nb_counters; /* Maximal number for HWS counters. */
155017e19bc4SViacheslav Ovsiienko 	uint32_t max_port; /* Maximal IB device port index. */
1551f5f4c482SXueming Li 	struct mlx5_bond_info bond; /* Bonding information. */
15527af08c8fSMichael Baum 	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
155392d3a05eSMichael Baum 	struct mlx5_physical_device *phdev; /* Backend physical device. */
15548791ff42SDekel Peled 	uint32_t tdn; /* Transport Domain number. */
1555d0b3ef1aSTal Shnaiderman 	char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
1556d0b3ef1aSTal Shnaiderman 	char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
155791d1cfafSMichael Baum 	struct mlx5_dev_cap dev_cap; /* Device capabilities. */
1558a13ec19cSMichael Baum 	struct mlx5_sh_config config; /* Device configuration. */
1559d133f4cdSViacheslav Ovsiienko 	int numa_node; /* Numa node of backing physical device. */
1560d133f4cdSViacheslav Ovsiienko 	/* Packet pacing related structure. */
1561d133f4cdSViacheslav Ovsiienko 	struct mlx5_dev_txpp txpp;
1562b2177648SViacheslav Ovsiienko 	/* Shared DV/DR flow data section. */
156339139371SViacheslav Ovsiienko 	uint32_t dv_meta_mask; /* flow META metadata supported mask. */
156439139371SViacheslav Ovsiienko 	uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
15657be78d02SJosh Soref 	uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
1566d1e64fbfSOri Kam 	void *fdb_domain; /* FDB Direct Rules name space handle. */
1567d1e64fbfSOri Kam 	void *rx_domain; /* RX Direct Rules name space handle. */
1568d1e64fbfSOri Kam 	void *tx_domain; /* TX Direct Rules name space handle. */
156924feb045SViacheslav Ovsiienko #ifndef RTE_ARCH_64
15705dfa003dSMichael Baum 	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR. */
157124feb045SViacheslav Ovsiienko 	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
157224feb045SViacheslav Ovsiienko 	/* UAR same-page access control required in 32bit implementations. */
157324feb045SViacheslav Ovsiienko #endif
1574d1559d66SSuanming Mou 	union {
1575d1559d66SSuanming Mou 		struct mlx5_hlist *flow_tbls; /* SWS flow table. */
1576d1559d66SSuanming Mou 		struct mlx5_hlist *groups; /* HWS flow group. */
1577d1559d66SSuanming Mou 	};
157813b5713aSRongwei Liu 	struct mlx5_hlist *mreg_cp_tbl;
157913b5713aSRongwei Liu 	/* Hash table of Rx metadata register copy table. */
15804ec6360dSGregory Etelson 	struct mlx5_flow_tunnel_hub *tunnel_hub;
1581860897d2SBing Zhao 	/* Direct Rules tables for FDB, NIC TX+RX */
1582da845ae9SViacheslav Ovsiienko 	void *dr_drop_action; /* Pointer to DR drop action, any domain. */
1583b41e47daSMoti Haimovsky 	void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
1584b2cd3918SJiawei Wang #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1585b2cd3918SJiawei Wang 	struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
1586b2cd3918SJiawei Wang #endif
1587e12a0166STyler Retzlaff 	RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
1588e12a0166STyler Retzlaff 	RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
1589e12a0166STyler Retzlaff 	RTE_ATOMIC(struct mlx5_hlist *) tag_table;
1590679f46c7SMatan Azrad 	struct mlx5_list *port_id_action_list; /* Port ID action list. */
1591679f46c7SMatan Azrad 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
1592679f46c7SMatan Azrad 	struct mlx5_list *sample_action_list; /* List of sample actions. */
1593679f46c7SMatan Azrad 	struct mlx5_list *dest_array_list;
159519784141SSuanming Mou 	/* List of destination array actions. */
15949086ac09SGregory Etelson 	struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
159604a4de75SMichael Baum 	struct mlx5_flow_counter_mng sws_cmng;
159704a4de75SMichael Baum 	/* SW steering counters management structure. */
1598b80726dcSSuanming Mou 	void *default_miss_action; /* Default miss action. */
1599014d1cbeSSuanming Mou 	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
1600e12a0166STyler Retzlaff 	RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
1601b2177648SViacheslav Ovsiienko 	/* Shared interrupt handler section. */
1602d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
1603d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
160417f95513SDmitry Kozlyuk 	struct rte_intr_handle *intr_handle_nl; /* Netlink interrupt handler. */
160521b7c452SOphir Munk 	void *devx_comp; /* DEVX async comp obj. */
1606a89f6433SRongwei Liu 	struct mlx5_devx_obj *tis[16]; /* TIS object. */
1607ae18a1aeSOri Kam 	struct mlx5_devx_obj *td; /* Transport domain. */
1608a89f6433SRongwei Liu 	struct mlx5_lag lag; /* LAG attributes */
16095dfa003dSMichael Baum 	struct mlx5_uar tx_uar; /* DevX UAR for Tx and Txpp and ASO SQs. */
16105dfa003dSMichael Baum 	struct mlx5_uar rx_uar; /* DevX UAR for Rx. */
1611b6e9c33cSMichael Baum 	struct mlx5_proc_priv *pppriv; /* Pointer to the primary process private data. */
1612575740d1SViacheslav Ovsiienko 	struct mlx5_ecpri_parser_profile ecpri_parser;
161300e57916SRongwei Liu 	struct mlx5_internal_flex_parser_profile srh_flex_parser; /* SRH flex parser structure. */
1614daa38a89SBing Zhao 	/* Flex parser profiles information. */
161509c25553SXueming Li 	LIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */
1616f935ed4bSDekel Peled 	struct mlx5_aso_age_mng *aso_age_mng;
1617f935ed4bSDekel Peled 	/* Management data for aging mechanism using ASO Flow Hit. */
1618f15f0c38SShiri Kuzin 	struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
1619f15f0c38SShiri Kuzin 	/* Management structure for geneve tlv option */
1620f15f0c38SShiri Kuzin 	rte_spinlock_t geneve_tlv_opt_sl; /* Lock for geneve tlv resource */
1621afb4aa4fSLi Zhang 	struct mlx5_flow_mtr_mng *mtrmng;
1622afb4aa4fSLi Zhang 	/* Meter management structure. */
1623463170a7SSuanming Mou 	struct mlx5_aso_ct_pools_mng *ct_mng; /* Management data for ASO CT in HWS only. */
162423233fd6SBing Zhao 	struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */
16253c4338a4SJiawei Wang 	unsigned int flow_max_priority;
16263c4338a4SJiawei Wang 	enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
1627ddb68e47SBing Zhao 	/* Availability of mreg_c's. */
162825025da3SSpike Du 	void *devx_channel_lwm;
162925025da3SSpike Du 	struct rte_intr_handle *intr_handle_lwm;
163025025da3SSpike Du 	pthread_mutex_t lwm_config_lock;
16312235fcdaSSpike Du 	uint32_t host_shaper_rate:8;
16322235fcdaSSpike Du 	uint32_t lwm_triggered:1;
16334d368e1dSXiaoyu Min 	struct mlx5_hws_cnt_svc_mng *cnt_svc;
16346ac2104aSSuanming Mou 	rte_spinlock_t cpool_lock;
16356ac2104aSSuanming Mou 	LIST_HEAD(hws_cpool_list, mlx5_hws_cnt_pool) hws_cpool_list; /* Count pool list. */
163604e740e6SGregory Etelson 	struct mlx5_dev_registers registers;
163791389890SOphir Munk 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
163817e19bc4SViacheslav Ovsiienko };
163917e19bc4SViacheslav Ovsiienko 
16402b36c30bSSuanming Mou /*
16412b36c30bSSuanming Mou  * Per-process private structure.
16422b36c30bSSuanming Mou  * Caution, secondary process may rebuild the struct during port start.
16432b36c30bSSuanming Mou  */
1644120dc4a7SYongseok Koh struct mlx5_proc_priv {
1645e12a0166STyler Retzlaff 	RTE_ATOMIC(void *) hca_bar;
16469b31fc90SViacheslav Ovsiienko 	/* Mapped HCA PCI BAR area. */
1647120dc4a7SYongseok Koh 	size_t uar_table_sz;
1648120dc4a7SYongseok Koh 	/* Size of UAR register table. */
16495dfa003dSMichael Baum 	struct mlx5_uar_data uar_table[];
1650120dc4a7SYongseok Koh 	/* Table of UAR registers for each process. */
1651120dc4a7SYongseok Koh };
1652120dc4a7SYongseok Koh 
16533bd26b23SSuanming Mou /* MTR profile list. */
16543bd26b23SSuanming Mou TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
16553f373f35SSuanming Mou /* MTR list. */
1656e6100c7bSLi Zhang TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
16573bd26b23SSuanming Mou 
165824865366SAlexander Kozyrev struct mlx5_mtr_config {
165924865366SAlexander Kozyrev 	uint32_t nb_meters; /**< Number of configured meters */
166024865366SAlexander Kozyrev 	uint32_t nb_meter_profiles; /**< Number of configured meter profiles */
166124865366SAlexander Kozyrev 	uint32_t nb_meter_policies; /**< Number of configured meter policies */
166224865366SAlexander Kozyrev };
166324865366SAlexander Kozyrev 
1664e1592b6cSSuanming Mou /* RSS description. */
1665e1592b6cSSuanming Mou struct mlx5_flow_rss_desc {
16660e04e1e2SXueming Li 	bool symmetric_hash_function; /**< Symmetric hash function */
1667e1592b6cSSuanming Mou 	uint32_t level;
1668e1592b6cSSuanming Mou 	uint32_t queue_num; /**< Number of entries in @p queue. */
1669295968d1SFerruh Yigit 	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
1670e1592b6cSSuanming Mou 	uint64_t hash_fields; /* Verbs Hash fields. */
1671e1592b6cSSuanming Mou 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
1672e1592b6cSSuanming Mou 	uint32_t key_len; /**< RSS hash key len. */
16733a2f674bSSuanming Mou 	uint32_t hws_flags; /**< HW steering action. */
1674e1592b6cSSuanming Mou 	uint32_t tunnel; /**< Queue in tunnel. */
1675fabf8a37SSuanming Mou 	uint32_t shared_rss; /**< Shared RSS index. */
1676fa7ad49eSAndrey Vesnovaty 	struct mlx5_ind_table_obj *ind_tbl;
1677fa7ad49eSAndrey Vesnovaty 	/**< Indirection table for shared RSS hash RX queues. */
1678e1592b6cSSuanming Mou 	union {
1679e1592b6cSSuanming Mou 		uint16_t *queue; /**< Destination queues. */
1680e1592b6cSSuanming Mou 		const uint16_t *const_q; /**< Const pointer convert. */
1681e1592b6cSSuanming Mou 	};
1682e1592b6cSSuanming Mou };
1683e1592b6cSSuanming Mou 
1684120dc4a7SYongseok Koh #define MLX5_PROC_PRIV(port_id) \
1685120dc4a7SYongseok Koh 	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
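
/*
 * Usage sketch (illustrative): per-process private data hangs off the
 * generic ethdev process_private pointer:
 *
 *	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(dev->data->port_id);
 *
 *	access ppriv->uar_table[] entries, sized by ppriv->uar_table_sz.
 */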
1686120dc4a7SYongseok Koh 
16876deb19e1SMichael Baum /* Verbs/DevX Rx queue elements. */
16886deb19e1SMichael Baum struct mlx5_rxq_obj {
16896deb19e1SMichael Baum 	LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
16906deb19e1SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
16916deb19e1SMichael Baum 	int fd; /* File descriptor for event channel */
16926deb19e1SMichael Baum 	union {
16936deb19e1SMichael Baum 		struct {
16946deb19e1SMichael Baum 			void *wq; /* Work Queue. */
16956deb19e1SMichael Baum 			void *ibv_cq; /* Completion Queue. */
16966deb19e1SMichael Baum 			void *ibv_channel;
16976deb19e1SMichael Baum 		};
16986e0a3637SMichael Baum 		struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
16996deb19e1SMichael Baum 		struct {
170009c25553SXueming Li 			struct mlx5_devx_rmp devx_rmp; /* RMP for shared RQ. */
17015cd33796SMichael Baum 			struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
17026deb19e1SMichael Baum 			void *devx_channel;
17036deb19e1SMichael Baum 		};
17046deb19e1SMichael Baum 	};
17056deb19e1SMichael Baum };
17066deb19e1SMichael Baum 
170787e2db37SMichael Baum /* Indirection table. */
170887e2db37SMichael Baum struct mlx5_ind_table_obj {
170987e2db37SMichael Baum 	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
1710e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
171187e2db37SMichael Baum 	union {
171287e2db37SMichael Baum 		void *ind_table; /**< Indirection table. */
171387e2db37SMichael Baum 		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
171487e2db37SMichael Baum 	};
171587e2db37SMichael Baum 	uint32_t queues_n; /**< Number of queues in the list. */
1716fa7ad49eSAndrey Vesnovaty 	uint16_t *queues; /**< Queue list. */
171787e2db37SMichael Baum };
171887e2db37SMichael Baum 
171985552726SMichael Baum /* Hash Rx queue. */
1720a0a45e8aSViacheslav Ovsiienko __extension__
172185552726SMichael Baum struct mlx5_hrxq {
1722e78e5408SMatan Azrad 	struct mlx5_list_entry entry; /* List entry. */
172384d33890SSuanming Mou 	uint32_t standalone:1; /* This object is used in a shared action. */
172485552726SMichael Baum 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
172585552726SMichael Baum 	union {
172685552726SMichael Baum 		void *qp; /* Verbs queue pair. */
172785552726SMichael Baum 		struct mlx5_devx_obj *tir; /* DevX TIR object. */
172885552726SMichael Baum 	};
1729f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
173085552726SMichael Baum 	void *action; /* DV QP action pointer. */
173185552726SMichael Baum #endif
17320e04e1e2SXueming Li 	bool symmetric_hash_function; /* Symmetric hash function */
17333a2f674bSSuanming Mou 	uint32_t hws_flags; /* Hw steering flags. */
173485552726SMichael Baum 	uint64_t hash_fields; /* Verbs Hash fields. */
173585552726SMichael Baum 	uint32_t rss_key_len; /* Hash key length in bytes. */
1736e1592b6cSSuanming Mou 	uint32_t idx; /* Hash Rx queue index. */
173785552726SMichael Baum 	uint8_t rss_key[]; /* Hash key. */
173885552726SMichael Baum };
173985552726SMichael Baum 
174086d259ceSMichael Baum /* Verbs/DevX Tx queue elements. */
174186d259ceSMichael Baum struct mlx5_txq_obj {
174286d259ceSMichael Baum 	LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
174386d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
174486d259ceSMichael Baum 	union {
174586d259ceSMichael Baum 		struct {
174686d259ceSMichael Baum 			void *cq; /* Completion Queue. */
174786d259ceSMichael Baum 			void *qp; /* Queue Pair. */
174886d259ceSMichael Baum 		};
174986d259ceSMichael Baum 		struct {
175086d259ceSMichael Baum 			struct mlx5_devx_obj *sq;
175186d259ceSMichael Baum 			/* DevX object for the SQ (send queue). */
175286d259ceSMichael Baum 			struct mlx5_devx_obj *tis; /* The TIS object. */
17537274b417SDariusz Sosnowski 			void *umem_buf_wq_buffer;
17547274b417SDariusz Sosnowski 			void *umem_obj_wq_buffer;
175586d259ceSMichael Baum 		};
175686d259ceSMichael Baum 		struct {
175786d259ceSMichael Baum 			struct rte_eth_dev *dev;
17585f04f70cSMichael Baum 			struct mlx5_devx_cq cq_obj;
175974e91860SMichael Baum 			/* DevX CQ object and its resources. */
176074e91860SMichael Baum 			struct mlx5_devx_sq sq_obj;
176174e91860SMichael Baum 			/* DevX SQ object and its resources. */
176286d259ceSMichael Baum 		};
176386d259ceSMichael Baum 	};
176486d259ceSMichael Baum };
176586d259ceSMichael Baum 
17664c6d80f1SMichael Baum enum mlx5_rxq_modify_type {
17674c6d80f1SMichael Baum 	MLX5_RXQ_MOD_ERR2RST, /* modify state from error to reset. */
17684c6d80f1SMichael Baum 	MLX5_RXQ_MOD_RST2RDY, /* modify state from reset to ready. */
17694c6d80f1SMichael Baum 	MLX5_RXQ_MOD_RDY2ERR, /* modify state from ready to error. */
17704c6d80f1SMichael Baum 	MLX5_RXQ_MOD_RDY2RST, /* modify state from ready to reset. */
17717158e46cSSpike Du 	MLX5_RXQ_MOD_RDY2RDY, /* modify state from ready to ready. */
17724c6d80f1SMichael Baum };
17734c6d80f1SMichael Baum 
17745d9f3c3fSMichael Baum enum mlx5_txq_modify_type {
17755d9f3c3fSMichael Baum 	MLX5_TXQ_MOD_RST2RDY, /* modify state from reset to ready. */
17765d9f3c3fSMichael Baum 	MLX5_TXQ_MOD_RDY2RST, /* modify state from ready to reset. */
17775d9f3c3fSMichael Baum 	MLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */
17785d9f3c3fSMichael Baum };
17795d9f3c3fSMichael Baum 
17804cda06c3SXueming Li struct mlx5_rxq_priv;
178125025da3SSpike Du struct mlx5_priv;
17824cda06c3SXueming Li 
17838bb2410eSOphir Munk /* HW objects operations structure. */
17848bb2410eSOphir Munk struct mlx5_obj_ops {
17855ceb3a02SXueming Li 	int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);
17865ceb3a02SXueming Li 	int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
178732287079SMichael Baum 	int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
17885ceb3a02SXueming Li 	int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
17895ceb3a02SXueming Li 	void (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);
179025025da3SSpike Du 	int (*rxq_event_get_lwm)(struct mlx5_priv *priv, int *rxq_idx, int *port_id);
179125ae7f1aSMichael Baum 	int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
179225ae7f1aSMichael Baum 			     struct mlx5_ind_table_obj *ind_tbl);
1793fa7ad49eSAndrey Vesnovaty 	int (*ind_table_modify)(struct rte_eth_dev *dev,
1794fa7ad49eSAndrey Vesnovaty 				const unsigned int log_n,
1795fa7ad49eSAndrey Vesnovaty 				const uint16_t *queues, const uint32_t queues_n,
1796fa7ad49eSAndrey Vesnovaty 				struct mlx5_ind_table_obj *ind_tbl);
179725ae7f1aSMichael Baum 	void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
17985a959cbfSMichael Baum 	int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
179985552726SMichael Baum 			int tunnel __rte_unused);
1800b8cc58c1SAndrey Vesnovaty 	int (*hrxq_modify)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
1801b8cc58c1SAndrey Vesnovaty 			   const uint8_t *rss_key,
1802b8cc58c1SAndrey Vesnovaty 			   uint64_t hash_fields,
18030e04e1e2SXueming Li 			   bool symmetric_hash_function,
1804b8cc58c1SAndrey Vesnovaty 			   const struct mlx5_ind_table_obj *ind_tbl);
180585552726SMichael Baum 	void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
18060c762e81SMichael Baum 	int (*drop_action_create)(struct rte_eth_dev *dev);
18070c762e81SMichael Baum 	void (*drop_action_destroy)(struct rte_eth_dev *dev);
1808f49f4483SMichael Baum 	int (*txq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
18095d9f3c3fSMichael Baum 	int (*txq_obj_modify)(struct mlx5_txq_obj *obj,
18105d9f3c3fSMichael Baum 			      enum mlx5_txq_modify_type type, uint8_t dev_port);
181186d259ceSMichael Baum 	void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
181223233fd6SBing Zhao 	int (*lb_dummy_queue_create)(struct rte_eth_dev *dev);
181323233fd6SBing Zhao 	void (*lb_dummy_queue_release)(struct rte_eth_dev *dev);
18148bb2410eSOphir Munk };
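
/*
 * Dispatch sketch (illustrative; it assumes the per-port private data
 * carries an obj_ops copy selected at probe time between the Verbs and
 * DevX implementations):
 *
 *	struct mlx5_priv *priv = dev->data->dev_private;
 *	int ret = priv->obj_ops.rxq_obj_new(rxq);
 *
 *	if (ret)
 *		roll back and report the error;
 */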
18158bb2410eSOphir Munk 
18164a42ac1fSMatan Azrad #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
18174a42ac1fSMatan Azrad 
1818d6708a9dSDariusz Sosnowski enum mlx5_ctrl_flow_type {
1819d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_GENERAL,
1820d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
1821d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_SQ_MISS,
1822d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
1823d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
1824d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
1825d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_LACP_RX,
1826d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
1827d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
1828d6708a9dSDariusz Sosnowski 	MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
182986f2907cSDariusz Sosnowski };
183086f2907cSDariusz Sosnowski 
183186f2907cSDariusz Sosnowski /** Additional info about control flow rule. */
1832d6708a9dSDariusz Sosnowski struct mlx5_ctrl_flow_info {
183386f2907cSDariusz Sosnowski 	/** Determines the kind of control flow rule. */
1834d6708a9dSDariusz Sosnowski 	enum mlx5_ctrl_flow_type type;
183586f2907cSDariusz Sosnowski 	union {
183686f2907cSDariusz Sosnowski 		/**
183786f2907cSDariusz Sosnowski 		 * If control flow is a SQ miss flow (root or not),
183886f2907cSDariusz Sosnowski 		 * then fields contains matching SQ number.
183986f2907cSDariusz Sosnowski 		 */
184086f2907cSDariusz Sosnowski 		uint32_t esw_mgr_sq;
184186f2907cSDariusz Sosnowski 		/**
184286f2907cSDariusz Sosnowski 		 * If control flow is a Tx representor matching,
184386f2907cSDariusz Sosnowski 		 * then fields contains matching SQ number.
184486f2907cSDariusz Sosnowski 		 */
184586f2907cSDariusz Sosnowski 		uint32_t tx_repr_sq;
1846a977e2b5SDariusz Sosnowski 		/** Contains data relevant for unicast control flow rules. */
1847a977e2b5SDariusz Sosnowski 		struct {
1848a977e2b5SDariusz Sosnowski 			/**
1849a977e2b5SDariusz Sosnowski 			 * If control flow is a unicast DMAC (or with VLAN) flow rule,
1850a977e2b5SDariusz Sosnowski 			 * then this field contains DMAC.
1851a977e2b5SDariusz Sosnowski 			 */
1852a977e2b5SDariusz Sosnowski 			struct rte_ether_addr dmac;
1853a977e2b5SDariusz Sosnowski 			/**
1854a977e2b5SDariusz Sosnowski 			 * If control flow is a unicast DMAC with VLAN flow rule,
1855a977e2b5SDariusz Sosnowski 			 * then this field contains VLAN ID.
1856a977e2b5SDariusz Sosnowski 			 */
1857a977e2b5SDariusz Sosnowski 			uint16_t vlan;
1858a977e2b5SDariusz Sosnowski 		} uc;
185986f2907cSDariusz Sosnowski 	};
186086f2907cSDariusz Sosnowski };
186186f2907cSDariusz Sosnowski 
18629660a4e6SDariusz Sosnowski /** Returns true if a control flow rule with unicast DMAC match on given address was created. */
18639660a4e6SDariusz Sosnowski bool mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
18649660a4e6SDariusz Sosnowski 
18659660a4e6SDariusz Sosnowski /**
18669660a4e6SDariusz Sosnowski  * Returns true if a control flow rule with unicast DMAC and VLAN match
18679660a4e6SDariusz Sosnowski  * on given values was created.
18689660a4e6SDariusz Sosnowski  */
18699660a4e6SDariusz Sosnowski bool mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
18709660a4e6SDariusz Sosnowski 					const struct rte_ether_addr *addr,
18719660a4e6SDariusz Sosnowski 					const uint16_t vid);
18729660a4e6SDariusz Sosnowski 
187386f2907cSDariusz Sosnowski /** Entry for tracking control flow rules in HWS. */
1874d6708a9dSDariusz Sosnowski struct mlx5_ctrl_flow_entry {
1875d6708a9dSDariusz Sosnowski 	LIST_ENTRY(mlx5_ctrl_flow_entry) next;
187686f2907cSDariusz Sosnowski 	/**
187786f2907cSDariusz Sosnowski 	 * Owner device is a port on behalf of which flow rule was created.
187886f2907cSDariusz Sosnowski 	 *
187986f2907cSDariusz Sosnowski 	 * It differs from the port which actually created the flow rule
188086f2907cSDariusz Sosnowski 	 * if and only if the flow rule was created on the transfer proxy
188186f2907cSDariusz Sosnowski 	 * port on behalf of a representor port.
188286f2907cSDariusz Sosnowski 	 */
18831939eb6fSDariusz Sosnowski 	struct rte_eth_dev *owner_dev;
188486f2907cSDariusz Sosnowski 	/** Pointer to flow rule handle. */
18851939eb6fSDariusz Sosnowski 	struct rte_flow *flow;
188686f2907cSDariusz Sosnowski 	/** Additional information about the control flow rule. */
1887d6708a9dSDariusz Sosnowski 	struct mlx5_ctrl_flow_info info;
18881939eb6fSDariusz Sosnowski };
18891939eb6fSDariusz Sosnowski 
189027d171b8SMaayan Kashani /* HW Steering port configuration passed to rte_flow_configure(). */
189127d171b8SMaayan Kashani struct mlx5_flow_hw_attr {
189227d171b8SMaayan Kashani 	struct rte_flow_port_attr port_attr;
189327d171b8SMaayan Kashani 	uint16_t nb_queue;
189427d171b8SMaayan Kashani 	struct rte_flow_queue_attr *queue_attr;
189527d171b8SMaayan Kashani 	bool nt_mode;
189627d171b8SMaayan Kashani };
189727d171b8SMaayan Kashani 
18982eece379SRongwei Liu /*
18992eece379SRongwei Liu  * Flow rule structure for flow engine mode control, focused on group 0.
19002eece379SRongwei Liu  * Applies to all supported domains.
19012eece379SRongwei Liu  */
19022eece379SRongwei Liu struct mlx5_dv_flow_info {
19032eece379SRongwei Liu 	LIST_ENTRY(mlx5_dv_flow_info) next;
19042eece379SRongwei Liu 	uint32_t orig_prio; /* Priority set by the user. */
19052eece379SRongwei Liu 	uint32_t flow_idx_high_prio;
19062eece379SRongwei Liu 	/* Flow index owned by standby mode. Priority is lower unless the DUP flag is set. */
19072eece379SRongwei Liu 	uint32_t flow_idx_low_prio;
19082eece379SRongwei Liu 	struct rte_flow_item *items;
19092eece379SRongwei Liu 	struct rte_flow_action *actions;
19102eece379SRongwei Liu 	struct rte_flow_attr attr;
19112eece379SRongwei Liu };
19122eece379SRongwei Liu 
191386647d46SThomas Monjalon struct rte_pmd_mlx5_flow_engine_mode_info {
191486647d46SThomas Monjalon 	enum rte_pmd_mlx5_flow_engine_mode mode;
19152eece379SRongwei Liu 	uint32_t mode_flag;
19162eece379SRongwei Liu 	/* The list is maintained in insertion order. */
19172eece379SRongwei Liu 	LIST_HEAD(hot_up_info, mlx5_dv_flow_info) hot_upgrade;
19182eece379SRongwei Liu };
19192eece379SRongwei Liu 
19209fa7c1cdSDariusz Sosnowski struct mlx5_flow_hw_ctrl_rx;
19219fa7c1cdSDariusz Sosnowski 
192215896eafSGregory Etelson enum mlx5_quota_state {
192315896eafSGregory Etelson 	MLX5_QUOTA_STATE_FREE,	/* quota not in use */
192415896eafSGregory Etelson 	MLX5_QUOTA_STATE_READY, /* quota is ready   */
192515896eafSGregory Etelson 	MLX5_QUOTA_STATE_WAIT	/* quota waits for WR completion */
192615896eafSGregory Etelson };
192715896eafSGregory Etelson 
192815896eafSGregory Etelson struct mlx5_quota {
1929e12a0166STyler Retzlaff 	RTE_ATOMIC(uint8_t) state; /* object state */
193015896eafSGregory Etelson 	uint8_t mode;  /* metering mode */
193115896eafSGregory Etelson 	/**
193215896eafSGregory Etelson 	 * Keep track of application update types.
193315896eafSGregory Etelson 	 * PMD does not allow 2 consecutive ADD updates.
193415896eafSGregory Etelson 	 */
193515896eafSGregory Etelson 	enum rte_flow_update_quota_op last_update;
193615896eafSGregory Etelson };
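
/*
 * Sketch of the intended state transitions (an assumption drawn from the
 * enum above, not a copy of driver code): a quota object is claimed by
 * moving READY -> WAIT before posting the ACCESS_ASO WR, and the completion
 * handler moves it back to READY. `qobj` is a struct mlx5_quota pointer.
 *
 *	uint8_t expected = MLX5_QUOTA_STATE_READY;
 *
 *	if (rte_atomic_compare_exchange_strong_explicit
 *	    (&qobj->state, &expected, MLX5_QUOTA_STATE_WAIT,
 *	     rte_memory_order_acquire, rte_memory_order_relaxed))
 *		... post the WR; completion sets MLX5_QUOTA_STATE_READY ...
 */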
193715896eafSGregory Etelson 
193815896eafSGregory Etelson /* Bulk management structure for flow quota. */
193915896eafSGregory Etelson struct mlx5_quota_ctx {
194015896eafSGregory Etelson 	struct mlx5dr_action *dr_action; /* HWS action */
194115896eafSGregory Etelson 	struct mlx5_devx_obj *devx_obj; /* DEVX ranged object. */
194215896eafSGregory Etelson 	struct mlx5_pmd_mr mr; /* MR for READ from MTR ASO */
194315896eafSGregory Etelson 	struct mlx5_aso_mtr_dseg **read_buf; /* Buffers for READ */
194415896eafSGregory Etelson 	struct mlx5_aso_sq *sq; /* SQs for sync/async ACCESS_ASO WRs */
194515896eafSGregory Etelson 	struct mlx5_indexed_pool *quota_ipool; /* Manage quota objects */
194615896eafSGregory Etelson };
194715896eafSGregory Etelson 
1948dbeba4cfSThomas Monjalon struct mlx5_priv {
1949df428ceeSYongseok Koh 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
19506e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
195191389890SOphir Munk 	uint32_t dev_port; /* Device port number. */
195246e10a4cSViacheslav Ovsiienko 	struct rte_pci_device *pci_dev; /* Backend PCI device. */
19536d13ea8eSOlivier Matz 	struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
1954ccdcba53SNélio Laranjeiro 	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
1955ccdcba53SNélio Laranjeiro 	/* Bit-field of MAC addresses owned by the PMD. */
1956e9086978SAdrien Mazarguil 	uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
1957e9086978SAdrien Mazarguil 	unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
1958771fa900SAdrien Mazarguil 	/* Device properties. */
1959771fa900SAdrien Mazarguil 	uint16_t mtu; /* Configured MTU. */
196051d5f8ecSNélio Laranjeiro 	unsigned int isolated:1; /* Whether isolated mode is enabled. */
19612b730263SAdrien Mazarguil 	unsigned int representor:1; /* Device is a port representor. */
1962299d7dc2SViacheslav Ovsiienko 	unsigned int master:1; /* Device is an E-Switch master. */
1963d133f4cdSViacheslav Ovsiienko 	unsigned int txpp_en:1; /* Tx packet pacing enabled. */
196483306d6cSShun Hao 	unsigned int sampler_en:1; /* Whether sampler is supported. */
19656bc327b9SSuanming Mou 	unsigned int mtr_en:1; /* Whether meter is supported. */
1966792e749eSSuanming Mou 	unsigned int mtr_reg_share:1; /* Whether meter REG_C sharing is supported. */
196723233fd6SBing Zhao 	unsigned int lb_used:1; /* Loopback queue is referred to. */
196822dc56cfSViacheslav Ovsiienko 	unsigned int rmv_notified:1; /* Notified about removal event */
1969082becbfSRaja Zidane 	uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
1970674afdf0SJiawei Wang 	uint32_t num_lag_ports:4; /* Number of ports can be bonded. */
1971fca8cba4SDavid Marchand 	uint32_t tunnel_enabled:1; /* If tunnel offloading is enabled on rxqs. */
19722b730263SAdrien Mazarguil 	uint16_t domain_id; /* Switch domain identifier. */
1973299d7dc2SViacheslav Ovsiienko 	uint16_t vport_id; /* Associated VF vport index (if any). */
1974d5c06b1bSViacheslav Ovsiienko 	uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
1975d5c06b1bSViacheslav Ovsiienko 	uint32_t vport_meta_mask; /* Used for vport index field match mask. */
1976b3880af2SJiawei Wang 	uint16_t representor_id; /* UINT16_MAX if not a representor. */
1977f5f4c482SXueming Li 	int32_t pf_bond; /* >=0, representor owner PF index in bonding. */
197811c73de9SDariusz Sosnowski 	int32_t mpesw_owner; /* >=0, representor owner PF index in MPESW. */
197911c73de9SDariusz Sosnowski 	int32_t mpesw_port; /* Related port index of MPESW device. < 0 - no MPESW. */
198011c73de9SDariusz Sosnowski 	bool mpesw_uplink; /* If true, port is an uplink port. */
1981fa2e14d4SViacheslav Ovsiienko 	unsigned int if_index; /* Associated kernel network device index. */
19822e22920bSAdrien Mazarguil 	/* RX/TX queues. */
19832e22920bSAdrien Mazarguil 	unsigned int rxqs_n; /* RX queues array size. */
19842e22920bSAdrien Mazarguil 	unsigned int txqs_n; /* TX queues array size. */
19858e8b44f2SSuanming Mou 	struct mlx5_external_q *ext_rxqs; /* External RX queues array. */
19861944fbc3SSuanming Mou 	struct mlx5_external_q *ext_txqs; /* External TX queues array. */
19874cda06c3SXueming Li 	struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
1988991b04f6SNélio Laranjeiro 	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
19897d6bf6b8SYongseok Koh 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
199029c1d8bbSNélio Laranjeiro 	struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
1991634efbc2SNelio Laranjeiro 	unsigned int (*reta_idx)[]; /* RETA index table. */
1992634efbc2SNelio Laranjeiro 	unsigned int reta_idx_n; /* RETA index size. */
199378be8852SNelio Laranjeiro 	struct mlx5_drop drop_queue; /* Flow drop queues. */
199445633c46SSuanming Mou 	void *root_drop_action; /* Pointer to root drop action. */
19951939eb6fSDariusz Sosnowski 	rte_spinlock_t hw_ctrl_lock;
1996d6708a9dSDariusz Sosnowski 	LIST_HEAD(hw_ctrl_flow, mlx5_ctrl_flow_entry) hw_ctrl_flows;
1997d6708a9dSDariusz Sosnowski 	LIST_HEAD(hw_ext_ctrl_flow, mlx5_ctrl_flow_entry) hw_ext_ctrl_flows;
199848db3b61SDariusz Sosnowski 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
1999483181f7SDariusz Sosnowski 	struct rte_flow_pattern_template *hw_tx_repr_tagging_pt;
2000483181f7SDariusz Sosnowski 	struct rte_flow_actions_template *hw_tx_repr_tagging_at;
2001483181f7SDariusz Sosnowski 	struct rte_flow_template_table *hw_tx_repr_tagging_tbl;
2002b4edeaf3SSuanming Mou 	struct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];
2003b4edeaf3SSuanming Mou 	/* RTE Flow rules. */
2004ab612adcSSuanming Mou 	uint32_t ctrl_flows; /* Control flow rules. */
2005d163fc2dSXueming Li 	rte_spinlock_t flow_list_lock;
20065eaf882eSMichael Baum 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
200793403560SDekel Peled 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
2008679f46c7SMatan Azrad 	struct mlx5_list *hrxqs; /* Hash Rx queues. */
20096e78005aSNélio Laranjeiro 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
2010894c4a8eSOri Kam 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
201115c80a12SDekel Peled 	/* Indirection tables. */
201215c80a12SDekel Peled 	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
20133a2f674bSSuanming Mou 	/* Standalone indirect tables. */
20143a2f674bSSuanming Mou 	LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
20153564e928SGregory Etelson 	/* Objects created with indirect list action */
20163564e928SGregory Etelson 	LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
2017684b9a1bSOri Kam 	/* Lock protecting the indirection table lists above. */
2018491b7137SMatan Azrad 	rte_rwlock_t ind_tbls_lock;
2019b5c8b3e7SAlexander Kozyrev 	uint32_t refcnt; /**< Reference counter. */
2021684b9a1bSOri Kam 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
2022a2364004SGregory Etelson 	uint32_t max_lro_msg_size;
202375ef62a9SNélio Laranjeiro 	uint32_t link_speed_capa; /* Link speed capabilities. */
2024a4193ae3SShahaf Shuler 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
2025ce9494d7STom Barbette 	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
202645a6df80SMichael Baum 	struct mlx5_port_config config; /* Port configuration. */
2027d10b09dbSOlivier Matz 	/* Context for Verbs allocator. */
202826c08b97SAdrien Mazarguil 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
202926c08b97SAdrien Mazarguil 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
2030c12671e3SMatan Azrad 	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
203124865366SAlexander Kozyrev 	struct mlx5_mtr_config mtr_config; /* Meter configuration */
203227efd5deSSuanming Mou 	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
2033e6100c7bSLi Zhang 	struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
2034a295c69aSShun Hao 	struct mlx5_l3t_tbl *mtr_profile_tbl; /* Meter index lookup table. */
203524865366SAlexander Kozyrev 	struct mlx5_flow_meter_profile *mtr_profile_arr; /* Profile array. */
2036efcce4dcSShun Hao 	struct mlx5_l3t_tbl *policy_idx_tbl; /* Policy index lookup table. */
203724865366SAlexander Kozyrev 	struct mlx5_flow_meter_policy *mtr_policy_arr; /* Policy array. */
203829efa63aSLi Zhang 	struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
203924865366SAlexander Kozyrev 	struct mlx5_mtr_bulk mtr_bulk; /* Meter index mapping for HWS */
204015896eafSGregory Etelson 	struct mlx5_quota_ctx quota_ctx; /* Quota index mapping for HWS */
204163bd1629SOri Kam 	uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
2042fbde4331SMatan Azrad 	uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
2043a4de9586SVu Pham 	struct mlx5_mp_id mp_id; /* ID of the process in multi-process mode. */
2044c2ddde79SWentao Cui 	LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
2045cc608e4dSSuanming Mou 	rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
20464a42ac1fSMatan Azrad 	uint32_t rss_shared_actions; /* RSS shared actions. */
2047cd00dce6SShani Peretz 	/* If true, indicates that we failed to allocate a q counter in the past. */
2048cd00dce6SShani Peretz 	bool q_counters_allocation_failure;
2049e6988afdSMatan Azrad 	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
2050e6988afdSMatan Azrad 	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
2051cd00dce6SShani Peretz 	/* DevX queue counter object for all hairpin queues of the port. */
2052cd00dce6SShani Peretz 	struct mlx5_devx_obj *q_counters_hairpin;
2053a89f6433SRongwei Liu 	uint32_t lag_affinity_idx; /* LAG mode queue 0 starting affinity index. */
2054db25cadcSViacheslav Ovsiienko 	rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
2055db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
2056db25cadcSViacheslav Ovsiienko 	/* Flex items created on the port. */
2057db25cadcSViacheslav Ovsiienko 	uint32_t flex_item_map; /* Map of allocated flex item elements. */
205824865366SAlexander Kozyrev 	uint32_t nb_queue; /* HW steering queue number. */
20594d368e1dSXiaoyu Min 	struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
2060e12a0166STyler Retzlaff 	RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
206186647d46SThomas Monjalon 	struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
20621770a0fcSDariusz Sosnowski 	struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
20631ea333d2SBing Zhao 	bool hws_rule_flushing; /**< Whether this port is in the rule flushing stage. */
2064b401400dSSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
206542431df9SSuanming Mou 	/* Item template list. */
206642431df9SSuanming Mou 	LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
2067836b5c9bSSuanming Mou 	/* Action template list. */
2068836b5c9bSSuanming Mou 	LIST_HEAD(flow_hw_at, rte_flow_actions_template) flow_hw_at;
2069b401400dSSuanming Mou 	struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
2070f5177bdcSMichael Baum 	/* Pointer to the GENEVE TLV options. */
2071f5177bdcSMichael Baum 	struct mlx5_geneve_tlv_options *tlv_options;
207304a4de75SMichael Baum 	uint32_t hws_strict_queue:1;
207404a4de75SMichael Baum 	/**< Whether all operations strictly happen on the same HWS queue. */
207504a4de75SMichael Baum 	uint32_t hws_age_req:1; /**< Whether this port has AGE indexed pool. */
2072b401400dSSuanming Mou 	/* HW steering queue polling mechanism job descriptor LIFO. */
2076b401400dSSuanming Mou 	struct mlx5_hw_q *hw_q;
2077d1559d66SSuanming Mou 	/* HW steering rte flow table list header. */
2078d1559d66SSuanming Mou 	LIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;
20798ce638efSTomer Shmilovich 	/* HW steering rte flow group list header */
20808ce638efSTomer Shmilovich 	LIST_HEAD(flow_hw_grp, mlx5_flow_group) flow_hw_grp;
2081773ca0e9SGregory Etelson 	struct mlx5dr_action *hw_push_vlan[MLX5DR_TABLE_TYPE_MAX];
2082773ca0e9SGregory Etelson 	struct mlx5dr_action *hw_pop_vlan[MLX5DR_TABLE_TYPE_MAX];
20831939eb6fSDariusz Sosnowski 	struct mlx5dr_action **hw_vport;
2084d1559d66SSuanming Mou 	/* HW steering global drop action. */
20851939eb6fSDariusz Sosnowski 	struct mlx5dr_action *hw_drop[2];
20861939eb6fSDariusz Sosnowski 	/* HW steering global tag action. */
20871939eb6fSDariusz Sosnowski 	struct mlx5dr_action *hw_tag[2];
20883dce73a2SSuanming Mou 	/* HW steering global default miss action. */
20893dce73a2SSuanming Mou 	struct mlx5dr_action *hw_def_miss;
209012dcc029SBar Neuman 	/* HW steering global send to kernel action. */
2091b2cd3918SJiawei Wang 	struct mlx5dr_action *hw_send_to_kernel[MLX5DR_TABLE_TYPE_MAX];
2092f1fecffaSDariusz Sosnowski 	/* HW steering create ongoing rte flow table list header. */
2093f1fecffaSDariusz Sosnowski 	LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo;
2094f13fab23SSuanming Mou 	struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
2095463170a7SSuanming Mou 	struct mlx5_aso_ct_pools_mng *ct_mng;
2096463170a7SSuanming Mou 	/* Management data for ASO connection tracking. */
2097463170a7SSuanming Mou 	struct mlx5_aso_ct_pool *hws_ctpool; /* HW steering's CT pool. */
209848fbb0e9SAlexander Kozyrev 	struct mlx5_aso_mtr_pool *hws_mpool; /* HW steering's Meter pool. */
20999fa7c1cdSDariusz Sosnowski 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
21009fa7c1cdSDariusz Sosnowski 	/**< HW steering templates used to create control flow rules. */
2101840f09fbSBing Zhao 
2102a190f25eSGregory Etelson 	struct rte_flow_actions_template *action_template_drop[MLX5DR_TABLE_TYPE_MAX];
2103840f09fbSBing Zhao 
2104840f09fbSBing Zhao 	/*
2105840f09fbSBing Zhao 	 * The NAT64 action can be shared among matchers per domain.
2106840f09fbSBing Zhao 	 * [0]: RTE_FLOW_NAT64_6TO4, [1]: RTE_FLOW_NAT64_4TO6
2107840f09fbSBing Zhao 	 * TODO: consider adding a *_MAX macro.
2108840f09fbSBing Zhao 	 */
2109840f09fbSBing Zhao 	struct mlx5dr_action *action_nat64[MLX5DR_TABLE_TYPE_MAX][2];
2110ae67e3c4SGregory Etelson 	struct mlx5_indexed_pool *ptype_rss_groups;
2111b401400dSSuanming Mou #endif
211230ff1d25SViacheslav Ovsiienko 	struct rte_eth_dev *shared_host; /* Host device for HW steering. */
2113e12a0166STyler Retzlaff 	RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
2114771fa900SAdrien Mazarguil };
2115771fa900SAdrien Mazarguil 
2116d6dc072aSGregory Etelson static __rte_always_inline bool
2117d6dc072aSGregory Etelson mlx5_hws_active(const struct rte_eth_dev *dev)
2118d6dc072aSGregory Etelson {
2119d6dc072aSGregory Etelson #if defined(HAVE_MLX5_HWS_SUPPORT)
2120d6dc072aSGregory Etelson 	const struct mlx5_priv *priv = dev->data->dev_private;
2121d6dc072aSGregory Etelson 
2122d6dc072aSGregory Etelson 	return priv->sh->config.dv_flow_en == 2;
2123d6dc072aSGregory Etelson #else
2124d6dc072aSGregory Etelson 	RTE_SET_USED(dev);
2125d6dc072aSGregory Etelson 	return false;
2126d6dc072aSGregory Etelson #endif
2127d6dc072aSGregory Etelson }
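
/*
 * Usage sketch: mlx5_hws_active() is the check for dispatching between the
 * HW steering (template API) and legacy DV paths; the callees below are
 * placeholders, not driver functions.
 *
 *	if (mlx5_hws_active(dev))
 *		return do_hws_path(dev);
 *	return do_dv_path(dev);
 */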
2128d6dc072aSGregory Etelson 
2129df428ceeSYongseok Koh #define PORT_ID(priv) ((priv)->dev_data->port_id)
2130df428ceeSYongseok Koh #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
213124865366SAlexander Kozyrev #define CTRL_QUEUE_ID(priv) ((priv)->nb_queue - 1)
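
/*
 * Usage sketch for the accessors above, assuming `priv` is a valid
 * struct mlx5_priv pointer:
 *
 *	uint16_t port = PORT_ID(priv);
 *	struct rte_eth_dev *dev = ETH_DEV(priv);
 *	uint32_t ctrl_queue = CTRL_QUEUE_ID(priv); // the last configured HWS queue
 */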
2132df428ceeSYongseok Koh 
213337cd4501SBing Zhao struct rte_hairpin_peer_info {
213437cd4501SBing Zhao 	uint32_t qp_id;
213537cd4501SBing Zhao 	uint32_t vhca_id;
213637cd4501SBing Zhao 	uint16_t peer_q;
213737cd4501SBing Zhao 	uint16_t tx_explicit;
213837cd4501SBing Zhao 	uint16_t manual_bind;
213937cd4501SBing Zhao };
214037cd4501SBing Zhao 
21415db9318fSHaifei Luo #define BUF_SIZE 1024
21425db9318fSHaifei Luo enum dr_dump_rec_type {
21435db9318fSHaifei Luo 	DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT = 4410,
21445db9318fSHaifei Luo 	DR_DUMP_REC_TYPE_PMD_MODIFY_HDR = 4420,
21455db9318fSHaifei Luo 	DR_DUMP_REC_TYPE_PMD_COUNTER = 4430,
21465db9318fSHaifei Luo };
21475db9318fSHaifei Luo 
21484359d9d1SGregory Etelson #if defined(HAVE_MLX5_HWS_SUPPORT)
21494359d9d1SGregory Etelson static __rte_always_inline struct mlx5_hw_q_job *
21504359d9d1SGregory Etelson flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
21514359d9d1SGregory Etelson {
21524359d9d1SGregory Etelson 	MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);
21534359d9d1SGregory Etelson 	return priv->hw_q[queue].job_idx ?
21544359d9d1SGregory Etelson 	       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;
21554359d9d1SGregory Etelson }
21564359d9d1SGregory Etelson 
21574359d9d1SGregory Etelson static __rte_always_inline void
21584359d9d1SGregory Etelson flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)
21594359d9d1SGregory Etelson {
21604359d9d1SGregory Etelson 	MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);
21614359d9d1SGregory Etelson 	priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
21624359d9d1SGregory Etelson }
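
/*
 * Usage sketch for the job LIFO above: an async flow operation pops a
 * descriptor, and returns it if the operation cannot be posted (or once it
 * completes). `post_wqe_failed` is a placeholder condition.
 *
 *	struct mlx5_hw_q_job *job = flow_hw_job_get(priv, queue);
 *
 *	if (job == NULL)
 *		return NULL; // all descriptors are in flight
 *	... fill job, post the async operation ...
 *	if (post_wqe_failed)
 *		flow_hw_job_put(priv, job, queue);
 */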
21634359d9d1SGregory Etelson 
21644359d9d1SGregory Etelson struct mlx5_hw_q_job *
21654359d9d1SGregory Etelson mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
21664359d9d1SGregory Etelson 			  const struct rte_flow_action_handle *handle,
21674359d9d1SGregory Etelson 			  void *user_data, void *query_data,
21684359d9d1SGregory Etelson 			  enum mlx5_hw_job_type type,
21694359d9d1SGregory Etelson 			  struct rte_flow_error *error);
21704359d9d1SGregory Etelson #endif
21714359d9d1SGregory Etelson 
2172c4b86201SMichael Baum /**
2173c4b86201SMichael Baum  * Indicates whether HW objects operations can be created by DevX.
2174c4b86201SMichael Baum  *
2175c4b86201SMichael Baum  * This function is used for both:
2176c4b86201SMichael Baum  *  Before creation - deciding whether to create HW objects operations by DevX.
2177c4b86201SMichael Baum  *  After creation - indicating whether HW objects operations were created by DevX.
2178c4b86201SMichael Baum  *
2179c4b86201SMichael Baum  * @param sh
2180c4b86201SMichael Baum  *   Pointer to shared device context.
2181c4b86201SMichael Baum  *
2182c4b86201SMichael Baum  * @return
2183c4b86201SMichael Baum  *   True if HW objects were created by DevX, False otherwise.
2184c4b86201SMichael Baum  */
2185c4b86201SMichael Baum static inline bool
2186c4b86201SMichael Baum mlx5_devx_obj_ops_en(struct mlx5_dev_ctx_shared *sh)
2187c4b86201SMichael Baum {
2188c4b86201SMichael Baum 	/*
2189c4b86201SMichael Baum 	 * When the advanced DR API is available, and both DV flow and
2190c4b86201SMichael Baum 	 * DevX are supported, HW objects operations are created by DevX.
2191c4b86201SMichael Baum 	 */
2192c4b86201SMichael Baum 	return (sh->cdev->config.devx && sh->config.dv_flow_en &&
2193c4b86201SMichael Baum 		sh->dev_cap.dest_tir);
2194c4b86201SMichael Baum }
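
/*
 * Usage sketch: the helper is meant to drive obj_ops selection at probe
 * time, roughly as below. `devx_obj_ops`/`ibv_obj_ops` stand for the DevX
 * and Verbs operation tables; the exact wiring lives in the OS-specific
 * probe code.
 *
 *	priv->obj_ops = mlx5_devx_obj_ops_en(sh) ? devx_obj_ops : ibv_obj_ops;
 */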
2195c4b86201SMichael Baum 
219611c73de9SDariusz Sosnowski /**
219711c73de9SDariusz Sosnowski  * Check if the port is either an MPESW physical device or a representor port.
219811c73de9SDariusz Sosnowski  *
219911c73de9SDariusz Sosnowski  * @param priv
220011c73de9SDariusz Sosnowski  *   Pointer to port's private data.
220111c73de9SDariusz Sosnowski  *
220211c73de9SDariusz Sosnowski  * @return
220311c73de9SDariusz Sosnowski  *   True if the port is a physical device or a representor in an MPESW setup.
220411c73de9SDariusz Sosnowski  *   False otherwise, or if MPESW was not configured.
220511c73de9SDariusz Sosnowski  */
220611c73de9SDariusz Sosnowski static inline bool
220711c73de9SDariusz Sosnowski mlx5_is_port_on_mpesw_device(struct mlx5_priv *priv)
220811c73de9SDariusz Sosnowski {
220911c73de9SDariusz Sosnowski 	return priv->mpesw_port >= 0;
221011c73de9SDariusz Sosnowski }
221111c73de9SDariusz Sosnowski 
22124d803a72SOlga Shern /* mlx5.c */
22134d803a72SOlga Shern 
22144d803a72SOlga Shern int mlx5_getenv_int(const char *);
2215120dc4a7SYongseok Koh int mlx5_proc_priv_init(struct rte_eth_dev *dev);
22162b36c30bSSuanming Mou void mlx5_proc_priv_uninit(struct rte_eth_dev *dev);
2217c9ba7523SRaslan Darawsheh int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
2218c9ba7523SRaslan Darawsheh 			      struct rte_eth_udp_tunnel *udp_tunnel);
221956bb3c84SXueming Li uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev);
2220b142387bSThomas Monjalon int mlx5_dev_close(struct rte_eth_dev *dev);
22217af08c8fSMichael Baum int mlx5_net_remove(struct mlx5_common_device *cdev);
2222f926cce3SXueming Li bool mlx5_is_hpf(struct rte_eth_dev *dev);
2223919488fbSXueming Li bool mlx5_is_sf_repr(struct rte_eth_dev *dev);
2224f935ed4bSDekel Peled void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
222525025da3SSpike Du int mlx5_lwm_setup(struct mlx5_priv *priv);
222625025da3SSpike Du void mlx5_lwm_unset(struct mlx5_dev_ctx_shared *sh);
2227f7e95215SViacheslav Ovsiienko 
2228f7e95215SViacheslav Ovsiienko /* Macro to iterate over all valid ports for mlx5 driver. */
222956bb3c84SXueming Li #define MLX5_ETH_FOREACH_DEV(port_id, dev) \
223056bb3c84SXueming Li 	for (port_id = mlx5_eth_find_next(0, dev); \
2231f7e95215SViacheslav Ovsiienko 	     port_id < RTE_MAX_ETHPORTS; \
223256bb3c84SXueming Li 	     port_id = mlx5_eth_find_next(port_id + 1, dev))
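
/*
 * Usage sketch, with `odev` the rte_device whose mlx5 ports are visited:
 *
 *	uint16_t port_id;
 *
 *	MLX5_ETH_FOREACH_DEV(port_id, odev) {
 *		struct rte_eth_dev *edev = &rte_eth_devices[port_id];
 *		...
 *	}
 */
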
2233e3032e9cSMichael Baum void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
2234e3032e9cSMichael Baum 			      struct mlx5_hca_attr *hca_attr);
22352eb4d010SOphir Munk struct mlx5_dev_ctx_shared *
2236a729d2f0SMichael Baum mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
2237a729d2f0SMichael Baum 			  struct mlx5_kvargs_ctrl *mkvlist);
223891389890SOphir Munk void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
2239fec28ca0SDmitry Kozlyuk int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
22402eb4d010SOphir Munk void mlx5_free_table_hash_list(struct mlx5_priv *priv);
22412eb4d010SOphir Munk int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
224245a6df80SMichael Baum void mlx5_set_min_inline(struct mlx5_priv *priv);
22432eb4d010SOphir Munk void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
2244a729d2f0SMichael Baum int mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
2245a729d2f0SMichael Baum 				   struct mlx5_kvargs_ctrl *mkvlist);
2246a729d2f0SMichael Baum int mlx5_port_args_config(struct mlx5_priv *priv,
2247a729d2f0SMichael Baum 			  struct mlx5_kvargs_ctrl *mkvlist,
224845a6df80SMichael Baum 			  struct mlx5_port_config *config);
2249a729d2f0SMichael Baum void mlx5_port_args_set_used(const char *name, uint16_t port_id,
2250a729d2f0SMichael Baum 			     struct mlx5_kvargs_ctrl *mkvlist);
2251daa38a89SBing Zhao bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
2252daa38a89SBing Zhao int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
2253cf8971dbSMichael Baum void mlx5_flow_counter_mode_config(struct rte_eth_dev *dev);
2254f935ed4bSDekel Peled int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
2255afb4aa4fSLi Zhang int mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh);
2256ee9e5fadSBing Zhao int mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh);
2257f5177bdcSMichael Baum struct mlx5_physical_device *
2258f5177bdcSMichael Baum mlx5_get_locked_physical_device(struct mlx5_priv *priv);
2259f5177bdcSMichael Baum void mlx5_unlock_physical_device(void);
22604d803a72SOlga Shern 
2261771fa900SAdrien Mazarguil /* mlx5_ethdev.c */
2262771fa900SAdrien Mazarguil 
22631256805dSOphir Munk int mlx5_dev_configure(struct rte_eth_dev *dev);
2264cb95feefSXueming Li int mlx5_representor_info_get(struct rte_eth_dev *dev,
2265cb95feefSXueming Li 			      struct rte_eth_representor_info *info);
2266cb95feefSXueming Li #define MLX5_REPRESENTOR_ID(pf, type, repr) \
2267cb95feefSXueming Li 		(((pf) << 14) + ((type) << 12) + ((repr) & 0xfff))
2268cb95feefSXueming Li #define MLX5_REPRESENTOR_REPR(repr_id) \
2269cb95feefSXueming Li 		((repr_id) & 0xfff)
2270cb95feefSXueming Li #define MLX5_REPRESENTOR_TYPE(repr_id) \
2271cb95feefSXueming Li 		(((repr_id) >> 12) & 3)
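
/*
 * Layout of the 16-bit representor ID built above: PF in bits 15:14,
 * type in bits 13:12, representor index in bits 11:0. Worked example
 * (arbitrary values):
 *
 *	uint16_t id = MLX5_REPRESENTOR_ID(1, 2, 5);
 *	// id == (1 << 14) + (2 << 12) + 5 == 24581
 *	// MLX5_REPRESENTOR_REPR(id) == 5
 *	// MLX5_REPRESENTOR_TYPE(id) == 2
 */
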
227291766faeSXueming Li uint16_t mlx5_representor_id_encode(const struct mlx5_switch_info *info,
227391766faeSXueming Li 				    enum rte_eth_representor_type hpf_type);
2274dec50e58SMichael Baum int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
2275dec50e58SMichael Baum int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
2276ba6a168aSSivaramakrishnan Venkat const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev,
2277ba6a168aSSivaramakrishnan Venkat 					      size_t *no_of_elements);
22781256805dSOphir Munk int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
22791256805dSOphir Munk int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
22801256805dSOphir Munk 			 struct rte_eth_hairpin_cap *cap);
2281ef9ee13fSOphir Munk eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
2282ef9ee13fSOphir Munk struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid);
2283ef9ee13fSOphir Munk struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev);
2284ef9ee13fSOphir Munk int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev);
22856a3446cfSDariusz Sosnowski uint64_t mlx5_get_restore_flags(struct rte_eth_dev *dev,
22866a3446cfSDariusz Sosnowski 				enum rte_eth_dev_operation op);
22871256805dSOphir Munk 
22881256805dSOphir Munk /* mlx5_ethdev_os.c */
22891256805dSOphir Munk 
229028743807STal Shnaiderman int mlx5_get_ifname(const struct rte_eth_dev *dev,
229128743807STal Shnaiderman 			char (*ifname)[MLX5_NAMESIZE]);
22923f8cb05dSAdrien Mazarguil unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
229398c4b12aSOphir Munk int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
2294af4f09f2SNélio Laranjeiro int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
22951256805dSOphir Munk int mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
2296e571ad55STom Barbette int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
22973692c7ecSNélio Laranjeiro int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
22983692c7ecSNélio Laranjeiro int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
22993692c7ecSNélio Laranjeiro 			   struct rte_eth_fc_conf *fc_conf);
23003692c7ecSNélio Laranjeiro int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
23013692c7ecSNélio Laranjeiro 			   struct rte_eth_fc_conf *fc_conf);
2302af4f09f2SNélio Laranjeiro void mlx5_dev_interrupt_handler(void *arg);
2303f15db67dSMatan Azrad void mlx5_dev_interrupt_handler_devx(void *arg);
230417f95513SDmitry Kozlyuk void mlx5_dev_interrupt_handler_nl(void *arg);
230562072098SOr Ami int mlx5_set_link_down(struct rte_eth_dev *dev);
230662072098SOr Ami int mlx5_set_link_up(struct rte_eth_dev *dev);
2307d3e0f392SMatan Azrad int mlx5_is_removed(struct rte_eth_dev *dev);
2308f872b4b9SNelio Laranjeiro int mlx5_sysfs_switch_info(unsigned int ifindex,
2309f872b4b9SNelio Laranjeiro 			   struct mlx5_switch_info *info);
231030a86157SViacheslav Ovsiienko void mlx5_translate_port_name(const char *port_name_in,
2311b2f3a381SDekel Peled 			      struct mlx5_switch_info *port_info_out);
2312c21e5facSXueming Li int mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
2313c21e5facSXueming Li 			 char *ifname);
23148a6a09f8SDekel Peled int mlx5_get_module_info(struct rte_eth_dev *dev,
23158a6a09f8SDekel Peled 			 struct rte_eth_dev_module_info *modinfo);
23168a6a09f8SDekel Peled int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
23178a6a09f8SDekel Peled 			   struct rte_dev_eeprom_info *info);
231898c4b12aSOphir Munk int mlx5_os_read_dev_stat(struct mlx5_priv *priv,
231998c4b12aSOphir Munk 			  const char *ctr_name, uint64_t *stat);
2320a687c3e6SBing Zhao int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats);
2321a687c3e6SBing Zhao int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master,
2322a687c3e6SBing Zhao 			uint16_t *n_stats, uint16_t *n_stats_sec);
232398c4b12aSOphir Munk void mlx5_os_stats_init(struct rte_eth_dev *dev);
2324e8482187SBing Zhao int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev);
232563bd1629SOri Kam 
2326771fa900SAdrien Mazarguil /* mlx5_mac.c */
2327771fa900SAdrien Mazarguil 
23283692c7ecSNélio Laranjeiro void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
23296d13ea8eSOlivier Matz int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
23303692c7ecSNélio Laranjeiro 		      uint32_t index, uint32_t vmdq);
23316d13ea8eSOlivier Matz int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
2332e0586a8dSNélio Laranjeiro int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
23336d13ea8eSOlivier Matz 			struct rte_ether_addr *mc_addr_set,
23346d13ea8eSOlivier Matz 			uint32_t nb_mc_addr);
2335771fa900SAdrien Mazarguil 
23362f97422eSNelio Laranjeiro /* mlx5_rss.c */
23372f97422eSNelio Laranjeiro 
23383692c7ecSNélio Laranjeiro int mlx5_rss_hash_update(struct rte_eth_dev *dev,
23393692c7ecSNélio Laranjeiro 			 struct rte_eth_rss_conf *rss_conf);
23403692c7ecSNélio Laranjeiro int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
23413692c7ecSNélio Laranjeiro 			   struct rte_eth_rss_conf *rss_conf);
2342af4f09f2SNélio Laranjeiro int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
23433692c7ecSNélio Laranjeiro int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
23443692c7ecSNélio Laranjeiro 			    struct rte_eth_rss_reta_entry64 *reta_conf,
23453692c7ecSNélio Laranjeiro 			    uint16_t reta_size);
23463692c7ecSNélio Laranjeiro int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
23473692c7ecSNélio Laranjeiro 			     struct rte_eth_rss_reta_entry64 *reta_conf,
23483692c7ecSNélio Laranjeiro 			     uint16_t reta_size);
23492f97422eSNelio Laranjeiro 
23501bdbe1afSAdrien Mazarguil /* mlx5_rxmode.c */
23511bdbe1afSAdrien Mazarguil 
23529039c812SAndrew Rybchenko int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
23539039c812SAndrew Rybchenko int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
2354ca041cd4SIvan Ilchenko int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
2355ca041cd4SIvan Ilchenko int mlx5_allmulticast_disable(struct rte_eth_dev *dev);
23561bdbe1afSAdrien Mazarguil 
235787011737SAdrien Mazarguil /* mlx5_stats.c */
235887011737SAdrien Mazarguil 
23593692c7ecSNélio Laranjeiro int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
23609970a9adSIgor Romanov int mlx5_stats_reset(struct rte_eth_dev *dev);
2361af4f09f2SNélio Laranjeiro int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
2362af4f09f2SNélio Laranjeiro 		    unsigned int n);
23639970a9adSIgor Romanov int mlx5_xstats_reset(struct rte_eth_dev *dev);
2364af4f09f2SNélio Laranjeiro int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
23653692c7ecSNélio Laranjeiro 			  struct rte_eth_xstat_name *xstats_names,
23663692c7ecSNélio Laranjeiro 			  unsigned int n);
236787011737SAdrien Mazarguil 
2368e9086978SAdrien Mazarguil /* mlx5_vlan.c */
2369e9086978SAdrien Mazarguil 
23703692c7ecSNélio Laranjeiro int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
23713692c7ecSNélio Laranjeiro void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
23723692c7ecSNélio Laranjeiro int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
23737af10d29SOphir Munk 
23747af10d29SOphir Munk /* mlx5_vlan_os.c */
23757af10d29SOphir Munk 
23767af10d29SOphir Munk void mlx5_vlan_vmwa_exit(void *ctx);
2377c12671e3SMatan Azrad void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
2378c12671e3SMatan Azrad 			    struct mlx5_vf_vlan *vf_vlan);
2379c12671e3SMatan Azrad void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
2380c12671e3SMatan Azrad 			    struct mlx5_vf_vlan *vf_vlan);
23817af10d29SOphir Munk void *mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex);
2382e9086978SAdrien Mazarguil 
2383e60fbd5bSAdrien Mazarguil /* mlx5_trigger.c */
2384e60fbd5bSAdrien Mazarguil 
23853692c7ecSNélio Laranjeiro int mlx5_dev_start(struct rte_eth_dev *dev);
238662024eb8SIvan Ilchenko int mlx5_dev_stop(struct rte_eth_dev *dev);
2387af4f09f2SNélio Laranjeiro int mlx5_traffic_enable(struct rte_eth_dev *dev);
2388925061b5SNélio Laranjeiro void mlx5_traffic_disable(struct rte_eth_dev *dev);
23893692c7ecSNélio Laranjeiro int mlx5_traffic_restart(struct rte_eth_dev *dev);
239037cd4501SBing Zhao int mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
239137cd4501SBing Zhao 				   struct rte_hairpin_peer_info *current_info,
239237cd4501SBing Zhao 				   struct rte_hairpin_peer_info *peer_info,
239337cd4501SBing Zhao 				   uint32_t direction);
239437cd4501SBing Zhao int mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
239537cd4501SBing Zhao 				 struct rte_hairpin_peer_info *peer_info,
239637cd4501SBing Zhao 				 uint32_t direction);
239737cd4501SBing Zhao int mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
239837cd4501SBing Zhao 				   uint32_t direction);
239937cd4501SBing Zhao int mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port);
240037cd4501SBing Zhao int mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port);
240102109eaeSBing Zhao int mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
240202109eaeSBing Zhao 				size_t len, uint32_t direction);
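
/*
 * Usage sketch for cross-port hairpin binding, assuming
 * mlx5_hairpin_get_peer_ports() returns the number of peer ports found and
 * that direction 1 means the given port is the Tx side:
 *
 *	uint16_t peers[RTE_MAX_ETHPORTS];
 *	int i, n = mlx5_hairpin_get_peer_ports(dev, peers, RTE_DIM(peers), 1);
 *
 *	for (i = 0; i < n; i++)
 *		mlx5_hairpin_bind(dev, peers[i]);
 */
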
2403d9f28495SDariusz Sosnowski int mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2404d9f28495SDariusz Sosnowski int mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2405d9f28495SDariusz Sosnowski int mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid);
2406d9f28495SDariusz Sosnowski int mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid);
2407e60fbd5bSAdrien Mazarguil 
24080d356350SNélio Laranjeiro /* mlx5_flow.c */
24090d356350SNélio Laranjeiro 
24105e61bcddSViacheslav Ovsiienko int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev);
24115e61bcddSViacheslav Ovsiienko bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev);
241278be8852SNelio Laranjeiro void mlx5_flow_print(struct rte_flow *flow);
24133692c7ecSNélio Laranjeiro int mlx5_flow_validate(struct rte_eth_dev *dev,
24143692c7ecSNélio Laranjeiro 		       const struct rte_flow_attr *attr,
24153692c7ecSNélio Laranjeiro 		       const struct rte_flow_item items[],
24163692c7ecSNélio Laranjeiro 		       const struct rte_flow_action actions[],
24173692c7ecSNélio Laranjeiro 		       struct rte_flow_error *error);
241827d171b8SMaayan Kashani uintptr_t
2419e38776c3SMaayan Kashani mlx5_flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
2420e38776c3SMaayan Kashani 		      const struct rte_flow_attr *attr,
2421e38776c3SMaayan Kashani 		      const struct rte_flow_item items[],
2422e38776c3SMaayan Kashani 		      const struct rte_flow_action actions[],
2423e38776c3SMaayan Kashani 		      bool external, struct rte_flow_error *error);
2424e38776c3SMaayan Kashani void
2425e38776c3SMaayan Kashani mlx5_flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
242627d171b8SMaayan Kashani 		       uintptr_t flow_idx);
24273692c7ecSNélio Laranjeiro struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
24283692c7ecSNélio Laranjeiro 				  const struct rte_flow_attr *attr,
24293692c7ecSNélio Laranjeiro 				  const struct rte_flow_item items[],
24303692c7ecSNélio Laranjeiro 				  const struct rte_flow_action actions[],
24313692c7ecSNélio Laranjeiro 				  struct rte_flow_error *error);
24323692c7ecSNélio Laranjeiro int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
24333692c7ecSNélio Laranjeiro 		      struct rte_flow_error *error);
2434b4edeaf3SSuanming Mou void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
2435b4edeaf3SSuanming Mou 			  bool active);
24363692c7ecSNélio Laranjeiro int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
24373692c7ecSNélio Laranjeiro int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2438fb8fd96dSDeclan Doherty 		    const struct rte_flow_action *action, void *data,
24393692c7ecSNélio Laranjeiro 		    struct rte_flow_error *error);
24403692c7ecSNélio Laranjeiro int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
24413692c7ecSNélio Laranjeiro 		      struct rte_flow_error *error);
2442fb7ad441SThomas Monjalon int mlx5_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
24438db7e3b6SBing Zhao int mlx5_flow_start_default(struct rte_eth_dev *dev);
24448db7e3b6SBing Zhao void mlx5_flow_stop_default(struct rte_eth_dev *dev);
2445af4f09f2SNélio Laranjeiro int mlx5_flow_verify(struct rte_eth_dev *dev);
244626e1eaf2SDariusz Sosnowski int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t sq_num);
2447af4f09f2SNélio Laranjeiro int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
2448af4f09f2SNélio Laranjeiro 			struct rte_flow_item_eth *eth_spec,
2449af4f09f2SNélio Laranjeiro 			struct rte_flow_item_eth *eth_mask,
2450af4f09f2SNélio Laranjeiro 			struct rte_flow_item_vlan *vlan_spec,
2451af4f09f2SNélio Laranjeiro 			struct rte_flow_item_vlan *vlan_mask);
2452af4f09f2SNélio Laranjeiro int mlx5_ctrl_flow(struct rte_eth_dev *dev,
2453af4f09f2SNélio Laranjeiro 		   struct rte_flow_item_eth *eth_spec,
2454af4f09f2SNélio Laranjeiro 		   struct rte_flow_item_eth *eth_mask);
24553c78124fSShiri Kuzin int mlx5_flow_lacp_miss(struct rte_eth_dev *dev);
2456b67b4ecbSDekel Peled struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
2457686d05b6SXueming Li uint32_t mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev,
245826e1eaf2SDariusz Sosnowski 					    uint32_t sq_num);
24596e88bc42SOphir Munk void mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
2460f15db67dSMatan Azrad 				       uint64_t async_id, int status);
24616e88bc42SOphir Munk void mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh);
2462f15db67dSMatan Azrad void mlx5_flow_query_alarm(void *arg);
2463956d5c74SSuanming Mou uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
2464956d5c74SSuanming Mou void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
2465956d5c74SSuanming Mou int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
24669b57df55SHaifei Luo 		    bool clear, uint64_t *pkts, uint64_t *bytes, void **action);
246750c38379SHaifei Luo int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow,
246850c38379SHaifei Luo 			FILE *file, struct rte_flow_error *error);
24695db9318fSHaifei Luo int save_dump_file(const unsigned char *data, uint32_t size,
2470a7ac7faeSHaifei Luo 		uint32_t type, uint64_t id, void *arg, FILE *file);
24715db9318fSHaifei Luo int mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
24725db9318fSHaifei Luo 	struct rte_flow_query_count *count, struct rte_flow_error *error);
24735db9318fSHaifei Luo #ifdef HAVE_IBV_FLOW_DV_SUPPORT
24745db9318fSHaifei Luo int mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, struct rte_flow *flow,
24755db9318fSHaifei Luo 		FILE *file, struct rte_flow_error *error);
24765db9318fSHaifei Luo #endif
2477fca8cba4SDavid Marchand int mlx5_flow_rx_metadata_negotiate(struct rte_eth_dev *dev,
2478fca8cba4SDavid Marchand 	uint64_t *features);
2479fca8cba4SDavid Marchand void mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev);
2480fa2d01c8SDong Zhou int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
2481fa2d01c8SDong Zhou 			uint32_t nb_contexts, struct rte_flow_error *error);
24820a429117SBing Zhao int mlx5_validate_action_ct(struct rte_eth_dev *dev,
24830a429117SBing Zhao 			    const struct rte_flow_action_conntrack *conntrack,
24840a429117SBing Zhao 			    struct rte_flow_error *error);
24850a429117SBing Zhao 
248604a4de75SMichael Baum int mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
248704a4de75SMichael Baum 			       void **contexts, uint32_t nb_contexts,
248804a4de75SMichael Baum 			       struct rte_flow_error *error);
24890d356350SNélio Laranjeiro 
24902e86c4e5SOphir Munk /* mlx5_mp_os.c */
2491161d103bSViacheslav Ovsiienko 
24922e86c4e5SOphir Munk int mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg,
24932e86c4e5SOphir Munk 			      const void *peer);
24942e86c4e5SOphir Munk int mlx5_mp_os_secondary_handle(const struct rte_mp_msg *mp_msg,
24952e86c4e5SOphir Munk 				const void *peer);
24962e86c4e5SOphir Munk void mlx5_mp_os_req_start_rxtx(struct rte_eth_dev *dev);
24972e86c4e5SOphir Munk void mlx5_mp_os_req_stop_rxtx(struct rte_eth_dev *dev);
2498161d103bSViacheslav Ovsiienko int mlx5_mp_os_req_queue_control(struct rte_eth_dev *dev, uint16_t queue_id,
2499161d103bSViacheslav Ovsiienko 				 enum mlx5_mp_req_type req_type);
2500f8b9a3baSXueming Li 
2501e6cdc54cSXueming Li /* mlx5_socket.c */
2502e6cdc54cSXueming Li 
2503e6cdc54cSXueming Li int mlx5_pmd_socket_init(void);
2504ea823b2cSDmitry Kozlyuk void mlx5_pmd_socket_uninit(void);
2505e6cdc54cSXueming Li 
2506d740eb50SSuanming Mou /* mlx5_flow_meter.c */
2507d740eb50SSuanming Mou 
250824865366SAlexander Kozyrev int mlx5_flow_meter_init(struct rte_eth_dev *dev,
250924865366SAlexander Kozyrev 			 uint32_t nb_meters,
251024865366SAlexander Kozyrev 			 uint32_t nb_meter_profiles,
251148fbb0e9SAlexander Kozyrev 			 uint32_t nb_meter_policies,
251248fbb0e9SAlexander Kozyrev 			 uint32_t nb_queues);
251324865366SAlexander Kozyrev void mlx5_flow_meter_uninit(struct rte_eth_dev *dev);
2514d740eb50SSuanming Mou int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg);
2515e6100c7bSLi Zhang struct mlx5_flow_meter_info *mlx5_flow_meter_find(struct mlx5_priv *priv,
2516e6100c7bSLi Zhang 		uint32_t meter_id, uint32_t *mtr_idx);
2517e6100c7bSLi Zhang struct mlx5_flow_meter_info *
2518e6100c7bSLi Zhang flow_dv_meter_find_by_idx(struct mlx5_priv *priv, uint32_t idx);
251983306d6cSShun Hao int mlx5_flow_meter_attach(struct mlx5_priv *priv,
2520e6100c7bSLi Zhang 			   struct mlx5_flow_meter_info *fm,
2521266e9f3dSSuanming Mou 			   const struct rte_flow_attr *attr,
2522266e9f3dSSuanming Mou 			   struct rte_flow_error *error);
2523c99b4f8bSLi Zhang void mlx5_flow_meter_detach(struct mlx5_priv *priv,
2524c99b4f8bSLi Zhang 			    struct mlx5_flow_meter_info *fm);
2525afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mlx5_flow_meter_policy_find
2526afb4aa4fSLi Zhang 		(struct rte_eth_dev *dev,
2527afb4aa4fSLi Zhang 		uint32_t policy_id,
2528afb4aa4fSLi Zhang 		uint32_t *policy_idx);
2529bf62fb76SShun Hao struct mlx5_flow_meter_info *
2530bf62fb76SShun Hao mlx5_flow_meter_hierarchy_next_meter(struct mlx5_priv *priv,
2531bf62fb76SShun Hao 				     struct mlx5_flow_meter_policy *policy,
2532bf62fb76SShun Hao 				     uint32_t *mtr_idx);
253350cc92ddSShun Hao struct mlx5_flow_meter_policy *
253450cc92ddSShun Hao mlx5_flow_meter_hierarchy_get_final_policy(struct rte_eth_dev *dev,
253550cc92ddSShun Hao 					struct mlx5_flow_meter_policy *policy);
2536afb4aa4fSLi Zhang int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
2537afb4aa4fSLi Zhang 			  struct rte_mtr_error *error);
2538ec962badSLi Zhang void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
2539bae983b8SMaayan Kashani uint32_t mlx5_flow_mtr_max_get(struct mlx5_priv *priv);
2540d740eb50SSuanming Mou 
2541f44b09f9SOphir Munk /* mlx5_os.c */
25425dfa003dSMichael Baum 
25432eb4d010SOphir Munk struct rte_pci_driver;
254491d1cfafSMichael Baum int mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh);
25452eb4d010SOphir Munk void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
2546a729d2f0SMichael Baum int mlx5_os_net_probe(struct mlx5_common_device *cdev,
2547a729d2f0SMichael Baum 		      struct mlx5_kvargs_ctrl *mkvlist);
25482eb4d010SOphir Munk void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
25492eb4d010SOphir Munk void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
2550ab27cdd9SOphir Munk void mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
2551ab27cdd9SOphir Munk int mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2552ab27cdd9SOphir Munk 			 uint32_t index);
2553ab27cdd9SOphir Munk int mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, unsigned int iface_idx,
2554ab27cdd9SOphir Munk 			       struct rte_ether_addr *mac_addr,
2555ab27cdd9SOphir Munk 			       int vf_index);
25564d18abd1SOphir Munk int mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable);
25574d18abd1SOphir Munk int mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable);
255808d1838fSDekel Peled int mlx5_os_set_nonblock_channel_fd(int fd);
2559f00f6562SOphir Munk void mlx5_os_mac_addr_flush(struct rte_eth_dev *dev);
2560ea823b2cSDmitry Kozlyuk void mlx5_os_net_cleanup(void);
25611c506404SBing Zhao 
2562d133f4cdSViacheslav Ovsiienko /* mlx5_txpp.c */
2563d133f4cdSViacheslav Ovsiienko 
2564d133f4cdSViacheslav Ovsiienko int mlx5_txpp_start(struct rte_eth_dev *dev);
2565d133f4cdSViacheslav Ovsiienko void mlx5_txpp_stop(struct rte_eth_dev *dev);
2566b94d93caSViacheslav Ovsiienko int mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp);
25673b025c0cSViacheslav Ovsiienko int mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
25683b025c0cSViacheslav Ovsiienko 			 struct rte_eth_xstat *stats,
25693b025c0cSViacheslav Ovsiienko 			 unsigned int n, unsigned int n_used);
25703b025c0cSViacheslav Ovsiienko int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev);
25713b025c0cSViacheslav Ovsiienko int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev,
25723b025c0cSViacheslav Ovsiienko 			       struct rte_eth_xstat_name *xstats_names,
25733b025c0cSViacheslav Ovsiienko 			       unsigned int n, unsigned int n_used);
257477522be0SViacheslav Ovsiienko void mlx5_txpp_interrupt_handler(void *cb_arg);
25759b31fc90SViacheslav Ovsiienko int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev);
25769b31fc90SViacheslav Ovsiienko void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev);
2577d133f4cdSViacheslav Ovsiienko 
2578ef9ee13fSOphir Munk /* mlx5_rxtx.c */
2579ef9ee13fSOphir Munk 
2580ef9ee13fSOphir Munk eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
2581ef9ee13fSOphir Munk 
2582e6100c7bSLi Zhang /* mlx5_flow_aso.c */
2583f935ed4bSDekel Peled 
258448fbb0e9SAlexander Kozyrev int mlx5_aso_mtr_queue_init(struct mlx5_dev_ctx_shared *sh,
258548fbb0e9SAlexander Kozyrev 			    struct mlx5_aso_mtr_pool *hws_pool,
258648fbb0e9SAlexander Kozyrev 			    struct mlx5_aso_mtr_pools_mng *pool_mng,
258748fbb0e9SAlexander Kozyrev 			    uint32_t nb_queues);
258848fbb0e9SAlexander Kozyrev void mlx5_aso_mtr_queue_uninit(struct mlx5_dev_ctx_shared *sh,
258948fbb0e9SAlexander Kozyrev 			       struct mlx5_aso_mtr_pool *hws_pool,
259048fbb0e9SAlexander Kozyrev 			       struct mlx5_aso_mtr_pools_mng *pool_mng);
259129efa63aSLi Zhang int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
259248fbb0e9SAlexander Kozyrev 			enum mlx5_access_aso_opc_mod aso_opc_mode,
259348fbb0e9SAlexander Kozyrev 			uint32_t nb_queues);
259429efa63aSLi Zhang int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);
259529efa63aSLi Zhang int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
259629efa63aSLi Zhang void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
259729efa63aSLi Zhang 			   enum mlx5_access_aso_opc_mod aso_opc_mod);
25984359d9d1SGregory Etelson int mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue,
25994359d9d1SGregory Etelson 				 struct mlx5_aso_mtr *mtr,
26004359d9d1SGregory Etelson 				 struct mlx5_mtr_bulk *bulk,
26014359d9d1SGregory Etelson 				 struct mlx5_hw_q_job *job, bool push);
26024359d9d1SGregory Etelson int mlx5_aso_mtr_wait(struct mlx5_priv *priv,
26034359d9d1SGregory Etelson 		      struct mlx5_aso_mtr *mtr, bool is_tmpl_api);
2604463170a7SSuanming Mou int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
2605ebaf1b31SBing Zhao 			      struct mlx5_aso_ct_action *ct,
2606478ba4bbSSuanming Mou 			      const struct rte_flow_action_conntrack *profile,
2607478ba4bbSSuanming Mou 			      void *user_data,
2608478ba4bbSSuanming Mou 			      bool push);
2609463170a7SSuanming Mou int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
2610cf756556SBing Zhao 			   struct mlx5_aso_ct_action *ct);
2611463170a7SSuanming Mou int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
2612cf756556SBing Zhao 			     struct mlx5_aso_ct_action *ct,
2613478ba4bbSSuanming Mou 			     struct rte_flow_action_conntrack *profile,
2614478ba4bbSSuanming Mou 			     void *user_data, bool push);
2615463170a7SSuanming Mou int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh, uint32_t queue,
26162d084f69SBing Zhao 			  struct mlx5_aso_ct_action *ct);
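
/*
 * Usage sketch of the ASO CT helpers above, synchronous flavor: push an
 * update, wait until the hardware object is ready, then read it back.
 * Error handling is elided; `queue` and `profile` come from the caller and
 * `out` is a struct rte_flow_action_conntrack.
 *
 *	mlx5_aso_ct_update_by_wqe(sh, queue, ct, profile, NULL, true);
 *	mlx5_aso_ct_wait_ready(sh, queue, ct);
 *	mlx5_aso_ct_query_by_wqe(sh, queue, ct, &out, NULL, true);
 */
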
2617d47fe9daSTal Shnaiderman uint32_t
2618d47fe9daSTal Shnaiderman mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
26196a86ee2eSTal Shnaiderman uint32_t
26206a86ee2eSTal Shnaiderman mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
2621f935ed4bSDekel Peled 
2622478ba4bbSSuanming Mou void mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,
2623478ba4bbSSuanming Mou 			     char *wdata);
2624478ba4bbSSuanming Mou void mlx5_aso_push_wqe(struct mlx5_dev_ctx_shared *sh,
2625478ba4bbSSuanming Mou 		       struct mlx5_aso_sq *sq);
2626478ba4bbSSuanming Mou int mlx5_aso_pull_completion(struct mlx5_aso_sq *sq,
2627478ba4bbSSuanming Mou 			     struct rte_flow_op_result res[],
2628478ba4bbSSuanming Mou 			     uint16_t n_res);
26294d368e1dSXiaoyu Min int mlx5_aso_cnt_queue_init(struct mlx5_dev_ctx_shared *sh);
26304d368e1dSXiaoyu Min void mlx5_aso_cnt_queue_uninit(struct mlx5_dev_ctx_shared *sh);
26314d368e1dSXiaoyu Min int mlx5_aso_cnt_query(struct mlx5_dev_ctx_shared *sh,
26324d368e1dSXiaoyu Min 		struct mlx5_hws_cnt_pool *cpool);
2633463170a7SSuanming Mou int mlx5_aso_ct_queue_init(struct mlx5_dev_ctx_shared *sh,
2634463170a7SSuanming Mou 			   struct mlx5_aso_ct_pools_mng *ct_mng,
2635463170a7SSuanming Mou 			   uint32_t nb_queues);
2636463170a7SSuanming Mou int mlx5_aso_ct_queue_uninit(struct mlx5_dev_ctx_shared *sh,
2637463170a7SSuanming Mou 			     struct mlx5_aso_ct_pools_mng *ct_mng);
263815896eafSGregory Etelson int
263915896eafSGregory Etelson mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
264015896eafSGregory Etelson 		   void *uar, uint16_t log_desc_n);
264115896eafSGregory Etelson void
264215896eafSGregory Etelson mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq);
264315896eafSGregory Etelson void
264415896eafSGregory Etelson mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq);
264515896eafSGregory Etelson void
264615896eafSGregory Etelson mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq);
26474d368e1dSXiaoyu Min 
2648db25cadcSViacheslav Ovsiienko /* mlx5_flow_flex.c */
2649db25cadcSViacheslav Ovsiienko 
2650db25cadcSViacheslav Ovsiienko struct rte_flow_item_flex_handle *
2651db25cadcSViacheslav Ovsiienko flow_dv_item_create(struct rte_eth_dev *dev,
2652db25cadcSViacheslav Ovsiienko 		    const struct rte_flow_item_flex_conf *conf,
2653db25cadcSViacheslav Ovsiienko 		    struct rte_flow_error *error);
2654db25cadcSViacheslav Ovsiienko int flow_dv_item_release(struct rte_eth_dev *dev,
2655db25cadcSViacheslav Ovsiienko 		    const struct rte_flow_item_flex_handle *flex_handle,
2656db25cadcSViacheslav Ovsiienko 		    struct rte_flow_error *error);
2657db25cadcSViacheslav Ovsiienko int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
2658db25cadcSViacheslav Ovsiienko void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
26596dac7d7fSViacheslav Ovsiienko void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
26606dac7d7fSViacheslav Ovsiienko 				   void *key, const struct rte_flow_item *item,
26616dac7d7fSViacheslav Ovsiienko 				   bool is_inner);
26628c0ca752SRongwei Liu int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
266397e19f07SViacheslav Ovsiienko 			    uint32_t idx, uint32_t *pos, bool is_inner);
26648c0ca752SRongwei Liu int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
26658c0ca752SRongwei Liu 					    void *flex, uint32_t byte_off,
266697e19f07SViacheslav Ovsiienko 					    bool tunnel, uint32_t *value);
2667850233acSViacheslav Ovsiienko int mlx5_flex_get_tunnel_mode(const struct rte_flow_item *item,
2668850233acSViacheslav Ovsiienko 			      enum rte_flow_item_flex_tunnel_mode *tunnel_mode);
26696dac7d7fSViacheslav Ovsiienko int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
26706dac7d7fSViacheslav Ovsiienko 			    struct rte_flow_item_flex_handle *handle,
26716dac7d7fSViacheslav Ovsiienko 			    bool acquire);
26726dac7d7fSViacheslav Ovsiienko int mlx5_flex_release_index(struct rte_eth_dev *dev, int index);
26736dac7d7fSViacheslav Ovsiienko 
26749086ac09SGregory Etelson /* Flex parser list callbacks. */
26759086ac09SGregory Etelson struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
26769086ac09SGregory Etelson int mlx5_flex_parser_match_cb(void *list_ctx,
26779086ac09SGregory Etelson 			      struct mlx5_list_entry *iter, void *ctx);
26789086ac09SGregory Etelson void mlx5_flex_parser_remove_cb(void *list_ctx,	struct mlx5_list_entry *entry);
26799086ac09SGregory Etelson struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
26809086ac09SGregory Etelson 						  struct mlx5_list_entry *entry,
26819086ac09SGregory Etelson 						  void *ctx);
26829086ac09SGregory Etelson void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
26839086ac09SGregory Etelson 				    struct mlx5_list_entry *entry);
268400e57916SRongwei Liu 
268515896eafSGregory Etelson int
268615896eafSGregory Etelson mlx5_flow_quota_destroy(struct rte_eth_dev *dev);
268715896eafSGregory Etelson int
268815896eafSGregory Etelson mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas);
268915896eafSGregory Etelson struct rte_flow_action_handle *
269015896eafSGregory Etelson mlx5_quota_alloc(struct rte_eth_dev *dev, uint32_t queue,
269115896eafSGregory Etelson 		 const struct rte_flow_action_quota *conf,
269215896eafSGregory Etelson 		 struct mlx5_hw_q_job *job, bool push,
269315896eafSGregory Etelson 		 struct rte_flow_error *error);
269415896eafSGregory Etelson void
269515896eafSGregory Etelson mlx5_quota_async_completion(struct rte_eth_dev *dev, uint32_t queue,
269615896eafSGregory Etelson 			    struct mlx5_hw_q_job *job);
269715896eafSGregory Etelson int
269815896eafSGregory Etelson mlx5_quota_query_update(struct rte_eth_dev *dev, uint32_t queue,
269915896eafSGregory Etelson 			struct rte_flow_action_handle *handle,
270015896eafSGregory Etelson 			const struct rte_flow_action *update,
270115896eafSGregory Etelson 			struct rte_flow_query_quota *query,
270215896eafSGregory Etelson 			struct mlx5_hw_q_job *async_job, bool push,
270315896eafSGregory Etelson 			struct rte_flow_error *error);
270415896eafSGregory Etelson int mlx5_quota_query(struct rte_eth_dev *dev, uint32_t queue,
270515896eafSGregory Etelson 		     const struct rte_flow_action_handle *handle,
270615896eafSGregory Etelson 		     struct rte_flow_query_quota *query,
270715896eafSGregory Etelson 		     struct mlx5_hw_q_job *async_job, bool push,
270815896eafSGregory Etelson 		     struct rte_flow_error *error);
270915896eafSGregory Etelson 
271000e57916SRongwei Liu int mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev);
271100e57916SRongwei Liu 
271200e57916SRongwei Liu void mlx5_free_srh_flex_parser(struct rte_eth_dev *dev);
271361a81061SGregory Etelson 
271461a81061SGregory Etelson /* mlx5_flow_hw.c */
271561a81061SGregory Etelson struct rte_pmd_mlx5_host_action;
271661a81061SGregory Etelson 
271761a81061SGregory Etelson struct mlx5dr_action *
271861a81061SGregory Etelson mlx5_flow_hw_get_dr_action(struct rte_eth_dev *dev,
271961a81061SGregory Etelson 			   struct rte_pmd_mlx5_host_action *action,
272061a81061SGregory Etelson 			   void **release_data);
272161a81061SGregory Etelson 
272261a81061SGregory Etelson void
272361a81061SGregory Etelson mlx5_flow_hw_put_dr_action(struct rte_eth_dev *dev,
272461a81061SGregory Etelson 			   enum rte_flow_action_type type,
272561a81061SGregory Etelson 			   void *release_data);
272661a81061SGregory Etelson 
272761a81061SGregory Etelson bool
272861a81061SGregory Etelson mlx5_hw_ctx_validate(const struct rte_eth_dev *dev,
272961a81061SGregory Etelson 		     struct rte_flow_error *error);
273061a81061SGregory Etelson 
2731771fa900SAdrien Mazarguil #endif /* RTE_PMD_MLX5_H_ */
2732