xref: /dpdk/drivers/net/mlx5/mlx5.h (revision f44b09f9e35a1a37de7ddb73e2eceaeb049f5e6c)
18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
2771fa900SAdrien Mazarguil  * Copyright 2015 6WIND S.A.
35feecc57SShahaf Shuler  * Copyright 2015 Mellanox Technologies, Ltd
4771fa900SAdrien Mazarguil  */
5771fa900SAdrien Mazarguil 
6771fa900SAdrien Mazarguil #ifndef RTE_PMD_MLX5_H_
7771fa900SAdrien Mazarguil #define RTE_PMD_MLX5_H_
8771fa900SAdrien Mazarguil 
9771fa900SAdrien Mazarguil #include <stddef.h>
10028669bcSAnatoly Burakov #include <stdbool.h>
11771fa900SAdrien Mazarguil #include <stdint.h>
12771fa900SAdrien Mazarguil #include <limits.h>
13771fa900SAdrien Mazarguil #include <net/if.h>
14771fa900SAdrien Mazarguil #include <netinet/in.h>
151b37f5d8SNélio Laranjeiro #include <sys/queue.h>
16771fa900SAdrien Mazarguil 
17771fa900SAdrien Mazarguil /* Verbs header. */
18771fa900SAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
19771fa900SAdrien Mazarguil #ifdef PEDANTIC
20fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic"
21771fa900SAdrien Mazarguil #endif
22771fa900SAdrien Mazarguil #include <infiniband/verbs.h>
23771fa900SAdrien Mazarguil #ifdef PEDANTIC
24fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic"
25771fa900SAdrien Mazarguil #endif
26771fa900SAdrien Mazarguil 
275f08883aSGaetan Rivet #include <rte_pci.h>
28771fa900SAdrien Mazarguil #include <rte_ether.h>
29ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h>
30974f1e7eSYongseok Koh #include <rte_rwlock.h>
31198a3c33SNelio Laranjeiro #include <rte_interrupts.h>
32a48deadaSOr Ami #include <rte_errno.h>
330d356350SNélio Laranjeiro #include <rte_flow.h>
34771fa900SAdrien Mazarguil 
357b4f1e6bSMatan Azrad #include <mlx5_glue.h>
367b4f1e6bSMatan Azrad #include <mlx5_devx_cmds.h>
377b4f1e6bSMatan Azrad #include <mlx5_prm.h>
38654810b5SMatan Azrad #include <mlx5_nl.h>
39a4de9586SVu Pham #include <mlx5_common_mp.h>
40b8dc6b0eSVu Pham #include <mlx5_common_mr.h>
417b4f1e6bSMatan Azrad 
427b4f1e6bSMatan Azrad #include "mlx5_defs.h"
43771fa900SAdrien Mazarguil #include "mlx5_utils.h"
44771fa900SAdrien Mazarguil #include "mlx5_autoconf.h"
45771fa900SAdrien Mazarguil 
46014d1cbeSSuanming Mou 
47014d1cbeSSuanming Mou enum mlx5_ipool_index {
48b88341caSSuanming Mou #ifdef HAVE_IBV_FLOW_DV_SUPPORT
49014d1cbeSSuanming Mou 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
508acf8ac9SSuanming Mou 	MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
515f114269SSuanming Mou 	MLX5_IPOOL_TAG, /* Pool for tag resource. */
52f3faf9eaSSuanming Mou 	MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
537ac99475SSuanming Mou 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
54b88341caSSuanming Mou #endif
558638e2b0SSuanming Mou 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
5690e6053aSSuanming Mou 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
57772dc0ebSSuanming Mou 	MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
58b88341caSSuanming Mou 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
59ab612adcSSuanming Mou 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
60014d1cbeSSuanming Mou 	MLX5_IPOOL_MAX,
61014d1cbeSSuanming Mou };
62014d1cbeSSuanming Mou 
63a1da6f62SSuanming Mou /*
64a1da6f62SSuanming Mou  * There are three reclaim memory mode supported.
65a1da6f62SSuanming Mou  * 0(none) means no memory reclaim.
66a1da6f62SSuanming Mou  * 1(light) means only PMD level reclaim.
67a1da6f62SSuanming Mou  * 2(aggressive) means both PMD and rdma-core level reclaim.
68a1da6f62SSuanming Mou  */
69a1da6f62SSuanming Mou enum mlx5_reclaim_mem_mode {
70a1da6f62SSuanming Mou 	MLX5_RCM_NONE, /* Don't reclaim memory. */
71a1da6f62SSuanming Mou 	MLX5_RCM_LIGHT, /* Reclaim PMD level. */
72a1da6f62SSuanming Mou 	MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
73a1da6f62SSuanming Mou };
74a1da6f62SSuanming Mou 
759a8ab29bSYongseok Koh /** Key string for IPC. */
769a8ab29bSYongseok Koh #define MLX5_MP_NAME "net_mlx5_mp"
779a8ab29bSYongseok Koh 
7826c08b97SAdrien Mazarguil 
796e88bc42SOphir Munk LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
80974f1e7eSYongseok Koh 
817be600c8SYongseok Koh /* Shared data between primary and secondary processes. */
82974f1e7eSYongseok Koh struct mlx5_shared_data {
837be600c8SYongseok Koh 	rte_spinlock_t lock;
847be600c8SYongseok Koh 	/* Global spinlock for primary and secondary processes. */
857be600c8SYongseok Koh 	int init_done; /* Whether primary has done initialization. */
867be600c8SYongseok Koh 	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
87974f1e7eSYongseok Koh 	struct mlx5_dev_list mem_event_cb_list;
88974f1e7eSYongseok Koh 	rte_rwlock_t mem_event_rwlock;
89974f1e7eSYongseok Koh };
90974f1e7eSYongseok Koh 
917be600c8SYongseok Koh /* Per-process data structure, not visible to other processes. */
927be600c8SYongseok Koh struct mlx5_local_data {
937be600c8SYongseok Koh 	int init_done; /* Whether a secondary has done initialization. */
947be600c8SYongseok Koh };
957be600c8SYongseok Koh 
96974f1e7eSYongseok Koh extern struct mlx5_shared_data *mlx5_shared_data;
97974f1e7eSYongseok Koh 
981a611fdaSShahaf Shuler struct mlx5_counter_ctrl {
991a611fdaSShahaf Shuler 	/* Name of the counter. */
1001a611fdaSShahaf Shuler 	char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
1011a611fdaSShahaf Shuler 	/* Name of the counter on the device table. */
1021a611fdaSShahaf Shuler 	char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
1031a611fdaSShahaf Shuler 	uint32_t ib:1; /**< Nonzero for IB counters. */
1041a611fdaSShahaf Shuler };
1051a611fdaSShahaf Shuler 
106a4193ae3SShahaf Shuler struct mlx5_xstats_ctrl {
107a4193ae3SShahaf Shuler 	/* Number of device stats. */
108a4193ae3SShahaf Shuler 	uint16_t stats_n;
1091a611fdaSShahaf Shuler 	/* Number of device stats identified by PMD. */
1101a611fdaSShahaf Shuler 	uint16_t  mlx5_stats_n;
111a4193ae3SShahaf Shuler 	/* Index in the device counters table. */
112a4193ae3SShahaf Shuler 	uint16_t dev_table_idx[MLX5_MAX_XSTATS];
113a4193ae3SShahaf Shuler 	uint64_t base[MLX5_MAX_XSTATS];
114c5193a0bSJiawei Wang 	uint64_t xstats[MLX5_MAX_XSTATS];
115c5193a0bSJiawei Wang 	uint64_t hw_stats[MLX5_MAX_XSTATS];
1161a611fdaSShahaf Shuler 	struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
117a4193ae3SShahaf Shuler };
118a4193ae3SShahaf Shuler 
119ce9494d7STom Barbette struct mlx5_stats_ctrl {
120ce9494d7STom Barbette 	/* Base for imissed counter. */
121ce9494d7STom Barbette 	uint64_t imissed_base;
122c5193a0bSJiawei Wang 	uint64_t imissed;
123ce9494d7STom Barbette };
124ce9494d7STom Barbette 
1257fe24446SShahaf Shuler /* Default PMD specific parameter value. */
1267fe24446SShahaf Shuler #define MLX5_ARG_UNSET (-1)
1277fe24446SShahaf Shuler 
12821bb6c7eSDekel Peled #define MLX5_LRO_SUPPORTED(dev) \
12921bb6c7eSDekel Peled 	(((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)
13021bb6c7eSDekel Peled 
1313d491dd6SDekel Peled /* Maximal size of coalesced segment for LRO is set in chunks of 256 Bytes. */
1323d491dd6SDekel Peled #define MLX5_LRO_SEG_CHUNK_SIZE	256u
1333d491dd6SDekel Peled 
1341c7e57f9SDekel Peled /* Maximal size of aggregated LRO packet. */
1353d491dd6SDekel Peled #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
1361c7e57f9SDekel Peled 
13721bb6c7eSDekel Peled /* LRO configurations structure. */
13821bb6c7eSDekel Peled struct mlx5_lro_config {
13921bb6c7eSDekel Peled 	uint32_t supported:1; /* Whether LRO is supported. */
14021bb6c7eSDekel Peled 	uint32_t timeout; /* User configuration. */
14121bb6c7eSDekel Peled };
14221bb6c7eSDekel Peled 
1437fe24446SShahaf Shuler /*
1447fe24446SShahaf Shuler  * Device configuration structure.
1457fe24446SShahaf Shuler  *
1467fe24446SShahaf Shuler  * Merged configuration from:
1477fe24446SShahaf Shuler  *
1487fe24446SShahaf Shuler  *  - Device capabilities,
1497fe24446SShahaf Shuler  *  - User device parameters disabled features.
1507fe24446SShahaf Shuler  */
1517fe24446SShahaf Shuler struct mlx5_dev_config {
1527fe24446SShahaf Shuler 	unsigned int hw_csum:1; /* Checksum offload is supported. */
1537fe24446SShahaf Shuler 	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
15438b4b397SViacheslav Ovsiienko 	unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
1557fe24446SShahaf Shuler 	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
1567fe24446SShahaf Shuler 	unsigned int hw_padding:1; /* End alignment padding is supported. */
157ccdcba53SNélio Laranjeiro 	unsigned int vf:1; /* This is a VF. */
158038e7251SShahaf Shuler 	unsigned int tunnel_en:1;
159038e7251SShahaf Shuler 	/* Whether tunnel stateless offloads are supported. */
1601f106da2SMatan Azrad 	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
1617fe24446SShahaf Shuler 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
162bc91e8dbSYongseok Koh 	unsigned int cqe_pad:1; /* CQE padding is enabled. */
163dbccb4cdSShahaf Shuler 	unsigned int tso:1; /* Whether TSO is supported. */
1647fe24446SShahaf Shuler 	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
165dceb5029SYongseok Koh 	unsigned int mr_ext_memseg_en:1;
166dceb5029SYongseok Koh 	/* Whether memseg should be extended for MR creation. */
16778a54648SXueming Li 	unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
168db209cc3SNélio Laranjeiro 	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
169e2b4925eSOri Kam 	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
17051e72d38SOri Kam 	unsigned int dv_flow_en:1; /* Enable DV flow. */
1712d241515SViacheslav Ovsiienko 	unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
1725f8ba81cSXueming Li 	unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
173f5bf91deSMoti Haimovsky 	unsigned int devx:1; /* Whether devx interface is available or not. */
1743075bd23SDekel Peled 	unsigned int dest_tir:1; /* Whether advanced DR API is available. */
175a1da6f62SSuanming Mou 	unsigned int reclaim_mode:2; /* Memory reclaim mode. */
1767d6bf6b8SYongseok Koh 	struct {
1777d6bf6b8SYongseok Koh 		unsigned int enabled:1; /* Whether MPRQ is enabled. */
1787d6bf6b8SYongseok Koh 		unsigned int stride_num_n; /* Number of strides. */
179ecb16045SAlexander Kozyrev 		unsigned int stride_size_n; /* Size of a stride. */
1807d6bf6b8SYongseok Koh 		unsigned int min_stride_size_n; /* Min size of a stride. */
1817d6bf6b8SYongseok Koh 		unsigned int max_stride_size_n; /* Max size of a stride. */
1827d6bf6b8SYongseok Koh 		unsigned int max_memcpy_len;
1837d6bf6b8SYongseok Koh 		/* Maximum packet size to memcpy Rx packets. */
1847d6bf6b8SYongseok Koh 		unsigned int min_rxqs_num;
1857d6bf6b8SYongseok Koh 		/* Rx queue count threshold to enable MPRQ. */
1867d6bf6b8SYongseok Koh 	} mprq; /* Configurations for Multi-Packet RQ. */
187f9de8718SShahaf Shuler 	int mps; /* Multi-packet send supported mode. */
1888409a285SViacheslav Ovsiienko 	int dbnc; /* Skip doorbell register write barrier. */
1892815702bSNelio Laranjeiro 	unsigned int flow_prio; /* Number of flow priorities. */
1905e61bcddSViacheslav Ovsiienko 	enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
1915e61bcddSViacheslav Ovsiienko 	/* Availibility of mreg_c's. */
1927fe24446SShahaf Shuler 	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
1937fe24446SShahaf Shuler 	unsigned int ind_table_max_size; /* Maximum indirection table size. */
194066cfecdSMatan Azrad 	unsigned int max_dump_files_num; /* Maximum dump files per queue. */
1951ad9a3d0SBing Zhao 	unsigned int log_hp_size; /* Single hairpin queue data size in total. */
1967fe24446SShahaf Shuler 	int txqs_inline; /* Queue number threshold for inlining. */
197505f1fe4SViacheslav Ovsiienko 	int txq_inline_min; /* Minimal amount of data bytes to inline. */
198505f1fe4SViacheslav Ovsiienko 	int txq_inline_max; /* Max packet size for inlining with SEND. */
199505f1fe4SViacheslav Ovsiienko 	int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
200e2b4925eSOri Kam 	struct mlx5_hca_attr hca_attr; /* HCA attributes. */
20121bb6c7eSDekel Peled 	struct mlx5_lro_config lro; /* LRO configuration. */
2027fe24446SShahaf Shuler };
2037fe24446SShahaf Shuler 
204ae18a1aeSOri Kam 
205d10b09dbSOlivier Matz /**
20642280dd9SDekel Peled  * Type of object being allocated.
207d10b09dbSOlivier Matz  */
208d10b09dbSOlivier Matz enum mlx5_verbs_alloc_type {
209d10b09dbSOlivier Matz 	MLX5_VERBS_ALLOC_TYPE_NONE,
210d10b09dbSOlivier Matz 	MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
211d10b09dbSOlivier Matz 	MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
212d10b09dbSOlivier Matz };
213d10b09dbSOlivier Matz 
214dfedf3e3SViacheslav Ovsiienko /* Structure for VF VLAN workaround. */
215dfedf3e3SViacheslav Ovsiienko struct mlx5_vf_vlan {
216dfedf3e3SViacheslav Ovsiienko 	uint32_t tag:12;
217dfedf3e3SViacheslav Ovsiienko 	uint32_t created:1;
218dfedf3e3SViacheslav Ovsiienko };
219dfedf3e3SViacheslav Ovsiienko 
220d10b09dbSOlivier Matz /**
221d10b09dbSOlivier Matz  * Verbs allocator needs a context to know in the callback which kind of
222d10b09dbSOlivier Matz  * resources it is allocating.
223d10b09dbSOlivier Matz  */
224d10b09dbSOlivier Matz struct mlx5_verbs_alloc_ctx {
225d10b09dbSOlivier Matz 	enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
226d10b09dbSOlivier Matz 	const void *obj; /* Pointer to the DPDK object. */
227d10b09dbSOlivier Matz };
228d10b09dbSOlivier Matz 
22978be8852SNelio Laranjeiro /* Flow drop context necessary due to Verbs API. */
23078be8852SNelio Laranjeiro struct mlx5_drop {
23178be8852SNelio Laranjeiro 	struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */
23293403560SDekel Peled 	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
23378be8852SNelio Laranjeiro };
23478be8852SNelio Laranjeiro 
2355382d28cSMatan Azrad #define MLX5_COUNTERS_PER_POOL 512
236f15db67dSMatan Azrad #define MLX5_MAX_PENDING_QUERIES 4
237c3d3b140SSuanming Mou #define MLX5_CNT_CONTAINER_RESIZE 64
238fa2d01c8SDong Zhou #define MLX5_CNT_AGE_OFFSET 0x80000000
2398d93c830SDong Zhou #define CNT_SIZE (sizeof(struct mlx5_flow_counter))
2408d93c830SDong Zhou #define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext))
241fa2d01c8SDong Zhou #define AGE_SIZE (sizeof(struct mlx5_age_param))
242fa2d01c8SDong Zhou #define MLX5_AGING_TIME_DELAY	7
2438d93c830SDong Zhou #define CNT_POOL_TYPE_EXT	(1 << 0)
244fa2d01c8SDong Zhou #define CNT_POOL_TYPE_AGE	(1 << 1)
2458d93c830SDong Zhou #define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT)
246fa2d01c8SDong Zhou #define IS_AGE_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_AGE)
247fa2d01c8SDong Zhou #define MLX_CNT_IS_AGE(counter) ((counter) & MLX5_CNT_AGE_OFFSET ? 1 : 0)
2488d93c830SDong Zhou #define MLX5_CNT_LEN(pool) \
249fa2d01c8SDong Zhou 	(CNT_SIZE + \
250fa2d01c8SDong Zhou 	(IS_AGE_POOL(pool) ? AGE_SIZE : 0) + \
251fa2d01c8SDong Zhou 	(IS_EXT_POOL(pool) ? CNTEXT_SIZE : 0))
2528d93c830SDong Zhou #define MLX5_POOL_GET_CNT(pool, index) \
2538d93c830SDong Zhou 	((struct mlx5_flow_counter *) \
2548d93c830SDong Zhou 	((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool))))
2558d93c830SDong Zhou #define MLX5_CNT_ARRAY_IDX(pool, cnt) \
2568d93c830SDong Zhou 	((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \
2578d93c830SDong Zhou 	MLX5_CNT_LEN(pool)))
258c3d3b140SSuanming Mou /*
259c3d3b140SSuanming Mou  * The pool index and offset of counter in the pool array makes up the
260c3d3b140SSuanming Mou  * counter index. In case the counter is from pool 0 and offset 0, it
261c3d3b140SSuanming Mou  * should plus 1 to avoid index 0, since 0 means invalid counter index
262c3d3b140SSuanming Mou  * currently.
263c3d3b140SSuanming Mou  */
264c3d3b140SSuanming Mou #define MLX5_MAKE_CNT_IDX(pi, offset) \
265c3d3b140SSuanming Mou 	((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
266fa2d01c8SDong Zhou #define MLX5_CNT_TO_CNT_EXT(pool, cnt) \
267fa2d01c8SDong Zhou 	((struct mlx5_flow_counter_ext *)\
268fa2d01c8SDong Zhou 	((uint8_t *)((cnt) + 1) + \
269fa2d01c8SDong Zhou 	(IS_AGE_POOL(pool) ? AGE_SIZE : 0)))
270826b8a87SSuanming Mou #define MLX5_GET_POOL_CNT_EXT(pool, offset) \
271fa2d01c8SDong Zhou 	MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset)))
272fa2d01c8SDong Zhou #define MLX5_CNT_TO_AGE(cnt) \
273fa2d01c8SDong Zhou 	((struct mlx5_age_param *)((cnt) + 1))
2745382d28cSMatan Azrad 
2755382d28cSMatan Azrad struct mlx5_flow_counter_pool;
2765382d28cSMatan Azrad 
277fa2d01c8SDong Zhou /*age status*/
278fa2d01c8SDong Zhou enum {
279fa2d01c8SDong Zhou 	AGE_FREE, /* Initialized state. */
280fa2d01c8SDong Zhou 	AGE_CANDIDATE, /* Counter assigned to flows. */
281fa2d01c8SDong Zhou 	AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
282fa2d01c8SDong Zhou };
283fa2d01c8SDong Zhou 
2845af61440SMatan Azrad #define MLX5_CNT_CONTAINER(sh, batch, age) (&(sh)->cmng.ccont \
2855af61440SMatan Azrad 					    [(batch) * 2 + (age)])
2865af61440SMatan Azrad 
2875af61440SMatan Azrad enum {
2885af61440SMatan Azrad 	MLX5_CCONT_TYPE_SINGLE,
2895af61440SMatan Azrad 	MLX5_CCONT_TYPE_SINGLE_FOR_AGE,
2905af61440SMatan Azrad 	MLX5_CCONT_TYPE_BATCH,
2915af61440SMatan Azrad 	MLX5_CCONT_TYPE_BATCH_FOR_AGE,
2925af61440SMatan Azrad 	MLX5_CCONT_TYPE_MAX,
2935af61440SMatan Azrad };
2945af61440SMatan Azrad 
295fa2d01c8SDong Zhou /* Counter age parameter. */
296fa2d01c8SDong Zhou struct mlx5_age_param {
297fa2d01c8SDong Zhou 	rte_atomic16_t state; /**< Age state. */
298fa2d01c8SDong Zhou 	uint16_t port_id; /**< Port id of the counter. */
299fa2d01c8SDong Zhou 	uint32_t timeout:15; /**< Age timeout in unit of 0.1sec. */
300fa2d01c8SDong Zhou 	uint32_t expire:16; /**< Expire time(0.1sec) in the future. */
301fa2d01c8SDong Zhou 	void *context; /**< Flow counter age context. */
302fa2d01c8SDong Zhou };
303fa2d01c8SDong Zhou 
3045382d28cSMatan Azrad struct flow_counter_stats {
3055382d28cSMatan Azrad 	uint64_t hits;
3065382d28cSMatan Azrad 	uint64_t bytes;
3075382d28cSMatan Azrad };
3085382d28cSMatan Azrad 
309826b8a87SSuanming Mou /* Generic counters information. */
3105382d28cSMatan Azrad struct mlx5_flow_counter {
3115382d28cSMatan Azrad 	TAILQ_ENTRY(mlx5_flow_counter) next;
3125382d28cSMatan Azrad 	/**< Pointer to the next flow counter structure. */
313f15db67dSMatan Azrad 	union {
3145382d28cSMatan Azrad 		uint64_t hits; /**< Reset value of hits packets. */
315f15db67dSMatan Azrad 		int64_t query_gen; /**< Generation of the last release. */
316f15db67dSMatan Azrad 	};
3175382d28cSMatan Azrad 	uint64_t bytes; /**< Reset value of bytes. */
3185382d28cSMatan Azrad 	void *action; /**< Pointer to the dv action. */
3195382d28cSMatan Azrad };
3205382d28cSMatan Azrad 
321826b8a87SSuanming Mou /* Extend counters information for none batch counters. */
322826b8a87SSuanming Mou struct mlx5_flow_counter_ext {
323826b8a87SSuanming Mou 	uint32_t shared:1; /**< Share counter ID with other flow rules. */
324826b8a87SSuanming Mou 	uint32_t batch: 1;
325826b8a87SSuanming Mou 	/**< Whether the counter was allocated by batch command. */
326826b8a87SSuanming Mou 	uint32_t ref_cnt:30; /**< Reference counter. */
327826b8a87SSuanming Mou 	uint32_t id; /**< User counter ID. */
328826b8a87SSuanming Mou 	union {  /**< Holds the counters for the rule. */
329826b8a87SSuanming Mou #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
330826b8a87SSuanming Mou 		struct ibv_counter_set *cs;
331826b8a87SSuanming Mou #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
332826b8a87SSuanming Mou 		struct ibv_counters *cs;
333826b8a87SSuanming Mou #endif
334826b8a87SSuanming Mou 		struct mlx5_devx_obj *dcs; /**< Counter Devx object. */
335826b8a87SSuanming Mou 	};
336826b8a87SSuanming Mou };
337826b8a87SSuanming Mou 
3385382d28cSMatan Azrad TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
3395382d28cSMatan Azrad 
340826b8a87SSuanming Mou /* Generic counter pool structure - query is in pool resolution. */
3415382d28cSMatan Azrad struct mlx5_flow_counter_pool {
3425382d28cSMatan Azrad 	TAILQ_ENTRY(mlx5_flow_counter_pool) next;
3435382d28cSMatan Azrad 	struct mlx5_counters counters; /* Free counter list. */
344f15db67dSMatan Azrad 	union {
3455382d28cSMatan Azrad 		struct mlx5_devx_obj *min_dcs;
346f15db67dSMatan Azrad 		rte_atomic64_t a64_dcs;
347f15db67dSMatan Azrad 	};
348f15db67dSMatan Azrad 	/* The devx object of the minimum counter ID. */
349c989f49aSSuanming Mou 	rte_atomic64_t start_query_gen; /* Query start round. */
350c989f49aSSuanming Mou 	rte_atomic64_t end_query_gen; /* Query end round. */
3514001d7adSSuanming Mou 	uint32_t index; /* Pool index in container. */
352fa2d01c8SDong Zhou 	uint8_t type; /* Memory type behind the counter array. */
353f15db67dSMatan Azrad 	rte_spinlock_t sl; /* The pool lock. */
354f15db67dSMatan Azrad 	struct mlx5_counter_stats_raw *raw;
355f15db67dSMatan Azrad 	struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
3565382d28cSMatan Azrad };
3575382d28cSMatan Azrad 
3585382d28cSMatan Azrad struct mlx5_counter_stats_raw;
3595382d28cSMatan Azrad 
3605382d28cSMatan Azrad /* Memory management structure for group of counter statistics raws. */
3615382d28cSMatan Azrad struct mlx5_counter_stats_mem_mng {
3625382d28cSMatan Azrad 	LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
3635382d28cSMatan Azrad 	struct mlx5_counter_stats_raw *raws;
3645382d28cSMatan Azrad 	struct mlx5_devx_obj *dm;
3655382d28cSMatan Azrad 	struct mlx5dv_devx_umem *umem;
3665382d28cSMatan Azrad };
3675382d28cSMatan Azrad 
3685382d28cSMatan Azrad /* Raw memory structure for the counter statistics values of a pool. */
3695382d28cSMatan Azrad struct mlx5_counter_stats_raw {
3705382d28cSMatan Azrad 	LIST_ENTRY(mlx5_counter_stats_raw) next;
3715382d28cSMatan Azrad 	int min_dcs_id;
3725382d28cSMatan Azrad 	struct mlx5_counter_stats_mem_mng *mem_mng;
3735382d28cSMatan Azrad 	volatile struct flow_counter_stats *data;
3745382d28cSMatan Azrad };
3755382d28cSMatan Azrad 
3765382d28cSMatan Azrad TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
3775382d28cSMatan Azrad 
3785382d28cSMatan Azrad /* Container structure for counter pools. */
3795382d28cSMatan Azrad struct mlx5_pools_container {
380f15db67dSMatan Azrad 	rte_atomic16_t n_valid; /* Number of valid pools. */
3815382d28cSMatan Azrad 	uint16_t n; /* Number of pools. */
3825af61440SMatan Azrad 	rte_spinlock_t resize_sl; /* The resize lock. */
3835382d28cSMatan Azrad 	struct mlx5_counter_pools pool_list; /* Counter pool list. */
3845382d28cSMatan Azrad 	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
3855af61440SMatan Azrad 	struct mlx5_counter_stats_mem_mng *mem_mng;
3865382d28cSMatan Azrad 	/* Hold the memory management for the next allocated pools raws. */
3875382d28cSMatan Azrad };
3885382d28cSMatan Azrad 
3895382d28cSMatan Azrad /* Counter global management structure. */
3905382d28cSMatan Azrad struct mlx5_flow_counter_mng {
3915af61440SMatan Azrad 	struct mlx5_pools_container ccont[MLX5_CCONT_TYPE_MAX];
3925382d28cSMatan Azrad 	struct mlx5_counters flow_counters; /* Legacy flow counter list. */
393f15db67dSMatan Azrad 	uint8_t pending_queries;
394f15db67dSMatan Azrad 	uint8_t batch;
395f15db67dSMatan Azrad 	uint16_t pool_index;
396fa2d01c8SDong Zhou 	uint8_t age;
397f15db67dSMatan Azrad 	uint8_t query_thread_on;
3985382d28cSMatan Azrad 	LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
399f15db67dSMatan Azrad 	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
4005382d28cSMatan Azrad };
4015af61440SMatan Azrad 
402fa2d01c8SDong Zhou #define MLX5_AGE_EVENT_NEW		1
403fa2d01c8SDong Zhou #define MLX5_AGE_TRIGGER		2
404fa2d01c8SDong Zhou #define MLX5_AGE_SET(age_info, BIT) \
405fa2d01c8SDong Zhou 	((age_info)->flags |= (1 << (BIT)))
406fa2d01c8SDong Zhou #define MLX5_AGE_GET(age_info, BIT) \
407fa2d01c8SDong Zhou 	((age_info)->flags & (1 << (BIT)))
408fa2d01c8SDong Zhou #define GET_PORT_AGE_INFO(priv) \
409fa2d01c8SDong Zhou 	(&((priv)->sh->port[(priv)->ibv_port - 1].age_info))
4105382d28cSMatan Azrad 
411fa2d01c8SDong Zhou /* Aging information for per port. */
412fa2d01c8SDong Zhou struct mlx5_age_info {
413fa2d01c8SDong Zhou 	uint8_t flags; /*Indicate if is new event or need be trigered*/
414fa2d01c8SDong Zhou 	struct mlx5_counters aged_counters; /* Aged flow counter list. */
415fa2d01c8SDong Zhou 	rte_spinlock_t aged_sl; /* Aged flow counter list lock. */
416fa2d01c8SDong Zhou };
4175af61440SMatan Azrad 
41817e19bc4SViacheslav Ovsiienko /* Per port data of shared IB device. */
41917e19bc4SViacheslav Ovsiienko struct mlx5_ibv_shared_port {
42017e19bc4SViacheslav Ovsiienko 	uint32_t ih_port_id;
42123242063SMatan Azrad 	uint32_t devx_ih_port_id;
42217e19bc4SViacheslav Ovsiienko 	/*
42317e19bc4SViacheslav Ovsiienko 	 * Interrupt handler port_id. Used by shared interrupt
42417e19bc4SViacheslav Ovsiienko 	 * handler to find the corresponding rte_eth device
42517e19bc4SViacheslav Ovsiienko 	 * by IB port index. If value is equal or greater
42617e19bc4SViacheslav Ovsiienko 	 * RTE_MAX_ETHPORTS it means there is no subhandler
42717e19bc4SViacheslav Ovsiienko 	 * installed for specified IB port index.
42817e19bc4SViacheslav Ovsiienko 	 */
429fa2d01c8SDong Zhou 	struct mlx5_age_info age_info;
430fa2d01c8SDong Zhou 	/* Aging information for per port. */
43117e19bc4SViacheslav Ovsiienko };
43217e19bc4SViacheslav Ovsiienko 
433860897d2SBing Zhao /* Table key of the hash organization. */
434860897d2SBing Zhao union mlx5_flow_tbl_key {
435860897d2SBing Zhao 	struct {
436860897d2SBing Zhao 		/* Table ID should be at the lowest address. */
437860897d2SBing Zhao 		uint32_t table_id;	/**< ID of the table. */
438860897d2SBing Zhao 		uint16_t reserved;	/**< must be zero for comparison. */
439860897d2SBing Zhao 		uint8_t domain;		/**< 1 - FDB, 0 - NIC TX/RX. */
440860897d2SBing Zhao 		uint8_t direction;	/**< 1 - egress, 0 - ingress. */
441860897d2SBing Zhao 	};
442860897d2SBing Zhao 	uint64_t v64;			/**< full 64bits value of key */
443860897d2SBing Zhao };
444860897d2SBing Zhao 
44579e35d0dSViacheslav Ovsiienko /* Table structure. */
44679e35d0dSViacheslav Ovsiienko struct mlx5_flow_tbl_resource {
44779e35d0dSViacheslav Ovsiienko 	void *obj; /**< Pointer to DR table object. */
44879e35d0dSViacheslav Ovsiienko 	rte_atomic32_t refcnt; /**< Reference counter. */
44979e35d0dSViacheslav Ovsiienko };
45079e35d0dSViacheslav Ovsiienko 
451b67b4ecbSDekel Peled #define MLX5_MAX_TABLES UINT16_MAX
45246a5e6bcSSuanming Mou #define MLX5_FLOW_TABLE_LEVEL_METER (UINT16_MAX - 3)
45346a5e6bcSSuanming Mou #define MLX5_FLOW_TABLE_LEVEL_SUFFIX (UINT16_MAX - 2)
4543c84f34eSOri Kam #define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
4555e61bcddSViacheslav Ovsiienko /* Reserve the last two tables for metadata register copy. */
4565e61bcddSViacheslav Ovsiienko #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
457dd3c774fSViacheslav Ovsiienko #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
458dd3c774fSViacheslav Ovsiienko /* Tables for metering splits should be added here. */
459dd3c774fSViacheslav Ovsiienko #define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
460b67b4ecbSDekel Peled #define MLX5_MAX_TABLES_FDB UINT16_MAX
46179e35d0dSViacheslav Ovsiienko 
46221cae858SDekel Peled #define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
46321cae858SDekel Peled #define MLX5_DBR_SIZE 8
46421cae858SDekel Peled #define MLX5_DBR_PER_PAGE (MLX5_DBR_PAGE_SIZE / MLX5_DBR_SIZE)
46521cae858SDekel Peled #define MLX5_DBR_BITMAP_SIZE (MLX5_DBR_PER_PAGE / 64)
46621cae858SDekel Peled 
46721cae858SDekel Peled struct mlx5_devx_dbr_page {
46821cae858SDekel Peled 	/* Door-bell records, must be first member in structure. */
46921cae858SDekel Peled 	uint8_t dbrs[MLX5_DBR_PAGE_SIZE];
47021cae858SDekel Peled 	LIST_ENTRY(mlx5_devx_dbr_page) next; /* Pointer to the next element. */
47121cae858SDekel Peled 	struct mlx5dv_devx_umem *umem;
47221cae858SDekel Peled 	uint32_t dbr_count; /* Number of door-bell records in use. */
47321cae858SDekel Peled 	/* 1 bit marks matching door-bell is in use. */
47421cae858SDekel Peled 	uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE];
47521cae858SDekel Peled };
47621cae858SDekel Peled 
477d85c7b5eSOri Kam /* ID generation structure. */
478d85c7b5eSOri Kam struct mlx5_flow_id_pool {
479d85c7b5eSOri Kam 	uint32_t *free_arr; /**< Pointer to the a array of free values. */
480d85c7b5eSOri Kam 	uint32_t base_index;
481d85c7b5eSOri Kam 	/**< The next index that can be used without any free elements. */
482d85c7b5eSOri Kam 	uint32_t *curr; /**< Pointer to the index to pop. */
483d85c7b5eSOri Kam 	uint32_t *last; /**< Pointer to the last element in the empty arrray. */
48430a3687dSSuanming Mou 	uint32_t max_id; /**< Maximum id can be allocated from the pool. */
485d85c7b5eSOri Kam };
486d85c7b5eSOri Kam 
48717e19bc4SViacheslav Ovsiienko /*
48817e19bc4SViacheslav Ovsiienko  * Shared Infiniband device context for Master/Representors
48917e19bc4SViacheslav Ovsiienko  * which belong to same IB device with multiple IB ports.
49017e19bc4SViacheslav Ovsiienko  **/
4916e88bc42SOphir Munk struct mlx5_dev_ctx_shared {
4926e88bc42SOphir Munk 	LIST_ENTRY(mlx5_dev_ctx_shared) next;
49317e19bc4SViacheslav Ovsiienko 	uint32_t refcnt;
49417e19bc4SViacheslav Ovsiienko 	uint32_t devx:1; /* Opened with DV. */
49517e19bc4SViacheslav Ovsiienko 	uint32_t max_port; /* Maximal IB device port index. */
496*f44b09f9SOphir Munk 	void *ctx; /* Verbs/DV/DevX context. */
49717e19bc4SViacheslav Ovsiienko 	struct ibv_pd *pd; /* Protection Domain. */
498b9d86122SDekel Peled 	uint32_t pdn; /* Protection Domain number. */
4998791ff42SDekel Peled 	uint32_t tdn; /* Transport Domain number. */
50017e19bc4SViacheslav Ovsiienko 	char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
50117e19bc4SViacheslav Ovsiienko 	char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
50217e19bc4SViacheslav Ovsiienko 	struct ibv_device_attr_ex device_attr; /* Device properties. */
5036e88bc42SOphir Munk 	LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
504ccb38153SViacheslav Ovsiienko 	/**< Called by memory event callback. */
505b8dc6b0eSVu Pham 	struct mlx5_mr_share_cache share_cache;
506b2177648SViacheslav Ovsiienko 	/* Shared DV/DR flow data section. */
50779e35d0dSViacheslav Ovsiienko 	pthread_mutex_t dv_mutex; /* DV context mutex. */
50839139371SViacheslav Ovsiienko 	uint32_t dv_meta_mask; /* flow META metadata supported mask. */
50939139371SViacheslav Ovsiienko 	uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
51039139371SViacheslav Ovsiienko 	uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */
511b2177648SViacheslav Ovsiienko 	uint32_t dv_refcnt; /* DV/DR data reference counter. */
512d1e64fbfSOri Kam 	void *fdb_domain; /* FDB Direct Rules name space handle. */
513d1e64fbfSOri Kam 	void *rx_domain; /* RX Direct Rules name space handle. */
514d1e64fbfSOri Kam 	void *tx_domain; /* TX Direct Rules name space handle. */
515860897d2SBing Zhao 	struct mlx5_hlist *flow_tbls;
516860897d2SBing Zhao 	/* Direct Rules tables for FDB, NIC TX+RX */
51734fa7c02SOri Kam 	void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
518b41e47daSMoti Haimovsky 	void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
519014d1cbeSSuanming Mou 	uint32_t encaps_decaps; /* Encap/decap action indexed memory list. */
52079e35d0dSViacheslav Ovsiienko 	LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
521e484e403SBing Zhao 	struct mlx5_hlist *tag_table;
522f3faf9eaSSuanming Mou 	uint32_t port_id_action_list; /* List of port ID actions. */
5238acf8ac9SSuanming Mou 	uint32_t push_vlan_action_list; /* List of push VLAN actions. */
5245382d28cSMatan Azrad 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
525014d1cbeSSuanming Mou 	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
526014d1cbeSSuanming Mou 	/* Memory Pool for mlx5 flow resources. */
527b2177648SViacheslav Ovsiienko 	/* Shared interrupt handler section. */
52817e19bc4SViacheslav Ovsiienko 	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
529f15db67dSMatan Azrad 	struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
530f15db67dSMatan Azrad 	struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
531ae18a1aeSOri Kam 	struct mlx5_devx_obj *tis; /* TIS object. */
532ae18a1aeSOri Kam 	struct mlx5_devx_obj *td; /* Transport domain. */
533d85c7b5eSOri Kam 	struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
53417e19bc4SViacheslav Ovsiienko 	struct mlx5_ibv_shared_port port[]; /* per device port data array. */
53517e19bc4SViacheslav Ovsiienko };
53617e19bc4SViacheslav Ovsiienko 
537120dc4a7SYongseok Koh /* Per-process private structure. */
538120dc4a7SYongseok Koh struct mlx5_proc_priv {
539120dc4a7SYongseok Koh 	size_t uar_table_sz;
540120dc4a7SYongseok Koh 	/* Size of UAR register table. */
541120dc4a7SYongseok Koh 	void *uar_table[];
542120dc4a7SYongseok Koh 	/* Table of UAR registers for each process. */
543120dc4a7SYongseok Koh };
544120dc4a7SYongseok Koh 
5453bd26b23SSuanming Mou /* MTR profile list. */
5463bd26b23SSuanming Mou TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
5473f373f35SSuanming Mou /* MTR list. */
5483f373f35SSuanming Mou TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
5493bd26b23SSuanming Mou 
550120dc4a7SYongseok Koh #define MLX5_PROC_PRIV(port_id) \
551120dc4a7SYongseok Koh 	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
552120dc4a7SYongseok Koh 
553dbeba4cfSThomas Monjalon struct mlx5_priv {
554df428ceeSYongseok Koh 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
5556e88bc42SOphir Munk 	struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
55617e19bc4SViacheslav Ovsiienko 	uint32_t ibv_port; /* IB device port number. */
55746e10a4cSViacheslav Ovsiienko 	struct rte_pci_device *pci_dev; /* Backend PCI device. */
5586d13ea8eSOlivier Matz 	struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
559ccdcba53SNélio Laranjeiro 	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
560ccdcba53SNélio Laranjeiro 	/* Bit-field of MAC addresses owned by the PMD. */
561e9086978SAdrien Mazarguil 	uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
562e9086978SAdrien Mazarguil 	unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
563771fa900SAdrien Mazarguil 	/* Device properties. */
564771fa900SAdrien Mazarguil 	uint16_t mtu; /* Configured MTU. */
56551d5f8ecSNélio Laranjeiro 	unsigned int isolated:1; /* Whether isolated mode is enabled. */
5662b730263SAdrien Mazarguil 	unsigned int representor:1; /* Device is a port representor. */
567299d7dc2SViacheslav Ovsiienko 	unsigned int master:1; /* Device is a E-Switch master. */
568b2177648SViacheslav Ovsiienko 	unsigned int dr_shared:1; /* DV/DR data is shared. */
56931538ef6SMatan Azrad 	unsigned int counter_fallback:1; /* Use counter fallback management. */
5706bc327b9SSuanming Mou 	unsigned int mtr_en:1; /* Whether support meter. */
571792e749eSSuanming Mou 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
5722b730263SAdrien Mazarguil 	uint16_t domain_id; /* Switch domain identifier. */
573299d7dc2SViacheslav Ovsiienko 	uint16_t vport_id; /* Associated VF vport index (if any). */
574d5c06b1bSViacheslav Ovsiienko 	uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
575d5c06b1bSViacheslav Ovsiienko 	uint32_t vport_meta_mask; /* Used for vport index field match mask. */
5762b730263SAdrien Mazarguil 	int32_t representor_id; /* Port representor identifier. */
577bee57a0aSViacheslav Ovsiienko 	int32_t pf_bond; /* >=0 means PF index in bonding configuration. */
578fa2e14d4SViacheslav Ovsiienko 	unsigned int if_index; /* Associated kernel network device index. */
5792e22920bSAdrien Mazarguil 	/* RX/TX queues. */
5802e22920bSAdrien Mazarguil 	unsigned int rxqs_n; /* RX queues array size. */
5812e22920bSAdrien Mazarguil 	unsigned int txqs_n; /* TX queues array size. */
58278142aacSNélio Laranjeiro 	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
583991b04f6SNélio Laranjeiro 	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
5847d6bf6b8SYongseok Koh 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
58529c1d8bbSNélio Laranjeiro 	struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
586634efbc2SNelio Laranjeiro 	unsigned int (*reta_idx)[]; /* RETA index table. */
587634efbc2SNelio Laranjeiro 	unsigned int reta_idx_n; /* RETA index size. */
58878be8852SNelio Laranjeiro 	struct mlx5_drop drop_queue; /* Flow drop queues. */
589ab612adcSSuanming Mou 	uint32_t flows; /* RTE Flow rules. */
590ab612adcSSuanming Mou 	uint32_t ctrl_flows; /* Control flow rules. */
591e7bfa359SBing Zhao 	void *inter_flows; /* Intermediate resources for flow creation. */
592e745f900SSuanming Mou 	void *rss_desc; /* Intermediate rss description resources. */
593e7bfa359SBing Zhao 	int flow_idx; /* Intermediate device flow index. */
5943ac3d823SBing Zhao 	int flow_nested_idx; /* Intermediate device flow index, nested. */
595a1366b1aSNélio Laranjeiro 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
59693403560SDekel Peled 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
597772dc0ebSSuanming Mou 	uint32_t hrxqs; /* Verbs Hash Rx queues. */
5986e78005aSNélio Laranjeiro 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
599894c4a8eSOri Kam 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
60015c80a12SDekel Peled 	/* Indirection tables. */
60115c80a12SDekel Peled 	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
602684b9a1bSOri Kam 	/* Pointer to next element. */
603684b9a1bSOri Kam 	rte_atomic32_t refcnt; /**< Reference counter. */
604684b9a1bSOri Kam 	struct ibv_flow_action *verbs_action;
605684b9a1bSOri Kam 	/**< Verbs modify header action object. */
606684b9a1bSOri Kam 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
607ee39fe82SMatan Azrad 	uint8_t max_lro_msg_size;
608cbb66daaSOri Kam 	/* Tags resources cache. */
60975ef62a9SNélio Laranjeiro 	uint32_t link_speed_capa; /* Link speed capabilities. */
610a4193ae3SShahaf Shuler 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
611ce9494d7STom Barbette 	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
6127fe24446SShahaf Shuler 	struct mlx5_dev_config config; /* Device configuration. */
613d10b09dbSOlivier Matz 	struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
614d10b09dbSOlivier Matz 	/* Context for Verbs allocator. */
61526c08b97SAdrien Mazarguil 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
61626c08b97SAdrien Mazarguil 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
61721cae858SDekel Peled 	LIST_HEAD(dbrpage, mlx5_devx_dbr_page) dbrpgs; /* Door-bell pages. */
618c12671e3SMatan Azrad 	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
61971e254bcSViacheslav Ovsiienko 	struct mlx5_flow_id_pool *qrss_id_pool;
620dd3c774fSViacheslav Ovsiienko 	struct mlx5_hlist *mreg_cp_tbl;
621dd3c774fSViacheslav Ovsiienko 	/* Hash table of Rx metadata register copy table. */
62227efd5deSSuanming Mou 	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
62327efd5deSSuanming Mou 	uint8_t mtr_color_reg; /* Meter color match REG_C. */
6243bd26b23SSuanming Mou 	struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
6253f373f35SSuanming Mou 	struct mlx5_flow_meters flow_meters; /* MTR list. */
6266bf10ab6SMoti Haimovsky #ifndef RTE_ARCH_64
6276bf10ab6SMoti Haimovsky 	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
6286bf10ab6SMoti Haimovsky 	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
6296bf10ab6SMoti Haimovsky 	/* UAR same-page access control required in 32bit implementations. */
6306bf10ab6SMoti Haimovsky #endif
63163bd1629SOri Kam 	uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
632fbde4331SMatan Azrad 	uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
633a4de9586SVu Pham 	struct mlx5_mp_id mp_id; /* ID of a multi-process process */
634c2ddde79SWentao Cui 	LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
635771fa900SAdrien Mazarguil };
636771fa900SAdrien Mazarguil 
637df428ceeSYongseok Koh #define PORT_ID(priv) ((priv)->dev_data->port_id)
638df428ceeSYongseok Koh #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
639df428ceeSYongseok Koh 
6404d803a72SOlga Shern /* mlx5.c */
6414d803a72SOlga Shern 
6424d803a72SOlga Shern int mlx5_getenv_int(const char *);
643120dc4a7SYongseok Koh int mlx5_proc_priv_init(struct rte_eth_dev *dev);
64421cae858SDekel Peled int64_t mlx5_get_dbr(struct rte_eth_dev *dev,
64521cae858SDekel Peled 		     struct mlx5_devx_dbr_page **dbr_page);
64621cae858SDekel Peled int32_t mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id,
64721cae858SDekel Peled 			 uint64_t offset);
648c9ba7523SRaslan Darawsheh int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
649c9ba7523SRaslan Darawsheh 			      struct rte_eth_udp_tunnel *udp_tunnel);
650fbc83412SViacheslav Ovsiienko uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev);
651f7e95215SViacheslav Ovsiienko 
652f7e95215SViacheslav Ovsiienko /* Macro to iterate over all valid ports for mlx5 driver. */
653fbc83412SViacheslav Ovsiienko #define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
654fbc83412SViacheslav Ovsiienko 	for (port_id = mlx5_eth_find_next(0, pci_dev); \
655f7e95215SViacheslav Ovsiienko 	     port_id < RTE_MAX_ETHPORTS; \
656fbc83412SViacheslav Ovsiienko 	     port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
6574d803a72SOlga Shern 
658771fa900SAdrien Mazarguil /* mlx5_ethdev.c */
659771fa900SAdrien Mazarguil 
660af4f09f2SNélio Laranjeiro int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
6619c2bbd04SViacheslav Ovsiienko int mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE]);
6623f8cb05dSAdrien Mazarguil unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
6637dd7be29SShahaf Shuler int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
664af4f09f2SNélio Laranjeiro int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
665af4f09f2SNélio Laranjeiro int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
666af4f09f2SNélio Laranjeiro 		   unsigned int flags);
6673692c7ecSNélio Laranjeiro int mlx5_dev_configure(struct rte_eth_dev *dev);
668bdad90d1SIvan Ilchenko int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
669e571ad55STom Barbette int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
670714bf46eSThomas Monjalon int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
67178a38edfSJianfeng Tan const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
6723692c7ecSNélio Laranjeiro int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
673af4f09f2SNélio Laranjeiro int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
6743692c7ecSNélio Laranjeiro int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
6753692c7ecSNélio Laranjeiro int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
6763692c7ecSNélio Laranjeiro 			   struct rte_eth_fc_conf *fc_conf);
6773692c7ecSNélio Laranjeiro int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
6783692c7ecSNélio Laranjeiro 			   struct rte_eth_fc_conf *fc_conf);
6793692c7ecSNélio Laranjeiro void mlx5_dev_link_status_handler(void *arg);
680af4f09f2SNélio Laranjeiro void mlx5_dev_interrupt_handler(void *arg);
681f15db67dSMatan Azrad void mlx5_dev_interrupt_handler_devx(void *arg);
682af4f09f2SNélio Laranjeiro void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
683af4f09f2SNélio Laranjeiro void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
68462072098SOr Ami int mlx5_set_link_down(struct rte_eth_dev *dev);
68562072098SOr Ami int mlx5_set_link_up(struct rte_eth_dev *dev);
686d3e0f392SMatan Azrad int mlx5_is_removed(struct rte_eth_dev *dev);
687af4f09f2SNélio Laranjeiro eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
688af4f09f2SNélio Laranjeiro eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
6895e61bcddSViacheslav Ovsiienko struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid);
69009a16bcaSViacheslav Ovsiienko struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev);
691f872b4b9SNelio Laranjeiro int mlx5_sysfs_switch_info(unsigned int ifindex,
692f872b4b9SNelio Laranjeiro 			   struct mlx5_switch_info *info);
69330a86157SViacheslav Ovsiienko void mlx5_sysfs_check_switch_info(bool device_dir,
69430a86157SViacheslav Ovsiienko 				  struct mlx5_switch_info *switch_info);
69530a86157SViacheslav Ovsiienko void mlx5_translate_port_name(const char *port_name_in,
696b2f3a381SDekel Peled 			      struct mlx5_switch_info *port_info_out);
6975897ac13SViacheslav Ovsiienko void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
6985897ac13SViacheslav Ovsiienko 				   rte_intr_callback_fn cb_fn, void *cb_arg);
6998a6a09f8SDekel Peled int mlx5_get_module_info(struct rte_eth_dev *dev,
7008a6a09f8SDekel Peled 			 struct rte_eth_dev_module_info *modinfo);
7018a6a09f8SDekel Peled int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
7028a6a09f8SDekel Peled 			   struct rte_dev_eeprom_info *info);
703b6b3bf86SOri Kam int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
704b6b3bf86SOri Kam 			 struct rte_eth_hairpin_cap *cap);
70563bd1629SOri Kam int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev);
70663bd1629SOri Kam 
707771fa900SAdrien Mazarguil /* mlx5_mac.c */
708771fa900SAdrien Mazarguil 
70935b2d13fSOlivier Matz int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
7103692c7ecSNélio Laranjeiro void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
7116d13ea8eSOlivier Matz int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
7123692c7ecSNélio Laranjeiro 		      uint32_t index, uint32_t vmdq);
713c12671e3SMatan Azrad struct mlx5_nl_vlan_vmwa_context *mlx5_vlan_vmwa_init
714c12671e3SMatan Azrad 				    (struct rte_eth_dev *dev, uint32_t ifindex);
7156d13ea8eSOlivier Matz int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
716e0586a8dSNélio Laranjeiro int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
7176d13ea8eSOlivier Matz 			struct rte_ether_addr *mc_addr_set,
7186d13ea8eSOlivier Matz 			uint32_t nb_mc_addr);
719771fa900SAdrien Mazarguil 
7202f97422eSNelio Laranjeiro /* mlx5_rss.c */
7212f97422eSNelio Laranjeiro 
7223692c7ecSNélio Laranjeiro int mlx5_rss_hash_update(struct rte_eth_dev *dev,
7233692c7ecSNélio Laranjeiro 			 struct rte_eth_rss_conf *rss_conf);
7243692c7ecSNélio Laranjeiro int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
7253692c7ecSNélio Laranjeiro 			   struct rte_eth_rss_conf *rss_conf);
726af4f09f2SNélio Laranjeiro int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
7273692c7ecSNélio Laranjeiro int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
7283692c7ecSNélio Laranjeiro 			    struct rte_eth_rss_reta_entry64 *reta_conf,
7293692c7ecSNélio Laranjeiro 			    uint16_t reta_size);
7303692c7ecSNélio Laranjeiro int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
7313692c7ecSNélio Laranjeiro 			     struct rte_eth_rss_reta_entry64 *reta_conf,
7323692c7ecSNélio Laranjeiro 			     uint16_t reta_size);
7332f97422eSNelio Laranjeiro 
7341bdbe1afSAdrien Mazarguil /* mlx5_rxmode.c */
7351bdbe1afSAdrien Mazarguil 
7369039c812SAndrew Rybchenko int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
7379039c812SAndrew Rybchenko int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
738ca041cd4SIvan Ilchenko int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
739ca041cd4SIvan Ilchenko int mlx5_allmulticast_disable(struct rte_eth_dev *dev);
7401bdbe1afSAdrien Mazarguil 
74187011737SAdrien Mazarguil /* mlx5_stats.c */
74287011737SAdrien Mazarguil 
743ce9494d7STom Barbette void mlx5_stats_init(struct rte_eth_dev *dev);
7443692c7ecSNélio Laranjeiro int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
7459970a9adSIgor Romanov int mlx5_stats_reset(struct rte_eth_dev *dev);
746af4f09f2SNélio Laranjeiro int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
747af4f09f2SNélio Laranjeiro 		    unsigned int n);
7489970a9adSIgor Romanov int mlx5_xstats_reset(struct rte_eth_dev *dev);
749af4f09f2SNélio Laranjeiro int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
7503692c7ecSNélio Laranjeiro 			  struct rte_eth_xstat_name *xstats_names,
7513692c7ecSNélio Laranjeiro 			  unsigned int n);
75287011737SAdrien Mazarguil 
753e9086978SAdrien Mazarguil /* mlx5_vlan.c */
754e9086978SAdrien Mazarguil 
7553692c7ecSNélio Laranjeiro int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
7563692c7ecSNélio Laranjeiro void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
7573692c7ecSNélio Laranjeiro int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
758c12671e3SMatan Azrad void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *ctx);
759c12671e3SMatan Azrad void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
760c12671e3SMatan Azrad 			    struct mlx5_vf_vlan *vf_vlan);
761c12671e3SMatan Azrad void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
762c12671e3SMatan Azrad 			    struct mlx5_vf_vlan *vf_vlan);
763e9086978SAdrien Mazarguil 
764e60fbd5bSAdrien Mazarguil /* mlx5_trigger.c */
765e60fbd5bSAdrien Mazarguil 
7663692c7ecSNélio Laranjeiro int mlx5_dev_start(struct rte_eth_dev *dev);
7673692c7ecSNélio Laranjeiro void mlx5_dev_stop(struct rte_eth_dev *dev);
768af4f09f2SNélio Laranjeiro int mlx5_traffic_enable(struct rte_eth_dev *dev);
769925061b5SNélio Laranjeiro void mlx5_traffic_disable(struct rte_eth_dev *dev);
7703692c7ecSNélio Laranjeiro int mlx5_traffic_restart(struct rte_eth_dev *dev);
771e60fbd5bSAdrien Mazarguil 
7720d356350SNélio Laranjeiro /* mlx5_flow.c */
7730d356350SNélio Laranjeiro 
7745e61bcddSViacheslav Ovsiienko int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev);
7755e61bcddSViacheslav Ovsiienko bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev);
7762815702bSNelio Laranjeiro int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
77778be8852SNelio Laranjeiro void mlx5_flow_print(struct rte_flow *flow);
7783692c7ecSNélio Laranjeiro int mlx5_flow_validate(struct rte_eth_dev *dev,
7793692c7ecSNélio Laranjeiro 		       const struct rte_flow_attr *attr,
7803692c7ecSNélio Laranjeiro 		       const struct rte_flow_item items[],
7813692c7ecSNélio Laranjeiro 		       const struct rte_flow_action actions[],
7823692c7ecSNélio Laranjeiro 		       struct rte_flow_error *error);
7833692c7ecSNélio Laranjeiro struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
7843692c7ecSNélio Laranjeiro 				  const struct rte_flow_attr *attr,
7853692c7ecSNélio Laranjeiro 				  const struct rte_flow_item items[],
7863692c7ecSNélio Laranjeiro 				  const struct rte_flow_action actions[],
7873692c7ecSNélio Laranjeiro 				  struct rte_flow_error *error);
7883692c7ecSNélio Laranjeiro int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
7893692c7ecSNélio Laranjeiro 		      struct rte_flow_error *error);
790ab612adcSSuanming Mou void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);
7913692c7ecSNélio Laranjeiro int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
7923692c7ecSNélio Laranjeiro int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
793fb8fd96dSDeclan Doherty 		    const struct rte_flow_action *action, void *data,
7943692c7ecSNélio Laranjeiro 		    struct rte_flow_error *error);
7953692c7ecSNélio Laranjeiro int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
7963692c7ecSNélio Laranjeiro 		      struct rte_flow_error *error);
7973692c7ecSNélio Laranjeiro int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
7983692c7ecSNélio Laranjeiro 			 enum rte_filter_type filter_type,
7993692c7ecSNélio Laranjeiro 			 enum rte_filter_op filter_op,
8003692c7ecSNélio Laranjeiro 			 void *arg);
801ab612adcSSuanming Mou int mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list);
802ab612adcSSuanming Mou void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list);
8038db7e3b6SBing Zhao int mlx5_flow_start_default(struct rte_eth_dev *dev);
8048db7e3b6SBing Zhao void mlx5_flow_stop_default(struct rte_eth_dev *dev);
805e7bfa359SBing Zhao void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
806e7bfa359SBing Zhao void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);
807af4f09f2SNélio Laranjeiro int mlx5_flow_verify(struct rte_eth_dev *dev);
8083c84f34eSOri Kam int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
809af4f09f2SNélio Laranjeiro int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
810af4f09f2SNélio Laranjeiro 			struct rte_flow_item_eth *eth_spec,
811af4f09f2SNélio Laranjeiro 			struct rte_flow_item_eth *eth_mask,
812af4f09f2SNélio Laranjeiro 			struct rte_flow_item_vlan *vlan_spec,
813af4f09f2SNélio Laranjeiro 			struct rte_flow_item_vlan *vlan_mask);
814af4f09f2SNélio Laranjeiro int mlx5_ctrl_flow(struct rte_eth_dev *dev,
815af4f09f2SNélio Laranjeiro 		   struct rte_flow_item_eth *eth_spec,
816af4f09f2SNélio Laranjeiro 		   struct rte_flow_item_eth *eth_mask);
817b67b4ecbSDekel Peled struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
818af4f09f2SNélio Laranjeiro int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
819af4f09f2SNélio Laranjeiro void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
8206e88bc42SOphir Munk void mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
821f15db67dSMatan Azrad 				       uint64_t async_id, int status);
8226e88bc42SOphir Munk void mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh);
823f15db67dSMatan Azrad void mlx5_flow_query_alarm(void *arg);
824956d5c74SSuanming Mou uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
825956d5c74SSuanming Mou void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
826956d5c74SSuanming Mou int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
827e189f55cSSuanming Mou 		       bool clear, uint64_t *pkts, uint64_t *bytes);
828f6d72024SXiaoyu Min int mlx5_flow_dev_dump(struct rte_eth_dev *dev, FILE *file,
829f6d72024SXiaoyu Min 		       struct rte_flow_error *error);
8306c55b622SAlexander Kozyrev void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev);
831fa2d01c8SDong Zhou int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
832fa2d01c8SDong Zhou 			uint32_t nb_contexts, struct rte_flow_error *error);
8330d356350SNélio Laranjeiro 
8349a8ab29bSYongseok Koh /* mlx5_mp.c */
835a4de9586SVu Pham int mlx5_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer);
836a4de9586SVu Pham int mlx5_mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer);
8372aac5b5dSYongseok Koh void mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev);
8382aac5b5dSYongseok Koh void mlx5_mp_req_stop_rxtx(struct rte_eth_dev *dev);
839f8b9a3baSXueming Li 
840e6cdc54cSXueming Li /* mlx5_socket.c */
841e6cdc54cSXueming Li 
842e6cdc54cSXueming Li int mlx5_pmd_socket_init(void);
843e6cdc54cSXueming Li 
844d740eb50SSuanming Mou /* mlx5_flow_meter.c */
845d740eb50SSuanming Mou 
846d740eb50SSuanming Mou int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg);
8473f373f35SSuanming Mou struct mlx5_flow_meter *mlx5_flow_meter_find(struct mlx5_priv *priv,
8483f373f35SSuanming Mou 					     uint32_t meter_id);
849266e9f3dSSuanming Mou struct mlx5_flow_meter *mlx5_flow_meter_attach
850266e9f3dSSuanming Mou 					(struct mlx5_priv *priv,
851266e9f3dSSuanming Mou 					 uint32_t meter_id,
852266e9f3dSSuanming Mou 					 const struct rte_flow_attr *attr,
853266e9f3dSSuanming Mou 					 struct rte_flow_error *error);
854266e9f3dSSuanming Mou void mlx5_flow_meter_detach(struct mlx5_flow_meter *fm);
855d740eb50SSuanming Mou 
856*f44b09f9SOphir Munk /* mlx5_os.c */
857*f44b09f9SOphir Munk const char *mlx5_os_get_ctx_device_name(void *ctx);
858*f44b09f9SOphir Munk const char *mlx5_os_get_ctx_device_path(void *ctx);
859*f44b09f9SOphir Munk 
860771fa900SAdrien Mazarguil #endif /* RTE_PMD_MLX5_H_ */
861