1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
7 
8 #include <netinet/in.h>
9 #include <sys/queue.h>
10 #include <stdalign.h>
11 #include <stdint.h>
12 #include <string.h>
13 
14 /* Verbs header. */
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic ignored "-Wpedantic"
18 #endif
19 #include <infiniband/verbs.h>
20 #ifdef PEDANTIC
21 #pragma GCC diagnostic error "-Wpedantic"
22 #endif
23 
24 #include <rte_atomic.h>
25 #include <rte_alarm.h>
26 #include <rte_mtr.h>
27 
28 #include <mlx5_prm.h>
29 
30 #include "mlx5.h"
31 
32 /* Private rte flow items. */
33 enum mlx5_rte_flow_item_type {
34 	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
35 	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
36 	MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
37 	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
38 };
39 
40 /* Private (internal) rte flow actions. */
41 enum mlx5_rte_flow_action_type {
42 	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
43 	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
44 	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
45 	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
46 	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
47 };
48 
49 /* Matches on selected register. */
50 struct mlx5_rte_flow_item_tag {
51 	enum modify_reg id;
52 	uint32_t data;
53 };
54 
55 /* Modify selected register. */
56 struct mlx5_rte_flow_action_set_tag {
57 	enum modify_reg id;
58 	uint32_t data;
59 };
60 
61 struct mlx5_flow_action_copy_mreg {
62 	enum modify_reg dst;
63 	enum modify_reg src;
64 };
65 
66 /* Matches on source queue. */
67 struct mlx5_rte_flow_item_tx_queue {
68 	uint32_t queue;
69 };
70 
71 /* Feature name to allocate metadata register. */
72 enum mlx5_feature_name {
73 	MLX5_HAIRPIN_RX,
74 	MLX5_HAIRPIN_TX,
75 	MLX5_METADATA_RX,
76 	MLX5_METADATA_TX,
77 	MLX5_METADATA_FDB,
78 	MLX5_FLOW_MARK,
79 	MLX5_APP_TAG,
80 	MLX5_COPY_MARK,
81 	MLX5_MTR_COLOR,
82 	MLX5_MTR_SFX,
83 };
84 
85 /* Pattern outer Layer bits. */
86 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
87 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
88 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
89 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
90 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
91 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
92 
93 /* Pattern inner Layer bits. */
94 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
95 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
96 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
97 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
98 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
99 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
100 
101 /* Pattern tunnel Layer bits. */
102 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
103 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
104 #define MLX5_FLOW_LAYER_GRE (1u << 14)
105 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
106 /* List of tunnel Layer bits continued below. */
107 
108 /* General pattern items bits. */
109 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
110 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
111 #define MLX5_FLOW_ITEM_TAG (1u << 18)
112 #define MLX5_FLOW_ITEM_MARK (1u << 19)
113 
114 /* Pattern MISC bits. */
115 #define MLX5_FLOW_LAYER_ICMP (1u << 20)
116 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
117 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
118 
119 /* Pattern tunnel Layer bits (continued). */
120 #define MLX5_FLOW_LAYER_IPIP (1u << 23)
121 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
122 #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
123 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
124 
125 /* Queue items. */
126 #define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)
127 
128 /* Pattern tunnel Layer bits (continued). */
129 #define MLX5_FLOW_LAYER_GTP (1u << 28)
130 
131 /* Outer Masks. */
132 #define MLX5_FLOW_LAYER_OUTER_L3 \
133 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
134 #define MLX5_FLOW_LAYER_OUTER_L4 \
135 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
136 #define MLX5_FLOW_LAYER_OUTER \
137 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
138 	 MLX5_FLOW_LAYER_OUTER_L4)
139 
140 /* Tunnel Masks. */
141 #define MLX5_FLOW_LAYER_TUNNEL \
142 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
143 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
144 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
145 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
146 
147 /* Inner Masks. */
148 #define MLX5_FLOW_LAYER_INNER_L3 \
149 	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
150 #define MLX5_FLOW_LAYER_INNER_L4 \
151 	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
152 #define MLX5_FLOW_LAYER_INNER \
153 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
154 	 MLX5_FLOW_LAYER_INNER_L4)
155 
156 /* Layer Masks. */
157 #define MLX5_FLOW_LAYER_L2 \
158 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
159 #define MLX5_FLOW_LAYER_L3_IPV4 \
160 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
161 #define MLX5_FLOW_LAYER_L3_IPV6 \
162 	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
163 #define MLX5_FLOW_LAYER_L3 \
164 	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
165 #define MLX5_FLOW_LAYER_L4 \
166 	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
167 
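/*
 * Illustrative sketch, not part of the original header: the MLX5_FLOW_LAYER_*
 * bits accumulate into a 64-bit item-flags word while a pattern is parsed.
 * The helpers below are hypothetical and only show how the masks above are
 * meant to be combined and tested.
 */
static inline int
mlx5_flow_example_is_tunneled(uint64_t item_flags)
{
	/* Any tunnel bit set means inner headers may follow. */
	return (item_flags & MLX5_FLOW_LAYER_TUNNEL) != 0;
}

static inline int
mlx5_flow_example_has_outer_l3_l4(uint64_t item_flags)
{
	/* Outer L3 and L4 of any type (IPv4/IPv6, UDP/TCP). */
	return (item_flags & MLX5_FLOW_LAYER_OUTER_L3) &&
	       (item_flags & MLX5_FLOW_LAYER_OUTER_L4);
}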
168 /* Actions */
169 #define MLX5_FLOW_ACTION_DROP (1u << 0)
170 #define MLX5_FLOW_ACTION_QUEUE (1u << 1)
171 #define MLX5_FLOW_ACTION_RSS (1u << 2)
172 #define MLX5_FLOW_ACTION_FLAG (1u << 3)
173 #define MLX5_FLOW_ACTION_MARK (1u << 4)
174 #define MLX5_FLOW_ACTION_COUNT (1u << 5)
175 #define MLX5_FLOW_ACTION_PORT_ID (1u << 6)
176 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7)
177 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8)
178 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9)
179 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10)
180 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11)
181 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12)
182 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13)
183 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14)
184 #define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15)
185 #define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16)
186 #define MLX5_FLOW_ACTION_JUMP (1u << 17)
187 #define MLX5_FLOW_ACTION_SET_TTL (1u << 18)
188 #define MLX5_FLOW_ACTION_DEC_TTL (1u << 19)
189 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20)
190 #define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21)
191 #define MLX5_FLOW_ACTION_ENCAP (1u << 22)
192 #define MLX5_FLOW_ACTION_DECAP (1u << 23)
193 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1u << 24)
194 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1u << 25)
195 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1u << 26)
196 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1u << 27)
197 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
198 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
199 #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
200 #define MLX5_FLOW_ACTION_METER (1ull << 31)
201 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
202 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
203 #define MLX5_FLOW_ACTION_AGE (1ull << 34)
204 #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
205 
206 #define MLX5_FLOW_FATE_ACTIONS \
207 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
208 	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
209 	 MLX5_FLOW_ACTION_DEFAULT_MISS)
210 
211 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
212 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
213 	 MLX5_FLOW_ACTION_JUMP)
214 
215 
216 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
217 				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
218 				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
219 				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
220 				      MLX5_FLOW_ACTION_SET_TP_SRC | \
221 				      MLX5_FLOW_ACTION_SET_TP_DST | \
222 				      MLX5_FLOW_ACTION_SET_TTL | \
223 				      MLX5_FLOW_ACTION_DEC_TTL | \
224 				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
225 				      MLX5_FLOW_ACTION_SET_MAC_DST | \
226 				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
227 				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
228 				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
229 				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
230 				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
231 				      MLX5_FLOW_ACTION_SET_TAG | \
232 				      MLX5_FLOW_ACTION_MARK_EXT | \
233 				      MLX5_FLOW_ACTION_SET_META | \
234 				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
235 				      MLX5_FLOW_ACTION_SET_IPV6_DSCP)
236 
237 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
238 				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
239 
240 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
241 
242 #ifndef IPPROTO_MPLS
243 #define IPPROTO_MPLS 137
244 #endif
245 
246 /* UDP port number for MPLS */
247 #define MLX5_UDP_PORT_MPLS 6635
248 
249 /* UDP port numbers for VxLAN. */
250 #define MLX5_UDP_PORT_VXLAN 4789
251 #define MLX5_UDP_PORT_VXLAN_GPE 4790
252 
253 /* UDP port numbers for GENEVE. */
254 #define MLX5_UDP_PORT_GENEVE 6081
255 
256 /* Priority reserved for default flows. */
257 #define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
258 
259 /*
260  * Number of sub priorities.
261  * For each kind of pattern matching, i.e. L2, L3 and L4, to have correct
262  * matching on the NIC (firmware dependent), L4 must have the highest
263  * priority, followed by L3 and finally L2.
264  */
265 #define MLX5_PRIORITY_MAP_L2 2
266 #define MLX5_PRIORITY_MAP_L3 1
267 #define MLX5_PRIORITY_MAP_L4 0
268 #define MLX5_PRIORITY_MAP_MAX 3
269 
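/*
 * Illustrative sketch, not part of the original header: selecting the
 * sub-priority from the deepest layer present in the item flags, per the
 * comment above (a lower value maps to a higher priority on the NIC).
 * The helper is hypothetical.
 */
static inline uint32_t
mlx5_flow_example_subpriority(uint64_t item_flags)
{
	if (item_flags & MLX5_FLOW_LAYER_L4)
		return MLX5_PRIORITY_MAP_L4;
	if (item_flags & MLX5_FLOW_LAYER_L3)
		return MLX5_PRIORITY_MAP_L3;
	return MLX5_PRIORITY_MAP_L2;
}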
270 /* Valid layer type for IPV4 RSS. */
271 #define MLX5_IPV4_LAYER_TYPES \
272 	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
273 	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
274 	 ETH_RSS_NONFRAG_IPV4_OTHER)
275 
276 /* IBV hash source bits for IPV4. */
277 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
278 
279 /* Valid layer type for IPV6 RSS. */
280 #define MLX5_IPV6_LAYER_TYPES \
281 	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
282 	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
283 	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
284 
285 /* IBV hash source bits for IPV6. */
286 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
287 
288 /* IBV hash bits for L3 SRC. */
289 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
290 
291 /* IBV hash bits for L3 DST. */
292 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
293 
294 /* IBV hash bits for TCP. */
295 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
296 			      IBV_RX_HASH_DST_PORT_TCP)
297 
298 /* IBV hash bits for UDP. */
299 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
300 			      IBV_RX_HASH_DST_PORT_UDP)
301 
302 /* IBV hash bits for L4 SRC. */
303 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
304 				 IBV_RX_HASH_SRC_PORT_UDP)
305 
306 /* IBV hash bits for L4 DST. */
307 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
308 				 IBV_RX_HASH_DST_PORT_UDP)
309 
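/*
 * Illustrative sketch, not part of the original header: deriving Verbs Rx
 * hash bits from the ETH_RSS_* types requested by the application, using the
 * groupings above. The helper is hypothetical and covers L3 only.
 */
static inline uint64_t
mlx5_flow_example_l3_hash_fields(uint64_t rss_types)
{
	uint64_t fields = 0;

	if (rss_types & MLX5_IPV4_LAYER_TYPES)
		fields |= MLX5_IPV4_IBV_RX_HASH;
	if (rss_types & MLX5_IPV6_LAYER_TYPES)
		fields |= MLX5_IPV6_IBV_RX_HASH;
	return fields;
}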
310 /* GENEVE header, first 16 bits. */
311 #define MLX5_GENEVE_VER_MASK 0x3
312 #define MLX5_GENEVE_VER_SHIFT 14
313 #define MLX5_GENEVE_VER_VAL(a) \
314 		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
315 #define MLX5_GENEVE_OPTLEN_MASK 0x3F
316 #define MLX5_GENEVE_OPTLEN_SHIFT 7
317 #define MLX5_GENEVE_OPTLEN_VAL(a) \
318 	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
319 #define MLX5_GENEVE_OAMF_MASK 0x1
320 #define MLX5_GENEVE_OAMF_SHIFT 7
321 #define MLX5_GENEVE_OAMF_VAL(a) \
322 		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
323 #define MLX5_GENEVE_CRITO_MASK 0x1
324 #define MLX5_GENEVE_CRITO_SHIFT 6
325 #define MLX5_GENEVE_CRITO_VAL(a) \
326 		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
327 #define MLX5_GENEVE_RSVD_MASK 0x3F
328 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
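/*
 * Illustrative sketch, not part of the original header: reading fields from
 * the first 16 bits of a GENEVE header with the accessor macros above. The
 * helper names are hypothetical and the 16-bit value is assumed to already
 * be converted to host byte order by the caller.
 */
static inline int
mlx5_flow_example_geneve_ver_ok(uint16_t first16)
{
	/* Only GENEVE protocol version 0 is currently defined. */
	return MLX5_GENEVE_VER_VAL(first16) == 0;
}

static inline int
mlx5_flow_example_geneve_is_oam(uint16_t first16)
{
	return MLX5_GENEVE_OAMF_VAL(first16) != 0;
}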
329 /*
330  * The length of the GENEVE options fields, expressed in four-byte multiples,
331  * not including the eight-byte fixed tunnel header.
332  */
333 #define MLX5_GENEVE_OPT_LEN_0 14
334 #define MLX5_GENEVE_OPT_LEN_1 63
335 
336 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
337 					  sizeof(struct rte_flow_item_ipv4))
338 
339 /* Software header modify action numbers of a flow. */
340 #define MLX5_ACT_NUM_MDF_IPV4		1
341 #define MLX5_ACT_NUM_MDF_IPV6		4
342 #define MLX5_ACT_NUM_MDF_MAC		2
343 #define MLX5_ACT_NUM_MDF_VID		1
344 #define MLX5_ACT_NUM_MDF_PORT		2
345 #define MLX5_ACT_NUM_MDF_TTL		1
346 #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
347 #define MLX5_ACT_NUM_MDF_TCPSEQ		1
348 #define MLX5_ACT_NUM_MDF_TCPACK		1
349 #define MLX5_ACT_NUM_SET_REG		1
350 #define MLX5_ACT_NUM_SET_TAG		1
351 #define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
352 #define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
353 #define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
354 #define MLX5_ACT_NUM_SET_DSCP		1
355 
356 enum mlx5_flow_drv_type {
357 	MLX5_FLOW_TYPE_MIN,
358 	MLX5_FLOW_TYPE_DV,
359 	MLX5_FLOW_TYPE_VERBS,
360 	MLX5_FLOW_TYPE_MAX,
361 };
362 
363 /* Fate action type. */
364 enum mlx5_flow_fate_type {
365 	MLX5_FLOW_FATE_NONE, /* Egress flow. */
366 	MLX5_FLOW_FATE_QUEUE,
367 	MLX5_FLOW_FATE_JUMP,
368 	MLX5_FLOW_FATE_PORT_ID,
369 	MLX5_FLOW_FATE_DROP,
370 	MLX5_FLOW_FATE_DEFAULT_MISS,
371 	MLX5_FLOW_FATE_MAX,
372 };
373 
374 /* Matcher PRM representation */
375 struct mlx5_flow_dv_match_params {
376 	size_t size;
377 	/**< Size of match value. Do NOT split size and key! */
378 	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
379 	/**< Matcher value. This value is used as the mask or as a key. */
380 };
381 
382 /* Matcher structure. */
383 struct mlx5_flow_dv_matcher {
384 	LIST_ENTRY(mlx5_flow_dv_matcher) next;
385 	/**< Pointer to the next element. */
386 	struct mlx5_flow_tbl_resource *tbl;
387 	/**< Pointer to the table(group) the matcher associated with. */
388 	rte_atomic32_t refcnt; /**< Reference counter. */
389 	void *matcher_object; /**< Pointer to DV matcher */
390 	uint16_t crc; /**< CRC of key. */
391 	uint16_t priority; /**< Priority of matcher. */
392 	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
393 };
394 
395 #define MLX5_ENCAP_MAX_LEN 132
396 
397 /* Encap/decap resource structure. */
398 struct mlx5_flow_dv_encap_decap_resource {
399 	ILIST_ENTRY(uint32_t)next;
400 	/* Pointer to next element. */
401 	rte_atomic32_t refcnt; /**< Reference counter. */
402 	void *action;
403 	/**< Encap/decap action object. */
404 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
405 	size_t size;
406 	uint8_t reformat_type;
407 	uint8_t ft_type;
408 	uint64_t flags; /**< Flags for RDMA API. */
409 };
410 
411 /* Tag resource structure. */
412 struct mlx5_flow_dv_tag_resource {
413 	struct mlx5_hlist_entry entry;
414 	/**< hash list entry for tag resource, tag value as the key. */
415 	void *action;
416 	/**< Tag action object. */
417 	rte_atomic32_t refcnt; /**< Reference counter. */
418 	uint32_t idx; /**< Index for the index memory pool. */
419 };
420 
421 /*
422  * Number of modification commands.
423  * The maximal number of actions supported by the FW is a constant; it is 16
424  * in the latest releases and limited to 8 in some older releases.
425  * Since there is no interface to query this capacity, the maximal value is
426  * used so that the PMD can create the flow; the validation is done in the
427  * lower driver layer or FW. A failure is returned if the number exceeds the
428  * maximal supported actions on the root table.
429  * On non-root tables, there is no limitation, but 32 is enough right now.
430  */
431 #define MLX5_MAX_MODIFY_NUM			32
432 #define MLX5_ROOT_TBL_MODIFY_NUM		16
433 
434 /* Modify resource structure */
435 struct mlx5_flow_dv_modify_hdr_resource {
436 	LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
437 	/* Pointer to next element. */
438 	rte_atomic32_t refcnt; /**< Reference counter. */
439 	void *action;
440 	/**< Modify header action object. */
441 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
442 	uint32_t actions_num; /**< Number of modification actions. */
443 	uint64_t flags; /**< Flags for RDMA API. */
444 	struct mlx5_modification_cmd actions[];
445 	/**< Modification actions. */
446 };
447 
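/*
 * Illustrative sketch, not part of the original header: sizing an allocation
 * that carries "actions_num" modification commands in the flexible
 * "actions[]" array above. The helper is hypothetical; per the comment on
 * MLX5_MAX_MODIFY_NUM, the root-table limit itself is enforced by the lower
 * driver layer or FW.
 */
static inline size_t
mlx5_flow_example_modify_hdr_size(uint32_t actions_num)
{
	if (actions_num > MLX5_MAX_MODIFY_NUM)
		return 0; /* More than the PMD ever prepares. */
	return sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
	       actions_num * sizeof(struct mlx5_modification_cmd);
}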
448 /* Jump action resource structure. */
449 struct mlx5_flow_dv_jump_tbl_resource {
450 	rte_atomic32_t refcnt; /**< Reference counter. */
451 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
452 	void *action; /**< Pointer to the rdma core action. */
453 };
454 
455 /* Port ID resource structure. */
456 struct mlx5_flow_dv_port_id_action_resource {
457 	ILIST_ENTRY(uint32_t)next;
458 	/* Pointer to next element. */
459 	rte_atomic32_t refcnt; /**< Reference counter. */
460 	void *action;
461 	/**< Action object. */
462 	uint32_t port_id; /**< Port ID value. */
463 };
464 
465 /* Push VLAN action resource structure */
466 struct mlx5_flow_dv_push_vlan_action_resource {
467 	ILIST_ENTRY(uint32_t)next;
468 	/* Pointer to next element. */
469 	rte_atomic32_t refcnt; /**< Reference counter. */
470 	void *action; /**< Action object. */
471 	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
472 	rte_be32_t vlan_tag; /**< VLAN tag value. */
473 };
474 
475 /* Metadata register copy table entry. */
476 struct mlx5_flow_mreg_copy_resource {
477 	/*
478 	 * Hash list entry for copy table.
479 	 *  - Key is 32/64-bit MARK action ID.
480 	 *  - MUST be the first entry.
481 	 */
482 	struct mlx5_hlist_entry hlist_ent;
483 	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
484 	/* List entry for device flows. */
485 	uint32_t refcnt; /* Reference counter. */
486 	uint32_t appcnt; /* Apply/Remove counter. */
487 	uint32_t idx;
488 	uint32_t rix_flow; /* Built flow for copy. */
489 };
490 
491 /* Table data structure of the hash organization. */
492 struct mlx5_flow_tbl_data_entry {
493 	struct mlx5_hlist_entry entry;
494 	/**< hash list entry, 64-bits key inside. */
495 	struct mlx5_flow_tbl_resource tbl;
496 	/**< flow table resource. */
497 	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
498 	/**< matchers' header associated with the flow table. */
499 	struct mlx5_flow_dv_jump_tbl_resource jump;
500 	/**< jump resource, at most one for each table created. */
501 	uint32_t idx; /**< index for the indexed mempool. */
502 };
503 
504 /* Verbs specification header. */
505 struct ibv_spec_header {
506 	enum ibv_flow_spec_type type;
507 	uint16_t size;
508 };
509 
510 /* RSS description. */
511 struct mlx5_flow_rss_desc {
512 	uint32_t level;
513 	uint32_t queue_num; /**< Number of entries in @p queue. */
514 	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
515 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
516 	uint16_t queue[]; /**< Destination queues to redirect traffic to. */
517 };
518 
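/*
 * Illustrative sketch, not part of the original header: sizing an allocation
 * of the RSS description above for "n" destination queues carried by the
 * flexible "queue[]" array. The helper is hypothetical.
 */
static inline size_t
mlx5_flow_example_rss_desc_size(uint32_t n)
{
	return sizeof(struct mlx5_flow_rss_desc) + n * sizeof(uint16_t);
}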
519 
520 /** Device flow handle structure for DV mode only. */
521 struct mlx5_flow_handle_dv {
522 	/* Flow DV api: */
523 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
524 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
525 	/**< Pointer to modify header resource in cache. */
526 	uint32_t rix_encap_decap;
527 	/**< Index to encap/decap resource in cache. */
528 	uint32_t rix_push_vlan;
529 	/**< Index to push VLAN action resource in cache. */
530 	uint32_t rix_tag;
531 	/**< Index to the tag action. */
532 } __rte_packed;
533 
534 /** Device flow handle structure: used both for creating & destroying. */
535 struct mlx5_flow_handle {
536 	SILIST_ENTRY(uint32_t)next;
537 	/**< Index to the next device flow handle. */
538 	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
539 	uint64_t layers;
540 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
541 	void *drv_flow; /**< pointer to driver flow object. */
542 	uint32_t split_flow_id:28; /**< Sub flow unique match flow id. */
543 	uint32_t mark:1; /**< Metadata rxq mark flag. */
544 	uint32_t fate_action:3; /**< Fate action type. */
545 	union {
546 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
547 		uint32_t rix_jump; /**< Index to the jump action resource. */
548 		uint32_t rix_port_id_action;
549 		/**< Index to port ID action resource. */
550 		uint32_t rix_fate;
551 		/**< Generic value indicates the fate action. */
552 		uint32_t rix_default_fate;
553 		/**< Indicates default miss fate action. */
554 	};
555 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
556 	struct mlx5_flow_handle_dv dvh;
557 #endif
558 } __rte_packed;
559 
560 /*
561  * Size for the Verbs device flow handle structure only. Do not use the
562  * DV-only structure in Verbs; no DV flow attributes will be accessed.
563  * Macro offsetof() could also be used here.
564  */
565 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
566 #define MLX5_FLOW_HANDLE_VERBS_SIZE \
567 	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
568 #else
569 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
570 #endif
571 
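/*
 * Illustrative sketch, not part of the original header: the comment above
 * notes that offsetof() would work as well, because "dvh" is the last member
 * of the packed handle structure. A compile-time check of that equivalence
 * could look like this (assuming C11 _Static_assert and <stddef.h> offsetof
 * are available in this translation unit).
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
_Static_assert(MLX5_FLOW_HANDLE_VERBS_SIZE ==
	       offsetof(struct mlx5_flow_handle, dvh),
	       "Verbs-only handle size must equal the offset of the DV part");
#endif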
572 /*
573  * Max number of actions per DV flow.
574  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
575  * in rdma-core file providers/mlx5/verbs.c.
576  */
577 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
578 
579 /** Device flow structure only for DV flow creation. */
580 struct mlx5_flow_dv_workspace {
581 	uint32_t group; /**< The group index. */
582 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
583 	int actions_n; /**< number of actions. */
584 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
585 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
586 	/**< Pointer to encap/decap resource in cache. */
587 	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
588 	/**< Pointer to push VLAN action resource in cache. */
589 	struct mlx5_flow_dv_tag_resource *tag_resource;
590 	/**< pointer to the tag action. */
591 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
592 	/**< Pointer to port ID action resource. */
593 	struct mlx5_flow_dv_jump_tbl_resource *jump;
594 	/**< Pointer to the jump action resource. */
595 	struct mlx5_flow_dv_match_params value;
596 	/**< Holds the value that the packet is compared to. */
597 };
598 
599 /*
600  * Maximal Verbs flow specifications & actions size.
601  * Some elements are mutually exclusive, but enough space should be allocated.
602  * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
603  *               2. One tunnel header (exception: GRE + MPLS),
604  *                  SPEC length: GRE == tunnel.
605  * Actions: 1. 1 Mark OR Flag.
606  *          2. 1 Drop (if any).
607  *          3. No limitation for counters, but it makes no sense to support too
608  *             many counters in a single device flow.
609  */
610 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
611 #define MLX5_VERBS_MAX_SPEC_SIZE \
612 		( \
613 			(2 * (sizeof(struct ibv_flow_spec_eth) + \
614 			      sizeof(struct ibv_flow_spec_ipv6) + \
615 			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
616 			sizeof(struct ibv_flow_spec_gre) + \
617 			sizeof(struct ibv_flow_spec_mpls)) \
618 		)
619 #else
620 #define MLX5_VERBS_MAX_SPEC_SIZE \
621 		( \
622 			(2 * (sizeof(struct ibv_flow_spec_eth) + \
623 			      sizeof(struct ibv_flow_spec_ipv6) + \
624 			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
625 			sizeof(struct ibv_flow_spec_tunnel)) \
626 		)
627 #endif
628 
629 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
630 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
631 #define MLX5_VERBS_MAX_ACT_SIZE \
632 		( \
633 			sizeof(struct ibv_flow_spec_action_tag) + \
634 			sizeof(struct ibv_flow_spec_action_drop) + \
635 			sizeof(struct ibv_flow_spec_counter_action) * 4 \
636 		)
637 #else
638 #define MLX5_VERBS_MAX_ACT_SIZE \
639 		( \
640 			sizeof(struct ibv_flow_spec_action_tag) + \
641 			sizeof(struct ibv_flow_spec_action_drop) \
642 		)
643 #endif
644 
645 #define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
646 		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
647 
648 /** Device flow structure only for Verbs flow creation. */
649 struct mlx5_flow_verbs_workspace {
650 	unsigned int size; /**< Size of the attribute. */
651 	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
652 	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
653 	/**< Specifications & actions buffer of verbs flow. */
654 };
655 
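/*
 * Illustrative sketch, not part of the original header: appending one Verbs
 * specification into the workspace buffer above, mirroring what the Verbs
 * flow engine does at translation time. The helper itself is hypothetical.
 */
static inline void
mlx5_flow_example_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
				 const void *spec, unsigned int size)
{
	if (verbs->size + size > MLX5_VERBS_MAX_SPEC_ACT_SIZE)
		return; /* No room left in the specification buffer. */
	memcpy(&verbs->specs[verbs->size], spec, size);
	verbs->size += size;
	verbs->attr.num_of_specs++;
}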
656 /** Maximal number of device sub-flows supported. */
657 #define MLX5_NUM_MAX_DEV_FLOWS 32
658 
659 /** Device flow structure. */
660 struct mlx5_flow {
661 	struct rte_flow *flow; /**< Pointer to the main flow. */
662 	uint32_t flow_idx; /**< The memory pool index to the main flow. */
663 	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
664 	uint64_t act_flags;
665 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
666 	bool external; /**< true if the flow is created externally to the PMD. */
667 	uint8_t ingress; /**< 1 if the flow is ingress. */
668 	union {
669 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
670 		struct mlx5_flow_dv_workspace dv;
671 #endif
672 		struct mlx5_flow_verbs_workspace verbs;
673 	};
674 	struct mlx5_flow_handle *handle;
675 	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
676 };
677 
678 /* Flow meter state. */
679 #define MLX5_FLOW_METER_DISABLE 0
680 #define MLX5_FLOW_METER_ENABLE 1
681 
682 #define MLX5_MAN_WIDTH 8
683 /* Modify this value if enum rte_mtr_color changes. */
684 #define RTE_MTR_DROPPED RTE_COLORS
685 
686 /* Meter policer statistics */
687 struct mlx5_flow_policer_stats {
688 	uint32_t cnt[RTE_COLORS + 1];
689 	/**< Color counter, extra for drop. */
690 	uint64_t stats_mask;
691 	/**< Statistics mask for the colors. */
692 };
693 
694 /* Meter table structure. */
695 struct mlx5_meter_domain_info {
696 	struct mlx5_flow_tbl_resource *tbl;
697 	/**< Meter table. */
698 	struct mlx5_flow_tbl_resource *sfx_tbl;
699 	/**< Meter suffix table. */
700 	void *any_matcher;
701 	/**< Default matcher for non-matching meter color. */
702 	void *color_matcher;
703 	/**< Meter color match criteria. */
704 	void *jump_actn;
705 	/**< Meter match action. */
706 	void *policer_rules[RTE_MTR_DROPPED + 1];
707 	/**< Meter policer for the match. */
708 };
709 
710 /* Meter table set for TX RX FDB. */
711 struct mlx5_meter_domains_infos {
712 	uint32_t ref_cnt;
713 	/**< Table user count. */
714 	struct mlx5_meter_domain_info egress;
715 	/**< TX meter table. */
716 	struct mlx5_meter_domain_info ingress;
717 	/**< RX meter table. */
718 	struct mlx5_meter_domain_info transfer;
719 	/**< FDB meter table. */
720 	void *drop_actn;
721 	/**< Drop action for non-matched packets. */
722 	void *count_actns[RTE_MTR_DROPPED + 1];
723 	/**< Counters for matched and unmatched statistics. */
724 	uint32_t fmp[MLX5_ST_SZ_DW(flow_meter_parameters)];
725 	/**< Flow meter parameter. */
726 	size_t fmp_size;
727 	/**< Flow meter parameter size. */
728 	void *meter_action;
729 	/**< Flow meter action. */
730 };
731 
732 /* Meter parameter structure. */
733 struct mlx5_flow_meter {
734 	TAILQ_ENTRY(mlx5_flow_meter) next;
735 	/**< Pointer to the next flow meter structure. */
736 	uint32_t idx; /* Index to meter object. */
737 	uint32_t meter_id;
738 	/**< Meter id. */
739 	struct mlx5_flow_meter_profile *profile;
740 	/**< Meter profile parameters. */
741 
742 	/** Policer actions (per meter output color). */
743 	enum rte_mtr_policer_action action[RTE_COLORS];
744 
745 	/** Set of stats counters to be enabled.
746 	 * @see enum rte_mtr_stats_type
747 	 */
748 	uint64_t stats_mask;
749 
750 	/** Rule applies to ingress traffic. */
751 	uint32_t ingress:1;
752 
753 	/** Rule applies to egress traffic. */
754 	uint32_t egress:1;
755 	/**
756 	 * Instead of simply matching the properties of traffic as it would
757 	 * appear on a given DPDK port ID, enabling this attribute transfers
758 	 * a flow rule to the lowest possible level of any device endpoints
759 	 * found in the pattern.
760 	 *
761 	 * When supported, this effectively enables an application to
762 	 * re-route traffic not necessarily intended for it (e.g. coming
763 	 * from or addressed to different physical ports, VFs or
764 	 * applications) at the device level.
765 	 *
766 	 * It complements the behavior of some pattern items such as
767 	 * RTE_FLOW_ITEM_TYPE_PHY_PORT and is meaningless without them.
768 	 *
769 	 * When transferring flow rules, ingress and egress attributes keep
770 	 * their original meaning, as if processing traffic emitted or
771 	 * received by the application.
772 	 */
773 	uint32_t transfer:1;
774 	struct mlx5_meter_domains_infos *mfts;
775 	/**< Flow table created for this meter. */
776 	struct mlx5_flow_policer_stats policer_stats;
777 	/**< Meter policer statistics. */
778 	uint32_t ref_cnt;
779 	/**< Use count. */
780 	uint32_t active_state:1;
781 	/**< Meter state. */
782 	uint32_t shared:1;
783 	/**< Meter shared or not. */
784 };
785 
786 /* RFC2697 parameter structure. */
787 struct mlx5_flow_meter_srtcm_rfc2697_prm {
788 	/* green_saturation_value = cbs_mantissa * 2^cbs_exponent */
789 	uint32_t cbs_exponent:5;
790 	uint32_t cbs_mantissa:8;
791 	/* cir = 8G * cir_mantissa * 1/(2^cir_exponent) Bytes/Sec */
792 	uint32_t cir_exponent:5;
793 	uint32_t cir_mantissa:8;
794 	/* yellow_saturation_value = ebs_mantissa * 2^ebs_exponent */
795 	uint32_t ebs_exponent:5;
796 	uint32_t ebs_mantissa:8;
797 };
798 
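/*
 * Illustrative sketch, not part of the original header: decoding the burst
 * sizes from the mantissa/exponent encoding documented in the structure
 * above. The helpers are hypothetical.
 */
static inline uint64_t
mlx5_flow_example_srtcm_cbs(const struct mlx5_flow_meter_srtcm_rfc2697_prm *p)
{
	/* green_saturation_value = cbs_mantissa * 2^cbs_exponent (bytes). */
	return (uint64_t)p->cbs_mantissa << p->cbs_exponent;
}

static inline uint64_t
mlx5_flow_example_srtcm_ebs(const struct mlx5_flow_meter_srtcm_rfc2697_prm *p)
{
	/* yellow_saturation_value = ebs_mantissa * 2^ebs_exponent (bytes). */
	return (uint64_t)p->ebs_mantissa << p->ebs_exponent;
}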
799 /* Flow meter profile structure. */
800 struct mlx5_flow_meter_profile {
801 	TAILQ_ENTRY(mlx5_flow_meter_profile) next;
802 	/**< Pointer to the next flow meter structure. */
803 	uint32_t meter_profile_id; /**< Profile id. */
804 	struct rte_mtr_meter_profile profile; /**< Profile detail. */
805 	union {
806 		struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
807 		/**< srtcm_rfc2697 struct. */
808 	};
809 	uint32_t ref_cnt; /**< Use count. */
810 };
811 
812 /* Fdir flow structure */
813 struct mlx5_fdir_flow {
814 	LIST_ENTRY(mlx5_fdir_flow) next; /* Pointer to the next element. */
815 	struct mlx5_fdir *fdir; /* Pointer to fdir. */
816 	uint32_t rix_flow; /* Index to flow. */
817 };
818 
819 #define HAIRPIN_FLOW_ID_BITS 28
820 
821 /* Flow structure. */
822 struct rte_flow {
823 	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
824 	uint32_t dev_handles;
825 	/**< Device flow handles that are part of the flow. */
826 	uint32_t drv_type:2; /**< Driver type. */
827 	uint32_t fdir:1; /**< Identifier of associated FDIR if any. */
828 	uint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS;
829 	/**< The flow id used for hairpin. */
830 	uint32_t copy_applied:1; /**< The MARK copy flow is applied. */
831 	uint32_t rix_mreg_copy;
832 	/**< Index to metadata register copy table resource. */
833 	uint32_t counter; /**< Holds flow counter. */
834 	uint16_t meter; /**< Holds flow meter id. */
835 } __rte_packed;
836 
837 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
838 				    const struct rte_flow_attr *attr,
839 				    const struct rte_flow_item items[],
840 				    const struct rte_flow_action actions[],
841 				    bool external,
842 				    int hairpin,
843 				    struct rte_flow_error *error);
844 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
845 	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
846 	 const struct rte_flow_item items[],
847 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
848 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
849 				     struct mlx5_flow *dev_flow,
850 				     const struct rte_flow_attr *attr,
851 				     const struct rte_flow_item items[],
852 				     const struct rte_flow_action actions[],
853 				     struct rte_flow_error *error);
854 typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
855 				 struct rte_flow_error *error);
856 typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
857 				   struct rte_flow *flow);
858 typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
859 				    struct rte_flow *flow);
860 typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
861 				 struct rte_flow *flow,
862 				 const struct rte_flow_action *actions,
863 				 void *data,
864 				 struct rte_flow_error *error);
865 typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t)
866 					    (struct rte_eth_dev *dev,
867 					     const struct mlx5_flow_meter *fm);
868 typedef int (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
869 					struct mlx5_meter_domains_infos *tbls);
870 typedef int (*mlx5_flow_create_policer_rules_t)
871 					(struct rte_eth_dev *dev,
872 					 struct mlx5_flow_meter *fm,
873 					 const struct rte_flow_attr *attr);
874 typedef int (*mlx5_flow_destroy_policer_rules_t)
875 					(struct rte_eth_dev *dev,
876 					 const struct mlx5_flow_meter *fm,
877 					 const struct rte_flow_attr *attr);
878 typedef uint32_t (*mlx5_flow_counter_alloc_t)
879 				   (struct rte_eth_dev *dev);
880 typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
881 					 uint32_t cnt);
882 typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
883 					 uint32_t cnt,
884 					 bool clear, uint64_t *pkts,
885 					 uint64_t *bytes);
886 typedef int (*mlx5_flow_get_aged_flows_t)
887 					(struct rte_eth_dev *dev,
888 					 void **context,
889 					 uint32_t nb_contexts,
890 					 struct rte_flow_error *error);
891 struct mlx5_flow_driver_ops {
892 	mlx5_flow_validate_t validate;
893 	mlx5_flow_prepare_t prepare;
894 	mlx5_flow_translate_t translate;
895 	mlx5_flow_apply_t apply;
896 	mlx5_flow_remove_t remove;
897 	mlx5_flow_destroy_t destroy;
898 	mlx5_flow_query_t query;
899 	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
900 	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
901 	mlx5_flow_create_policer_rules_t create_policer_rules;
902 	mlx5_flow_destroy_policer_rules_t destroy_policer_rules;
903 	mlx5_flow_counter_alloc_t counter_alloc;
904 	mlx5_flow_counter_free_t counter_free;
905 	mlx5_flow_counter_query_t counter_query;
906 	mlx5_flow_get_aged_flows_t get_aged_flows;
907 };
908 
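/*
 * Illustrative sketch, not part of the original header: how a per-driver ops
 * table is typically selected by mlx5_flow_drv_type before invoking the
 * callbacks above. The table parameter and helper are hypothetical stand-ins
 * for what the flow engine does internally.
 */
static inline const struct mlx5_flow_driver_ops *
mlx5_flow_example_get_drv_ops(const struct mlx5_flow_driver_ops *ops_tbl[],
			      enum mlx5_flow_drv_type type)
{
	if (type <= MLX5_FLOW_TYPE_MIN || type >= MLX5_FLOW_TYPE_MAX)
		return NULL;
	return ops_tbl[type]; /* DV or Verbs implementation. */
}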
909 /* mlx5_flow.c */
910 
911 struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);
912 void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);
913 uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
914 uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
915 			      uint32_t id);
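/*
 * Illustrative sketch, not part of the original header: a typical life cycle
 * of the flow id pool declared above. A zero return from mlx5_flow_id_get()
 * is assumed to mean success, and the maximum id used here is only an
 * example value.
 */
static inline uint32_t
mlx5_flow_example_id_pool_usage(void)
{
	struct mlx5_flow_id_pool *pool;
	uint32_t id = 0;

	pool = mlx5_flow_id_pool_alloc(1u << HAIRPIN_FLOW_ID_BITS);
	if (!pool)
		return 0;
	if (!mlx5_flow_id_get(pool, &id)) {
		/* ... use "id", e.g. as a hairpin flow id ... */
		mlx5_flow_id_release(pool, id);
	}
	mlx5_flow_id_pool_release(pool);
	return id;
}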
916 int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
917 			     bool external, uint32_t group, bool fdb_def_rule,
918 			     uint32_t *table, struct rte_flow_error *error);
919 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
920 				     int tunnel, uint64_t layer_types,
921 				     uint64_t hash_fields);
922 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
923 				   uint32_t subpriority);
924 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
925 				     enum mlx5_feature_name feature,
926 				     uint32_t id,
927 				     struct rte_flow_error *error);
928 const struct rte_flow_action *mlx5_flow_find_action
929 					(const struct rte_flow_action *actions,
930 					 enum rte_flow_action_type action);
931 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
932 				    const struct rte_flow_attr *attr,
933 				    struct rte_flow_error *error);
934 int mlx5_flow_validate_action_drop(uint64_t action_flags,
935 				   const struct rte_flow_attr *attr,
936 				   struct rte_flow_error *error);
937 int mlx5_flow_validate_action_flag(uint64_t action_flags,
938 				   const struct rte_flow_attr *attr,
939 				   struct rte_flow_error *error);
940 int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
941 				   uint64_t action_flags,
942 				   const struct rte_flow_attr *attr,
943 				   struct rte_flow_error *error);
944 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
945 				    uint64_t action_flags,
946 				    struct rte_eth_dev *dev,
947 				    const struct rte_flow_attr *attr,
948 				    struct rte_flow_error *error);
949 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
950 				  uint64_t action_flags,
951 				  struct rte_eth_dev *dev,
952 				  const struct rte_flow_attr *attr,
953 				  uint64_t item_flags,
954 				  struct rte_flow_error *error);
955 int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
956 				const struct rte_flow_attr *attr,
957 				struct rte_flow_error *error);
958 int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
959 				  const struct rte_flow_attr *attributes,
960 				  struct rte_flow_error *error);
961 int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
962 			      const uint8_t *mask,
963 			      const uint8_t *nic_mask,
964 			      unsigned int size,
965 			      struct rte_flow_error *error);
966 int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
967 				uint64_t item_flags,
968 				struct rte_flow_error *error);
969 int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
970 				uint64_t item_flags,
971 				uint8_t target_protocol,
972 				struct rte_flow_error *error);
973 int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
974 				    uint64_t item_flags,
975 				    const struct rte_flow_item *gre_item,
976 				    struct rte_flow_error *error);
977 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
978 				 uint64_t item_flags,
979 				 uint64_t last_item,
980 				 uint16_t ether_type,
981 				 const struct rte_flow_item_ipv4 *acc_mask,
982 				 struct rte_flow_error *error);
983 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
984 				 uint64_t item_flags,
985 				 uint64_t last_item,
986 				 uint16_t ether_type,
987 				 const struct rte_flow_item_ipv6 *acc_mask,
988 				 struct rte_flow_error *error);
989 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
990 				 const struct rte_flow_item *item,
991 				 uint64_t item_flags,
992 				 uint64_t prev_layer,
993 				 struct rte_flow_error *error);
994 int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
995 				uint64_t item_flags,
996 				uint8_t target_protocol,
997 				const struct rte_flow_item_tcp *flow_mask,
998 				struct rte_flow_error *error);
999 int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1000 				uint64_t item_flags,
1001 				uint8_t target_protocol,
1002 				struct rte_flow_error *error);
1003 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1004 				 uint64_t item_flags,
1005 				 struct rte_eth_dev *dev,
1006 				 struct rte_flow_error *error);
1007 int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1008 				  uint64_t item_flags,
1009 				  struct rte_flow_error *error);
1010 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1011 				      uint64_t item_flags,
1012 				      struct rte_eth_dev *dev,
1013 				      struct rte_flow_error *error);
1014 int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1015 				 uint64_t item_flags,
1016 				 uint8_t target_protocol,
1017 				 struct rte_flow_error *error);
1018 int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1019 				   uint64_t item_flags,
1020 				   uint8_t target_protocol,
1021 				   struct rte_flow_error *error);
1022 int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
1023 				  uint64_t item_flags,
1024 				  uint8_t target_protocol,
1025 				  struct rte_flow_error *error);
1026 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
1027 				   uint64_t item_flags,
1028 				   struct rte_eth_dev *dev,
1029 				   struct rte_flow_error *error);
1030 struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
1031 					(struct rte_eth_dev *dev,
1032 					 const struct mlx5_flow_meter *fm);
1033 int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
1034 			       struct mlx5_meter_domains_infos *tbl);
1035 int mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
1036 				   struct mlx5_flow_meter *fm,
1037 				   const struct rte_flow_attr *attr);
1038 int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
1039 				    struct mlx5_flow_meter *fm,
1040 				    const struct rte_flow_attr *attr);
1041 int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
1042 			  struct rte_mtr_error *error);
1043 #endif /* RTE_PMD_MLX5_FLOW_H_ */
1044