xref: /dpdk/drivers/net/mlx5/mlx5_flow.h (revision b9a87346b05c562dd6005ee025eca67a1a80bea8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
7 
8 #include <stdalign.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 
13 #include <rte_alarm.h>
14 #include <rte_mtr.h>
15 
16 #include <mlx5_glue.h>
17 #include <mlx5_prm.h>
18 
19 #include "mlx5.h"
20 #include "rte_pmd_mlx5.h"
21 #include "hws/mlx5dr.h"
22 
23 /* E-Switch Manager port, used for rte_flow_item_port_id. */
24 #define MLX5_PORT_ESW_MGR UINT32_MAX
25 
26 /* E-Switch Manager port, used for rte_flow_item_ethdev. */
27 #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX
28 
29 /* Private rte flow items. */
30 enum mlx5_rte_flow_item_type {
31 	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
32 	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
33 	MLX5_RTE_FLOW_ITEM_TYPE_SQ,
34 	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
35 	MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
36 };
37 
38 /* Private (internal) rte flow actions. */
39 enum mlx5_rte_flow_action_type {
40 	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
41 	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
42 	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
43 	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
44 	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
45 	MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
46 	MLX5_RTE_FLOW_ACTION_TYPE_AGE,
47 	MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
48 	MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
49 	MLX5_RTE_FLOW_ACTION_TYPE_RSS,
50 	MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
51 };
52 
53 /* Private (internal) Field IDs for MODIFY_FIELD action. */
54 enum mlx5_rte_flow_field_id {
55 	MLX5_RTE_FLOW_FIELD_END = INT_MIN,
56 	MLX5_RTE_FLOW_FIELD_META_REG,
57 };
58 
59 #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29
60 
61 #define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \
62 	(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)
63 
64 #define MLX5_INDIRECT_ACTION_IDX_GET(handle) \
65 	(((uint32_t)(uintptr_t)(handle)) & \
66 	 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
67 
68 enum mlx5_indirect_type {
69 	MLX5_INDIRECT_ACTION_TYPE_RSS,
70 	MLX5_INDIRECT_ACTION_TYPE_AGE,
71 	MLX5_INDIRECT_ACTION_TYPE_COUNT,
72 	MLX5_INDIRECT_ACTION_TYPE_CT,
73 	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
74 	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
75 };
76 
77 /* Now, the maximal ports will be supported is 16, action number is 32M. */
78 #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10
79 
80 #define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25
81 #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)
82 
83 /*
84  * When SW steering flow engine is used, the CT action handles are encoded in a following way:
85  * - bits 31:29 - type
86  * - bits 28:25 - port index of the action owner
87  * - bits 24:0 - action index
88  */
89 #define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
90 	((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
91 	 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
92 	  MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))
93 
94 #define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
95 	(((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
96 	 MLX5_INDIRECT_ACT_CT_OWNER_MASK)
97 
98 #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
99 	((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))
100 
101 /*
102  * When HW steering flow engine is used, the CT action handles are encoded in a following way:
103  * - bits 31:29 - type
104  * - bits 28:0 - action index
105  */
106 #define MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(index) \
107 	((struct rte_flow_action_handle *)(uintptr_t) \
108 	 ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (index)))
109 
110 enum mlx5_indirect_list_type {
111 	MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
112 	MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
113 	MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
114 	MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,
115 };
116 
117 /**
118  * Base type for indirect list type.
119  */
120 struct mlx5_indirect_list {
121 	/* Indirect list type. */
122 	enum mlx5_indirect_list_type type;
123 	/* Optional storage list entry */
124 	LIST_ENTRY(mlx5_indirect_list) entry;
125 };
126 
127 static __rte_always_inline void
128 mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
129 {
130 	LIST_HEAD(, mlx5_indirect_list) *h = head;
131 
132 	LIST_INSERT_HEAD(h, elem, entry);
133 }
134 
135 static __rte_always_inline void
136 mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
137 {
138 	if (elem->entry.le_prev)
139 		LIST_REMOVE(elem, entry);
140 }
141 
142 static __rte_always_inline enum mlx5_indirect_list_type
143 mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
144 {
145 	return ((const struct mlx5_indirect_list *)obj)->type;
146 }
147 
148 /* Matches on selected register. */
149 struct mlx5_rte_flow_item_tag {
150 	enum modify_reg id;
151 	uint32_t data;
152 };
153 
154 /* Modify selected register. */
155 struct mlx5_rte_flow_action_set_tag {
156 	enum modify_reg id;
157 	uint8_t offset;
158 	uint8_t length;
159 	uint32_t data;
160 };
161 
162 struct mlx5_flow_action_copy_mreg {
163 	enum modify_reg dst;
164 	enum modify_reg src;
165 };
166 
167 /* Matches on source queue. */
168 struct mlx5_rte_flow_item_sq {
169 	uint32_t queue; /* DevX SQ number */
170 };
171 
172 /* Map from registers to modify fields. */
173 extern enum mlx5_modification_field reg_to_field[];
174 extern const size_t mlx5_mod_reg_size;
175 
176 static __rte_always_inline enum mlx5_modification_field
177 mlx5_convert_reg_to_field(enum modify_reg reg)
178 {
179 	MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size);
180 	return reg_to_field[reg];
181 }
182 
183 /* Feature name to allocate metadata register. */
184 enum mlx5_feature_name {
185 	MLX5_HAIRPIN_RX,
186 	MLX5_HAIRPIN_TX,
187 	MLX5_METADATA_RX,
188 	MLX5_METADATA_TX,
189 	MLX5_METADATA_FDB,
190 	MLX5_FLOW_MARK,
191 	MLX5_APP_TAG,
192 	MLX5_COPY_MARK,
193 	MLX5_MTR_COLOR,
194 	MLX5_MTR_ID,
195 	MLX5_ASO_FLOW_HIT,
196 	MLX5_ASO_CONNTRACK,
197 	MLX5_SAMPLE_ID,
198 };
199 
200 /* Default queue number. */
201 #define MLX5_RSSQ_DEFAULT_NUM 16
202 
203 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
204 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
205 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
206 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
207 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
208 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
209 
210 /* Pattern inner Layer bits. */
211 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
212 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
213 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
214 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
215 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
216 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
217 
218 /* Pattern tunnel Layer bits. */
219 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
220 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
221 #define MLX5_FLOW_LAYER_GRE (1u << 14)
222 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
223 /* List of tunnel Layer bits continued below. */
224 
225 /* General pattern items bits. */
226 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
227 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
228 #define MLX5_FLOW_ITEM_TAG (1u << 18)
229 #define MLX5_FLOW_ITEM_MARK (1u << 19)
230 
231 /* Pattern MISC bits. */
232 #define MLX5_FLOW_LAYER_ICMP (1u << 20)
233 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
234 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
235 
236 /* Pattern tunnel Layer bits (continued). */
237 #define MLX5_FLOW_LAYER_IPIP (1u << 23)
238 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
239 #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
240 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
241 
242 /* Queue items. */
243 #define MLX5_FLOW_ITEM_SQ (1u << 27)
244 
245 /* Pattern tunnel Layer bits (continued). */
246 #define MLX5_FLOW_LAYER_GTP (1u << 28)
247 
248 /* Pattern eCPRI Layer bit. */
249 #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
250 
251 /* IPv6 Fragment Extension Header bit. */
252 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
253 #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
254 
255 /* Pattern tunnel Layer bits (continued). */
256 #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
257 #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
258 
259 /* INTEGRITY item bits */
260 #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
261 #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
262 #define MLX5_FLOW_ITEM_INTEGRITY \
263 	(MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
264 
265 /* Conntrack item. */
266 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
267 
268 /* Flex item */
269 #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
270 #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
271 #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
272 
273 /* ESP item */
274 #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
275 
276 /* Port Representor/Represented Port item */
277 #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
278 #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
279 
280 /* Meter color item */
281 #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
282 #define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 45)
283 
284 
285 /* IPv6 routing extension item */
286 #define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
287 #define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
288 
289 /* Aggregated affinity item */
290 #define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49)
291 
292 /* IB BTH ITEM. */
293 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
294 
295 /* PTYPE ITEM */
296 #define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
297 
298 /* NSH ITEM */
299 #define MLX5_FLOW_ITEM_NSH (1ull << 53)
300 
301 /* COMPARE ITEM */
302 #define MLX5_FLOW_ITEM_COMPARE (1ull << 54)
303 
304 /* Random ITEM */
305 #define MLX5_FLOW_ITEM_RANDOM (1ull << 55)
306 
307 /* Outer Masks. */
308 #define MLX5_FLOW_LAYER_OUTER_L3 \
309 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
310 #define MLX5_FLOW_LAYER_OUTER_L4 \
311 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
312 #define MLX5_FLOW_LAYER_OUTER \
313 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
314 	 MLX5_FLOW_LAYER_OUTER_L4)
315 
316 /* Tunnel Masks. */
317 #define MLX5_FLOW_LAYER_TUNNEL \
318 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
319 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
320 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
321 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
322 	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
323 
324 /* Inner Masks. */
325 #define MLX5_FLOW_LAYER_INNER_L3 \
326 	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
327 #define MLX5_FLOW_LAYER_INNER_L4 \
328 	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
329 #define MLX5_FLOW_LAYER_INNER \
330 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
331 	 MLX5_FLOW_LAYER_INNER_L4)
332 
333 /* Layer Masks. */
334 #define MLX5_FLOW_LAYER_L2 \
335 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
336 #define MLX5_FLOW_LAYER_L3_IPV4 \
337 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
338 #define MLX5_FLOW_LAYER_L3_IPV6 \
339 	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
340 #define MLX5_FLOW_LAYER_L3 \
341 	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
342 #define MLX5_FLOW_LAYER_L4 \
343 	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
344 
345 /* Actions */
346 #define MLX5_FLOW_ACTION_DROP (1ull << 0)
347 #define MLX5_FLOW_ACTION_QUEUE (1ull << 1)
348 #define MLX5_FLOW_ACTION_RSS (1ull << 2)
349 #define MLX5_FLOW_ACTION_FLAG (1ull << 3)
350 #define MLX5_FLOW_ACTION_MARK (1ull << 4)
351 #define MLX5_FLOW_ACTION_COUNT (1ull << 5)
352 #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6)
353 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7)
354 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8)
355 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9)
356 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10)
357 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11)
358 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12)
359 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13)
360 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14)
361 #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15)
362 #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16)
363 #define MLX5_FLOW_ACTION_JUMP (1ull << 17)
364 #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18)
365 #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19)
366 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20)
367 #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21)
368 #define MLX5_FLOW_ACTION_ENCAP (1ull << 22)
369 #define MLX5_FLOW_ACTION_DECAP (1ull << 23)
370 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24)
371 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25)
372 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26)
373 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27)
374 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
375 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
376 #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
377 #define MLX5_FLOW_ACTION_METER (1ull << 31)
378 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
379 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
380 #define MLX5_FLOW_ACTION_AGE (1ull << 34)
381 #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
382 #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
383 #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
384 #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
385 #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
386 #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
387 #define MLX5_FLOW_ACTION_CT (1ull << 41)
388 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
389 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
390 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
391 #define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
392 #define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47)
393 #define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48)
394 #define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49)
395 #define MLX5_FLOW_ACTION_NAT64 (1ull << 50)
396 
397 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
398 	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
399 
400 #define MLX5_FLOW_FATE_ACTIONS \
401 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
402 	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
403 	 MLX5_FLOW_ACTION_DEFAULT_MISS | \
404 	 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
405 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
406 	 MLX5_FLOW_ACTION_PORT_REPRESENTOR)
407 
408 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
409 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
410 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
411 	 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
412 
413 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
414 				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
415 				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
416 				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
417 				      MLX5_FLOW_ACTION_SET_TP_SRC | \
418 				      MLX5_FLOW_ACTION_SET_TP_DST | \
419 				      MLX5_FLOW_ACTION_SET_TTL | \
420 				      MLX5_FLOW_ACTION_DEC_TTL | \
421 				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
422 				      MLX5_FLOW_ACTION_SET_MAC_DST | \
423 				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
424 				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
425 				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
426 				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
427 				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
428 				      MLX5_FLOW_ACTION_SET_TAG | \
429 				      MLX5_FLOW_ACTION_MARK_EXT | \
430 				      MLX5_FLOW_ACTION_SET_META | \
431 				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
432 				      MLX5_FLOW_ACTION_SET_IPV6_DSCP | \
433 				      MLX5_FLOW_ACTION_MODIFY_FIELD)
434 
435 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
436 				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
437 
438 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
439 
440 #ifndef IPPROTO_MPLS
441 #define IPPROTO_MPLS 137
442 #endif
443 
444 #define MLX5_IPV6_HDR_ECN_MASK 0x3
445 #define MLX5_IPV6_HDR_DSCP_SHIFT 2
446 
447 /* UDP port number for MPLS */
448 #define MLX5_UDP_PORT_MPLS 6635
449 
450 /* UDP port numbers for VxLAN. */
451 #define MLX5_UDP_PORT_VXLAN 4789
452 #define MLX5_UDP_PORT_VXLAN_GPE 4790
453 
454 /* UDP port numbers for RoCEv2. */
455 #define MLX5_UDP_PORT_ROCEv2 4791
456 
457 /* UDP port numbers for GENEVE. */
458 #define MLX5_UDP_PORT_GENEVE 6081
459 
460 /* Lowest priority indicator. */
461 #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
462 
463 /*
464  * Max priority for ingress\egress flow groups
465  * greater than 0 and for any transfer flow group.
466  * From user configation: 0 - 21843.
467  */
468 #define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)
469 
470 /*
471  * Number of sub priorities.
472  * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
473  * matching on the NIC (firmware dependent) L4 most have the higher priority
474  * followed by L3 and ending with L2.
475  */
476 #define MLX5_PRIORITY_MAP_L2 2
477 #define MLX5_PRIORITY_MAP_L3 1
478 #define MLX5_PRIORITY_MAP_L4 0
479 #define MLX5_PRIORITY_MAP_MAX 3
480 
481 /* Valid layer type for IPV4 RSS. */
482 #define MLX5_IPV4_LAYER_TYPES \
483 	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
484 	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
485 	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
486 
487 /* IBV hash source bits  for IPV4. */
488 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
489 
490 /* Valid layer type for IPV6 RSS. */
491 #define MLX5_IPV6_LAYER_TYPES \
492 	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
493 	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
494 	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
495 
496 /* IBV hash source bits  for IPV6. */
497 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
498 
499 /* IBV hash bits for L3 SRC. */
500 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
501 
502 /* IBV hash bits for L3 DST. */
503 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
504 
505 /* IBV hash bits for TCP. */
506 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
507 			      IBV_RX_HASH_DST_PORT_TCP)
508 
509 /* IBV hash bits for UDP. */
510 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
511 			      IBV_RX_HASH_DST_PORT_UDP)
512 
513 /* IBV hash bits for L4 SRC. */
514 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
515 				 IBV_RX_HASH_SRC_PORT_UDP)
516 
517 /* IBV hash bits for L4 DST. */
518 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
519 				 IBV_RX_HASH_DST_PORT_UDP)
520 
521 /* Geneve header first 16Bit */
522 #define MLX5_GENEVE_VER_MASK 0x3
523 #define MLX5_GENEVE_VER_SHIFT 14
524 #define MLX5_GENEVE_VER_VAL(a) \
525 		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
526 #define MLX5_GENEVE_OPTLEN_MASK 0x3F
527 #define MLX5_GENEVE_OPTLEN_SHIFT 8
528 #define MLX5_GENEVE_OPTLEN_VAL(a) \
529 	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
530 #define MLX5_GENEVE_OAMF_MASK 0x1
531 #define MLX5_GENEVE_OAMF_SHIFT 7
532 #define MLX5_GENEVE_OAMF_VAL(a) \
533 		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
534 #define MLX5_GENEVE_CRITO_MASK 0x1
535 #define MLX5_GENEVE_CRITO_SHIFT 6
536 #define MLX5_GENEVE_CRITO_VAL(a) \
537 		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
538 #define MLX5_GENEVE_RSVD_MASK 0x3F
539 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
540 /*
541  * The length of the Geneve options fields, expressed in four byte multiples,
542  * not including the eight byte fixed tunnel.
543  */
544 #define MLX5_GENEVE_OPT_LEN_0 14
545 #define MLX5_GENEVE_OPT_LEN_1 63
546 
547 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
548 					  sizeof(struct rte_ipv4_hdr))
549 /* GTP extension header flag. */
550 #define MLX5_GTP_EXT_HEADER_FLAG 4
551 
552 /* GTP extension header PDU type shift. */
553 #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)
554 
555 /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
556 #define MLX5_IPV4_FRAG_OFFSET_MASK \
557 		(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)
558 
559 /* Specific item's fields can accept a range of values (using spec and last). */
560 #define MLX5_ITEM_RANGE_NOT_ACCEPTED	false
561 #define MLX5_ITEM_RANGE_ACCEPTED	true
562 
563 /* Software header modify action numbers of a flow. */
564 #define MLX5_ACT_NUM_MDF_IPV4		1
565 #define MLX5_ACT_NUM_MDF_IPV6		4
566 #define MLX5_ACT_NUM_MDF_MAC		2
567 #define MLX5_ACT_NUM_MDF_VID		1
568 #define MLX5_ACT_NUM_MDF_PORT		1
569 #define MLX5_ACT_NUM_MDF_TTL		1
570 #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
571 #define MLX5_ACT_NUM_MDF_TCPSEQ		1
572 #define MLX5_ACT_NUM_MDF_TCPACK		1
573 #define MLX5_ACT_NUM_SET_REG		1
574 #define MLX5_ACT_NUM_SET_TAG		1
575 #define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
576 #define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
577 #define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
578 #define MLX5_ACT_NUM_SET_DSCP		1
579 
580 /* Maximum number of fields to modify in MODIFY_FIELD */
581 #define MLX5_ACT_MAX_MOD_FIELDS 5
582 
583 /* Syndrome bits definition for connection tracking. */
584 #define MLX5_CT_SYNDROME_VALID		(0x0 << 6)
585 #define MLX5_CT_SYNDROME_INVALID	(0x1 << 6)
586 #define MLX5_CT_SYNDROME_TRAP		(0x2 << 6)
587 #define MLX5_CT_SYNDROME_STATE_CHANGE	(0x1 << 1)
588 #define MLX5_CT_SYNDROME_BAD_PACKET	(0x1 << 0)
589 
590 enum mlx5_flow_drv_type {
591 	MLX5_FLOW_TYPE_MIN,
592 	MLX5_FLOW_TYPE_DV,
593 	MLX5_FLOW_TYPE_VERBS,
594 	MLX5_FLOW_TYPE_HW,
595 	MLX5_FLOW_TYPE_MAX,
596 };
597 
598 /* Fate action type. */
599 enum mlx5_flow_fate_type {
600 	MLX5_FLOW_FATE_NONE, /* Egress flow. */
601 	MLX5_FLOW_FATE_QUEUE,
602 	MLX5_FLOW_FATE_JUMP,
603 	MLX5_FLOW_FATE_PORT_ID,
604 	MLX5_FLOW_FATE_DROP,
605 	MLX5_FLOW_FATE_DEFAULT_MISS,
606 	MLX5_FLOW_FATE_SHARED_RSS,
607 	MLX5_FLOW_FATE_MTR,
608 	MLX5_FLOW_FATE_SEND_TO_KERNEL,
609 	MLX5_FLOW_FATE_MAX,
610 };
611 
612 /* Matcher PRM representation */
613 struct mlx5_flow_dv_match_params {
614 	size_t size;
615 	/**< Size of match value. Do NOT split size and key! */
616 	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
617 	/**< Matcher value. This value is used as the mask or as a key. */
618 };
619 
620 /* Matcher structure. */
621 struct mlx5_flow_dv_matcher {
622 	struct mlx5_list_entry entry; /**< Pointer to the next element. */
623 	struct mlx5_flow_tbl_resource *tbl;
624 	/**< Pointer to the table(group) the matcher associated with. */
625 	void *matcher_object; /**< Pointer to DV matcher */
626 	uint16_t crc; /**< CRC of key. */
627 	uint16_t priority; /**< Priority of matcher. */
628 	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
629 };
630 
631 #define MLX5_PUSH_MAX_LEN 128
632 #define MLX5_ENCAP_MAX_LEN 132
633 
634 /* Encap/decap resource structure. */
635 struct mlx5_flow_dv_encap_decap_resource {
636 	struct mlx5_list_entry entry;
637 	/* Pointer to next element. */
638 	uint32_t refcnt; /**< Reference counter. */
639 	void *action;
640 	/**< Encap/decap action object. */
641 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
642 	size_t size;
643 	uint8_t reformat_type;
644 	uint8_t ft_type;
645 	uint64_t flags; /**< Flags for RDMA API. */
646 	uint32_t idx; /**< Index for the index memory pool. */
647 };
648 
649 /* Tag resource structure. */
650 struct mlx5_flow_dv_tag_resource {
651 	struct mlx5_list_entry entry;
652 	/**< hash list entry for tag resource, tag value as the key. */
653 	void *action;
654 	/**< Tag action object. */
655 	uint32_t refcnt; /**< Reference counter. */
656 	uint32_t idx; /**< Index for the index memory pool. */
657 	uint32_t tag_id; /**< Tag ID. */
658 };
659 
660 /* Modify resource structure */
661 struct mlx5_flow_dv_modify_hdr_resource {
662 	struct mlx5_list_entry entry;
663 	void *action; /**< Modify header action object. */
664 	uint32_t idx;
665 	/* Key area for hash list matching: */
666 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
667 	uint8_t actions_num; /**< Number of modification actions. */
668 	bool root; /**< Whether action is in root table. */
669 	struct mlx5_modification_cmd actions[];
670 	/**< Modification actions. */
671 } __rte_packed;
672 
673 /* Modify resource key of the hash organization. */
674 union mlx5_flow_modify_hdr_key {
675 	struct {
676 		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
677 		uint32_t actions_num:5;	/**< Number of modification actions. */
678 		uint32_t group:19;	/**< Flow group id. */
679 		uint32_t cksum;		/**< Actions check sum. */
680 	};
681 	uint64_t v64;			/**< full 64bits value of key */
682 };
683 
684 /* Jump action resource structure. */
685 struct mlx5_flow_dv_jump_tbl_resource {
686 	void *action; /**< Pointer to the rdma core action. */
687 };
688 
689 /* Port ID resource structure. */
690 struct mlx5_flow_dv_port_id_action_resource {
691 	struct mlx5_list_entry entry;
692 	void *action; /**< Action object. */
693 	uint32_t port_id; /**< Port ID value. */
694 	uint32_t idx; /**< Indexed pool memory index. */
695 };
696 
697 /* Push VLAN action resource structure */
698 struct mlx5_flow_dv_push_vlan_action_resource {
699 	struct mlx5_list_entry entry; /* Cache entry. */
700 	void *action; /**< Action object. */
701 	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
702 	rte_be32_t vlan_tag; /**< VLAN tag value. */
703 	uint32_t idx; /**< Indexed pool memory index. */
704 };
705 
706 /* Metadata register copy table entry. */
707 struct mlx5_flow_mreg_copy_resource {
708 	/*
709 	 * Hash list entry for copy table.
710 	 *  - Key is 32/64-bit MARK action ID.
711 	 *  - MUST be the first entry.
712 	 */
713 	struct mlx5_list_entry hlist_ent;
714 	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
715 	/* List entry for device flows. */
716 	uint32_t idx;
717 	uint32_t rix_flow; /* Built flow for copy. */
718 	uint32_t mark_id;
719 };
720 
721 /* Table tunnel parameter. */
722 struct mlx5_flow_tbl_tunnel_prm {
723 	const struct mlx5_flow_tunnel *tunnel;
724 	uint32_t group_id;
725 	bool external;
726 };
727 
728 /* Table data structure of the hash organization. */
729 struct mlx5_flow_tbl_data_entry {
730 	struct mlx5_list_entry entry;
731 	/**< hash list entry, 64-bits key inside. */
732 	struct mlx5_flow_tbl_resource tbl;
733 	/**< flow table resource. */
734 	struct mlx5_list *matchers;
735 	/**< matchers' header associated with the flow table. */
736 	struct mlx5_flow_dv_jump_tbl_resource jump;
737 	/**< jump resource, at most one for each table created. */
738 	uint32_t idx; /**< index for the indexed mempool. */
739 	/**< tunnel offload */
740 	const struct mlx5_flow_tunnel *tunnel;
741 	uint32_t group_id;
742 	uint32_t external:1;
743 	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
744 	uint32_t is_egress:1; /**< Egress table. */
745 	uint32_t is_transfer:1; /**< Transfer table. */
746 	uint32_t dummy:1; /**<  DR table. */
747 	uint32_t id:22; /**< Table ID. */
748 	uint32_t reserve:5; /**< Reserved to future using. */
749 	uint32_t level; /**< Table level. */
750 };
751 
752 /* Sub rdma-core actions list. */
753 struct mlx5_flow_sub_actions_list {
754 	uint32_t actions_num; /**< Number of sample actions. */
755 	uint64_t action_flags;
756 	void *dr_queue_action;
757 	void *dr_tag_action;
758 	void *dr_cnt_action;
759 	void *dr_port_id_action;
760 	void *dr_encap_action;
761 	void *dr_jump_action;
762 };
763 
764 /* Sample sub-actions resource list. */
765 struct mlx5_flow_sub_actions_idx {
766 	uint32_t rix_hrxq; /**< Hash Rx queue object index. */
767 	uint32_t rix_tag; /**< Index to the tag action. */
768 	uint32_t rix_port_id_action; /**< Index to port ID action resource. */
769 	uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
770 	uint32_t rix_jump; /**< Index to the jump action resource. */
771 };
772 
773 /* Sample action resource structure. */
774 struct mlx5_flow_dv_sample_resource {
775 	struct mlx5_list_entry entry; /**< Cache entry. */
776 	union {
777 		void *verbs_action; /**< Verbs sample action object. */
778 		void **sub_actions; /**< Sample sub-action array. */
779 	};
780 	struct rte_eth_dev *dev; /**< Device registers the action. */
781 	uint32_t idx; /** Sample object index. */
782 	uint8_t ft_type; /** Flow Table Type */
783 	uint32_t ft_id; /** Flow Table Level */
784 	uint32_t ratio;   /** Sample Ratio */
785 	uint64_t set_action; /** Restore reg_c0 value */
786 	void *normal_path_tbl; /** Flow Table pointer */
787 	struct mlx5_flow_sub_actions_idx sample_idx;
788 	/**< Action index resources. */
789 	struct mlx5_flow_sub_actions_list sample_act;
790 	/**< Action resources. */
791 };
792 
793 #define MLX5_MAX_DEST_NUM	2
794 
795 /* Destination array action resource structure. */
796 struct mlx5_flow_dv_dest_array_resource {
797 	struct mlx5_list_entry entry; /**< Cache entry. */
798 	uint32_t idx; /** Destination array action object index. */
799 	uint8_t ft_type; /** Flow Table Type */
800 	uint8_t num_of_dest; /**< Number of destination actions. */
801 	struct rte_eth_dev *dev; /**< Device registers the action. */
802 	void *action; /**< Pointer to the rdma core action. */
803 	struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
804 	/**< Action index resources. */
805 	struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
806 	/**< Action resources. */
807 };
808 
809 /* PMD flow priority for tunnel */
810 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
811 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
812 
813 
814 /** Device flow handle structure for DV mode only. */
815 struct mlx5_flow_handle_dv {
816 	/* Flow DV api: */
817 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
818 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
819 	/**< Pointer to modify header resource in cache. */
820 	uint32_t rix_encap_decap;
821 	/**< Index to encap/decap resource in cache. */
822 	uint32_t rix_push_vlan;
823 	/**< Index to push VLAN action resource in cache. */
824 	uint32_t rix_tag;
825 	/**< Index to the tag action. */
826 	uint32_t rix_sample;
827 	/**< Index to sample action resource in cache. */
828 	uint32_t rix_dest_array;
829 	/**< Index to destination array resource in cache. */
830 } __rte_packed;
831 
832 /** Device flow handle structure: used both for creating & destroying. */
833 struct mlx5_flow_handle {
834 	SILIST_ENTRY(uint32_t)next;
835 	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
836 	/**< Index to next device flow handle. */
837 	uint64_t layers;
838 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
839 	void *drv_flow; /**< pointer to driver flow object. */
840 	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
841 	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
842 	uint32_t fate_action:4; /**< Fate action type. */
843 	union {
844 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
845 		uint32_t rix_jump; /**< Index to the jump action resource. */
846 		uint32_t rix_port_id_action;
847 		/**< Index to port ID action resource. */
848 		uint32_t rix_fate;
849 		/**< Generic value indicates the fate action. */
850 		uint32_t rix_default_fate;
851 		/**< Indicates default miss fate action. */
852 		uint32_t rix_srss;
853 		/**< Indicates shared RSS fate action. */
854 	};
855 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
856 	struct mlx5_flow_handle_dv dvh;
857 #endif
858 	uint8_t flex_item; /**< referenced Flex Item bitmask. */
859 } __rte_packed;
860 
861 /*
862  * Size for Verbs device flow handle structure only. Do not use the DV only
863  * structure in Verbs. No DV flows attributes will be accessed.
864  * Macro offsetof() could also be used here.
865  */
866 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
867 #define MLX5_FLOW_HANDLE_VERBS_SIZE \
868 	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
869 #else
870 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
871 #endif
872 
873 /** Device flow structure only for DV flow creation. */
874 struct mlx5_flow_dv_workspace {
875 	uint32_t group; /**< The group index. */
876 	uint32_t table_id; /**< Flow table identifier. */
877 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
878 	int actions_n; /**< number of actions. */
879 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
880 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
881 	/**< Pointer to encap/decap resource in cache. */
882 	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
883 	/**< Pointer to push VLAN action resource in cache. */
884 	struct mlx5_flow_dv_tag_resource *tag_resource;
885 	/**< pointer to the tag action. */
886 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
887 	/**< Pointer to port ID action resource. */
888 	struct mlx5_flow_dv_jump_tbl_resource *jump;
889 	/**< Pointer to the jump action resource. */
890 	struct mlx5_flow_dv_match_params value;
891 	/**< Holds the value that the packet is compared to. */
892 	struct mlx5_flow_dv_sample_resource *sample_res;
893 	/**< Pointer to the sample action resource. */
894 	struct mlx5_flow_dv_dest_array_resource *dest_array_res;
895 	/**< Pointer to the destination array resource. */
896 };
897 
898 #ifdef HAVE_INFINIBAND_VERBS_H
899 /*
900  * Maximal Verbs flow specifications & actions size.
901  * Some elements are mutually exclusive, but enough space should be allocated.
902  * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
903  *               2. One tunnel header (exception: GRE + MPLS),
904  *                  SPEC length: GRE == tunnel.
905  * Actions: 1. 1 Mark OR Flag.
906  *          2. 1 Drop (if any).
907  *          3. No limitation for counters, but it makes no sense to support too
908  *             many counters in a single device flow.
909  */
910 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
911 #define MLX5_VERBS_MAX_SPEC_SIZE \
912 		( \
913 			(2 * (sizeof(struct ibv_flow_spec_eth) + \
914 			      sizeof(struct ibv_flow_spec_ipv6) + \
915 			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
916 			sizeof(struct ibv_flow_spec_gre) + \
917 			sizeof(struct ibv_flow_spec_mpls)) \
918 		)
919 #else
920 #define MLX5_VERBS_MAX_SPEC_SIZE \
921 		( \
922 			(2 * (sizeof(struct ibv_flow_spec_eth) + \
923 			      sizeof(struct ibv_flow_spec_ipv6) + \
924 			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
925 			sizeof(struct ibv_flow_spec_tunnel)) \
926 		)
927 #endif
928 
929 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
930 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
931 #define MLX5_VERBS_MAX_ACT_SIZE \
932 		( \
933 			sizeof(struct ibv_flow_spec_action_tag) + \
934 			sizeof(struct ibv_flow_spec_action_drop) + \
935 			sizeof(struct ibv_flow_spec_counter_action) * 4 \
936 		)
937 #else
938 #define MLX5_VERBS_MAX_ACT_SIZE \
939 		( \
940 			sizeof(struct ibv_flow_spec_action_tag) + \
941 			sizeof(struct ibv_flow_spec_action_drop) \
942 		)
943 #endif
944 
945 #define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
946 		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
947 
948 /** Device flow structure only for Verbs flow creation. */
949 struct mlx5_flow_verbs_workspace {
950 	unsigned int size; /**< Size of the attribute. */
951 	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
952 	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
953 	/**< Specifications & actions buffer of verbs flow. */
954 };
955 #endif /* HAVE_INFINIBAND_VERBS_H */
956 
957 #define MLX5_SCALE_FLOW_GROUP_BIT 0
958 #define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1
959 
960 /** Maximal number of device sub-flows supported. */
961 #define MLX5_NUM_MAX_DEV_FLOWS 32
962 
963 /**
964  * tunnel offload rules type
965  */
966 enum mlx5_tof_rule_type {
967 	MLX5_TUNNEL_OFFLOAD_NONE = 0,
968 	MLX5_TUNNEL_OFFLOAD_SET_RULE,
969 	MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
970 	MLX5_TUNNEL_OFFLOAD_MISS_RULE,
971 };
972 
973 /** Device flow structure. */
974 __extension__
975 struct mlx5_flow {
976 	struct rte_flow *flow; /**< Pointer to the main flow. */
977 	uint32_t flow_idx; /**< The memory pool index to the main flow. */
978 	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
979 	uint64_t act_flags;
980 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
981 	bool external; /**< true if the flow is created external to PMD. */
982 	uint8_t ingress:1; /**< 1 if the flow is ingress. */
983 	uint8_t skip_scale:2;
984 	uint8_t symmetric_hash_function:1;
985 	/**
986 	 * Each Bit be set to 1 if Skip the scale the flow group with factor.
987 	 * If bit0 be set to 1, then skip the scale the original flow group;
988 	 * If bit1 be set to 1, then skip the scale the jump flow group if
989 	 * having jump action.
990 	 * 00: Enable scale in a flow, default value.
991 	 * 01: Skip scale the flow group with factor, enable scale the group
992 	 * of jump action.
993 	 * 10: Enable scale the group with factor, skip scale the group of
994 	 * jump action.
995 	 * 11: Skip scale the table with factor both for flow group and jump
996 	 * group.
997 	 */
998 	union {
999 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1000 		struct mlx5_flow_dv_workspace dv;
1001 #endif
1002 #ifdef HAVE_INFINIBAND_VERBS_H
1003 		struct mlx5_flow_verbs_workspace verbs;
1004 #endif
1005 	};
1006 	struct mlx5_flow_handle *handle;
1007 	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
1008 	const struct mlx5_flow_tunnel *tunnel;
1009 	enum mlx5_tof_rule_type tof_type;
1010 };
1011 
1012 /* Flow meter state. */
1013 #define MLX5_FLOW_METER_DISABLE 0
1014 #define MLX5_FLOW_METER_ENABLE 1
1015 
1016 #define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
1017 #define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
1018 
1019 #define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES
1020 
1021 #define MLX5_MAN_WIDTH 8
1022 /* Legacy Meter parameter structure. */
1023 struct mlx5_legacy_flow_meter {
1024 	struct mlx5_flow_meter_info fm;
1025 	/* Must be the first in struct. */
1026 	TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
1027 	/**< Pointer to the next flow meter structure. */
1028 	uint32_t idx;
1029 	/* Index to meter object. */
1030 };
1031 
1032 #define MLX5_MAX_TUNNELS 256
1033 #define MLX5_TNL_MISS_RULE_PRIORITY 3
1034 #define MLX5_TNL_MISS_FDB_JUMP_GRP  0x1234faac
1035 
1036 /*
1037  * When tunnel offload is active, all JUMP group ids are converted
1038  * using the same method. That conversion is applied both to tunnel and
1039  * regular rule types.
1040  * Group ids used in tunnel rules are relative to it's tunnel (!).
1041  * Application can create number of steer rules, using the same
1042  * tunnel, with different group id in each rule.
1043  * Each tunnel stores its groups internally in PMD tunnel object.
1044  * Groups used in regular rules do not belong to any tunnel and are stored
1045  * in tunnel hub.
1046  */
1047 
1048 struct mlx5_flow_tunnel {
1049 	LIST_ENTRY(mlx5_flow_tunnel) chain;
1050 	struct rte_flow_tunnel app_tunnel;	/** app tunnel copy */
1051 	uint32_t tunnel_id;			/** unique tunnel ID */
1052 	RTE_ATOMIC(uint32_t) refctn;
1053 	struct rte_flow_action action;
1054 	struct rte_flow_item item;
1055 	struct mlx5_hlist *groups;		/** tunnel groups */
1056 };
1057 
1058 /** PMD tunnel related context */
1059 struct mlx5_flow_tunnel_hub {
1060 	/* Tunnels list
1061 	 * Access to the list MUST be MT protected
1062 	 */
1063 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
1064 	 /* protect access to the tunnels list */
1065 	rte_spinlock_t sl;
1066 	struct mlx5_hlist *groups;		/** non tunnel groups */
1067 };
1068 
1069 /* convert jump group to flow table ID in tunnel rules */
1070 struct tunnel_tbl_entry {
1071 	struct mlx5_list_entry hash;
1072 	uint32_t flow_table;
1073 	uint32_t tunnel_id;
1074 	uint32_t group;
1075 };
1076 
1077 static inline uint32_t
1078 tunnel_id_to_flow_tbl(uint32_t id)
1079 {
1080 	return id | (1u << 16);
1081 }
1082 
1083 static inline uint32_t
1084 tunnel_flow_tbl_to_id(uint32_t flow_tbl)
1085 {
1086 	return flow_tbl & ~(1u << 16);
1087 }
1088 
1089 union tunnel_tbl_key {
1090 	uint64_t val;
1091 	struct {
1092 		uint32_t tunnel_id;
1093 		uint32_t group;
1094 	};
1095 };
1096 
1097 static inline struct mlx5_flow_tunnel_hub *
1098 mlx5_tunnel_hub(struct rte_eth_dev *dev)
1099 {
1100 	struct mlx5_priv *priv = dev->data->dev_private;
1101 	return priv->sh->tunnel_hub;
1102 }
1103 
1104 static inline bool
1105 is_tunnel_offload_active(const struct rte_eth_dev *dev)
1106 {
1107 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1108 	const struct mlx5_priv *priv = dev->data->dev_private;
1109 	return !!priv->sh->config.dv_miss_info;
1110 #else
1111 	RTE_SET_USED(dev);
1112 	return false;
1113 #endif
1114 }
1115 
1116 static inline bool
1117 is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
1118 {
1119 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
1120 }
1121 
1122 static inline bool
1123 is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
1124 {
1125 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
1126 }
1127 
1128 static inline const struct mlx5_flow_tunnel *
1129 flow_actions_to_tunnel(const struct rte_flow_action actions[])
1130 {
1131 	return actions[0].conf;
1132 }
1133 
1134 static inline const struct mlx5_flow_tunnel *
1135 flow_items_to_tunnel(const struct rte_flow_item items[])
1136 {
1137 	return items[0].spec;
1138 }
1139 
1140 /**
1141  * Gets the tag array given for RTE_FLOW_FIELD_TAG type.
1142  *
1143  * In old API the value was provided in "level" field, but in new API
1144  * it is provided in "tag_array" field. Since encapsulation level is not
1145  * relevant for metadata, the tag array can be still provided in "level"
1146  * for backwards compatibility.
1147  *
1148  * @param[in] data
1149  *   Pointer to tag modify data structure.
1150  *
1151  * @return
1152  *   Tag array index.
1153  */
1154 static inline uint8_t
1155 flow_tag_index_get(const struct rte_flow_field_data *data)
1156 {
1157 	return data->tag_index ? data->tag_index : data->level;
1158 }
1159 
1160 /**
1161  * Fetch 1, 2, 3 or 4 byte field from the byte array
1162  * and return as unsigned integer in host-endian format.
1163  *
1164  * @param[in] data
1165  *   Pointer to data array.
1166  * @param[in] size
1167  *   Size of field to extract.
1168  *
1169  * @return
1170  *   converted field in host endian format.
1171  */
1172 static inline uint32_t
1173 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
1174 {
1175 	uint32_t ret;
1176 
1177 	switch (size) {
1178 	case 1:
1179 		ret = *data;
1180 		break;
1181 	case 2:
1182 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1183 		break;
1184 	case 3:
1185 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1186 		ret = (ret << 8) | *(data + sizeof(uint16_t));
1187 		break;
1188 	case 4:
1189 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
1190 		break;
1191 	default:
1192 		MLX5_ASSERT(false);
1193 		ret = 0;
1194 		break;
1195 	}
1196 	return ret;
1197 }
1198 
1199 static inline bool
1200 flow_modify_field_support_tag_array(enum rte_flow_field_id field)
1201 {
1202 	switch ((int)field) {
1203 	case RTE_FLOW_FIELD_TAG:
1204 	case RTE_FLOW_FIELD_MPLS:
1205 	case MLX5_RTE_FLOW_FIELD_META_REG:
1206 		return true;
1207 	default:
1208 		break;
1209 	}
1210 	return false;
1211 }
1212 
1213 struct field_modify_info {
1214 	uint32_t size; /* Size of field in protocol header, in bytes. */
1215 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
1216 	enum mlx5_modification_field id;
1217 	uint32_t shift;
1218 	uint8_t is_flex; /* Temporary indicator for flex item modify filed WA. */
1219 };
1220 
1221 /* HW steering flow attributes. */
1222 struct mlx5_flow_attr {
1223 	uint32_t port_id; /* Port index. */
1224 	uint32_t group; /* Flow group. */
1225 	uint32_t priority; /* Original Priority. */
1226 	/* rss level, used by priority adjustment. */
1227 	uint32_t rss_level;
1228 	/* Action flags, used by priority adjustment. */
1229 	uint32_t act_flags;
1230 	uint32_t tbl_type; /* Flow table type. */
1231 };
1232 
1233 /* Flow structure. */
1234 struct rte_flow {
1235 	uint32_t dev_handles;
1236 	/**< Device flow handles that are part of the flow. */
1237 	uint32_t type:2;
1238 	uint32_t drv_type:2; /**< Driver type. */
1239 	uint32_t tunnel:1;
1240 	uint32_t meter:24; /**< Holds flow meter id. */
1241 	uint32_t indirect_type:2; /**< Indirect action type. */
1242 	uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
1243 	uint32_t rix_mreg_copy;
1244 	/**< Index to metadata register copy table resource. */
1245 	uint32_t counter; /**< Holds flow counter. */
1246 	uint32_t tunnel_id;  /**< Tunnel id */
1247 	union {
1248 		uint32_t age; /**< Holds ASO age bit index. */
1249 		uint32_t ct; /**< Holds ASO CT index. */
1250 	};
1251 	uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */
1252 } __rte_packed;
1253 
1254 /*
1255  * HWS COUNTER ID's layout
1256  *       3                   2                   1                   0
1257  *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
1258  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1259  *    |  T  |     | D |                                               |
1260  *    ~  Y  |     | C |                    IDX                        ~
1261  *    |  P  |     | S |                                               |
1262  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1263  *
1264  *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
1265  *    Bit 25:24 = DCS index
1266  *    Bit 23:00 = IDX in this counter belonged DCS bulk.
1267  */
1268 typedef uint32_t cnt_id_t;
1269 
1270 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1271 
1272 enum {
1273 	MLX5_FLOW_HW_FLOW_OP_TYPE_NONE,
1274 	MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE,
1275 	MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY,
1276 	MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE,
1277 	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE,
1278 	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY,
1279 	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE,
1280 };
1281 
1282 enum {
1283 	MLX5_FLOW_HW_FLOW_FLAG_CNT_ID = RTE_BIT32(0),
1284 	MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP = RTE_BIT32(1),
1285 	MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ = RTE_BIT32(2),
1286 	MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX = RTE_BIT32(3),
1287 	MLX5_FLOW_HW_FLOW_FLAG_MTR_ID = RTE_BIT32(4),
1288 	MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR = RTE_BIT32(5),
1289 	MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW = RTE_BIT32(6),
1290 };
1291 
1292 #define MLX5_FLOW_HW_FLOW_FLAGS_ALL ( \
1293 		MLX5_FLOW_HW_FLOW_FLAG_CNT_ID | \
1294 		MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP | \
1295 		MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ | \
1296 		MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX | \
1297 		MLX5_FLOW_HW_FLOW_FLAG_MTR_ID | \
1298 		MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR | \
1299 		MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW \
1300 	)
1301 
1302 #ifdef PEDANTIC
1303 #pragma GCC diagnostic ignored "-Wpedantic"
1304 #endif
1305 
1306 /** HWS flow struct. */
1307 struct rte_flow_hw {
1308 	/** The table flow allcated from. */
1309 	struct rte_flow_template_table *table;
1310 	/** Application's private data passed to enqueued flow operation. */
1311 	void *user_data;
1312 	/** Flow index from indexed pool. */
1313 	uint32_t idx;
1314 	/** Resource index from indexed pool. */
1315 	uint32_t res_idx;
1316 	/** HWS flow rule index passed to mlx5dr. */
1317 	uint32_t rule_idx;
1318 	/** Which flow fields (inline or in auxiliary struct) are used. */
1319 	uint32_t flags;
1320 	/** Ongoing flow operation type. */
1321 	uint8_t operation_type;
1322 	/** Index of pattern template this flow is based on. */
1323 	uint8_t mt_idx;
1324 
1325 	/** COUNT action index. */
1326 	cnt_id_t cnt_id;
1327 	union {
1328 		/** Jump action. */
1329 		struct mlx5_hw_jump_action *jump;
1330 		/** TIR action. */
1331 		struct mlx5_hrxq *hrxq;
1332 	};
1333 
1334 	/**
1335 	 * Padding for alignment to 56 bytes.
1336 	 * Since mlx5dr rule is 72 bytes, whole flow is contained within 128 B (2 cache lines).
1337 	 * This space is reserved for future additions to flow struct.
1338 	 */
1339 	uint8_t padding[10];
1340 	/** HWS layer data struct. */
1341 	uint8_t rule[];
1342 } __rte_packed;
1343 
1344 /** Auxiliary data fields that are updatable. */
1345 struct rte_flow_hw_aux_fields {
1346 	/** AGE action index. */
1347 	uint32_t age_idx;
1348 	/** Direct meter (METER or METER_MARK) action index. */
1349 	uint32_t mtr_id;
1350 };
1351 
1352 /** Auxiliary data stored per flow which is not required to be stored in main flow structure. */
1353 struct rte_flow_hw_aux {
1354 	/** Auxiliary fields associated with the original flow. */
1355 	struct rte_flow_hw_aux_fields orig;
1356 	/** Auxiliary fields associated with the updated flow. */
1357 	struct rte_flow_hw_aux_fields upd;
1358 	/** Index of resizable matcher associated with this flow. */
1359 	uint8_t matcher_selector;
1360 	/** Placeholder flow struct used during flow rule update operation. */
1361 	struct rte_flow_hw upd_flow;
1362 };
1363 
1364 #ifdef PEDANTIC
1365 #pragma GCC diagnostic error "-Wpedantic"
1366 #endif
1367 
1368 struct mlx5_action_construct_data;
1369 typedef int
1370 (*indirect_list_callback_t)(struct rte_eth_dev *,
1371 			    const struct mlx5_action_construct_data *,
1372 			    const struct rte_flow_action *,
1373 			    struct mlx5dr_rule_action *);
1374 
1375 #define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
1376 
1377 /** Container for flow action data constructed during flow rule creation. */
1378 struct mlx5_flow_hw_action_params {
1379 	/** Array of constructed modify header commands. */
1380 	struct mlx5_modification_cmd mhdr_cmd[MLX5_MHDR_MAX_CMD];
1381 	/** Constructed encap/decap data buffer. */
1382 	uint8_t encap_data[MLX5_ENCAP_MAX_LEN];
1383 	/** Constructed IPv6 routing data buffer. */
1384 	uint8_t ipv6_push_data[MLX5_PUSH_MAX_LEN];
1385 };
1386 
1387 /** Container for dynamically generated flow items used during flow rule creation. */
1388 struct mlx5_flow_hw_pattern_params {
1389 	/** Array of dynamically generated flow items. */
1390 	struct rte_flow_item items[MLX5_HW_MAX_ITEMS];
1391 	/** Temporary REPRESENTED_PORT item generated by PMD. */
1392 	struct rte_flow_item_ethdev port_spec;
1393 	/** Temporary TAG item generated by PMD. */
1394 	struct rte_flow_item_tag tag_spec;
1395 };
1396 
1397 /* rte flow action translate to DR action struct. */
1398 struct mlx5_action_construct_data {
1399 	LIST_ENTRY(mlx5_action_construct_data) next;
1400 	/* Ensure the action types are matched. */
1401 	int type;
1402 	uint32_t idx;  /* Data index. */
1403 	uint16_t action_src; /* rte_flow_action src offset. */
1404 	uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
1405 	indirect_list_callback_t indirect_list_cb;
1406 	union {
1407 		struct {
1408 			/* encap data len. */
1409 			uint16_t len;
1410 		} encap;
1411 		struct {
1412 			/* Modify header action offset in pattern. */
1413 			uint16_t mhdr_cmds_off;
1414 			/* Offset in pattern after modify header actions. */
1415 			uint16_t mhdr_cmds_end;
1416 			/*
1417 			 * True if this action is masked and does not need to
1418 			 * be generated.
1419 			 */
1420 			bool shared;
1421 			/*
1422 			 * Modified field definitions in dst field (SET, ADD)
1423 			 * or src field (COPY).
1424 			 */
1425 			struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
1426 			/* Modified field definitions in dst field (COPY). */
1427 			struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
1428 			/*
1429 			 * Masks applied to field values to generate
1430 			 * PRM actions.
1431 			 */
1432 			uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
1433 		} modify_header;
1434 		struct {
1435 			bool symmetric_hash_function; /* Symmetric RSS hash */
1436 			uint64_t types; /* RSS hash types. */
1437 			uint32_t level; /* RSS level. */
1438 			uint32_t idx; /* Shared action index. */
1439 		} shared_rss;
1440 		struct {
1441 			cnt_id_t id;
1442 		} shared_counter;
1443 		struct {
1444 			/* IPv6 extension push data len. */
1445 			uint16_t len;
1446 		} ipv6_ext;
1447 		struct {
1448 			uint32_t id;
1449 			uint32_t conf_masked:1;
1450 		} shared_meter;
1451 	};
1452 };
1453 
1454 #define MAX_GENEVE_OPTIONS_RESOURCES 7
1455 
1456 /* GENEVE TLV options manager structure. */
1457 struct mlx5_geneve_tlv_options_mng {
1458 	uint8_t nb_options; /* Number of options inside the template. */
1459 	struct {
1460 		uint8_t opt_type;
1461 		uint16_t opt_class;
1462 	} options[MAX_GENEVE_OPTIONS_RESOURCES];
1463 };
1464 
1465 /* Flow item template struct. */
1466 struct rte_flow_pattern_template {
1467 	LIST_ENTRY(rte_flow_pattern_template) next;
1468 	/* Template attributes. */
1469 	struct rte_flow_pattern_template_attr attr;
1470 	struct mlx5dr_match_template *mt; /* mlx5 match template. */
1471 	uint64_t item_flags; /* Item layer flags. */
1472 	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
1473 	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
1474 	/*
1475 	 * If true, then rule pattern should be prepended with
1476 	 * represented_port pattern item.
1477 	 */
1478 	bool implicit_port;
1479 	/*
1480 	 * If true, then rule pattern should be prepended with
1481 	 * tag pattern item for representor matching.
1482 	 */
1483 	bool implicit_tag;
1484 	/* Manages all GENEVE TLV options used by this pattern template. */
1485 	struct mlx5_geneve_tlv_options_mng geneve_opt_mng;
1486 	uint8_t flex_item; /* flex item index. */
1487 };
1488 
1489 /* Flow action template struct. */
1490 struct rte_flow_actions_template {
1491 	LIST_ENTRY(rte_flow_actions_template) next;
1492 	/* Template attributes. */
1493 	struct rte_flow_actions_template_attr attr;
1494 	struct rte_flow_action *actions; /* Cached flow actions. */
1495 	struct rte_flow_action *masks; /* Cached action masks.*/
1496 	struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
1497 	uint64_t action_flags; /* Bit-map of all valid action in template. */
1498 	uint16_t dr_actions_num; /* Amount of DR rules actions. */
1499 	uint16_t actions_num; /* Amount of flow actions */
1500 	uint16_t *dr_off; /* DR action offset for given rte action offset. */
1501 	uint16_t *src_off; /* RTE action displacement from app. template */
1502 	uint16_t reformat_off; /* Offset of DR reformat action. */
1503 	uint16_t mhdr_off; /* Offset of DR modify header action. */
1504 	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
1505 	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
1506 	uint8_t flex_item; /* flex item index. */
1507 };
1508 
1509 /* Jump action struct. */
1510 struct mlx5_hw_jump_action {
1511 	/* Action jump from root. */
1512 	struct mlx5dr_action *root_action;
1513 	/* HW steering jump action. */
1514 	struct mlx5dr_action *hws_action;
1515 };
1516 
1517 /* Encap decap action struct. */
1518 struct mlx5_hw_encap_decap_action {
1519 	struct mlx5_indirect_list indirect;
1520 	enum mlx5dr_action_type action_type;
1521 	struct mlx5dr_action *action; /* Action object. */
1522 	/* Is header_reformat action shared across flows in table. */
1523 	uint32_t shared:1;
1524 	uint32_t multi_pattern:1;
1525 	size_t data_size; /* Action metadata size. */
1526 	uint8_t data[]; /* Action data. */
1527 };
1528 
1529 /* Push remove action struct. */
1530 struct mlx5_hw_push_remove_action {
1531 	struct mlx5dr_action *action; /* Action object. */
1532 	/* Is push_remove action shared across flows in table. */
1533 	uint8_t shared;
1534 	size_t data_size; /* Action metadata size. */
1535 	uint8_t data[]; /* Action data. */
1536 };
1537 
1538 /* Modify field action struct. */
1539 struct mlx5_hw_modify_header_action {
1540 	/* Reference to DR action */
1541 	struct mlx5dr_action *action;
1542 	/* Modify header action position in action rule table. */
1543 	uint16_t pos;
1544 	/* Is MODIFY_HEADER action shared across flows in table. */
1545 	uint32_t shared:1;
1546 	uint32_t multi_pattern:1;
1547 	/* Amount of modification commands stored in the precompiled buffer. */
1548 	uint32_t mhdr_cmds_num;
1549 	/* Precompiled modification commands. */
1550 	struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
1551 };
1552 
1553 /* The maximum actions support in the flow. */
1554 #define MLX5_HW_MAX_ACTS 16
1555 
1556 /* DR action set struct. */
1557 struct mlx5_hw_actions {
1558 	/* Dynamic action list. */
1559 	LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
1560 	struct mlx5_hw_jump_action *jump; /* Jump action. */
1561 	struct mlx5_hrxq *tir; /* TIR action. */
1562 	struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
1563 	/* Encap/Decap action. */
1564 	struct mlx5_hw_encap_decap_action *encap_decap;
1565 	uint16_t encap_decap_pos; /* Encap/Decap action position. */
1566 	/* Push/remove action. */
1567 	struct mlx5_hw_push_remove_action *push_remove;
1568 	uint16_t push_remove_pos; /* Push/remove action position. */
1569 	uint32_t mark:1; /* Indicate the mark action. */
1570 	cnt_id_t cnt_id; /* Counter id. */
1571 	uint32_t mtr_id; /* Meter id. */
1572 	/* Translated DR action array from action template. */
1573 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
1574 };
1575 
1576 /* mlx5 action template struct. */
1577 struct mlx5_hw_action_template {
1578 	/* Action template pointer. */
1579 	struct rte_flow_actions_template *action_template;
1580 	struct mlx5_hw_actions acts; /* Template actions. */
1581 };
1582 
1583 /* mlx5 flow group struct. */
1584 struct mlx5_flow_group {
1585 	struct mlx5_list_entry entry;
1586 	LIST_ENTRY(mlx5_flow_group) next;
1587 	struct rte_eth_dev *dev; /* Reference to corresponding device. */
1588 	struct mlx5dr_table *tbl; /* HWS table object. */
1589 	struct mlx5_hw_jump_action jump; /* Jump action. */
1590 	struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */
1591 	enum mlx5dr_table_type type; /* Table type. */
1592 	uint32_t group_id; /* Group id. */
1593 	uint32_t idx; /* Group memory index. */
1594 };
1595 
1596 
1597 #define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
1598 #define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
1599 
1600 #define MLX5_MULTIPATTERN_ENCAP_NUM 5
1601 #define MLX5_MAX_TABLE_RESIZE_NUM 64
1602 
1603 struct mlx5_multi_pattern_segment {
1604 	/*
1605 	 * Modify Header Argument Objects number allocated for action in that
1606 	 * segment.
1607 	 * Capacity is always power of 2.
1608 	 */
1609 	uint32_t capacity;
1610 	uint32_t head_index;
1611 	struct mlx5dr_action *mhdr_action;
1612 	struct mlx5dr_action *reformat_action[MLX5_MULTIPATTERN_ENCAP_NUM];
1613 };
1614 
1615 struct mlx5_tbl_multi_pattern_ctx {
1616 	struct {
1617 		uint32_t elements_num;
1618 		struct mlx5dr_action_reformat_header reformat_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
1619 		/**
1620 		 * insert_header structure is larger than reformat_header.
1621 		 * Enclosing these structures with union will case a gap between
1622 		 * reformat_hdr array elements.
1623 		 * mlx5dr_action_create_reformat() expects adjacent array elements.
1624 		 */
1625 		struct mlx5dr_action_insert_header insert_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
1626 	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];
1627 
1628 	struct {
1629 		uint32_t elements_num;
1630 		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
1631 	} mh;
1632 	struct mlx5_multi_pattern_segment segments[MLX5_MAX_TABLE_RESIZE_NUM];
1633 };
1634 
1635 static __rte_always_inline void
1636 mlx5_multi_pattern_activate(struct mlx5_tbl_multi_pattern_ctx *mpctx)
1637 {
1638 	mpctx->segments[0].head_index = 1;
1639 }
1640 
1641 static __rte_always_inline bool
1642 mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
1643 {
1644 	return mpctx->segments[0].head_index == 1;
1645 }
1646 
1647 struct mlx5_flow_template_table_cfg {
1648 	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
1649 	bool external; /* True if created by flow API, false if table is internal to PMD. */
1650 };
1651 
1652 struct mlx5_matcher_info {
1653 	struct mlx5dr_matcher *matcher; /* Template matcher. */
1654 	RTE_ATOMIC(uint32_t) refcnt;
1655 };
1656 
1657 struct __rte_cache_aligned mlx5_dr_rule_action_container {
1658 	struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];
1659 };
1660 
1661 struct rte_flow_template_table {
1662 	LIST_ENTRY(rte_flow_template_table) next;
1663 	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
1664 	struct mlx5_matcher_info matcher_info[2];
1665 	uint32_t matcher_selector;
1666 	rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables */
1667 	/* Item templates bind to the table. */
1668 	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
1669 	/* Action templates bind to the table. */
1670 	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
1671 	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
1672 	struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */
1673 	struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
1674 	struct mlx5_flow_template_table_cfg cfg;
1675 	uint32_t type; /* Flow table type RX/TX/FDB. */
1676 	uint8_t nb_item_templates; /* Item template number. */
1677 	uint8_t nb_action_templates; /* Action template number. */
1678 	uint32_t refcnt; /* Table reference counter. */
1679 	struct mlx5_tbl_multi_pattern_ctx mpctx;
1680 	struct mlx5dr_matcher_attr matcher_attr;
1681 	/**
1682 	 * Variable length array of containers containing precalculated templates of DR actions
1683 	 * arrays. This array is allocated at template table creation time and contains
1684 	 * one container per each queue, per each actions template.
1685 	 * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair.
1686 	 * Each container will provide a local "queue buffer" to work on for flow creation
1687 	 * operations when using a given actions template.
1688 	 */
1689 	struct mlx5_dr_rule_action_container rule_acts[];
1690 };
1691 
1692 static __rte_always_inline struct mlx5dr_matcher *
1693 mlx5_table_matcher(const struct rte_flow_template_table *table)
1694 {
1695 	return table->matcher_info[table->matcher_selector].matcher;
1696 }
1697 
1698 static __rte_always_inline struct mlx5_multi_pattern_segment *
1699 mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
1700 				uint32_t flow_resource_ix)
1701 {
1702 	int i;
1703 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
1704 
1705 	if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr)))
1706 		return &mpctx->segments[0];
1707 	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
1708 		uint32_t limit = mpctx->segments[i].head_index +
1709 				 mpctx->segments[i].capacity;
1710 
1711 		if (flow_resource_ix < limit)
1712 			return &mpctx->segments[i];
1713 	}
1714 	return NULL;
1715 }
1716 
1717 /*
1718  * Convert metadata or tag to the actual register.
1719  * META: Fixed C_1 for FDB mode, REG_A for NIC TX and REG_B for NIC RX.
1720  * TAG: C_x expect meter color reg and the reserved ones.
1721  */
1722 static __rte_always_inline int
1723 flow_hw_get_reg_id_by_domain(struct rte_eth_dev *dev,
1724 			     enum rte_flow_item_type type,
1725 			     enum mlx5dr_table_type domain_type, uint32_t id)
1726 {
1727 	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
1728 	struct mlx5_dev_registers *reg = &sh->registers;
1729 
1730 	switch (type) {
1731 	case RTE_FLOW_ITEM_TYPE_META:
1732 		if (sh->config.dv_esw_en &&
1733 		    sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
1734 			return REG_C_1;
1735 		}
1736 		/*
1737 		 * On root table - PMD allows only egress META matching, thus
1738 		 * REG_A matching is sufficient.
1739 		 *
1740 		 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
1741 		 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
1742 		 * However, current FW does not implement REG_B case right now, so
1743 		 * REG_B case is return explicitly by this function for NIC RX.
1744 		 */
1745 		if (domain_type == MLX5DR_TABLE_TYPE_NIC_RX)
1746 			return REG_B;
1747 		return REG_A;
1748 	case RTE_FLOW_ITEM_TYPE_CONNTRACK:
1749 	case RTE_FLOW_ITEM_TYPE_METER_COLOR:
1750 		return reg->aso_reg;
1751 	case RTE_FLOW_ITEM_TYPE_TAG:
1752 		if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1753 			return REG_C_3;
1754 		MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
1755 		return reg->hw_avl_tags[id];
1756 	default:
1757 		return REG_NON;
1758 	}
1759 }
1760 
1761 static __rte_always_inline int
1762 flow_hw_get_reg_id_from_ctx(void *dr_ctx, enum rte_flow_item_type type,
1763 			    enum mlx5dr_table_type domain_type, uint32_t id)
1764 {
1765 	uint16_t port;
1766 
1767 	MLX5_ETH_FOREACH_DEV(port, NULL) {
1768 		struct mlx5_priv *priv;
1769 
1770 		priv = rte_eth_devices[port].data->dev_private;
1771 		if (priv->dr_ctx == dr_ctx)
1772 			return flow_hw_get_reg_id_by_domain(&rte_eth_devices[port],
1773 							    type, domain_type, id);
1774 	}
1775 	return REG_NON;
1776 }
1777 
1778 #endif
1779 
1780 /*
1781  * Define list of valid combinations of RX Hash fields
1782  * (see enum ibv_rx_hash_fields).
1783  */
1784 #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
1785 #define MLX5_RSS_HASH_IPV4_TCP \
1786 	(MLX5_RSS_HASH_IPV4 | \
1787 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1788 #define MLX5_RSS_HASH_IPV4_UDP \
1789 	(MLX5_RSS_HASH_IPV4 | \
1790 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1791 #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
1792 #define MLX5_RSS_HASH_IPV6_TCP \
1793 	(MLX5_RSS_HASH_IPV6 | \
1794 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1795 #define MLX5_RSS_HASH_IPV6_UDP \
1796 	(MLX5_RSS_HASH_IPV6 | \
1797 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1798 #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
1799 #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
1800 #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
1801 #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
1802 #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
1803 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
1804 #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
1805 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
1806 #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
1807 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
1808 #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
1809 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
1810 #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
1811 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
1812 #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
1813 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
1814 #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
1815 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
1816 #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
1817 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
1818 
1819 #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
1820 #define IBV_RX_HASH_IPSEC_SPI (1U << 8)
1821 #endif
1822 
1823 #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
1824 #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
1825 				MLX5_RSS_HASH_ESP_SPI)
1826 #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
1827 				MLX5_RSS_HASH_ESP_SPI)
1828 #define MLX5_RSS_HASH_NONE 0ULL
1829 
1830 #define MLX5_RSS_IS_SYMM(func) \
1831 		(((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \
1832 		 ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT))
1833 
1834 /* extract next protocol type from Ethernet & VLAN headers */
1835 #define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
1836 	(_prt) = ((const struct _s *)(_itm)->mask)->_m;       \
1837 	(_prt) &= ((const struct _s *)(_itm)->spec)->_m;      \
1838 	(_prt) = rte_be_to_cpu_16((_prt));                    \
1839 } while (0)
1840 
1841 /* array of valid combinations of RX Hash fields for RSS */
1842 static const uint64_t mlx5_rss_hash_fields[] = {
1843 	MLX5_RSS_HASH_IPV4,
1844 	MLX5_RSS_HASH_IPV4_TCP,
1845 	MLX5_RSS_HASH_IPV4_UDP,
1846 	MLX5_RSS_HASH_IPV4_ESP,
1847 	MLX5_RSS_HASH_IPV6,
1848 	MLX5_RSS_HASH_IPV6_TCP,
1849 	MLX5_RSS_HASH_IPV6_UDP,
1850 	MLX5_RSS_HASH_IPV6_ESP,
1851 	MLX5_RSS_HASH_ESP_SPI,
1852 	MLX5_RSS_HASH_NONE,
1853 };
1854 
1855 /* Shared RSS action structure */
1856 struct mlx5_shared_action_rss {
1857 	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
1858 	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
1859 	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
1860 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
1861 	struct mlx5_ind_table_obj *ind_tbl;
1862 	/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
1863 	uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
1864 	/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
1865 	rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
1866 };
1867 
1868 struct rte_flow_action_handle {
1869 	uint32_t id;
1870 };
1871 
1872 /* Thread specific flow workspace intermediate data. */
1873 struct mlx5_flow_workspace {
1874 	/* If creating another flow in same thread, push new as stack. */
1875 	struct mlx5_flow_workspace *prev;
1876 	struct mlx5_flow_workspace *next;
1877 	struct mlx5_flow_workspace *gc;
1878 	uint32_t inuse; /* can't create new flow with current. */
1879 	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
1880 	struct mlx5_flow_rss_desc rss_desc;
1881 	uint32_t flow_idx; /* Intermediate device flow index. */
1882 	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
1883 	struct mlx5_flow_meter_policy *policy;
1884 	/* The meter policy used by meter in flow. */
1885 	struct mlx5_flow_meter_policy *final_policy;
1886 	/* The final policy when meter policy is hierarchy. */
1887 	uint32_t skip_matcher_reg:1;
1888 	/* Indicates if need to skip matcher register in translate. */
1889 	uint32_t mark:1; /* Indicates if flow contains mark action. */
1890 	uint32_t vport_meta_tag; /* Used for vport index match. */
1891 };
1892 
1893 /* Matcher translate type. */
1894 enum MLX5_SET_MATCHER {
1895 	MLX5_SET_MATCHER_SW_V = 1 << 0,
1896 	MLX5_SET_MATCHER_SW_M = 1 << 1,
1897 	MLX5_SET_MATCHER_HS_V = 1 << 2,
1898 	MLX5_SET_MATCHER_HS_M = 1 << 3,
1899 };
1900 
1901 #define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
1902 #define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
1903 #define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
1904 #define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)
1905 
1906 /* Flow matcher workspace intermediate data. */
1907 struct mlx5_dv_matcher_workspace {
1908 	uint8_t priority; /* Flow priority. */
1909 	uint64_t last_item; /* Last item in pattern. */
1910 	uint64_t item_flags; /* Flow item pattern flags. */
1911 	uint64_t action_flags; /* Flow action flags. */
1912 	bool external; /* External flow or not. */
1913 	uint32_t vlan_tag:12; /* Flow item VLAN tag. */
1914 	uint8_t next_protocol; /* Tunnel next protocol */
1915 	uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
1916 	uint32_t group; /* Flow group. */
1917 	uint16_t udp_dport; /* Flow item UDP port. */
1918 	const struct rte_flow_attr *attr; /* Flow attribute. */
1919 	struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
1920 	const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
1921 	const struct rte_flow_item *gre_item; /* Flow GRE item. */
1922 	const struct rte_flow_item *integrity_items[2];
1923 };
1924 
1925 struct mlx5_flow_split_info {
1926 	uint32_t external:1;
1927 	/**< True if flow is created by request external to PMD. */
1928 	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
1929 	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
1930 	uint32_t flow_idx; /**< This memory pool index to the flow. */
1931 	uint32_t table_id; /**< Flow table identifier. */
1932 	uint64_t prefix_layers; /**< Prefix subflow layers. */
1933 };
1934 
1935 struct mlx5_hl_data {
1936 	uint8_t dw_offset;
1937 	uint32_t dw_mask;
1938 };
1939 
1940 struct flow_hw_port_info {
1941 	uint32_t regc_mask;
1942 	uint32_t regc_value;
1943 	uint32_t is_wire:1;
1944 };
1945 
1946 extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
1947 
1948 /*
1949  * Get metadata match tag and mask for given rte_eth_dev port.
1950  * Used in HWS rule creation.
1951  */
1952 static __rte_always_inline const struct flow_hw_port_info *
1953 flow_hw_conv_port_id(const uint16_t port_id)
1954 {
1955 	struct flow_hw_port_info *port_info;
1956 
1957 	if (port_id >= RTE_MAX_ETHPORTS)
1958 		return NULL;
1959 	port_info = &mlx5_flow_hw_port_infos[port_id];
1960 	return !!port_info->regc_mask ? port_info : NULL;
1961 }
1962 
1963 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1964 /*
1965  * Get metadata match tag and mask for the uplink port represented
1966  * by given IB context. Used in HWS context creation.
1967  */
1968 static __rte_always_inline const struct flow_hw_port_info *
1969 flow_hw_get_wire_port(struct ibv_context *ibctx)
1970 {
1971 	struct ibv_device *ibdev = ibctx->device;
1972 	uint16_t port_id;
1973 
1974 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
1975 		const struct mlx5_priv *priv =
1976 				rte_eth_devices[port_id].data->dev_private;
1977 
1978 		if (priv && priv->master) {
1979 			struct ibv_context *port_ibctx = priv->sh->cdev->ctx;
1980 
1981 			if (port_ibctx->device == ibdev)
1982 				return flow_hw_conv_port_id(port_id);
1983 		}
1984 	}
1985 	return NULL;
1986 }
1987 #endif
1988 
1989 static __rte_always_inline int
1990 flow_hw_get_reg_id(struct rte_eth_dev *dev,
1991 		   enum rte_flow_item_type type, uint32_t id)
1992 {
1993 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1994 	return flow_hw_get_reg_id_by_domain(dev, type,
1995 					    MLX5DR_TABLE_TYPE_DONTCARE, id);
1996 #else
1997 	RTE_SET_USED(dev);
1998 	RTE_SET_USED(type);
1999 	RTE_SET_USED(id);
2000 	return REG_NON;
2001 #endif
2002 }
2003 
2004 static __rte_always_inline int
2005 flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val)
2006 {
2007 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2008 	uint32_t port;
2009 
2010 	MLX5_ETH_FOREACH_DEV(port, NULL) {
2011 		struct mlx5_priv *priv;
2012 		priv = rte_eth_devices[port].data->dev_private;
2013 
2014 		if (priv->dr_ctx == dr_ctx) {
2015 			*port_val = port;
2016 			return 0;
2017 		}
2018 	}
2019 #else
2020 	RTE_SET_USED(dr_ctx);
2021 	RTE_SET_USED(port_val);
2022 #endif
2023 	return -EINVAL;
2024 }
2025 
2026 /**
2027  * Get GENEVE TLV option FW information according type and class.
2028  *
2029  * @param[in] dr_ctx
2030  *   Pointer to HW steering DR context.
2031  * @param[in] type
2032  *   GENEVE TLV option type.
2033  * @param[in] class
2034  *   GENEVE TLV option class.
2035  * @param[out] hl_ok_bit
2036  *   Pointer to header layout structure describing OK bit FW information.
2037  * @param[out] num_of_dws
2038  *   Pointer to fill inside the size of 'hl_dws' array.
2039  * @param[out] hl_dws
2040  *   Pointer to header layout array describing data DWs FW information.
2041  * @param[out] ok_bit_on_class
2042  *   Pointer to an indicator whether OK bit includes class along with type.
2043  *
2044  * @return
2045  *   0 on success, negative errno otherwise and rte_errno is set.
2046  */
2047 int
2048 mlx5_get_geneve_hl_data(const void *dr_ctx, uint8_t type, uint16_t class,
2049 			struct mlx5_hl_data ** const hl_ok_bit,
2050 			uint8_t *num_of_dws,
2051 			struct mlx5_hl_data ** const hl_dws,
2052 			bool *ok_bit_on_class);
2053 
2054 /**
2055  * Get modify field ID for single DW inside configured GENEVE TLV option.
2056  *
2057  * @param[in] dr_ctx
2058  *   Pointer to HW steering DR context.
2059  * @param[in] type
2060  *   GENEVE TLV option type.
2061  * @param[in] class
2062  *   GENEVE TLV option class.
2063  * @param[in] dw_offset
2064  *   Offset of DW inside the option.
2065  *
2066  * @return
2067  *   Modify field ID on success, negative errno otherwise and rte_errno is set.
2068  */
2069 int
2070 mlx5_get_geneve_option_modify_field_id(const void *dr_ctx, uint8_t type,
2071 				       uint16_t class, uint8_t dw_offset);
2072 
2073 void *
2074 mlx5_geneve_tlv_parser_create(uint16_t port_id,
2075 			      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
2076 			      uint8_t nb_options);
2077 int mlx5_geneve_tlv_parser_destroy(void *handle);
2078 int mlx5_flow_geneve_tlv_option_validate(struct mlx5_priv *priv,
2079 					 const struct rte_flow_item *geneve_opt,
2080 					 struct rte_flow_error *error);
2081 int mlx5_geneve_opt_modi_field_get(struct mlx5_priv *priv,
2082 				   const struct rte_flow_field_data *data);
2083 
2084 struct mlx5_geneve_tlv_options_mng;
2085 int mlx5_geneve_tlv_option_register(struct mlx5_priv *priv,
2086 				    const struct rte_flow_item_geneve_opt *spec,
2087 				    struct mlx5_geneve_tlv_options_mng *mng);
2088 void mlx5_geneve_tlv_options_unregister(struct mlx5_priv *priv,
2089 					struct mlx5_geneve_tlv_options_mng *mng);
2090 
2091 void flow_hw_set_port_info(struct rte_eth_dev *dev);
2092 void flow_hw_clear_port_info(struct rte_eth_dev *dev);
2093 int flow_hw_create_vport_action(struct rte_eth_dev *dev);
2094 void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
2095 
2096 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
2097 				    const struct rte_flow_attr *attr,
2098 				    const struct rte_flow_item items[],
2099 				    const struct rte_flow_action actions[],
2100 				    bool external,
2101 				    int hairpin,
2102 				    struct rte_flow_error *error);
2103 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
2104 	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2105 	 const struct rte_flow_item items[],
2106 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
2107 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
2108 				     struct mlx5_flow *dev_flow,
2109 				     const struct rte_flow_attr *attr,
2110 				     const struct rte_flow_item items[],
2111 				     const struct rte_flow_action actions[],
2112 				     struct rte_flow_error *error);
2113 typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
2114 				 struct rte_flow_error *error);
2115 typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
2116 				   struct rte_flow *flow);
2117 typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
2118 				    struct rte_flow *flow);
2119 typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
2120 				 struct rte_flow *flow,
2121 				 const struct rte_flow_action *actions,
2122 				 void *data,
2123 				 struct rte_flow_error *error);
2124 typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
2125 					struct mlx5_flow_meter_info *fm,
2126 					uint32_t mtr_idx,
2127 					uint8_t domain_bitmap);
2128 typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
2129 				struct mlx5_flow_meter_info *fm);
2130 typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
2131 typedef struct mlx5_flow_meter_sub_policy *
2132 	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
2133 		(struct rte_eth_dev *dev,
2134 		struct mlx5_flow_meter_policy *mtr_policy,
2135 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
2136 typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
2137 		(struct rte_eth_dev *dev,
2138 		struct mlx5_flow_meter_info *fm,
2139 		int32_t src_port,
2140 		const struct rte_flow_item *item,
2141 		struct rte_flow_error *error);
2142 typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
2143 	(struct rte_eth_dev *dev,
2144 	struct mlx5_flow_meter_policy *mtr_policy);
2145 typedef uint32_t (*mlx5_flow_mtr_alloc_t)
2146 					    (struct rte_eth_dev *dev);
2147 typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
2148 						uint32_t mtr_idx);
2149 typedef uint32_t (*mlx5_flow_counter_alloc_t)
2150 				   (struct rte_eth_dev *dev);
2151 typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
2152 					 uint32_t cnt);
2153 typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
2154 					 uint32_t cnt,
2155 					 bool clear, uint64_t *pkts,
2156 					 uint64_t *bytes, void **action);
2157 typedef int (*mlx5_flow_get_aged_flows_t)
2158 					(struct rte_eth_dev *dev,
2159 					 void **context,
2160 					 uint32_t nb_contexts,
2161 					 struct rte_flow_error *error);
2162 typedef int (*mlx5_flow_get_q_aged_flows_t)
2163 					(struct rte_eth_dev *dev,
2164 					 uint32_t queue_id,
2165 					 void **context,
2166 					 uint32_t nb_contexts,
2167 					 struct rte_flow_error *error);
2168 typedef int (*mlx5_flow_action_validate_t)
2169 				(struct rte_eth_dev *dev,
2170 				 const struct rte_flow_indir_action_conf *conf,
2171 				 const struct rte_flow_action *action,
2172 				 struct rte_flow_error *error);
2173 typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
2174 				(struct rte_eth_dev *dev,
2175 				 const struct rte_flow_indir_action_conf *conf,
2176 				 const struct rte_flow_action *action,
2177 				 struct rte_flow_error *error);
2178 typedef int (*mlx5_flow_action_destroy_t)
2179 				(struct rte_eth_dev *dev,
2180 				 struct rte_flow_action_handle *action,
2181 				 struct rte_flow_error *error);
2182 typedef int (*mlx5_flow_action_update_t)
2183 			(struct rte_eth_dev *dev,
2184 			 struct rte_flow_action_handle *action,
2185 			 const void *update,
2186 			 struct rte_flow_error *error);
2187 typedef int (*mlx5_flow_action_query_t)
2188 			(struct rte_eth_dev *dev,
2189 			 const struct rte_flow_action_handle *action,
2190 			 void *data,
2191 			 struct rte_flow_error *error);
2192 typedef int (*mlx5_flow_action_query_update_t)
2193 			(struct rte_eth_dev *dev,
2194 			 struct rte_flow_action_handle *handle,
2195 			 const void *update, void *data,
2196 			 enum rte_flow_query_update_mode qu_mode,
2197 			 struct rte_flow_error *error);
2198 typedef struct rte_flow_action_list_handle *
2199 (*mlx5_flow_action_list_handle_create_t)
2200 			(struct rte_eth_dev *dev,
2201 			 const struct rte_flow_indir_action_conf *conf,
2202 			 const struct rte_flow_action *actions,
2203 			 struct rte_flow_error *error);
2204 typedef int
2205 (*mlx5_flow_action_list_handle_destroy_t)
2206 			(struct rte_eth_dev *dev,
2207 			 struct rte_flow_action_list_handle *handle,
2208 			 struct rte_flow_error *error);
2209 typedef int (*mlx5_flow_sync_domain_t)
2210 			(struct rte_eth_dev *dev,
2211 			 uint32_t domains,
2212 			 uint32_t flags);
2213 typedef int (*mlx5_flow_validate_mtr_acts_t)
2214 			(struct rte_eth_dev *dev,
2215 			 const struct rte_flow_action *actions[RTE_COLORS],
2216 			 struct rte_flow_attr *attr,
2217 			 bool *is_rss,
2218 			 uint8_t *domain_bitmap,
2219 			 uint8_t *policy_mode,
2220 			 struct rte_mtr_error *error);
2221 typedef int (*mlx5_flow_create_mtr_acts_t)
2222 			(struct rte_eth_dev *dev,
2223 		      struct mlx5_flow_meter_policy *mtr_policy,
2224 		      const struct rte_flow_action *actions[RTE_COLORS],
2225 		      struct rte_flow_attr *attr,
2226 		      struct rte_mtr_error *error);
2227 typedef void (*mlx5_flow_destroy_mtr_acts_t)
2228 			(struct rte_eth_dev *dev,
2229 		      struct mlx5_flow_meter_policy *mtr_policy);
2230 typedef int (*mlx5_flow_create_policy_rules_t)
2231 			(struct rte_eth_dev *dev,
2232 			  struct mlx5_flow_meter_policy *mtr_policy);
2233 typedef void (*mlx5_flow_destroy_policy_rules_t)
2234 			(struct rte_eth_dev *dev,
2235 			  struct mlx5_flow_meter_policy *mtr_policy);
2236 typedef int (*mlx5_flow_create_def_policy_t)
2237 			(struct rte_eth_dev *dev);
2238 typedef void (*mlx5_flow_destroy_def_policy_t)
2239 			(struct rte_eth_dev *dev);
2240 typedef int (*mlx5_flow_discover_priorities_t)
2241 			(struct rte_eth_dev *dev,
2242 			 const uint16_t *vprio, int vprio_n);
2243 typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
2244 			(struct rte_eth_dev *dev,
2245 			 const struct rte_flow_item_flex_conf *conf,
2246 			 struct rte_flow_error *error);
2247 typedef int (*mlx5_flow_item_release_t)
2248 			(struct rte_eth_dev *dev,
2249 			 const struct rte_flow_item_flex_handle *handle,
2250 			 struct rte_flow_error *error);
2251 typedef int (*mlx5_flow_item_update_t)
2252 			(struct rte_eth_dev *dev,
2253 			 const struct rte_flow_item_flex_handle *handle,
2254 			 const struct rte_flow_item_flex_conf *conf,
2255 			 struct rte_flow_error *error);
2256 typedef int (*mlx5_flow_info_get_t)
2257 			(struct rte_eth_dev *dev,
2258 			 struct rte_flow_port_info *port_info,
2259 			 struct rte_flow_queue_info *queue_info,
2260 			 struct rte_flow_error *error);
2261 typedef int (*mlx5_flow_port_configure_t)
2262 			(struct rte_eth_dev *dev,
2263 			 const struct rte_flow_port_attr *port_attr,
2264 			 uint16_t nb_queue,
2265 			 const struct rte_flow_queue_attr *queue_attr[],
2266 			 struct rte_flow_error *err);
2267 typedef int (*mlx5_flow_pattern_validate_t)
2268 			(struct rte_eth_dev *dev,
2269 			 const struct rte_flow_pattern_template_attr *attr,
2270 			 const struct rte_flow_item items[],
2271 			 struct rte_flow_error *error);
2272 typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
2273 			(struct rte_eth_dev *dev,
2274 			 const struct rte_flow_pattern_template_attr *attr,
2275 			 const struct rte_flow_item items[],
2276 			 struct rte_flow_error *error);
2277 typedef int (*mlx5_flow_pattern_template_destroy_t)
2278 			(struct rte_eth_dev *dev,
2279 			 struct rte_flow_pattern_template *template,
2280 			 struct rte_flow_error *error);
2281 typedef int (*mlx5_flow_actions_validate_t)
2282 			(struct rte_eth_dev *dev,
2283 			 const struct rte_flow_actions_template_attr *attr,
2284 			 const struct rte_flow_action actions[],
2285 			 const struct rte_flow_action masks[],
2286 			 struct rte_flow_error *error);
2287 typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
2288 			(struct rte_eth_dev *dev,
2289 			 const struct rte_flow_actions_template_attr *attr,
2290 			 const struct rte_flow_action actions[],
2291 			 const struct rte_flow_action masks[],
2292 			 struct rte_flow_error *error);
2293 typedef int (*mlx5_flow_actions_template_destroy_t)
2294 			(struct rte_eth_dev *dev,
2295 			 struct rte_flow_actions_template *template,
2296 			 struct rte_flow_error *error);
2297 typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
2298 		(struct rte_eth_dev *dev,
2299 		 const struct rte_flow_template_table_attr *attr,
2300 		 struct rte_flow_pattern_template *item_templates[],
2301 		 uint8_t nb_item_templates,
2302 		 struct rte_flow_actions_template *action_templates[],
2303 		 uint8_t nb_action_templates,
2304 		 struct rte_flow_error *error);
2305 typedef int (*mlx5_flow_table_destroy_t)
2306 			(struct rte_eth_dev *dev,
2307 			 struct rte_flow_template_table *table,
2308 			 struct rte_flow_error *error);
2309 typedef int (*mlx5_flow_group_set_miss_actions_t)
2310 			(struct rte_eth_dev *dev,
2311 			 uint32_t group_id,
2312 			 const struct rte_flow_group_attr *attr,
2313 			 const struct rte_flow_action actions[],
2314 			 struct rte_flow_error *error);
2315 typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
2316 			(struct rte_eth_dev *dev,
2317 			 uint32_t queue,
2318 			 const struct rte_flow_op_attr *attr,
2319 			 struct rte_flow_template_table *table,
2320 			 const struct rte_flow_item items[],
2321 			 uint8_t pattern_template_index,
2322 			 const struct rte_flow_action actions[],
2323 			 uint8_t action_template_index,
2324 			 void *user_data,
2325 			 struct rte_flow_error *error);
2326 typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
2327 			(struct rte_eth_dev *dev,
2328 			 uint32_t queue,
2329 			 const struct rte_flow_op_attr *attr,
2330 			 struct rte_flow_template_table *table,
2331 			 uint32_t rule_index,
2332 			 const struct rte_flow_action actions[],
2333 			 uint8_t action_template_index,
2334 			 void *user_data,
2335 			 struct rte_flow_error *error);
2336 typedef int (*mlx5_flow_async_flow_update_t)
2337 			(struct rte_eth_dev *dev,
2338 			 uint32_t queue,
2339 			 const struct rte_flow_op_attr *attr,
2340 			 struct rte_flow *flow,
2341 			 const struct rte_flow_action actions[],
2342 			 uint8_t action_template_index,
2343 			 void *user_data,
2344 			 struct rte_flow_error *error);
2345 typedef int (*mlx5_flow_async_flow_destroy_t)
2346 			(struct rte_eth_dev *dev,
2347 			 uint32_t queue,
2348 			 const struct rte_flow_op_attr *attr,
2349 			 struct rte_flow *flow,
2350 			 void *user_data,
2351 			 struct rte_flow_error *error);
2352 typedef int (*mlx5_flow_pull_t)
2353 			(struct rte_eth_dev *dev,
2354 			 uint32_t queue,
2355 			 struct rte_flow_op_result res[],
2356 			 uint16_t n_res,
2357 			 struct rte_flow_error *error);
2358 typedef int (*mlx5_flow_push_t)
2359 			(struct rte_eth_dev *dev,
2360 			 uint32_t queue,
2361 			 struct rte_flow_error *error);
2362 
2363 typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
2364 			(struct rte_eth_dev *dev,
2365 			 uint32_t queue,
2366 			 const struct rte_flow_op_attr *attr,
2367 			 const struct rte_flow_indir_action_conf *conf,
2368 			 const struct rte_flow_action *action,
2369 			 void *user_data,
2370 			 struct rte_flow_error *error);
2371 
2372 typedef int (*mlx5_flow_async_action_handle_update_t)
2373 			(struct rte_eth_dev *dev,
2374 			 uint32_t queue,
2375 			 const struct rte_flow_op_attr *attr,
2376 			 struct rte_flow_action_handle *handle,
2377 			 const void *update,
2378 			 void *user_data,
2379 			 struct rte_flow_error *error);
2380 typedef int (*mlx5_flow_async_action_handle_query_update_t)
2381 			(struct rte_eth_dev *dev, uint32_t queue_id,
2382 			 const struct rte_flow_op_attr *op_attr,
2383 			 struct rte_flow_action_handle *action_handle,
2384 			 const void *update, void *data,
2385 			 enum rte_flow_query_update_mode qu_mode,
2386 			 void *user_data, struct rte_flow_error *error);
2387 typedef int (*mlx5_flow_async_action_handle_query_t)
2388 			(struct rte_eth_dev *dev,
2389 			 uint32_t queue,
2390 			 const struct rte_flow_op_attr *attr,
2391 			 const struct rte_flow_action_handle *handle,
2392 			 void *data,
2393 			 void *user_data,
2394 			 struct rte_flow_error *error);
2395 
2396 typedef int (*mlx5_flow_async_action_handle_destroy_t)
2397 			(struct rte_eth_dev *dev,
2398 			 uint32_t queue,
2399 			 const struct rte_flow_op_attr *attr,
2400 			 struct rte_flow_action_handle *handle,
2401 			 void *user_data,
2402 			 struct rte_flow_error *error);
2403 typedef struct rte_flow_action_list_handle *
2404 (*mlx5_flow_async_action_list_handle_create_t)
2405 			(struct rte_eth_dev *dev, uint32_t queue_id,
2406 			 const struct rte_flow_op_attr *attr,
2407 			 const struct rte_flow_indir_action_conf *conf,
2408 			 const struct rte_flow_action *actions,
2409 			 void *user_data, struct rte_flow_error *error);
2410 typedef int
2411 (*mlx5_flow_async_action_list_handle_destroy_t)
2412 			(struct rte_eth_dev *dev, uint32_t queue_id,
2413 			 const struct rte_flow_op_attr *op_attr,
2414 			 struct rte_flow_action_list_handle *action_handle,
2415 			 void *user_data, struct rte_flow_error *error);
2416 typedef int
2417 (*mlx5_flow_action_list_handle_query_update_t)
2418 			(struct rte_eth_dev *dev,
2419 			const struct rte_flow_action_list_handle *handle,
2420 			const void **update, void **query,
2421 			enum rte_flow_query_update_mode mode,
2422 			struct rte_flow_error *error);
2423 typedef int
2424 (*mlx5_flow_async_action_list_handle_query_update_t)
2425 			(struct rte_eth_dev *dev, uint32_t queue_id,
2426 			const struct rte_flow_op_attr *attr,
2427 			const struct rte_flow_action_list_handle *handle,
2428 			const void **update, void **query,
2429 			enum rte_flow_query_update_mode mode,
2430 			void *user_data, struct rte_flow_error *error);
2431 typedef int
2432 (*mlx5_flow_calc_table_hash_t)
2433 			(struct rte_eth_dev *dev,
2434 			 const struct rte_flow_template_table *table,
2435 			 const struct rte_flow_item pattern[],
2436 			 uint8_t pattern_template_index,
2437 			 uint32_t *hash, struct rte_flow_error *error);
2438 typedef int
2439 (*mlx5_flow_calc_encap_hash_t)
2440 			(struct rte_eth_dev *dev,
2441 			 const struct rte_flow_item pattern[],
2442 			 enum rte_flow_encap_hash_field dest_field,
2443 			 uint8_t *hash,
2444 			 struct rte_flow_error *error);
2445 typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
2446 				   struct rte_flow_template_table *table,
2447 				   uint32_t nb_rules, struct rte_flow_error *error);
2448 typedef int (*mlx5_flow_update_resized_t)
2449 			(struct rte_eth_dev *dev, uint32_t queue,
2450 			 const struct rte_flow_op_attr *attr,
2451 			 struct rte_flow *rule, void *user_data,
2452 			 struct rte_flow_error *error);
2453 typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
2454 				       struct rte_flow_template_table *table,
2455 				       struct rte_flow_error *error);
2456 
2457 struct mlx5_flow_driver_ops {
2458 	mlx5_flow_validate_t validate;
2459 	mlx5_flow_prepare_t prepare;
2460 	mlx5_flow_translate_t translate;
2461 	mlx5_flow_apply_t apply;
2462 	mlx5_flow_remove_t remove;
2463 	mlx5_flow_destroy_t destroy;
2464 	mlx5_flow_query_t query;
2465 	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
2466 	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
2467 	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
2468 	mlx5_flow_mtr_alloc_t create_meter;
2469 	mlx5_flow_mtr_free_t free_meter;
2470 	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
2471 	mlx5_flow_create_mtr_acts_t create_mtr_acts;
2472 	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
2473 	mlx5_flow_create_policy_rules_t create_policy_rules;
2474 	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
2475 	mlx5_flow_create_def_policy_t create_def_policy;
2476 	mlx5_flow_destroy_def_policy_t destroy_def_policy;
2477 	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
2478 	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
2479 	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
2480 	mlx5_flow_counter_alloc_t counter_alloc;
2481 	mlx5_flow_counter_free_t counter_free;
2482 	mlx5_flow_counter_query_t counter_query;
2483 	mlx5_flow_get_aged_flows_t get_aged_flows;
2484 	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
2485 	mlx5_flow_action_validate_t action_validate;
2486 	mlx5_flow_action_create_t action_create;
2487 	mlx5_flow_action_destroy_t action_destroy;
2488 	mlx5_flow_action_update_t action_update;
2489 	mlx5_flow_action_query_t action_query;
2490 	mlx5_flow_action_query_update_t action_query_update;
2491 	mlx5_flow_action_list_handle_create_t action_list_handle_create;
2492 	mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
2493 	mlx5_flow_sync_domain_t sync_domain;
2494 	mlx5_flow_discover_priorities_t discover_priorities;
2495 	mlx5_flow_item_create_t item_create;
2496 	mlx5_flow_item_release_t item_release;
2497 	mlx5_flow_item_update_t item_update;
2498 	mlx5_flow_info_get_t info_get;
2499 	mlx5_flow_port_configure_t configure;
2500 	mlx5_flow_pattern_validate_t pattern_validate;
2501 	mlx5_flow_pattern_template_create_t pattern_template_create;
2502 	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
2503 	mlx5_flow_actions_validate_t actions_validate;
2504 	mlx5_flow_actions_template_create_t actions_template_create;
2505 	mlx5_flow_actions_template_destroy_t actions_template_destroy;
2506 	mlx5_flow_table_create_t template_table_create;
2507 	mlx5_flow_table_destroy_t template_table_destroy;
2508 	mlx5_flow_group_set_miss_actions_t group_set_miss_actions;
2509 	mlx5_flow_async_flow_create_t async_flow_create;
2510 	mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
2511 	mlx5_flow_async_flow_update_t async_flow_update;
2512 	mlx5_flow_async_flow_destroy_t async_flow_destroy;
2513 	mlx5_flow_pull_t pull;
2514 	mlx5_flow_push_t push;
2515 	mlx5_flow_async_action_handle_create_t async_action_create;
2516 	mlx5_flow_async_action_handle_update_t async_action_update;
2517 	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
2518 	mlx5_flow_async_action_handle_query_t async_action_query;
2519 	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
2520 	mlx5_flow_async_action_list_handle_create_t
2521 		async_action_list_handle_create;
2522 	mlx5_flow_async_action_list_handle_destroy_t
2523 		async_action_list_handle_destroy;
2524 	mlx5_flow_action_list_handle_query_update_t
2525 		action_list_handle_query_update;
2526 	mlx5_flow_async_action_list_handle_query_update_t
2527 		async_action_list_handle_query_update;
2528 	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
2529 	mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
2530 	mlx5_table_resize_t table_resize;
2531 	mlx5_flow_update_resized_t flow_update_resized;
2532 	table_resize_complete_t table_resize_complete;
2533 };
2534 
2535 /* mlx5_flow.c */
2536 
2537 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
2538 void mlx5_flow_pop_thread_workspace(void);
2539 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
2540 
2541 __extension__
2542 struct flow_grp_info {
2543 	uint64_t external:1;
2544 	uint64_t transfer:1;
2545 	uint64_t fdb_def_rule:1;
2546 	/* force standard group translation */
2547 	uint64_t std_tbl_fix:1;
2548 	uint64_t skip_scale:2;
2549 };
2550 
2551 static inline bool
2552 tunnel_use_standard_attr_group_translate
2553 		    (const struct rte_eth_dev *dev,
2554 		     const struct rte_flow_attr *attr,
2555 		     const struct mlx5_flow_tunnel *tunnel,
2556 		     enum mlx5_tof_rule_type tof_rule_type)
2557 {
2558 	bool verdict;
2559 
2560 	if (!is_tunnel_offload_active(dev))
2561 		/* no tunnel offload API */
2562 		verdict = true;
2563 	else if (tunnel) {
2564 		/*
2565 		 * OvS will use jump to group 0 in tunnel steer rule.
2566 		 * If tunnel steer rule starts from group 0 (attr.group == 0)
2567 		 * that 0 group must be translated with standard method.
2568 		 * attr.group == 0 in tunnel match rule translated with tunnel
2569 		 * method
2570 		 */
2571 		verdict = !attr->group &&
2572 			  is_flow_tunnel_steer_rule(tof_rule_type);
2573 	} else {
2574 		/*
2575 		 * non-tunnel group translation uses standard method for
2576 		 * root group only: attr.group == 0
2577 		 */
2578 		verdict = !attr->group;
2579 	}
2580 
2581 	return verdict;
2582 }
2583 
2584 /**
2585  * Get DV flow aso meter by index.
2586  *
2587  * @param[in] dev
2588  *   Pointer to the Ethernet device structure.
2589  * @param[in] idx
2590  *   mlx5 flow aso meter index in the container.
2591  * @param[out] ppool
2592  *   mlx5 flow aso meter pool in the container,
2593  *
2594  * @return
2595  *   Pointer to the aso meter, NULL otherwise.
2596  */
2597 static inline struct mlx5_aso_mtr *
2598 mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
2599 {
2600 	struct mlx5_aso_mtr_pool *pool;
2601 	struct mlx5_aso_mtr_pools_mng *pools_mng =
2602 				&priv->sh->mtrmng->pools_mng;
2603 
2604 	if (priv->mtr_bulk.aso)
2605 		return priv->mtr_bulk.aso + idx;
2606 	/* Decrease to original index. */
2607 	idx--;
2608 	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
2609 	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
2610 	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
2611 	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
2612 	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
2613 }
2614 
2615 static __rte_always_inline const struct rte_flow_item *
2616 mlx5_find_end_item(const struct rte_flow_item *item)
2617 {
2618 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
2619 	return item;
2620 }
2621 
2622 static __rte_always_inline bool
2623 mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
2624 {
2625 	struct rte_flow_item_integrity test = *item;
2626 	test.l3_ok = 0;
2627 	test.l4_ok = 0;
2628 	test.ipv4_csum_ok = 0;
2629 	test.l4_csum_ok = 0;
2630 	return (test.value == 0);
2631 }
2632 
2633 /*
2634  * Get ASO CT action by device and index.
2635  *
2636  * @param[in] dev
2637  *   Pointer to the Ethernet device structure.
2638  * @param[in] idx
2639  *   Index to the ASO CT action.
2640  *
2641  * @return
2642  *   The specified ASO CT action pointer.
2643  */
2644 static inline struct mlx5_aso_ct_action *
2645 flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
2646 {
2647 	struct mlx5_priv *priv = dev->data->dev_private;
2648 	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
2649 	struct mlx5_aso_ct_pool *pool;
2650 
2651 	idx--;
2652 	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
2653 	/* Bit operation AND could be used. */
2654 	rte_rwlock_read_lock(&mng->resize_rwl);
2655 	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
2656 	rte_rwlock_read_unlock(&mng->resize_rwl);
2657 	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
2658 }
2659 
2660 /*
2661  * Get ASO CT action by owner & index.
2662  *
2663  * @param[in] dev
2664  *   Pointer to the Ethernet device structure.
2665  * @param[in] idx
2666  *   Index to the ASO CT action and owner port combination.
2667  *
2668  * @return
2669  *   The specified ASO CT action pointer.
2670  */
2671 static inline struct mlx5_aso_ct_action *
2672 flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
2673 {
2674 	struct mlx5_priv *priv = dev->data->dev_private;
2675 	struct mlx5_aso_ct_action *ct;
2676 	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
2677 	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
2678 
2679 	if (owner == PORT_ID(priv)) {
2680 		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
2681 	} else {
2682 		struct rte_eth_dev *owndev = &rte_eth_devices[owner];
2683 
2684 		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
2685 		if (dev->data->dev_started != 1)
2686 			return NULL;
2687 		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
2688 		if (ct->peer != PORT_ID(priv))
2689 			return NULL;
2690 	}
2691 	return ct;
2692 }
2693 
2694 static inline uint16_t
2695 mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
2696 {
2697 	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
2698 		return RTE_ETHER_TYPE_TEB;
2699 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2700 		return RTE_ETHER_TYPE_IPV4;
2701 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2702 		return RTE_ETHER_TYPE_IPV6;
2703 	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
2704 		return RTE_ETHER_TYPE_MPLS;
2705 	return 0;
2706 }
2707 
2708 int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2709 			 struct rte_flow_error *error);
2710 
2711 /*
2712  * Convert rte_mtr_color to mlx5 color.
2713  *
2714  * @param[in] rcol
2715  *   rte_mtr_color.
2716  *
2717  * @return
2718  *   mlx5 color.
2719  */
2720 static inline int
2721 rte_col_2_mlx5_col(enum rte_color rcol)
2722 {
2723 	switch (rcol) {
2724 	case RTE_COLOR_GREEN:
2725 		return MLX5_FLOW_COLOR_GREEN;
2726 	case RTE_COLOR_YELLOW:
2727 		return MLX5_FLOW_COLOR_YELLOW;
2728 	case RTE_COLOR_RED:
2729 		return MLX5_FLOW_COLOR_RED;
2730 	default:
2731 		break;
2732 	}
2733 	return MLX5_FLOW_COLOR_UNDEFINED;
2734 }
2735 
2736 /**
2737  * Indicates whether flow source vport is representor port.
2738  *
2739  * @param[in] priv
2740  *   Pointer to device private context structure.
2741  * @param[in] act_priv
2742  *   Pointer to actual device private context structure if have.
2743  *
2744  * @return
2745  *   True when the flow source vport is representor port, false otherwise.
2746  */
2747 static inline bool
2748 flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
2749 {
2750 	MLX5_ASSERT(priv);
2751 	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
2752 		 (act_priv->representor_id != UINT16_MAX));
2753 }
2754 
2755 /* All types of Ethernet patterns used in control flow rules. */
2756 enum mlx5_flow_ctrl_rx_eth_pattern_type {
2757 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
2758 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
2759 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
2760 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
2761 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
2762 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
2763 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
2764 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
2765 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
2766 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
2767 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
2768 };
2769 
2770 /* All types of RSS actions used in control flow rules. */
2771 enum mlx5_flow_ctrl_rx_expanded_rss_type {
2772 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0,
2773 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
2774 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
2775 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
2776 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
2777 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP,
2778 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
2779 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
2780 };
2781 
2782 /**
2783  * Contains pattern template, template table and its attributes for a single
2784  * combination of Ethernet pattern and RSS action. Used to create control flow rules
2785  * with HWS.
2786  */
2787 struct mlx5_flow_hw_ctrl_rx_table {
2788 	struct rte_flow_template_table_attr attr;
2789 	struct rte_flow_pattern_template *pt;
2790 	struct rte_flow_template_table *tbl;
2791 };
2792 
2793 /* Contains all templates required to create control flow rules with HWS. */
2794 struct mlx5_flow_hw_ctrl_rx {
2795 	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
2796 	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
2797 						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
2798 };
2799 
2800 /* Contains all templates required for control flow rules in FDB with HWS. */
2801 struct mlx5_flow_hw_ctrl_fdb {
2802 	struct rte_flow_pattern_template *esw_mgr_items_tmpl;
2803 	struct rte_flow_actions_template *regc_jump_actions_tmpl;
2804 	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
2805 	struct rte_flow_pattern_template *regc_sq_items_tmpl;
2806 	struct rte_flow_actions_template *port_actions_tmpl;
2807 	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
2808 	struct rte_flow_pattern_template *port_items_tmpl;
2809 	struct rte_flow_actions_template *jump_one_actions_tmpl;
2810 	struct rte_flow_template_table *hw_esw_zero_tbl;
2811 	struct rte_flow_pattern_template *tx_meta_items_tmpl;
2812 	struct rte_flow_actions_template *tx_meta_actions_tmpl;
2813 	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
2814 	struct rte_flow_pattern_template *lacp_rx_items_tmpl;
2815 	struct rte_flow_actions_template *lacp_rx_actions_tmpl;
2816 	struct rte_flow_template_table *hw_lacp_rx_tbl;
2817 };
2818 
2819 #define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
2820 #define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
2821 #define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
2822 #define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
2823 #define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
2824 #define MLX5_CTRL_DMAC           (RTE_BIT32(5))
2825 #define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))
2826 
2827 int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
2828 void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
2829 
2830 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
2831 			     const struct mlx5_flow_tunnel *tunnel,
2832 			     uint32_t group, uint32_t *table,
2833 			     const struct flow_grp_info *flags,
2834 			     struct rte_flow_error *error);
2835 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
2836 				     int tunnel, uint64_t layer_types,
2837 				     uint64_t hash_fields);
2838 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
2839 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
2840 				   uint32_t subpriority);
2841 uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
2842 					const struct rte_flow_attr *attr);
2843 uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
2844 				   const struct rte_flow_attr *attr,
2845 				   uint32_t subpriority, bool external);
2846 uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
2847 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
2848 				     enum mlx5_feature_name feature,
2849 				     uint32_t id,
2850 				     struct rte_flow_error *error);
2851 const struct rte_flow_action *mlx5_flow_find_action
2852 					(const struct rte_flow_action *actions,
2853 					 enum rte_flow_action_type action);
2854 int mlx5_validate_action_rss(struct rte_eth_dev *dev,
2855 			     const struct rte_flow_action *action,
2856 			     struct rte_flow_error *error);
2857 
2858 struct mlx5_hw_encap_decap_action*
2859 mlx5_reformat_action_create(struct rte_eth_dev *dev,
2860 			    const struct rte_flow_indir_action_conf *conf,
2861 			    const struct rte_flow_action *encap_action,
2862 			    const struct rte_flow_action *decap_action,
2863 			    struct rte_flow_error *error);
2864 int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
2865 				 struct rte_flow_action_list_handle *handle,
2866 				 struct rte_flow_error *error);
2867 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
2868 				    const struct rte_flow_attr *attr,
2869 				    struct rte_flow_error *error);
2870 int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
2871 				   bool is_root,
2872 				   const struct rte_flow_attr *attr,
2873 				   struct rte_flow_error *error);
2874 int mlx5_flow_validate_action_flag(uint64_t action_flags,
2875 				   const struct rte_flow_attr *attr,
2876 				   struct rte_flow_error *error);
2877 int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
2878 				   uint64_t action_flags,
2879 				   const struct rte_flow_attr *attr,
2880 				   struct rte_flow_error *error);
2881 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
2882 				    uint64_t action_flags,
2883 				    struct rte_eth_dev *dev,
2884 				    const struct rte_flow_attr *attr,
2885 				    struct rte_flow_error *error);
2886 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
2887 				  uint64_t action_flags,
2888 				  struct rte_eth_dev *dev,
2889 				  const struct rte_flow_attr *attr,
2890 				  uint64_t item_flags,
2891 				  struct rte_flow_error *error);
2892 int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
2893 				const struct rte_flow_attr *attr,
2894 				struct rte_flow_error *error);
2895 int flow_validate_modify_field_level
2896 			(const struct rte_flow_field_data *data,
2897 			 struct rte_flow_error *error);
2898 int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
2899 			      const uint8_t *mask,
2900 			      const uint8_t *nic_mask,
2901 			      unsigned int size,
2902 			      bool range_accepted,
2903 			      struct rte_flow_error *error);
2904 int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
2905 				uint64_t item_flags, bool ext_vlan_sup,
2906 				struct rte_flow_error *error);
2907 int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2908 				uint64_t item_flags,
2909 				uint8_t target_protocol,
2910 				struct rte_flow_error *error);
2911 int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2912 				    uint64_t item_flags,
2913 				    const struct rte_flow_item *gre_item,
2914 				    struct rte_flow_error *error);
2915 int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
2916 				       const struct rte_flow_item *item,
2917 				       uint64_t item_flags,
2918 				       const struct rte_flow_attr *attr,
2919 				       const struct rte_flow_item *gre_item,
2920 				       struct rte_flow_error *error);
2921 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2922 				 uint64_t item_flags,
2923 				 uint64_t last_item,
2924 				 uint16_t ether_type,
2925 				 const struct rte_flow_item_ipv4 *acc_mask,
2926 				 bool range_accepted,
2927 				 struct rte_flow_error *error);
2928 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2929 				 uint64_t item_flags,
2930 				 uint64_t last_item,
2931 				 uint16_t ether_type,
2932 				 const struct rte_flow_item_ipv6 *acc_mask,
2933 				 struct rte_flow_error *error);
2934 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
2935 				 const struct rte_flow_item *item,
2936 				 uint64_t item_flags,
2937 				 uint64_t prev_layer,
2938 				 struct rte_flow_error *error);
2939 int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2940 				uint64_t item_flags,
2941 				uint8_t target_protocol,
2942 				const struct rte_flow_item_tcp *flow_mask,
2943 				struct rte_flow_error *error);
2944 int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2945 				uint64_t item_flags,
2946 				uint8_t target_protocol,
2947 				struct rte_flow_error *error);
2948 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
2949 				 uint64_t item_flags,
2950 				 struct rte_eth_dev *dev,
2951 				 struct rte_flow_error *error);
2952 int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
2953 				  uint16_t udp_dport,
2954 				  const struct rte_flow_item *item,
2955 				  uint64_t item_flags,
2956 				  bool root,
2957 				  struct rte_flow_error *error);
2958 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2959 				      uint64_t item_flags,
2960 				      struct rte_eth_dev *dev,
2961 				      struct rte_flow_error *error);
2962 int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
2963 				 uint64_t item_flags,
2964 				 uint8_t target_protocol,
2965 				 struct rte_flow_error *error);
2966 int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
2967 				   uint64_t item_flags,
2968 				   uint8_t target_protocol,
2969 				   struct rte_flow_error *error);
2970 int mlx5_flow_validate_item_icmp6_echo(const struct rte_flow_item *item,
2971 				       uint64_t item_flags,
2972 				       uint8_t target_protocol,
2973 				       struct rte_flow_error *error);
2974 int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2975 				  uint64_t item_flags,
2976 				  uint8_t target_protocol,
2977 				  struct rte_flow_error *error);
2978 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2979 				   uint64_t item_flags,
2980 				   struct rte_eth_dev *dev,
2981 				   struct rte_flow_error *error);
2982 int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
2983 				   uint64_t last_item,
2984 				   const struct rte_flow_item *geneve_item,
2985 				   struct rte_eth_dev *dev,
2986 				   struct rte_flow_error *error);
2987 int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2988 				  uint64_t item_flags,
2989 				  uint64_t last_item,
2990 				  uint16_t ether_type,
2991 				  const struct rte_flow_item_ecpri *acc_mask,
2992 				  struct rte_flow_error *error);
2993 int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
2994 				const struct rte_flow_item *item,
2995 				struct rte_flow_error *error);
2996 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
2997 			      struct mlx5_flow_meter_info *fm,
2998 			      uint32_t mtr_idx,
2999 			      uint8_t domain_bitmap);
3000 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
3001 			       struct mlx5_flow_meter_info *fm);
3002 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
3003 struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
3004 		(struct rte_eth_dev *dev,
3005 		struct mlx5_flow_meter_policy *mtr_policy,
3006 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
3007 void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
3008 		struct mlx5_flow_meter_policy *mtr_policy);
3009 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
3010 int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
3011 int mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev);
3012 int mlx5_action_handle_attach(struct rte_eth_dev *dev);
3013 int mlx5_action_handle_detach(struct rte_eth_dev *dev);
3014 int mlx5_action_handle_flush(struct rte_eth_dev *dev);
3015 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
3016 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
3017 
3018 struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
3019 int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3020 			 void *cb_ctx);
3021 void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3022 struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
3023 					     struct mlx5_list_entry *oentry,
3024 					     void *entry_ctx);
3025 void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3026 struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3027 		uint32_t table_level, uint8_t egress, uint8_t transfer,
3028 		bool external, const struct mlx5_flow_tunnel *tunnel,
3029 		uint32_t group_id, uint8_t dummy,
3030 		uint32_t table_id, struct rte_flow_error *error);
3031 int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
3032 				 struct mlx5_flow_tbl_resource *tbl);
3033 
3034 struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
3035 int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3036 			 void *cb_ctx);
3037 void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3038 struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
3039 					     struct mlx5_list_entry *oentry,
3040 					     void *cb_ctx);
3041 void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3042 
3043 int flow_dv_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3044 			    void *cb_ctx);
3045 struct mlx5_list_entry *flow_dv_modify_create_cb(void *tool_ctx, void *ctx);
3046 void flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3047 struct mlx5_list_entry *flow_dv_modify_clone_cb(void *tool_ctx,
3048 						struct mlx5_list_entry *oentry,
3049 						void *ctx);
3050 void flow_dv_modify_clone_free_cb(void *tool_ctx,
3051 				  struct mlx5_list_entry *entry);
3052 
3053 struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
3054 int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3055 			  void *cb_ctx);
3056 void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3057 struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
3058 					      struct mlx5_list_entry *entry,
3059 					      void *ctx);
3060 void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3061 
3062 int flow_dv_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3063 				 void *cb_ctx);
3064 struct mlx5_list_entry *flow_dv_encap_decap_create_cb(void *tool_ctx,
3065 						      void *cb_ctx);
3066 void flow_dv_encap_decap_remove_cb(void *tool_ctx,
3067 				   struct mlx5_list_entry *entry);
3068 struct mlx5_list_entry *flow_dv_encap_decap_clone_cb(void *tool_ctx,
3069 						  struct mlx5_list_entry *entry,
3070 						  void *cb_ctx);
3071 void flow_dv_encap_decap_clone_free_cb(void *tool_ctx,
3072 				       struct mlx5_list_entry *entry);
3073 
3074 int flow_dv_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3075 			     void *ctx);
3076 struct mlx5_list_entry *flow_dv_matcher_create_cb(void *tool_ctx, void *ctx);
3077 void flow_dv_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3078 
3079 int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3080 			     void *cb_ctx);
3081 struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
3082 void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3083 struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
3084 				struct mlx5_list_entry *entry, void *cb_ctx);
3085 void flow_dv_port_id_clone_free_cb(void *tool_ctx,
3086 				   struct mlx5_list_entry *entry);
3087 
3088 int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3089 			       void *cb_ctx);
3090 struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
3091 						    void *cb_ctx);
3092 void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3093 struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
3094 				 struct mlx5_list_entry *entry, void *cb_ctx);
3095 void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
3096 				     struct mlx5_list_entry *entry);
3097 
3098 int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3099 			    void *cb_ctx);
3100 struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
3101 void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3102 struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
3103 				 struct mlx5_list_entry *entry, void *cb_ctx);
3104 void flow_dv_sample_clone_free_cb(void *tool_ctx,
3105 				  struct mlx5_list_entry *entry);
3106 
3107 int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3108 				void *cb_ctx);
3109 struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
3110 						     void *cb_ctx);
3111 void flow_dv_dest_array_remove_cb(void *tool_ctx,
3112 				  struct mlx5_list_entry *entry);
3113 struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
3114 				   struct mlx5_list_entry *entry, void *cb_ctx);
3115 void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
3116 				      struct mlx5_list_entry *entry);
3117 void flow_dv_hashfields_set(uint64_t item_flags,
3118 			    struct mlx5_flow_rss_desc *rss_desc,
3119 			    uint64_t *hash_fields);
3120 void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
3121 					uint64_t *hash_field);
3122 uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
3123 					const uint64_t hash_fields);
3124 
3125 struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
3126 void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3127 int flow_hw_grp_match_cb(void *tool_ctx,
3128 			 struct mlx5_list_entry *entry,
3129 			 void *cb_ctx);
3130 struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
3131 					     struct mlx5_list_entry *oentry,
3132 					     void *cb_ctx);
3133 void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3134 
3135 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
3136 						    uint32_t age_idx);
3137 
3138 void flow_release_workspace(void *data);
3139 int mlx5_flow_os_init_workspace_once(void);
3140 void *mlx5_flow_os_get_specific_workspace(void);
3141 int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
3142 void mlx5_flow_os_release_workspace(void);
3143 uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
3144 void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
3145 int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
3146 			const struct rte_flow_action *actions[RTE_COLORS],
3147 			struct rte_flow_attr *attr,
3148 			bool *is_rss,
3149 			uint8_t *domain_bitmap,
3150 			uint8_t *policy_mode,
3151 			struct rte_mtr_error *error);
3152 void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
3153 		      struct mlx5_flow_meter_policy *mtr_policy);
3154 int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
3155 		      struct mlx5_flow_meter_policy *mtr_policy,
3156 		      const struct rte_flow_action *actions[RTE_COLORS],
3157 		      struct rte_flow_attr *attr,
3158 		      struct rte_mtr_error *error);
3159 int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
3160 			     struct mlx5_flow_meter_policy *mtr_policy);
3161 void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
3162 			     struct mlx5_flow_meter_policy *mtr_policy);
3163 int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
3164 void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
3165 void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
3166 		       struct mlx5_flow_handle *dev_handle);
3167 const struct mlx5_flow_tunnel *
3168 mlx5_get_tof(const struct rte_flow_item *items,
3169 	     const struct rte_flow_action *actions,
3170 	     enum mlx5_tof_rule_type *rule_type);
3171 void
3172 flow_hw_resource_release(struct rte_eth_dev *dev);
3173 int
3174 mlx5_geneve_tlv_options_destroy(struct mlx5_geneve_tlv_options *options,
3175 				struct mlx5_physical_device *phdev);
3176 int
3177 mlx5_geneve_tlv_options_check_busy(struct mlx5_priv *priv);
3178 void
3179 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
3180 int flow_dv_action_validate(struct rte_eth_dev *dev,
3181 			    const struct rte_flow_indir_action_conf *conf,
3182 			    const struct rte_flow_action *action,
3183 			    struct rte_flow_error *err);
3184 struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
3185 		      const struct rte_flow_indir_action_conf *conf,
3186 		      const struct rte_flow_action *action,
3187 		      struct rte_flow_error *err);
3188 int flow_dv_action_destroy(struct rte_eth_dev *dev,
3189 			   struct rte_flow_action_handle *handle,
3190 			   struct rte_flow_error *error);
3191 int flow_dv_action_update(struct rte_eth_dev *dev,
3192 			  struct rte_flow_action_handle *handle,
3193 			  const void *update,
3194 			  struct rte_flow_error *err);
3195 int flow_dv_action_query(struct rte_eth_dev *dev,
3196 			 const struct rte_flow_action_handle *handle,
3197 			 void *data,
3198 			 struct rte_flow_error *error);
3199 size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
3200 int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3201 			   size_t *size, struct rte_flow_error *error);
3202 void mlx5_flow_field_id_to_modify_info
3203 		(const struct rte_flow_field_data *data,
3204 		 struct field_modify_info *info, uint32_t *mask,
3205 		 uint32_t width, struct rte_eth_dev *dev,
3206 		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
3207 int flow_dv_convert_modify_action(struct rte_flow_item *item,
3208 			      struct field_modify_info *field,
3209 			      struct field_modify_info *dest,
3210 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
3211 			      uint32_t type, struct rte_flow_error *error);
3212 
3213 #define MLX5_PF_VPORT_ID 0
3214 #define MLX5_ECPF_VPORT_ID 0xFFFE
3215 
3216 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
3217 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
3218 				const struct rte_flow_item *item,
3219 				uint16_t *vport_id,
3220 				bool *all_ports,
3221 				struct rte_flow_error *error);
3222 
3223 int flow_dv_translate_items_hws(const struct rte_flow_item *items,
3224 				struct mlx5_flow_attr *attr, void *key,
3225 				uint32_t key_type, uint64_t *item_flags,
3226 				uint8_t *match_criteria,
3227 				struct rte_flow_error *error);
3228 
3229 int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
3230 				  uint16_t *proxy_port_id,
3231 				  struct rte_flow_error *error);
3232 int flow_null_get_aged_flows(struct rte_eth_dev *dev,
3233 		    void **context,
3234 		    uint32_t nb_contexts,
3235 		    struct rte_flow_error *error);
3236 uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
3237 void flow_null_counter_free(struct rte_eth_dev *dev,
3238 			uint32_t counter);
3239 int flow_null_counter_query(struct rte_eth_dev *dev,
3240 			uint32_t counter,
3241 			bool clear,
3242 		    uint64_t *pkts,
3243 			uint64_t *bytes,
3244 			void **action);
3245 
3246 int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
3247 
3248 int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
3249 					 uint32_t sqn, bool external);
3250 int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
3251 					  uint32_t sqn);
3252 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
3253 int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
3254 int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
3255 int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
3256 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
3257 		const struct rte_flow_actions_template_attr *attr,
3258 		const struct rte_flow_action actions[],
3259 		const struct rte_flow_action masks[],
3260 		struct rte_flow_error *error);
3261 int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
3262 		const struct rte_flow_pattern_template_attr *attr,
3263 		const struct rte_flow_item items[],
3264 		struct rte_flow_error *error);
3265 int flow_hw_table_update(struct rte_eth_dev *dev,
3266 			 struct rte_flow_error *error);
3267 int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
3268 			   enum rte_flow_field_id field, int inherit,
3269 			   const struct rte_flow_attr *attr,
3270 			   struct rte_flow_error *error);
3271 
3272 static __rte_always_inline int
3273 flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
3274 {
3275 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3276 	uint16_t port;
3277 
3278 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3279 		struct mlx5_priv *priv;
3280 		struct mlx5_hca_flex_attr *attr;
3281 		struct mlx5_devx_match_sample_info_query_attr *info;
3282 
3283 		priv = rte_eth_devices[port].data->dev_private;
3284 		attr = &priv->sh->cdev->config.hca_attr.flex;
3285 		if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) {
3286 			info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0];
3287 			if (priv->sh->srh_flex_parser.flex.mapnum)
3288 				return info->sample_dw_data * sizeof(uint32_t);
3289 			else
3290 				return UINT32_MAX;
3291 		}
3292 	}
3293 #endif
3294 	return UINT32_MAX;
3295 }
3296 
3297 static __rte_always_inline uint8_t
3298 flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
3299 {
3300 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3301 	uint16_t port;
3302 	struct mlx5_priv *priv;
3303 
3304 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3305 		priv = rte_eth_devices[port].data->dev_private;
3306 		if (priv->dr_ctx == dr_ctx)
3307 			return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
3308 	}
3309 #else
3310 	RTE_SET_USED(dr_ctx);
3311 #endif
3312 	return 0;
3313 }
3314 
3315 static __rte_always_inline uint16_t
3316 flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
3317 {
3318 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3319 	uint16_t port;
3320 	struct mlx5_priv *priv;
3321 	struct mlx5_flex_parser_devx *fp;
3322 
3323 	if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
3324 		return 0;
3325 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3326 		priv = rte_eth_devices[port].data->dev_private;
3327 		if (priv->dr_ctx == dr_ctx) {
3328 			fp = priv->sh->srh_flex_parser.flex.devx_fp;
3329 			return fp->sample_info[idx].modify_field_id;
3330 		}
3331 	}
3332 #else
3333 	RTE_SET_USED(dr_ctx);
3334 	RTE_SET_USED(idx);
3335 #endif
3336 	return 0;
3337 }
3338 
3339 void
3340 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
3341 #ifdef HAVE_MLX5_HWS_SUPPORT
3342 struct mlx5_mirror;
3343 void
3344 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
3345 void
3346 mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
3347 			     struct mlx5_indirect_list *ptr);
3348 void
3349 mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
3350 			    struct mlx5_indirect_list *reformat);
3351 #endif
3352 #endif /* RTE_PMD_MLX5_FLOW_H_ */
3353