xref: /dpdk/drivers/net/mlx5/mlx5_flow.h (revision 3cd695c34528571c378c5f6be7ff81d3cca9a84c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
7 
8 #include <stdalign.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 
13 #include <rte_alarm.h>
14 #include <rte_mtr.h>
15 
16 #include <mlx5_glue.h>
17 #include <mlx5_prm.h>
18 
19 #include "mlx5.h"
20 #include "rte_pmd_mlx5.h"
21 #include "hws/mlx5dr.h"
22 #include "mlx5_tx.h"
23 
24 /* E-Switch Manager port, used for rte_flow_item_port_id. */
25 #define MLX5_PORT_ESW_MGR UINT32_MAX
26 
27 /* E-Switch Manager port, used for rte_flow_item_ethdev. */
28 #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX
29 
/* Private rte flow items. */
enum mlx5_rte_flow_item_type {
	/*
	 * Values start at INT_MIN so these internal item types can never
	 * collide with the non-negative public RTE_FLOW_ITEM_TYPE_* values.
	 */
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
	MLX5_RTE_FLOW_ITEM_TYPE_SQ,
	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
	MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
};
38 
/* Private (internal) rte flow actions. */
enum mlx5_rte_flow_action_type {
	/*
	 * Values start at INT_MIN so these internal action types can never
	 * collide with the non-negative public RTE_FLOW_ACTION_TYPE_* values.
	 */
	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
	MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	MLX5_RTE_FLOW_ACTION_TYPE_AGE,
	MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
	MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
	MLX5_RTE_FLOW_ACTION_TYPE_RSS,
	MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
};
53 
/* Private (internal) Field IDs for MODIFY_FIELD action. */
enum mlx5_rte_flow_field_id {
	/* Negative range keeps these distinct from public rte_flow field IDs. */
	MLX5_RTE_FLOW_FIELD_END = INT_MIN,
	MLX5_RTE_FLOW_FIELD_META_REG,
};
59 
60 #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29
61 
62 #define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \
63 	(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)
64 
65 #define MLX5_INDIRECT_ACTION_IDX_GET(handle) \
66 	(((uint32_t)(uintptr_t)(handle)) & \
67 	 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
68 
/*
 * Indirect action type, stored in the top bits of an action handle
 * (see MLX5_INDIRECT_ACTION_TYPE_OFFSET / MLX5_INDIRECT_ACTION_TYPE_GET).
 */
enum mlx5_indirect_type {
	MLX5_INDIRECT_ACTION_TYPE_RSS,
	MLX5_INDIRECT_ACTION_TYPE_AGE,
	MLX5_INDIRECT_ACTION_TYPE_COUNT,
	MLX5_INDIRECT_ACTION_TYPE_CT,
	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
};
77 
/* Currently at most 16 ports are supported; the action number is 32M. */
79 #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10
80 
81 #define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25
82 #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)
83 
84 /*
 * When SW steering flow engine is used, the CT action handles are encoded in the following way:
86  * - bits 31:29 - type
87  * - bits 28:25 - port index of the action owner
88  * - bits 24:0 - action index
89  */
90 #define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
91 	((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
92 	 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
93 	  MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))
94 
95 #define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
96 	(((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
97 	 MLX5_INDIRECT_ACT_CT_OWNER_MASK)
98 
99 #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
100 	((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))
101 
102 /*
 * When HW steering flow engine is used, the CT action handles are encoded in the following way:
104  * - bits 31:29 - type
105  * - bits 28:0 - action index
106  */
107 #define MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(index) \
108 	((struct rte_flow_action_handle *)(uintptr_t) \
109 	 ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (index)))
110 
/*
 * Type tag of an indirect action list handle, stored in the embedded
 * struct mlx5_indirect_list header of each list object.
 */
enum mlx5_indirect_list_type {
	MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
	MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
	MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
	MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,
};
117 
/**
 * Base type for indirect list type.
 *
 * Indirect list handles are cast to this structure to recover the type
 * tag (see mlx5_get_indirect_list_type()).
 */
struct mlx5_indirect_list {
	/* Indirect list type. */
	enum mlx5_indirect_list_type type;
	/*
	 * Optional storage list entry; entry.le_prev stays NULL while the
	 * element is not linked (see mlx5_indirect_list_remove_entry()).
	 */
	LIST_ENTRY(mlx5_indirect_list) entry;
};
127 
128 static __rte_always_inline void
129 mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
130 {
131 	LIST_HEAD(, mlx5_indirect_list) *h = head;
132 
133 	LIST_INSERT_HEAD(h, elem, entry);
134 }
135 
static __rte_always_inline void
mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
{
	/*
	 * Unlink only if the element was actually inserted:
	 * le_prev == NULL presumably marks a never-linked (or
	 * zero-initialized) element, and LIST_REMOVE() on it would
	 * dereference NULL.
	 */
	if (elem->entry.le_prev)
		LIST_REMOVE(elem, entry);
}
142 
143 static __rte_always_inline enum mlx5_indirect_list_type
144 mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
145 {
146 	return ((const struct mlx5_indirect_list *)obj)->type;
147 }
148 
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
	enum modify_reg id; /* Metadata register to match on. */
	uint32_t data; /* Value expected in the register. */
};
154 
/* Modify selected register. */
struct mlx5_rte_flow_action_set_tag {
	enum modify_reg id; /* Metadata register to write. */
	uint8_t offset; /* Offset of the field to write. */
	uint8_t length; /* Length of the field to write. */
	uint32_t data; /* Value to store. */
};
162 
/* Copy the contents of one metadata register into another. */
struct mlx5_flow_action_copy_mreg {
	enum modify_reg dst; /* Destination register. */
	enum modify_reg src; /* Source register. */
};
167 
/* Matches on source queue. */
struct mlx5_rte_flow_item_sq {
	uint32_t queue; /* DevX SQ number */
#ifdef RTE_ARCH_64
	/* NOTE(review): presumably pads the item to a fixed size on 64-bit
	 * builds - confirm against users of this item. */
	uint32_t reserved;
#endif
};
175 
176 /* Map from registers to modify fields. */
177 extern enum mlx5_modification_field reg_to_field[];
178 extern const size_t mlx5_mod_reg_size;
179 
180 static __rte_always_inline enum mlx5_modification_field
181 mlx5_convert_reg_to_field(enum modify_reg reg)
182 {
183 	MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size);
184 	return reg_to_field[reg];
185 }
186 
187 /* Feature name to allocate metadata register. */
188 enum mlx5_feature_name {
189 	MLX5_HAIRPIN_RX,
190 	MLX5_HAIRPIN_TX,
191 	MLX5_METADATA_RX,
192 	MLX5_METADATA_TX,
193 	MLX5_METADATA_FDB,
194 	MLX5_FLOW_MARK,
195 	MLX5_APP_TAG,
196 	MLX5_COPY_MARK,
197 	MLX5_MTR_COLOR,
198 	MLX5_MTR_ID,
199 	MLX5_ASO_FLOW_HIT,
200 	MLX5_ASO_CONNTRACK,
201 	MLX5_SAMPLE_ID,
202 };
203 
204 /* Default queue number. */
205 #define MLX5_RSSQ_DEFAULT_NUM 16
206 
207 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
208 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
209 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
210 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
211 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
212 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
213 
214 /* Pattern inner Layer bits. */
215 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
216 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
217 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
218 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
219 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
220 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
221 
222 /* Pattern tunnel Layer bits. */
223 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
224 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
225 #define MLX5_FLOW_LAYER_GRE (1u << 14)
226 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
227 /* List of tunnel Layer bits continued below. */
228 
229 /* General pattern items bits. */
230 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
231 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
232 #define MLX5_FLOW_ITEM_TAG (1u << 18)
233 #define MLX5_FLOW_ITEM_MARK (1u << 19)
234 
235 /* Pattern MISC bits. */
236 #define MLX5_FLOW_LAYER_ICMP (1u << 20)
237 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
238 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
239 
240 /* Pattern tunnel Layer bits (continued). */
241 #define MLX5_FLOW_LAYER_IPIP (1u << 23)
242 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
243 #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
244 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
245 
246 /* Queue items. */
247 #define MLX5_FLOW_ITEM_SQ (1u << 27)
248 
249 /* Pattern tunnel Layer bits (continued). */
250 #define MLX5_FLOW_LAYER_GTP (1u << 28)
251 
252 /* Pattern eCPRI Layer bit. */
253 #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
254 
255 /* IPv6 Fragment Extension Header bit. */
256 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
257 #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
258 
259 /* Pattern tunnel Layer bits (continued). */
260 #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
261 #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
262 
263 /* INTEGRITY item bits */
264 #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
265 #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
266 #define MLX5_FLOW_ITEM_INTEGRITY \
267 	(MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
268 
269 /* Conntrack item. */
270 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
271 
272 /* Flex item */
273 #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
274 #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
275 #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
276 
277 #define MLX5_FLOW_ITEM_FLEX \
278 	(MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX | \
279 	MLX5_FLOW_ITEM_FLEX_TUNNEL)
280 
281 /* ESP item */
282 #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
283 
284 /* Port Representor/Represented Port item */
285 #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
286 #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
287 
/* Meter color item */
#define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
/*
 * Quota item.
 * Bit 43 is used here because bit 45 is already taken by
 * MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT below; every item flag must own a
 * unique bit of the 64-bit item mask or two different items become
 * indistinguishable when the mask is tested.
 */
#define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 43)


/* IPv6 routing extension item */
#define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
#define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
296 
297 /* Aggregated affinity item */
298 #define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49)
299 
300 /* IB BTH ITEM. */
301 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
302 
303 /* PTYPE ITEM */
304 #define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
305 
306 /* NSH ITEM */
307 #define MLX5_FLOW_ITEM_NSH (1ull << 53)
308 
309 /* COMPARE ITEM */
310 #define MLX5_FLOW_ITEM_COMPARE (1ull << 54)
311 
312 /* Random ITEM */
313 #define MLX5_FLOW_ITEM_RANDOM (1ull << 55)
314 
315 /* Outer Masks. */
316 #define MLX5_FLOW_LAYER_OUTER_L3 \
317 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
318 #define MLX5_FLOW_LAYER_OUTER_L4 \
319 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
320 #define MLX5_FLOW_LAYER_OUTER \
321 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
322 	 MLX5_FLOW_LAYER_OUTER_L4)
323 
324 /* Tunnel Masks. */
325 #define MLX5_FLOW_LAYER_TUNNEL \
326 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
327 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
328 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
329 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
330 	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
331 
332 /* Inner Masks. */
333 #define MLX5_FLOW_LAYER_INNER_L3 \
334 	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
335 #define MLX5_FLOW_LAYER_INNER_L4 \
336 	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
337 #define MLX5_FLOW_LAYER_INNER \
338 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
339 	 MLX5_FLOW_LAYER_INNER_L4)
340 
341 /* Layer Masks. */
342 #define MLX5_FLOW_LAYER_L2 \
343 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
344 #define MLX5_FLOW_LAYER_L3_IPV4 \
345 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
346 #define MLX5_FLOW_LAYER_L3_IPV6 \
347 	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
348 #define MLX5_FLOW_LAYER_L3 \
349 	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
350 #define MLX5_FLOW_LAYER_L4 \
351 	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
352 
353 /* Actions */
354 #define MLX5_FLOW_ACTION_DROP (1ull << 0)
355 #define MLX5_FLOW_ACTION_QUEUE (1ull << 1)
356 #define MLX5_FLOW_ACTION_RSS (1ull << 2)
357 #define MLX5_FLOW_ACTION_FLAG (1ull << 3)
358 #define MLX5_FLOW_ACTION_MARK (1ull << 4)
359 #define MLX5_FLOW_ACTION_COUNT (1ull << 5)
360 #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6)
361 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7)
362 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8)
363 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9)
364 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10)
365 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11)
366 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12)
367 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13)
368 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14)
369 #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15)
370 #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16)
371 #define MLX5_FLOW_ACTION_JUMP (1ull << 17)
372 #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18)
373 #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19)
374 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20)
375 #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21)
376 #define MLX5_FLOW_ACTION_ENCAP (1ull << 22)
377 #define MLX5_FLOW_ACTION_DECAP (1ull << 23)
378 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24)
379 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25)
380 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26)
381 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27)
382 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
383 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
384 #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
385 #define MLX5_FLOW_ACTION_METER (1ull << 31)
386 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
387 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
388 #define MLX5_FLOW_ACTION_AGE (1ull << 34)
389 #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
390 #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
391 #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
392 #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
393 #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
394 #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
395 #define MLX5_FLOW_ACTION_CT (1ull << 41)
396 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
397 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
398 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
399 #define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
400 #define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47)
401 #define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48)
402 #define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49)
403 #define MLX5_FLOW_ACTION_NAT64 (1ull << 50)
404 #define MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX (1ull << 51)
405 
406 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
407 	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
408 
409 #define MLX5_FLOW_FATE_ACTIONS \
410 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
411 	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
412 	 MLX5_FLOW_ACTION_DEFAULT_MISS | \
413 	 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
414 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
415 	 MLX5_FLOW_ACTION_PORT_REPRESENTOR | \
416 	 MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX)
417 
418 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
419 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
420 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
421 	 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
422 	 MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX)
423 
424 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
425 				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
426 				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
427 				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
428 				      MLX5_FLOW_ACTION_SET_TP_SRC | \
429 				      MLX5_FLOW_ACTION_SET_TP_DST | \
430 				      MLX5_FLOW_ACTION_SET_TTL | \
431 				      MLX5_FLOW_ACTION_DEC_TTL | \
432 				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
433 				      MLX5_FLOW_ACTION_SET_MAC_DST | \
434 				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
435 				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
436 				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
437 				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
438 				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
439 				      MLX5_FLOW_ACTION_SET_TAG | \
440 				      MLX5_FLOW_ACTION_MARK_EXT | \
441 				      MLX5_FLOW_ACTION_SET_META | \
442 				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
443 				      MLX5_FLOW_ACTION_SET_IPV6_DSCP | \
444 				      MLX5_FLOW_ACTION_MODIFY_FIELD)
445 
446 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
447 				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
448 
449 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
450 
451 #ifndef IPPROTO_MPLS
452 #define IPPROTO_MPLS 137
453 #endif
454 
455 #define MLX5_IPV6_HDR_ECN_MASK 0x3
456 #define MLX5_IPV6_HDR_DSCP_SHIFT 2
457 
458 /* UDP port number for MPLS */
459 #define MLX5_UDP_PORT_MPLS 6635
460 
461 /* UDP port numbers for VxLAN. */
462 #define MLX5_UDP_PORT_VXLAN 4789
463 #define MLX5_UDP_PORT_VXLAN_GPE 4790
464 
465 /* UDP port numbers for RoCEv2. */
466 #define MLX5_UDP_PORT_ROCEv2 4791
467 
468 /* UDP port numbers for GENEVE. */
469 #define MLX5_UDP_PORT_GENEVE 6081
470 
471 /* Lowest priority indicator. */
472 #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
473 
474 /*
475  * Max priority for ingress\egress flow groups
476  * greater than 0 and for any transfer flow group.
 * From user configuration: 0 - 21843.
478  */
479 #define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)
480 
481 /*
482  * Number of sub priorities.
483  * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
 * matching on the NIC (firmware dependent) L4 must have the highest priority
485  * followed by L3 and ending with L2.
486  */
487 #define MLX5_PRIORITY_MAP_L2 2
488 #define MLX5_PRIORITY_MAP_L3 1
489 #define MLX5_PRIORITY_MAP_L4 0
490 #define MLX5_PRIORITY_MAP_MAX 3
491 
492 /* Valid layer type for IPV4 RSS. */
493 #define MLX5_IPV4_LAYER_TYPES \
494 	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
495 	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
496 	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
497 
498 /* Valid L4 RSS types */
499 #define MLX5_L4_RSS_TYPES (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
500 
501 /* IBV hash source bits  for IPV4. */
502 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
503 
504 /* Valid layer type for IPV6 RSS. */
505 #define MLX5_IPV6_LAYER_TYPES \
506 	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
507 	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
508 	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
509 
510 /* IBV hash source bits  for IPV6. */
511 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
512 
513 /* IBV hash bits for L3 SRC. */
514 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
515 
516 /* IBV hash bits for L3 DST. */
517 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
518 
519 /* IBV hash bits for TCP. */
520 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
521 			      IBV_RX_HASH_DST_PORT_TCP)
522 
523 /* IBV hash bits for UDP. */
524 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
525 			      IBV_RX_HASH_DST_PORT_UDP)
526 
527 /* IBV hash bits for L4 SRC. */
528 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
529 				 IBV_RX_HASH_SRC_PORT_UDP)
530 
531 /* IBV hash bits for L4 DST. */
532 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
533 				 IBV_RX_HASH_DST_PORT_UDP)
534 
535 /* Geneve header first 16Bit */
536 #define MLX5_GENEVE_VER_MASK 0x3
537 #define MLX5_GENEVE_VER_SHIFT 14
538 #define MLX5_GENEVE_VER_VAL(a) \
539 		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
540 #define MLX5_GENEVE_OPTLEN_MASK 0x3F
541 #define MLX5_GENEVE_OPTLEN_SHIFT 8
542 #define MLX5_GENEVE_OPTLEN_VAL(a) \
543 	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
544 #define MLX5_GENEVE_OAMF_MASK 0x1
545 #define MLX5_GENEVE_OAMF_SHIFT 7
546 #define MLX5_GENEVE_OAMF_VAL(a) \
547 		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
548 #define MLX5_GENEVE_CRITO_MASK 0x1
549 #define MLX5_GENEVE_CRITO_SHIFT 6
550 #define MLX5_GENEVE_CRITO_VAL(a) \
551 		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
552 #define MLX5_GENEVE_RSVD_MASK 0x3F
553 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
554 /*
555  * The length of the Geneve options fields, expressed in four byte multiples,
 * not including the eight byte fixed tunnel header.
557  */
558 #define MLX5_GENEVE_OPT_LEN_0 14
559 #define MLX5_GENEVE_OPT_LEN_1 63
560 
561 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
562 					  sizeof(struct rte_ipv4_hdr))
563 /* GTP extension header flag. */
564 #define MLX5_GTP_EXT_HEADER_FLAG 4
565 
566 /* GTP extension header PDU type shift. */
567 #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)
568 
569 /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
570 #define MLX5_IPV4_FRAG_OFFSET_MASK \
571 		(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)
572 
573 /* Specific item's fields can accept a range of values (using spec and last). */
574 #define MLX5_ITEM_RANGE_NOT_ACCEPTED	false
575 #define MLX5_ITEM_RANGE_ACCEPTED	true
576 
577 /* Software header modify action numbers of a flow. */
578 #define MLX5_ACT_NUM_MDF_IPV4		1
579 #define MLX5_ACT_NUM_MDF_IPV6		4
580 #define MLX5_ACT_NUM_MDF_MAC		2
581 #define MLX5_ACT_NUM_MDF_VID		1
582 #define MLX5_ACT_NUM_MDF_PORT		1
583 #define MLX5_ACT_NUM_MDF_TTL		1
584 #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
585 #define MLX5_ACT_NUM_MDF_TCPSEQ		1
586 #define MLX5_ACT_NUM_MDF_TCPACK		1
587 #define MLX5_ACT_NUM_SET_REG		1
588 #define MLX5_ACT_NUM_SET_TAG		1
589 #define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
590 #define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
591 #define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
592 #define MLX5_ACT_NUM_SET_DSCP		1
593 
594 /* Maximum number of fields to modify in MODIFY_FIELD */
595 #define MLX5_ACT_MAX_MOD_FIELDS 5
596 
597 /* Syndrome bits definition for connection tracking. */
598 #define MLX5_CT_SYNDROME_VALID		(0x0 << 6)
599 #define MLX5_CT_SYNDROME_INVALID	(0x1 << 6)
600 #define MLX5_CT_SYNDROME_TRAP		(0x2 << 6)
601 #define MLX5_CT_SYNDROME_STATE_CHANGE	(0x1 << 1)
602 #define MLX5_CT_SYNDROME_BAD_PACKET	(0x1 << 0)
603 
/* Flow driver (steering engine) type. */
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN, /* Lower sentinel. */
	MLX5_FLOW_TYPE_DV, /* Direct Verbs (SW steering) engine. */
	MLX5_FLOW_TYPE_VERBS, /* Legacy Verbs engine. */
	MLX5_FLOW_TYPE_HW, /* HW steering engine. */
	MLX5_FLOW_TYPE_MAX, /* Upper sentinel. */
};
611 
/* Fate action type, stored in mlx5_flow_handle.fate_action. */
enum mlx5_flow_fate_type {
	MLX5_FLOW_FATE_NONE, /* Egress flow. */
	MLX5_FLOW_FATE_QUEUE, /* Deliver to an Rx queue. */
	MLX5_FLOW_FATE_JUMP, /* Jump to another flow table. */
	MLX5_FLOW_FATE_PORT_ID, /* Forward to another port. */
	MLX5_FLOW_FATE_DROP, /* Drop the packet. */
	MLX5_FLOW_FATE_DEFAULT_MISS, /* Default miss action. */
	MLX5_FLOW_FATE_SHARED_RSS, /* Shared RSS action. */
	MLX5_FLOW_FATE_MTR, /* Meter action. */
	MLX5_FLOW_FATE_SEND_TO_KERNEL, /* Deliver to the kernel stack. */
	MLX5_FLOW_FATE_MAX, /* Upper sentinel. */
};
625 
626 /* Matcher PRM representation */
627 struct mlx5_flow_dv_match_params {
628 	size_t size;
629 	/**< Size of match value. Do NOT split size and key! */
630 	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
631 	/**< Matcher value. This value is used as the mask or as a key. */
632 };
633 
634 /* Matcher structure. */
635 struct mlx5_flow_dv_matcher {
636 	struct mlx5_list_entry entry; /**< Pointer to the next element. */
637 	union {
638 		struct mlx5_flow_tbl_resource *tbl;
639 		/**< Pointer to the table(group) the matcher associated with for DV flow. */
640 		struct mlx5_flow_group *group;
641 		/* Group of this matcher for HWS non template flow. */
642 	};
643 	void *matcher_object; /**< Pointer to DV matcher */
644 	uint16_t crc; /**< CRC of key. */
645 	uint16_t priority; /**< Priority of matcher. */
646 	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
647 };
648 
649 /* Encap/decap resource structure. */
650 struct mlx5_flow_dv_encap_decap_resource {
651 	struct mlx5_list_entry entry;
652 	/* Pointer to next element. */
653 	uint32_t refcnt; /**< Reference counter. */
654 	void *action;
655 	/**< Encap/decap action object. */
656 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
657 	size_t size;
658 	uint8_t reformat_type;
659 	uint8_t ft_type;
660 	uint64_t flags; /**< Flags for RDMA API. */
661 	uint32_t idx; /**< Index for the index memory pool. */
662 };
663 
664 /* Tag resource structure. */
665 struct mlx5_flow_dv_tag_resource {
666 	struct mlx5_list_entry entry;
667 	/**< hash list entry for tag resource, tag value as the key. */
668 	void *action;
669 	/**< Tag action object. */
670 	uint32_t refcnt; /**< Reference counter. */
671 	uint32_t idx; /**< Index for the index memory pool. */
672 	uint32_t tag_id; /**< Tag ID. */
673 };
674 
675 /* Modify resource structure */
676 struct __rte_packed_begin mlx5_flow_dv_modify_hdr_resource {
677 	struct mlx5_list_entry entry;
678 	void *action; /**< Modify header action object. */
679 	uint32_t idx;
680 	uint64_t flags; /**< Flags for RDMA API(HWS only). */
681 	/* Key area for hash list matching: */
682 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
683 	uint8_t actions_num; /**< Number of modification actions. */
684 	bool root; /**< Whether action is in root table. */
685 	struct mlx5_modification_cmd actions[];
686 	/**< Modification actions. */
687 } __rte_packed_end;
688 
689 /* Modify resource key of the hash organization. */
690 union mlx5_flow_modify_hdr_key {
691 	struct {
692 		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
693 		uint32_t actions_num:5;	/**< Number of modification actions. */
694 		uint32_t group:19;	/**< Flow group id. */
695 		uint32_t cksum;		/**< Actions check sum. */
696 	};
697 	uint64_t v64;			/**< full 64bits value of key */
698 };
699 
700 /* Jump action resource structure. */
701 struct mlx5_flow_dv_jump_tbl_resource {
702 	void *action; /**< Pointer to the rdma core action. */
703 };
704 
705 /* Port ID resource structure. */
706 struct mlx5_flow_dv_port_id_action_resource {
707 	struct mlx5_list_entry entry;
708 	void *action; /**< Action object. */
709 	uint32_t port_id; /**< Port ID value. */
710 	uint32_t idx; /**< Indexed pool memory index. */
711 };
712 
713 /* Push VLAN action resource structure */
714 struct mlx5_flow_dv_push_vlan_action_resource {
715 	struct mlx5_list_entry entry; /* Cache entry. */
716 	void *action; /**< Action object. */
717 	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
718 	rte_be32_t vlan_tag; /**< VLAN tag value. */
719 	uint32_t idx; /**< Indexed pool memory index. */
720 };
721 
722 /* Metadata register copy table entry. */
723 struct mlx5_flow_mreg_copy_resource {
724 	/*
725 	 * Hash list entry for copy table.
726 	 *  - Key is 32/64-bit MARK action ID.
727 	 *  - MUST be the first entry.
728 	 */
729 	struct mlx5_list_entry hlist_ent;
730 	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
731 	/* List entry for device flows. */
732 	uint32_t idx;
733 	uint32_t mark_id;
734 	union {
735 		uint32_t rix_flow; /* Built flow for copy. */
736 		uintptr_t hw_flow;
737 	};
738 };
739 
740 /* Table tunnel parameter. */
741 struct mlx5_flow_tbl_tunnel_prm {
742 	const struct mlx5_flow_tunnel *tunnel;
743 	uint32_t group_id;
744 	bool external;
745 };
746 
747 /* Table data structure of the hash organization. */
748 struct mlx5_flow_tbl_data_entry {
749 	struct mlx5_list_entry entry;
750 	/**< hash list entry, 64-bits key inside. */
751 	struct mlx5_flow_tbl_resource tbl;
752 	/**< flow table resource. */
753 	struct mlx5_list *matchers;
754 	/**< matchers' header associated with the flow table. */
755 	struct mlx5_flow_dv_jump_tbl_resource jump;
756 	/**< jump resource, at most one for each table created. */
757 	uint32_t idx; /**< index for the indexed mempool. */
758 	/**< tunnel offload */
759 	const struct mlx5_flow_tunnel *tunnel;
760 	uint32_t group_id;
761 	uint32_t external:1;
762 	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
763 	uint32_t is_egress:1; /**< Egress table. */
764 	uint32_t is_transfer:1; /**< Transfer table. */
765 	uint32_t dummy:1; /**<  DR table. */
766 	uint32_t id:22; /**< Table ID. */
767 	uint32_t reserve:5; /**< Reserved to future using. */
768 	uint32_t level; /**< Table level. */
769 };
770 
771 /* Sub rdma-core actions list. */
772 struct mlx5_flow_sub_actions_list {
773 	uint32_t actions_num; /**< Number of sample actions. */
774 	uint64_t action_flags;
775 	void *dr_queue_action;
776 	void *dr_tag_action;
777 	void *dr_cnt_action;
778 	void *dr_port_id_action;
779 	void *dr_encap_action;
780 	void *dr_jump_action;
781 };
782 
783 /* Sample sub-actions resource list. */
784 struct mlx5_flow_sub_actions_idx {
785 	uint32_t rix_hrxq; /**< Hash Rx queue object index. */
786 	uint32_t rix_tag; /**< Index to the tag action. */
787 	uint32_t rix_port_id_action; /**< Index to port ID action resource. */
788 	uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
789 	uint32_t rix_jump; /**< Index to the jump action resource. */
790 };
791 
/* Sample action resource structure. */
struct mlx5_flow_dv_sample_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	union {
		void *verbs_action; /**< Verbs sample action object. */
		void **sub_actions; /**< Sample sub-action array. */
	};
	struct rte_eth_dev *dev; /**< Device registers the action. */
	uint32_t idx; /**< Sample object index. */
	uint8_t ft_type; /**< Flow Table Type. */
	uint32_t ft_id; /**< Flow Table Level. */
	uint32_t ratio;   /**< Sample Ratio. */
	uint64_t set_action; /**< Restore reg_c0 value. */
	void *normal_path_tbl; /**< Flow Table pointer. */
	struct mlx5_flow_sub_actions_idx sample_idx;
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act;
	/**< Action resources. */
};
811 
812 #define MLX5_MAX_DEST_NUM	2
813 
/* Destination array action resource structure. */
struct mlx5_flow_dv_dest_array_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	uint32_t idx; /**< Destination array action object index. */
	uint8_t ft_type; /**< Flow Table Type. */
	uint8_t num_of_dest; /**< Number of destination actions. */
	struct rte_eth_dev *dev; /**< Device registers the action. */
	void *action; /**< Pointer to the rdma core action. */
	struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
	/**< Action resources. */
};
827 
828 /* PMD flow priority for tunnel */
829 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
830 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
831 
832 
833 /** Device flow handle structure for DV mode only. */
834 struct __rte_packed_begin mlx5_flow_handle_dv {
835 	/* Flow DV api: */
836 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
837 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
838 	/**< Pointer to modify header resource in cache. */
839 	uint32_t rix_encap_decap;
840 	/**< Index to encap/decap resource in cache. */
841 	uint32_t rix_push_vlan;
842 	/**< Index to push VLAN action resource in cache. */
843 	uint32_t rix_tag;
844 	/**< Index to the tag action. */
845 	uint32_t rix_sample;
846 	/**< Index to sample action resource in cache. */
847 	uint32_t rix_dest_array;
848 	/**< Index to destination array resource in cache. */
849 } __rte_packed_end;
850 
/** Device flow handle structure: used both for creating & destroying. */
struct __rte_packed_begin mlx5_flow_handle {
	SILIST_ENTRY(uint32_t)next;
	/**< Index to next device flow handle. */
	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	void *drv_flow; /**< pointer to driver flow object. */
	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
	uint32_t fate_action:4; /**< Fate action type, see enum mlx5_flow_fate_type. */
	union {
		/* Which member is valid is selected by fate_action. */
		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
		uint32_t rix_jump; /**< Index to the jump action resource. */
		uint32_t rix_port_id_action;
		/**< Index to port ID action resource. */
		uint32_t rix_fate;
		/**< Generic value indicates the fate action. */
		uint32_t rix_default_fate;
		/**< Indicates default miss fate action. */
		uint32_t rix_srss;
		/**< Indicates shared RSS fate action. */
	};
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	/* DV-only tail; must stay last (see MLX5_FLOW_HANDLE_VERBS_SIZE). */
	struct mlx5_flow_handle_dv dvh;
#endif
	uint8_t flex_item; /**< referenced Flex Item bitmask. */
} __rte_packed_end;
879 
880 /*
881  * Size for Verbs device flow handle structure only. Do not use the DV only
882  * structure in Verbs. No DV flows attributes will be accessed.
883  * Macro offsetof() could also be used here.
884  */
885 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
886 #define MLX5_FLOW_HANDLE_VERBS_SIZE \
887 	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
888 #else
889 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
890 #endif
891 
/**
 * Device flow structure only for DV flow creation.
 * Scratch data used while translating one device flow: the DR action
 * array plus references to the cached resources backing those actions.
 */
struct mlx5_flow_dv_workspace {
	uint32_t group; /**< The group index. */
	uint32_t table_id; /**< Flow table identifier. */
	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
	int actions_n; /**< number of actions. */
	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
	/**< Pointer to push VLAN action resource in cache. */
	struct mlx5_flow_dv_tag_resource *tag_resource;
	/**< pointer to the tag action. */
	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
	/**< Pointer to port ID action resource. */
	struct mlx5_flow_dv_jump_tbl_resource *jump;
	/**< Pointer to the jump action resource. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_sample_resource *sample_res;
	/**< Pointer to the sample action resource. */
	struct mlx5_flow_dv_dest_array_resource *dest_array_res;
	/**< Pointer to the destination array resource. */
};
916 
#ifdef HAVE_INFINIBAND_VERBS_H
/*
 * Maximal Verbs flow specifications & actions size.
 * Some elements are mutually exclusive, but enough space should be allocated.
 * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
 *               2. One tunnel header (exception: GRE + MPLS),
 *                  SPEC length: GRE == tunnel.
 * Actions: 1. 1 Mark OR Flag.
 *          2. 1 Drop (if any).
 *          3. No limitation for counters, but it makes no sense to support too
 *             many counters in a single device flow.
 */
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_gre) + \
			sizeof(struct ibv_flow_spec_mpls)) \
		)
#else
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_tunnel)) \
		)
#endif

/* With counter support, room for up to 4 counter actions is reserved. */
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) + \
			sizeof(struct ibv_flow_spec_counter_action) * 4 \
		)
#else
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) \
		)
#endif

/* Total per-flow buffer size reserved for Verbs specs plus actions. */
#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)

/** Device flow structure only for Verbs flow creation. */
struct mlx5_flow_verbs_workspace {
	unsigned int size; /**< Size of the attribute. */
	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
	/**< Specifications & actions buffer of verbs flow. */
};
#endif /* HAVE_INFINIBAND_VERBS_H */
975 
/* Bit positions of the skip_scale bit-field, see struct mlx5_flow. */
#define MLX5_SCALE_FLOW_GROUP_BIT 0
#define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1

/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 64
981 
982 /**
983  * tunnel offload rules type
984  */
985 enum mlx5_tof_rule_type {
986 	MLX5_TUNNEL_OFFLOAD_NONE = 0,
987 	MLX5_TUNNEL_OFFLOAD_SET_RULE,
988 	MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
989 	MLX5_TUNNEL_OFFLOAD_MISS_RULE,
990 };
991 
/** Device flow structure. */
__extension__
struct mlx5_flow {
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint32_t flow_idx; /**< The memory pool index to the main flow. */
	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
	uint64_t act_flags;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	bool external; /**< true if the flow is created external to PMD. */
	uint8_t ingress:1; /**< 1 if the flow is ingress. */
	uint8_t skip_scale:2;
	/**
	 * Bit-mask to skip scaling of flow groups with the configured factor.
	 * Bit 0 (MLX5_SCALE_FLOW_GROUP_BIT) - skip scaling of the original
	 * flow group. Bit 1 (MLX5_SCALE_JUMP_FLOW_GROUP_BIT) - skip scaling
	 * of the jump flow group, if a jump action is present.
	 * 00: scale both groups (default value).
	 * 01: skip scaling the flow group, scale the jump action group.
	 * 10: scale the flow group, skip scaling the jump action group.
	 * 11: skip scaling for both the flow group and the jump group.
	 */
	uint8_t symmetric_hash_function:1;
	/**< 1 if a symmetric RSS hash function is used. */
	union {
		/* Per-driver flow creation workspace. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
		struct mlx5_flow_dv_workspace dv;
#endif
#ifdef HAVE_INFINIBAND_VERBS_H
		struct mlx5_flow_verbs_workspace verbs;
#endif
	};
	struct mlx5_flow_handle *handle; /**< Device flow handle. */
	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
	const struct mlx5_flow_tunnel *tunnel;
	/**< Tunnel offload context attached to this flow, if any. */
	enum mlx5_tof_rule_type tof_type;
	/**< Tunnel offload rule type of this flow. */
};
1030 
/* Flow meter state. */
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1

/* ASO WQE/CQE completion polling parameters (delay and retry budget). */
#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u

/* Connection tracking reuses the meter polling budget. */
#define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES

/* Mantissa width in bits - NOTE(review): assumed, confirm against the PRM. */
#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
	struct mlx5_flow_meter_info fm;
	/*
	 * Must be the first member - presumably so a legacy meter can be
	 * addressed through its fm field; confirm against users.
	 */
	TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t idx;
	/* Index to meter object. */
};
1050 
/* Maximum number of tunnel offload objects. */
#define MLX5_MAX_TUNNELS 256
/* Priority used for tunnel offload miss rules. */
#define MLX5_TNL_MISS_RULE_PRIORITY 3
/* Jump group used by the FDB tunnel offload miss rule. */
#define MLX5_TNL_MISS_FDB_JUMP_GRP  0x1234faac
1054 
1055 /*
1056  * When tunnel offload is active, all JUMP group ids are converted
1057  * using the same method. That conversion is applied both to tunnel and
1058  * regular rule types.
 * Group ids used in tunnel rules are relative to their tunnel (!).
 * An application can create a number of steer rules, using the same
 * tunnel, with a different group id in each rule.
1062  * Each tunnel stores its groups internally in PMD tunnel object.
1063  * Groups used in regular rules do not belong to any tunnel and are stored
1064  * in tunnel hub.
1065  */
1066 
/* PMD tunnel offload object, one per application tunnel. */
struct mlx5_flow_tunnel {
	LIST_ENTRY(mlx5_flow_tunnel) chain; /**< Link in the tunnels list. */
	struct rte_flow_tunnel app_tunnel;	/**< App tunnel copy. */
	uint32_t tunnel_id;			/**< Unique tunnel ID. */
	RTE_ATOMIC(uint32_t) refctn; /**< Reference counter. */
	struct rte_flow_action action;
	/**< PMD action carrying this tunnel, see flow_actions_to_tunnel(). */
	struct rte_flow_item item;
	/**< PMD pattern item carrying this tunnel, see flow_items_to_tunnel(). */
	struct mlx5_hlist *groups;		/**< Tunnel groups. */
};
1076 
/** PMD tunnel related context, see mlx5_tunnel_hub(). */
struct mlx5_flow_tunnel_hub {
	/*
	 * Tunnels list.
	 * Access to the list MUST be MT protected (by the sl spinlock).
	 */
	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
	/* Protects access to the tunnels list. */
	rte_spinlock_t sl;
	struct mlx5_hlist *groups;		/**< Non-tunnel groups. */
};
1087 
/* Converts a jump group to a flow table ID in tunnel rules. */
struct tunnel_tbl_entry {
	struct mlx5_list_entry hash; /**< mlx5 list entry. */
	uint32_t flow_table; /**< Flow table ID the group is mapped to. */
	uint32_t tunnel_id; /**< ID of the tunnel owning this mapping. */
	uint32_t group; /**< Jump group id as used in the rule. */
};
1095 
/* Mark a tunnel id as a tunnel flow table ID by setting bit 16. */
static inline uint32_t
tunnel_id_to_flow_tbl(uint32_t id)
{
	return id | UINT32_C(0x10000);
}
1101 
/* Recover the tunnel id from a flow table ID by clearing marker bit 16. */
static inline uint32_t
tunnel_flow_tbl_to_id(uint32_t flow_tbl)
{
	return flow_tbl & ~UINT32_C(0x10000);
}
1107 
/* 64-bit hash key combining a tunnel id with a jump group id. */
union tunnel_tbl_key {
	uint64_t val; /**< Combined scalar value used as the hash key. */
	struct {
		uint32_t tunnel_id; /**< Tunnel id part of the key. */
		uint32_t group; /**< Group id part of the key. */
	};
};
1115 
1116 static inline struct mlx5_flow_tunnel_hub *
1117 mlx5_tunnel_hub(struct rte_eth_dev *dev)
1118 {
1119 	struct mlx5_priv *priv = dev->data->dev_private;
1120 	return priv->sh->tunnel_hub;
1121 }
1122 
/*
 * Tunnel offload is considered active when the dv_miss_info configuration
 * flag is set; it is only available with DV flow support.
 */
static inline bool
is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	const struct mlx5_priv *priv = dev->data->dev_private;
	return !!priv->sh->config.dv_miss_info;
#else
	/* Without DV flow support tunnel offload is never active. */
	RTE_SET_USED(dev);
	return false;
#endif
}
1134 
1135 static inline bool
1136 is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
1137 {
1138 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
1139 }
1140 
1141 static inline bool
1142 is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
1143 {
1144 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
1145 }
1146 
1147 static inline const struct mlx5_flow_tunnel *
1148 flow_actions_to_tunnel(const struct rte_flow_action actions[])
1149 {
1150 	return actions[0].conf;
1151 }
1152 
1153 static inline const struct mlx5_flow_tunnel *
1154 flow_items_to_tunnel(const struct rte_flow_item items[])
1155 {
1156 	return items[0].spec;
1157 }
1158 
1159 /**
1160  * Gets the tag array given for RTE_FLOW_FIELD_TAG type.
1161  *
1162  * In old API the value was provided in "level" field, but in new API
1163  * it is provided in "tag_array" field. Since encapsulation level is not
1164  * relevant for metadata, the tag array can be still provided in "level"
1165  * for backwards compatibility.
1166  *
1167  * @param[in] data
1168  *   Pointer to tag modify data structure.
1169  *
1170  * @return
1171  *   Tag array index.
1172  */
1173 static inline uint8_t
1174 flow_tag_index_get(const struct rte_flow_field_data *data)
1175 {
1176 	return data->tag_index ? data->tag_index : data->level;
1177 }
1178 
1179 /**
1180  * Fetch 1, 2, 3 or 4 byte field from the byte array
1181  * and return as unsigned integer in host-endian format.
1182  *
1183  * @param[in] data
1184  *   Pointer to data array.
1185  * @param[in] size
1186  *   Size of field to extract.
1187  *
1188  * @return
1189  *   converted field in host endian format.
1190  */
1191 static inline uint32_t
1192 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
1193 {
1194 	uint32_t ret;
1195 
1196 	switch (size) {
1197 	case 1:
1198 		ret = *data;
1199 		break;
1200 	case 2:
1201 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1202 		break;
1203 	case 3:
1204 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1205 		ret = (ret << 8) | *(data + sizeof(uint16_t));
1206 		break;
1207 	case 4:
1208 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
1209 		break;
1210 	default:
1211 		MLX5_ASSERT(false);
1212 		ret = 0;
1213 		break;
1214 	}
1215 	return ret;
1216 }
1217 
1218 static inline bool
1219 flow_modify_field_support_tag_array(enum rte_flow_field_id field)
1220 {
1221 	switch ((int)field) {
1222 	case RTE_FLOW_FIELD_TAG:
1223 	case RTE_FLOW_FIELD_MPLS:
1224 	case MLX5_RTE_FLOW_FIELD_META_REG:
1225 		return true;
1226 	default:
1227 		break;
1228 	}
1229 	return false;
1230 }
1231 
/* Description of a single field handled by a MODIFY_FIELD action. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* PRM modification field id. */
	uint32_t shift; /* Field bit shift. */
	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
};
1239 
/* HW steering flow attributes. */
struct mlx5_flow_attr {
	uint32_t port_id; /* Port index. */
	uint32_t group; /* Flow group. */
	uint32_t priority; /* Original priority. */
	/* RSS level, used by priority adjustment. */
	uint32_t rss_level;
	/* Action flags, used by priority adjustment. */
	uint32_t act_flags;
	uint32_t tbl_type; /* Flow table type. */
};
1251 
/* Flow structure. */
struct __rte_packed_begin rte_flow {
	uint32_t dev_handles;
	/**< Device flow handles that are part of the flow. */
	uint32_t type:2; /**< Flow type. */
	uint32_t drv_type:2; /**< Driver type. */
	uint32_t tunnel:1; /**< Tunnel offload flag. */
	uint32_t meter:24; /**< Holds flow meter id. */
	uint32_t indirect_type:2; /**< Indirect action type. */
	uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
	uint32_t rix_mreg_copy;
	/**< Index to metadata register copy table resource. */
	uint32_t counter; /**< Holds flow counter. */
	uint32_t tunnel_id;  /**< Tunnel id. */
	union {
		uint32_t age; /**< Holds ASO age bit index. */
		uint32_t ct; /**< Holds ASO CT index. */
	};
	uint32_t geneve_tlv_option; /**< Holds GENEVE TLV option id. */
} __rte_packed_end;
1272 
1273 /*
1274  * HWS COUNTER ID's layout
1275  *       3                   2                   1                   0
1276  *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
1277  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1278  *    |  T  |     | D |                                               |
1279  *    ~  Y  |     | C |                    IDX                        ~
1280  *    |  P  |     | S |                                               |
1281  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1282  *
1283  *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
1284  *    Bit 25:24 = DCS index
1285  *    Bit 23:00 = IDX in this counter belonged DCS bulk.
1286  */
1287 typedef uint32_t cnt_id_t;
1288 
1289 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1290 
/* Ongoing HWS flow operation types, see rte_flow_hw::operation_type. */
enum {
	MLX5_FLOW_HW_FLOW_OP_TYPE_NONE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE,
	/* RSZ_TBL_* variants are used while resizing a template table. */
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE,
};
1300 
/* Flag bits recorded in rte_flow_hw::flags. */
enum {
	MLX5_FLOW_HW_FLOW_FLAG_CNT_ID = RTE_BIT32(0),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP = RTE_BIT32(1),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ = RTE_BIT32(2),
	MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX = RTE_BIT32(3),
	MLX5_FLOW_HW_FLOW_FLAG_MTR_ID = RTE_BIT32(4),
	MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR = RTE_BIT32(5),
	MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW = RTE_BIT32(6),
};
1310 
/* Mask of all flag bits that may be set in rte_flow_hw::flags. */
#define MLX5_FLOW_HW_FLOW_FLAGS_ALL ( \
		MLX5_FLOW_HW_FLOW_FLAG_CNT_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ | \
		MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX | \
		MLX5_FLOW_HW_FLOW_FLAG_MTR_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR | \
		MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW \
	)
1320 
1321 #ifdef PEDANTIC
1322 #pragma GCC diagnostic ignored "-Wpedantic"
1323 #endif
1324 
/* Expected mlx5dr rule size; the rte_flow_hw padding note relies on it. */
#define MLX5_DR_RULE_SIZE 72

/* Head of a singly-linked chain of NTA RSS flows. */
SLIST_HEAD(mlx5_nta_rss_flow_head, rte_flow_hw);
1328 
/** HWS non template flow data. */
struct rte_flow_nt2hws {
	/** BWC rule pointer. */
	struct mlx5dr_bwc_rule *nt_rule;
	/** The matcher for non template api. */
	struct mlx5_flow_dv_matcher *matcher;
	/** Auxiliary data stored per flow. */
	struct rte_flow_hw_aux *flow_aux;
	/** Modify header pointer. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/** Chain NTA flows. */
	SLIST_ENTRY(rte_flow_hw) next;
	/** Encap/decap index. */
	uint32_t rix_encap_decap;
	/** Metadata register copy table resource index. */
	uint32_t rix_mreg_copy;
	/* NOTE(review): name keeps a typo of "chained"; presumably marks a chained NTA flow. */
	uint8_t chaned_flow;
};
1346 
/** HWS flow struct. */
struct rte_flow_hw {
	union {
		/** The table the flow was allocated from. */
		struct rte_flow_template_table *table;
		/** Data needed for non template flows. */
		struct rte_flow_nt2hws *nt2hws;
	};
	/** Application's private data passed to enqueued flow operation. */
	void *user_data;
	union {
		/* Fate action data; validity is indicated by the flags field. */
		/** Jump action. */
		struct mlx5_hw_jump_action *jump;
		/** TIR action. */
		struct mlx5_hrxq *hrxq;
	};
	/** Flow index from indexed pool. */
	uint32_t idx;
	/** Resource index from indexed pool. */
	uint32_t res_idx;
	/** HWS flow rule index passed to mlx5dr. */
	uint32_t rule_idx;
	/** Which flow fields (inline or in auxiliary struct) are used. */
	uint32_t flags;
	/** COUNT action index. */
	cnt_id_t cnt_id;
	/** Ongoing flow operation type. */
	uint8_t operation_type;
	/** Index of pattern template this flow is based on. */
	uint8_t mt_idx;
	/** Equals true if it is non template rule. */
	bool nt_rule;
	/**
	 * Padding for alignment to 56 bytes.
	 * Since mlx5dr rule is 72 bytes, whole flow is contained within 128 B (2 cache lines).
	 * This space is reserved for future additions to flow struct.
	 */
	uint8_t padding[9];
	/** HWS layer data struct. */
	uint8_t rule[];
};
1388 
/** Auxiliary data fields that are updatable. */
struct rte_flow_hw_aux_fields {
	/** AGE action index. */
	uint32_t age_idx;
	/** Direct meter (METER or METER_MARK) action index. */
	uint32_t mtr_id;
};
1396 
/** Auxiliary data stored per flow which is not required to be stored in main flow structure. */
struct rte_flow_hw_aux {
	/** Auxiliary fields associated with the original flow. */
	struct rte_flow_hw_aux_fields orig;
	/** Auxiliary fields associated with the updated flow. */
	struct rte_flow_hw_aux_fields upd;
	/** Index of resizable matcher associated with this flow. */
	uint8_t matcher_selector;
	/** Placeholder flow struct used during flow rule update operation. */
	struct rte_flow_hw upd_flow;
};
1408 
1409 #ifdef PEDANTIC
1410 #pragma GCC diagnostic error "-Wpedantic"
1411 #endif
1412 
struct mlx5_action_construct_data;
/* Callback used to build a DR rule action from an indirect-list action. */
typedef int
(*indirect_list_callback_t)(struct rte_eth_dev *,
			    const struct mlx5_action_construct_data *,
			    const struct rte_flow_action *,
			    struct mlx5dr_rule_action *);
1419 
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
	LIST_ENTRY(mlx5_action_construct_data) next;
	/* Ensure the action types are matched. */
	int type;
	uint32_t idx;  /* Data index. */
	uint16_t action_src; /* rte_flow_action src offset. */
	uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
	/* Callback for indirect-list actions. */
	indirect_list_callback_t indirect_list_cb;
	union {
		/* Per-action-type construct parameters. */
		struct {
			/* Expected type of indirection action. */
			enum rte_flow_action_type expected_type;
		} indirect;
		struct {
			/* Encap data len. */
			uint16_t len;
		} encap;
		struct {
			/* Modify header action offset in pattern. */
			uint16_t mhdr_cmds_off;
			/* Offset in pattern after modify header actions. */
			uint16_t mhdr_cmds_end;
			/*
			 * True if this action is masked and does not need to
			 * be generated.
			 */
			bool shared;
			/*
			 * Modified field definitions in dst field (SET, ADD)
			 * or src field (COPY).
			 */
			struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
			/* Modified field definitions in dst field (COPY). */
			struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
			/*
			 * Masks applied to field values to generate
			 * PRM actions.
			 */
			uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
			/* Copy of action passed to the action template. */
			struct rte_flow_action_modify_field action;
		} modify_header;
		struct {
			bool symmetric_hash_function; /* Symmetric RSS hash. */
			uint64_t types; /* RSS hash types. */
			uint32_t level; /* RSS level. */
			uint32_t idx; /* Shared action index. */
		} shared_rss;
		struct {
			cnt_id_t id; /* Shared counter id. */
		} shared_counter;
		struct {
			/* IPv6 extension push data len. */
			uint16_t len;
		} ipv6_ext;
		struct {
			uint32_t id; /* Shared meter id. */
			uint32_t conf_masked:1; /* 1 if configuration is masked. */
		} shared_meter;
	};
};
1482 
/* Maximum number of GENEVE TLV option resources in one manager. */
#define MAX_GENEVE_OPTIONS_RESOURCES 7

/* GENEVE TLV options manager structure. */
struct mlx5_geneve_tlv_options_mng {
	uint8_t nb_options; /* Number of options inside the template. */
	struct {
		uint8_t opt_type; /* GENEVE TLV option type. */
		uint16_t opt_class; /* GENEVE TLV option class. */
	} options[MAX_GENEVE_OPTIONS_RESOURCES];
};
1493 
/* Flow item template struct. */
struct rte_flow_pattern_template {
	LIST_ENTRY(rte_flow_pattern_template) next;
	/* Template attributes. */
	struct rte_flow_pattern_template_attr attr;
	struct mlx5dr_match_template *mt; /* mlx5 match template. */
	uint64_t item_flags; /* Item layer flags. */
	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
	/*
	 * If true, then rule pattern should be prepended with
	 * represented_port pattern item.
	 */
	bool implicit_port;
	/*
	 * If true, then rule pattern should be prepended with
	 * tag pattern item for representor matching.
	 */
	bool implicit_tag;
	/* Manages all GENEVE TLV options used by this pattern template. */
	struct mlx5_geneve_tlv_options_mng geneve_opt_mng;
	uint8_t flex_item; /* Flex item index bitmask. */
	/* Items on which this pattern template is based on. */
	struct rte_flow_item *items;
};
1519 
/* Flow action template struct. */
struct rte_flow_actions_template {
	LIST_ENTRY(rte_flow_actions_template) next;
	/* Template attributes. */
	struct rte_flow_actions_template_attr attr;
	struct rte_flow_action *actions; /* Cached flow actions. */
	struct rte_flow_action *orig_actions; /* Original flow actions. */
	struct rte_flow_action *masks; /* Cached action masks. */
	struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
	uint64_t action_flags; /* Bit-map of all valid actions in template. */
	uint16_t dr_actions_num; /* Amount of DR rules actions. */
	uint16_t actions_num; /* Amount of flow actions. */
	uint16_t *dr_off; /* DR action offset for given rte action offset. */
	uint16_t *src_off; /* RTE action displacement from app. template. */
	uint16_t reformat_off; /* Offset of DR reformat action. */
	uint16_t mhdr_off; /* Offset of DR modify header action. */
	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
	uint8_t flex_item; /* Flex item index bitmask. */
};
1540 
/* Jump action struct: keeps both root and HWS variants of the action. */
struct mlx5_hw_jump_action {
	/* Action jump from root. */
	struct mlx5dr_action *root_action;
	/* HW steering jump action. */
	struct mlx5dr_action *hws_action;
};
1548 
/* Encap decap action struct. */
struct mlx5_hw_encap_decap_action {
	struct mlx5_indirect_list indirect; /* Must be first for list casts. */
	enum mlx5dr_action_type action_type;
	struct mlx5dr_action *action; /* Action object. */
	/* Is header_reformat action shared across flows in table. */
	uint32_t shared:1;
	uint32_t multi_pattern:1; /* Part of a multi-pattern table. */
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data. */
};
1560 
/* Push remove action struct. */
struct mlx5_hw_push_remove_action {
	struct mlx5dr_action *action; /* Action object. */
	/* Is push_remove action shared across flows in table. */
	uint8_t shared;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data. */
};
1569 
/* Modify field action struct. */
struct mlx5_hw_modify_header_action {
	/* Reference to DR action. */
	struct mlx5dr_action *action;
	/* Modify header action position in action rule table. */
	uint16_t pos;
	/* Is MODIFY_HEADER action shared across flows in table. */
	uint32_t shared:1;
	uint32_t multi_pattern:1; /* Part of a multi-pattern table. */
	/* Amount of modification commands stored in the precompiled buffer. */
	uint32_t mhdr_cmds_num;
	/* Precompiled modification commands. */
	struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
};
1584 
/* The maximum number of actions supported in a single flow. */
#define MLX5_HW_MAX_ACTS 16

/* DR action set struct. */
struct mlx5_hw_actions {
	/* Dynamic action list. */
	LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
	struct mlx5_hw_jump_action *jump; /* Jump action. */
	struct mlx5_hrxq *tir; /* TIR action. */
	struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
	/* Encap/Decap action. */
	struct mlx5_hw_encap_decap_action *encap_decap;
	uint16_t encap_decap_pos; /* Encap/Decap action position. */
	/* Push/remove action. */
	struct mlx5_hw_push_remove_action *push_remove;
	uint16_t push_remove_pos; /* Push/remove action position. */
	uint32_t mark:1; /* Indicate the mark action. */
	cnt_id_t cnt_id; /* Counter id. */
	uint32_t mtr_id; /* Meter id. */
	/* Translated DR action array from action template. */
	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
};
1607 
/* mlx5 action template struct: template plus its translated actions. */
struct mlx5_hw_action_template {
	/* Action template pointer. */
	struct rte_flow_actions_template *action_template;
	struct mlx5_hw_actions acts; /* Template actions. */
};
1614 
/* mlx5 flow group struct. */
struct mlx5_flow_group {
	struct mlx5_list_entry entry; /* mlx5 list entry. */
	LIST_ENTRY(mlx5_flow_group) next;
	struct rte_eth_dev *dev; /* Reference to corresponding device. */
	struct mlx5dr_table *tbl; /* HWS table object. */
	struct mlx5_hw_jump_action jump; /* Jump action. */
	struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */
	enum mlx5dr_table_type type; /* Table type. */
	uint32_t group_id; /* Group id. */
	uint32_t idx; /* Group memory index. */
	/* List of all matchers created for this group in non template api. */
	struct mlx5_list *matchers;
};
1629 
1630 
/* Maximum numbers of pattern/action templates bound to one table. */
#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 32
#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32

/* Number of reformat types tracked per multi-pattern context. */
#define MLX5_MULTIPATTERN_ENCAP_NUM 5
/* Maximum number of resize segments of a template table. */
#define MLX5_MAX_TABLE_RESIZE_NUM 64
1636 
/* One resize segment of the multi-pattern context. */
struct mlx5_multi_pattern_segment {
	/*
	 * Modify Header Argument Objects number allocated for action in that
	 * segment.
	 * Capacity is always power of 2.
	 */
	uint32_t capacity;
	uint32_t head_index; /* First index covered by this segment. */
	struct mlx5dr_action *mhdr_action; /* Shared modify header action. */
	struct mlx5dr_action *reformat_action[MLX5_MULTIPATTERN_ENCAP_NUM];
};
1648 
/* Per-table multi-pattern context. */
struct mlx5_tbl_multi_pattern_ctx {
	struct {
		uint32_t elements_num;
		struct mlx5dr_action_reformat_header reformat_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
		/**
		 * insert_header structure is larger than reformat_header.
		 * Enclosing these structures with union would cause a gap
		 * between reformat_hdr array elements.
		 * mlx5dr_action_create_reformat() expects adjacent array elements.
		 */
		struct mlx5dr_action_insert_header insert_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];

	struct {
		uint32_t elements_num;
		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} mh;
	struct mlx5_multi_pattern_segment segments[MLX5_MAX_TABLE_RESIZE_NUM];
};
1668 
/* Activate the multi-pattern context, see mlx5_is_multi_pattern_active(). */
static __rte_always_inline void
mlx5_multi_pattern_activate(struct mlx5_tbl_multi_pattern_ctx *mpctx)
{
	/* head_index == 1 in the first segment marks the context active. */
	mpctx->segments[0].head_index = 1;
}
1674 
/* True when mlx5_multi_pattern_activate() was called on this context. */
static __rte_always_inline bool
mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
{
	return mpctx->segments[0].head_index == 1;
}
1680 
/* Creation configuration of a template table. */
struct mlx5_flow_template_table_cfg {
	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
	bool external; /* True if created by flow API, false if table is internal to PMD. */
};
1685 
/* One matcher of a (possibly resizable) template table. */
struct mlx5_matcher_info {
	struct mlx5dr_matcher *matcher; /* Template matcher. */
	struct mlx5dr_action *jump; /* Jump to matcher action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
1691 
/* Cache-aligned per-queue buffer of precalculated DR rule actions. */
struct __rte_cache_aligned mlx5_dr_rule_action_container {
	struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];
};
1695 
/* Template table, the HWS container of flows created through templates. */
struct rte_flow_template_table {
	LIST_ENTRY(rte_flow_template_table) next;
	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
	struct mlx5_matcher_info matcher_info[2]; /* Two slots for resizing. */
	uint32_t matcher_selector; /* Index of the active matcher slot. */
	rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables. */
	/* Item templates bind to the table. */
	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
	/* Action templates bind to the table. */
	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
	struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */
	struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
	struct mlx5_flow_template_table_cfg cfg;
	uint32_t type; /* Flow table type RX/TX/FDB. */
	uint8_t nb_item_templates; /* Item template number. */
	uint8_t nb_action_templates; /* Action template number. */
	uint32_t refcnt; /* Table reference counter. */
	struct mlx5_tbl_multi_pattern_ctx mpctx;
	struct mlx5dr_matcher_attr matcher_attr;
	/**
	 * Variable length array of containers containing precalculated templates of DR actions
	 * arrays. This array is allocated at template table creation time and contains
	 * one container per each queue, per each actions template.
	 * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair.
	 * Each container will provide a local "queue buffer" to work on for flow creation
	 * operations when using a given actions template.
	 */
	struct mlx5_dr_rule_action_container rule_acts[];
};
1726 
1727 static __rte_always_inline struct mlx5dr_matcher *
1728 mlx5_table_matcher(const struct rte_flow_template_table *table)
1729 {
1730 	return table->matcher_info[table->matcher_selector].matcher;
1731 }
1732 
1733 static __rte_always_inline struct mlx5_multi_pattern_segment *
1734 mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
1735 				uint32_t flow_resource_ix)
1736 {
1737 	int i;
1738 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
1739 
1740 	if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr)))
1741 		return &mpctx->segments[0];
1742 	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
1743 		uint32_t limit = mpctx->segments[i].head_index +
1744 				 mpctx->segments[i].capacity;
1745 
1746 		if (flow_resource_ix < limit)
1747 			return &mpctx->segments[i];
1748 	}
1749 	return NULL;
1750 }
1751 
1752 /*
1753  * Convert metadata or tag to the actual register.
1754  * META: Fixed C_1 for FDB mode, REG_A for NIC TX and REG_B for NIC RX.
1755  * TAG: C_x expect meter color reg and the reserved ones.
1756  */
1757 static __rte_always_inline int
1758 flow_hw_get_reg_id_by_domain(struct rte_eth_dev *dev,
1759 			     enum rte_flow_item_type type,
1760 			     enum mlx5dr_table_type domain_type, uint32_t id)
1761 {
1762 	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
1763 	struct mlx5_dev_registers *reg = &sh->registers;
1764 
1765 	switch (type) {
1766 	case RTE_FLOW_ITEM_TYPE_META:
1767 		if (sh->config.dv_esw_en &&
1768 		    sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
1769 			return REG_C_1;
1770 		}
1771 		/*
1772 		 * On root table - PMD allows only egress META matching, thus
1773 		 * REG_A matching is sufficient.
1774 		 *
1775 		 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
1776 		 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
1777 		 * However, current FW does not implement REG_B case right now, so
1778 		 * REG_B case is return explicitly by this function for NIC RX.
1779 		 */
1780 		if (domain_type == MLX5DR_TABLE_TYPE_NIC_RX)
1781 			return REG_B;
1782 		return REG_A;
1783 	case RTE_FLOW_ITEM_TYPE_CONNTRACK:
1784 	case RTE_FLOW_ITEM_TYPE_METER_COLOR:
1785 		return reg->aso_reg;
1786 	case RTE_FLOW_ITEM_TYPE_TAG:
1787 		if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1788 			return REG_C_3;
1789 		MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
1790 		return reg->hw_avl_tags[id];
1791 	default:
1792 		return REG_NON;
1793 	}
1794 }
1795 
1796 static __rte_always_inline int
1797 flow_hw_get_reg_id_from_ctx(void *dr_ctx, enum rte_flow_item_type type,
1798 			    enum mlx5dr_table_type domain_type, uint32_t id)
1799 {
1800 	uint16_t port;
1801 
1802 	MLX5_ETH_FOREACH_DEV(port, NULL) {
1803 		struct mlx5_priv *priv;
1804 
1805 		priv = rte_eth_devices[port].data->dev_private;
1806 		if (priv->dr_ctx == dr_ctx)
1807 			return flow_hw_get_reg_id_by_domain(&rte_eth_devices[port],
1808 							    type, domain_type, id);
1809 	}
1810 	return REG_NON;
1811 }
1812 
1813 #endif
1814 
1815 /*
1816  * Define list of valid combinations of RX Hash fields
1817  * (see enum ibv_rx_hash_fields).
1818  */
1819 #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
1820 #define MLX5_RSS_HASH_IPV4_TCP \
1821 	(MLX5_RSS_HASH_IPV4 | \
1822 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1823 #define MLX5_RSS_HASH_IPV4_UDP \
1824 	(MLX5_RSS_HASH_IPV4 | \
1825 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1826 #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
1827 #define MLX5_RSS_HASH_IPV6_TCP \
1828 	(MLX5_RSS_HASH_IPV6 | \
1829 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1830 #define MLX5_RSS_HASH_IPV6_UDP \
1831 	(MLX5_RSS_HASH_IPV6 | \
1832 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1833 #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
1834 #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
1835 #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
1836 #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
1837 #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
1838 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
1839 #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
1840 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
1841 #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
1842 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
1843 #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
1844 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
1845 #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
1846 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
1847 #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
1848 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
1849 #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
1850 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
1851 #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
1852 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
1853 
1854 #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
1855 #define IBV_RX_HASH_IPSEC_SPI (1U << 8)
1856 #endif
1857 
1858 #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
1859 #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
1860 				MLX5_RSS_HASH_ESP_SPI)
1861 #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
1862 				MLX5_RSS_HASH_ESP_SPI)
1863 #define MLX5_RSS_HASH_NONE 0ULL
1864 
1865 #define MLX5_RSS_IS_SYMM(func) \
1866 		(((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \
1867 		 ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT))
1868 
/* extract next protocol type from Ethernet & VLAN headers */
/*
 * Note: the macro reads both (_itm)->mask and (_itm)->spec and evaluates
 * its arguments more than once — do not pass expressions with side effects.
 * The result (_prt) is the ether type in host byte order (spec AND mask).
 */
#define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
	(_prt) = ((const struct _s *)(_itm)->mask)->_m;       \
	(_prt) &= ((const struct _s *)(_itm)->spec)->_m;      \
	(_prt) = rte_be_to_cpu_16((_prt));                    \
} while (0)
1875 
/* array of valid combinations of RX Hash fields for RSS */
/*
 * The order of entries matters: struct mlx5_shared_action_rss maps its
 * hrxq[] array to this table index-for-index.
 */
static const uint64_t mlx5_rss_hash_fields[] = {
	MLX5_RSS_HASH_IPV4,
	MLX5_RSS_HASH_IPV4_TCP,
	MLX5_RSS_HASH_IPV4_UDP,
	MLX5_RSS_HASH_IPV4_ESP,
	MLX5_RSS_HASH_IPV6,
	MLX5_RSS_HASH_IPV6_TCP,
	MLX5_RSS_HASH_IPV6_UDP,
	MLX5_RSS_HASH_IPV6_ESP,
	MLX5_RSS_HASH_ESP_SPI,
	MLX5_RSS_HASH_NONE,
};
1889 
/* Shared RSS action structure */
/* State shared by all flows referencing one indirect (shared) RSS action. */
struct mlx5_shared_action_rss {
	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	struct mlx5_ind_table_obj *ind_tbl;
	/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
	uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
	/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
	rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};
1902 
/*
 * Indirect action handle. The id encodes the action type in its upper bits
 * (see MLX5_INDIRECT_ACTION_TYPE_OFFSET / MLX5_INDIRECT_ACTION_TYPE_GET).
 */
struct rte_flow_action_handle {
	uint32_t id;
};
1906 
/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
	/* If creating another flow in same thread, push new as stack. */
	struct mlx5_flow_workspace *prev;
	struct mlx5_flow_workspace *next;
	/* NOTE(review): appears to chain workspaces for deferred release — confirm. */
	struct mlx5_flow_workspace *gc;
	uint32_t inuse; /* can't create new flow with current. */
	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; /* Intermediate device flows. */
	struct mlx5_flow_rss_desc rss_desc; /* RSS descriptor of the flow being built. */
	uint32_t flow_idx; /* Intermediate device flow index. */
	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
	struct mlx5_flow_meter_policy *policy;
	/* The meter policy used by meter in flow. */
	struct mlx5_flow_meter_policy *final_policy;
	/* The final policy when meter policy is hierarchy. */
#ifdef HAVE_MLX5_HWS_SUPPORT
	struct rte_flow_template_table *table; /* Template table being operated on. */
#endif
	uint32_t skip_matcher_reg:1;
	/* Indicates if need to skip matcher register in translate. */
	uint32_t mark:1; /* Indicates if flow contains mark action. */
	uint32_t vport_meta_tag; /* Used for vport index match. */
};
1930 
/* Matcher translate type. */
/* SW/HS: SW steering vs HW steering; V/M: value vs mask buffer (per naming). */
enum MLX5_SET_MATCHER {
	MLX5_SET_MATCHER_SW_V = 1 << 0, /* SW steering, value. */
	MLX5_SET_MATCHER_SW_M = 1 << 1, /* SW steering, mask. */
	MLX5_SET_MATCHER_HS_V = 1 << 2, /* HW steering, value. */
	MLX5_SET_MATCHER_HS_M = 1 << 3, /* HW steering, mask. */
};

/* Convenience combinations of the flags above. */
#define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
#define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
#define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
#define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)
1943 
/* Flow matcher workspace intermediate data. */
/* Scratch state carried across a single matcher translation pass. */
struct mlx5_dv_matcher_workspace {
	uint8_t priority; /* Flow priority. */
	uint64_t last_item; /* Last item in pattern. */
	uint64_t item_flags; /* Flow item pattern flags. */
	uint64_t action_flags; /* Flow action flags. */
	bool external; /* External flow or not. */
	uint32_t vlan_tag:12; /* Flow item VLAN tag. */
	uint8_t next_protocol; /* Tunnel next protocol */
	uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
	uint32_t group; /* Flow group. */
	uint16_t udp_dport; /* Flow item UDP port. */
	const struct rte_flow_attr *attr; /* Flow attribute. */
	struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
	const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
	const struct rte_flow_item *gre_item; /* Flow GRE item. */
	/* Integrity items; presumably [outer, inner] — confirm against users. */
	const struct rte_flow_item *integrity_items[2];
};
1962 
/* Parameters describing a subflow when a flow is split into prefix/suffix. */
struct mlx5_flow_split_info {
	uint32_t external:1;
	/**< True if flow is created by request external to PMD. */
	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
	uint32_t flow_idx; /**< This memory pool index to the flow. */
	uint32_t table_id; /**< Flow table identifier. */
	uint64_t prefix_layers; /**< Prefix subflow layers. */
};
1972 
/* One part (prefix or suffix) of a split HWS flow definition. */
struct mlx5_flow_hw_partial_resource {
	const struct rte_flow_attr *attr; /* Flow rule attributes. */
	const struct rte_flow_item *items; /* Pattern items. */
	const struct rte_flow_action *actions; /* Actions list. */
};
1978 
/* Prefix/suffix pieces produced by splitting an HWS flow. */
struct mlx5_flow_hw_split_resource {
	struct mlx5_flow_hw_partial_resource prefix;
	struct mlx5_flow_hw_partial_resource suffix;
	void *buf_start; /* start address of continuous buffer. */
	uint32_t flow_idx; /* This memory pool index to the flow. */
};
1985 
/* Header-layout location of a field: DW offset plus bit mask inside the DW. */
struct mlx5_hl_data {
	uint8_t dw_offset; /* DW offset within the header layout. */
	uint32_t dw_mask; /* Mask of the relevant bits in that DW. */
};
1990 
1991 extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
1992 
1993 /*
1994  * Get sqn for given tx_queue.
1995  * Used in HWS rule creation.
1996  */
1997 static __rte_always_inline int
1998 flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
1999 {
2000 	struct mlx5_txq_ctrl *txq;
2001 	struct mlx5_external_q *ext_txq;
2002 
2003 	/* Means Tx queue is PF0. */
2004 	if (tx_queue == UINT16_MAX) {
2005 		*sqn = 0;
2006 		return 0;
2007 	}
2008 	if (mlx5_is_external_txq(dev, tx_queue)) {
2009 		ext_txq = mlx5_ext_txq_get(dev, tx_queue);
2010 		*sqn = ext_txq->hw_id;
2011 		return 0;
2012 	}
2013 	txq = mlx5_txq_get(dev, tx_queue);
2014 	if (unlikely(!txq))
2015 		return -ENOENT;
2016 	*sqn = mlx5_txq_get_sqn(txq);
2017 	mlx5_txq_release(dev, tx_queue);
2018 	return 0;
2019 }
2020 
2021 /*
2022  * Convert sqn for given rte_eth_dev port.
2023  * Used in HWS rule creation.
2024  */
2025 static __rte_always_inline int
2026 flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
2027 {
2028 	if (port_id >= RTE_MAX_ETHPORTS)
2029 		return -EINVAL;
2030 	return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
2031 }
2032 
2033 /*
2034  * Get given rte_eth_dev port_id.
2035  * Used in HWS rule creation.
2036  */
2037 static __rte_always_inline uint16_t
2038 flow_hw_get_port_id(void *dr_ctx)
2039 {
2040 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2041 	uint16_t port_id;
2042 
2043 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2044 		struct mlx5_priv *priv;
2045 
2046 		priv = rte_eth_devices[port_id].data->dev_private;
2047 		if (priv->dr_ctx == dr_ctx)
2048 			return port_id;
2049 	}
2050 #else
2051 	RTE_SET_USED(dr_ctx);
2052 #endif
2053 	return UINT16_MAX;
2054 }
2055 
2056 /*
2057  * Get given eswitch manager id.
2058  * Used in HWS match with port creation.
2059  */
2060 static __rte_always_inline const struct flow_hw_port_info *
2061 flow_hw_get_esw_mgr_id(void *dr_ctx)
2062 {
2063 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2064 	uint16_t port_id;
2065 
2066 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2067 		struct mlx5_priv *priv;
2068 
2069 		priv = rte_eth_devices[port_id].data->dev_private;
2070 		if (priv->dr_ctx == dr_ctx)
2071 			return &priv->sh->dev_cap.esw_info;
2072 	}
2073 #else
2074 	RTE_SET_USED(dr_ctx);
2075 #endif
2076 	return NULL;
2077 }
2078 
2079 /*
2080  * Get metadata match tag and mask for given rte_eth_dev port.
2081  * Used in HWS rule creation.
2082  */
2083 static __rte_always_inline const struct flow_hw_port_info *
2084 flow_hw_conv_port_id(void *ctx, const uint16_t port_id)
2085 {
2086 	struct flow_hw_port_info *port_info;
2087 
2088 	if (port_id == UINT16_MAX && ctx)
2089 		return flow_hw_get_esw_mgr_id(ctx);
2090 
2091 	if (port_id >= RTE_MAX_ETHPORTS)
2092 		return NULL;
2093 	port_info = &mlx5_flow_hw_port_infos[port_id];
2094 	return !!port_info->regc_mask ? port_info : NULL;
2095 }
2096 
2097 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2098 /*
2099  * Get metadata match tag and mask for the uplink port represented
2100  * by given IB context. Used in HWS context creation.
2101  */
2102 static __rte_always_inline const struct flow_hw_port_info *
2103 flow_hw_get_wire_port(struct ibv_context *ibctx)
2104 {
2105 	struct ibv_device *ibdev = ibctx->device;
2106 	uint16_t port_id;
2107 
2108 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2109 		const struct mlx5_priv *priv =
2110 				rte_eth_devices[port_id].data->dev_private;
2111 
2112 		if (priv && priv->master) {
2113 			struct ibv_context *port_ibctx = priv->sh->cdev->ctx;
2114 
2115 			if (port_ibctx->device == ibdev)
2116 				return flow_hw_conv_port_id(priv->dr_ctx, port_id);
2117 		}
2118 	}
2119 	return NULL;
2120 }
2121 #endif
2122 
/*
 * Resolve the register for (type, id) on @dev without a specific steering
 * domain (MLX5DR_TABLE_TYPE_MAX is passed). Returns REG_NON when built
 * without DV flow support.
 */
static __rte_always_inline int
flow_hw_get_reg_id(struct rte_eth_dev *dev,
		   enum rte_flow_item_type type, uint32_t id)
{
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	return flow_hw_get_reg_id_by_domain(dev, type,
					    MLX5DR_TABLE_TYPE_MAX, id);
#else
	RTE_SET_USED(dev);
	RTE_SET_USED(type);
	RTE_SET_USED(id);
	return REG_NON;
#endif
}
2137 
2138 static __rte_always_inline int
2139 flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val)
2140 {
2141 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2142 	uint32_t port;
2143 
2144 	MLX5_ETH_FOREACH_DEV(port, NULL) {
2145 		struct mlx5_priv *priv;
2146 		priv = rte_eth_devices[port].data->dev_private;
2147 
2148 		if (priv->dr_ctx == dr_ctx) {
2149 			*port_val = port;
2150 			return 0;
2151 		}
2152 	}
2153 #else
2154 	RTE_SET_USED(dr_ctx);
2155 	RTE_SET_USED(port_val);
2156 #endif
2157 	return -EINVAL;
2158 }
2159 
2160 /**
2161  * Get GENEVE TLV option FW information according type and class.
2162  *
2163  * @param[in] dr_ctx
2164  *   Pointer to HW steering DR context.
2165  * @param[in] type
2166  *   GENEVE TLV option type.
2167  * @param[in] class
2168  *   GENEVE TLV option class.
2169  * @param[out] hl_ok_bit
2170  *   Pointer to header layout structure describing OK bit FW information.
2171  * @param[out] num_of_dws
2172  *   Pointer to fill inside the size of 'hl_dws' array.
2173  * @param[out] hl_dws
2174  *   Pointer to header layout array describing data DWs FW information.
2175  * @param[out] ok_bit_on_class
2176  *   Pointer to an indicator whether OK bit includes class along with type.
2177  *
2178  * @return
2179  *   0 on success, negative errno otherwise and rte_errno is set.
2180  */
2181 int
2182 mlx5_get_geneve_hl_data(const void *dr_ctx, uint8_t type, uint16_t class,
2183 			struct mlx5_hl_data ** const hl_ok_bit,
2184 			uint8_t *num_of_dws,
2185 			struct mlx5_hl_data ** const hl_dws,
2186 			bool *ok_bit_on_class);
2187 
2188 /**
2189  * Get modify field ID for single DW inside configured GENEVE TLV option.
2190  *
2191  * @param[in] dr_ctx
2192  *   Pointer to HW steering DR context.
2193  * @param[in] type
2194  *   GENEVE TLV option type.
2195  * @param[in] class
2196  *   GENEVE TLV option class.
2197  * @param[in] dw_offset
2198  *   Offset of DW inside the option.
2199  *
2200  * @return
2201  *   Modify field ID on success, negative errno otherwise and rte_errno is set.
2202  */
2203 int
2204 mlx5_get_geneve_option_modify_field_id(const void *dr_ctx, uint8_t type,
2205 				       uint16_t class, uint8_t dw_offset);
2206 
/* Create a GENEVE TLV parser for @port_id from the given option list. */
void *
mlx5_geneve_tlv_parser_create(uint16_t port_id,
			      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
			      uint8_t nb_options);
/* Destroy a parser created by mlx5_geneve_tlv_parser_create(). */
int mlx5_geneve_tlv_parser_destroy(void *handle);
/* Validate a GENEVE option flow item against the configured parser. */
int mlx5_flow_geneve_tlv_option_validate(struct mlx5_priv *priv,
					 const struct rte_flow_item *geneve_opt,
					 struct rte_flow_error *error);
/* Get the modify-field ID for a GENEVE option field description. */
int mlx5_geneve_opt_modi_field_get(struct mlx5_priv *priv,
				   const struct rte_flow_field_data *data);

struct mlx5_geneve_tlv_options_mng;
/* Register/unregister GENEVE TLV option usage tracked by @mng. */
int mlx5_geneve_tlv_option_register(struct mlx5_priv *priv,
				    const struct rte_flow_item_geneve_opt *spec,
				    struct mlx5_geneve_tlv_options_mng *mng);
void mlx5_geneve_tlv_options_unregister(struct mlx5_priv *priv,
					struct mlx5_geneve_tlv_options_mng *mng);

/* Publish/clear per-port info in mlx5_flow_hw_port_infos[]. */
void flow_hw_set_port_info(struct rte_eth_dev *dev);
void flow_hw_clear_port_info(struct rte_eth_dev *dev);
/* Create/destroy the port's HWS vport action. */
int flow_hw_create_vport_action(struct rte_eth_dev *dev);
void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
/* One-time HW steering initialization for the port. */
int
flow_hw_init(struct rte_eth_dev *dev,
	     struct rte_flow_error *error);
2232 
/* Prototypes of per-driver flow operation callbacks. */
typedef uintptr_t (*mlx5_flow_list_create_t)(struct rte_eth_dev *dev,
					enum mlx5_flow_type type,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item items[],
					const struct rte_flow_action actions[],
					bool external,
					struct rte_flow_error *error);
typedef void (*mlx5_flow_list_destroy_t)(struct rte_eth_dev *dev,
					enum mlx5_flow_type type,
					uintptr_t flow_idx);
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item items[],
				    const struct rte_flow_action actions[],
				    bool external,
				    int hairpin,
				    struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
	 const struct rte_flow_item items[],
	 const struct rte_flow_action actions[], struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
				     struct mlx5_flow *dev_flow,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item items[],
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error);
typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
				 struct rte_flow_error *error);
typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
				   struct rte_flow *flow);
typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
				    struct rte_flow *flow);
typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 const struct rte_flow_action *actions,
				 void *data,
				 struct rte_flow_error *error);
/* Meter table management callbacks. */
typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
					struct mlx5_flow_meter_info *fm,
					uint32_t mtr_idx,
					uint8_t domain_bitmap);
typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm);
typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
typedef struct mlx5_flow_meter_sub_policy *
	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
		(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
		(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_info *fm,
		int32_t src_port,
		const struct rte_flow_item *item,
		struct rte_flow_error *error);
typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
	(struct rte_eth_dev *dev,
	struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
					    (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
						uint32_t mtr_idx);
/* Flow counter and aged-flow callbacks. */
typedef uint32_t (*mlx5_flow_counter_alloc_t)
				   (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
					 uint32_t cnt);
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
					 uint32_t cnt,
					 bool clear, uint64_t *pkts,
					 uint64_t *bytes, void **action);
typedef int (*mlx5_flow_get_aged_flows_t)
					(struct rte_eth_dev *dev,
					 void **context,
					 uint32_t nb_contexts,
					 struct rte_flow_error *error);
typedef int (*mlx5_flow_get_q_aged_flows_t)
					(struct rte_eth_dev *dev,
					 uint32_t queue_id,
					 void **context,
					 uint32_t nb_contexts,
					 struct rte_flow_error *error);
/* Indirect (shared) action callbacks. */
typedef int (*mlx5_flow_action_validate_t)
				(struct rte_eth_dev *dev,
				 const struct rte_flow_indir_action_conf *conf,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error);
typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
				(struct rte_eth_dev *dev,
				 const struct rte_flow_indir_action_conf *conf,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_destroy_t)
				(struct rte_eth_dev *dev,
				 struct rte_flow_action_handle *action,
				 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_update_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_handle *action,
			 const void *update,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_query_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_action_handle *action,
			 void *data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_query_update_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_handle *handle,
			 const void *update, void *data,
			 enum rte_flow_query_update_mode qu_mode,
			 struct rte_flow_error *error);
typedef struct rte_flow_action_list_handle *
(*mlx5_flow_action_list_handle_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *actions,
			 struct rte_flow_error *error);
typedef int
(*mlx5_flow_action_list_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_list_handle *handle,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
			(struct rte_eth_dev *dev,
			 uint32_t domains,
			 uint32_t flags);
/* Meter policy action/rule callbacks and flex item callbacks. */
typedef int (*mlx5_flow_validate_mtr_acts_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_action *actions[RTE_COLORS],
			 struct rte_flow_attr *attr,
			 bool *is_rss,
			 uint8_t *domain_bitmap,
			 uint8_t *policy_mode,
			 struct rte_mtr_error *error);
typedef int (*mlx5_flow_create_mtr_acts_t)
			(struct rte_eth_dev *dev,
		      struct mlx5_flow_meter_policy *mtr_policy,
		      const struct rte_flow_action *actions[RTE_COLORS],
		      struct rte_flow_attr *attr,
		      struct rte_mtr_error *error);
typedef void (*mlx5_flow_destroy_mtr_acts_t)
			(struct rte_eth_dev *dev,
		      struct mlx5_flow_meter_policy *mtr_policy);
typedef int (*mlx5_flow_create_policy_rules_t)
			(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy);
typedef void (*mlx5_flow_destroy_policy_rules_t)
			(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy);
typedef int (*mlx5_flow_create_def_policy_t)
			(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_destroy_def_policy_t)
			(struct rte_eth_dev *dev);
typedef int (*mlx5_flow_discover_priorities_t)
			(struct rte_eth_dev *dev,
			 const uint16_t *vprio, int vprio_n);
typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_conf *conf,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_item_release_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_handle *handle,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_item_update_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_handle *handle,
			 const struct rte_flow_item_flex_conf *conf,
			 struct rte_flow_error *error);
/* HW steering (template API) configuration and async flow callbacks. */
typedef int (*mlx5_flow_info_get_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_port_info *port_info,
			 struct rte_flow_queue_info *queue_info,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_port_configure_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_port_attr *port_attr,
			 uint16_t nb_queue,
			 const struct rte_flow_queue_attr *queue_attr[],
			 struct rte_flow_error *err);
typedef int (*mlx5_flow_pattern_validate_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_pattern_template_attr *attr,
			 const struct rte_flow_item items[],
			 uint64_t *item_flags,
			 struct rte_flow_error *error);
typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_pattern_template_attr *attr,
			 const struct rte_flow_item items[],
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_pattern_template_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_pattern_template *template,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_actions_validate_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_actions_template_attr *attr,
			 const struct rte_flow_action actions[],
			 const struct rte_flow_action masks[],
			 struct rte_flow_error *error);
typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_actions_template_attr *attr,
			 const struct rte_flow_action actions[],
			 const struct rte_flow_action masks[],
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_actions_template_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_actions_template *template,
			 struct rte_flow_error *error);
typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
		(struct rte_eth_dev *dev,
		 const struct rte_flow_template_table_attr *attr,
		 struct rte_flow_pattern_template *item_templates[],
		 uint8_t nb_item_templates,
		 struct rte_flow_actions_template *action_templates[],
		 uint8_t nb_action_templates,
		 struct rte_flow_error *error);
typedef int (*mlx5_flow_table_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_template_table *table,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_group_set_miss_actions_t)
			(struct rte_eth_dev *dev,
			 uint32_t group_id,
			 const struct rte_flow_group_attr *attr,
			 const struct rte_flow_action actions[],
			 struct rte_flow_error *error);
/* Enqueued (async) flow rule operations. */
typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_template_table *table,
			 const struct rte_flow_item items[],
			 uint8_t pattern_template_index,
			 const struct rte_flow_action actions[],
			 uint8_t action_template_index,
			 void *user_data,
			 struct rte_flow_error *error);
typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_template_table *table,
			 uint32_t rule_index,
			 const struct rte_flow_action actions[],
			 uint8_t action_template_index,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_flow_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow *flow,
			 const struct rte_flow_action actions[],
			 uint8_t action_template_index,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_flow_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow *flow,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_pull_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 struct rte_flow_op_result res[],
			 uint16_t n_res,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_push_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 struct rte_flow_error *error);
2510 
/* Async indirect action, hash calculation and table resize callbacks. */
typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *action,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 const void *update,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_action_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_handle *action_handle,
			 const void *update, void *data,
			 enum rte_flow_query_update_mode qu_mode,
			 void *user_data, struct rte_flow_error *error);
typedef int (*mlx5_flow_async_action_handle_query_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 void *user_data,
			 struct rte_flow_error *error);
typedef struct rte_flow_action_list_handle *
(*mlx5_flow_async_action_list_handle_create_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *actions,
			 void *user_data, struct rte_flow_error *error);
typedef int
(*mlx5_flow_async_action_list_handle_destroy_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_list_handle *action_handle,
			 void *user_data, struct rte_flow_error *error);
typedef int
(*mlx5_flow_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			struct rte_flow_error *error);
typedef int
(*mlx5_flow_async_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			const struct rte_flow_op_attr *attr,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			void *user_data, struct rte_flow_error *error);
typedef int
(*mlx5_flow_calc_table_hash_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_template_table *table,
			 const struct rte_flow_item pattern[],
			 uint8_t pattern_template_index,
			 uint32_t *hash, struct rte_flow_error *error);
typedef int
(*mlx5_flow_calc_encap_hash_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item pattern[],
			 enum rte_flow_encap_hash_field dest_field,
			 uint8_t *hash,
			 struct rte_flow_error *error);
typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
				   struct rte_flow_template_table *table,
				   uint32_t nb_rules, struct rte_flow_error *error);
2596 typedef int (*mlx5_flow_update_resized_t)
2597 			(struct rte_eth_dev *dev, uint32_t queue,
2598 			 const struct rte_flow_op_attr *attr,
2599 			 struct rte_flow *rule, void *user_data,
2600 			 struct rte_flow_error *error);
2601 typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
2602 				       struct rte_flow_template_table *table,
2603 				       struct rte_flow_error *error);
2604 
/*
 * Table of callbacks implementing flow operations for one mlx5 flow
 * engine (e.g. DV or HW steering). Field order is load-bearing for
 * designated initializers elsewhere — do not reorder.
 */
struct mlx5_flow_driver_ops {
	/* Classic (synchronous) flow rule life cycle. */
	mlx5_flow_list_create_t list_create;
	mlx5_flow_list_destroy_t list_destroy;
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	/* Metering: tables, meter objects, policies. */
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
	mlx5_flow_mtr_alloc_t create_meter;
	mlx5_flow_mtr_free_t free_meter;
	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
	mlx5_flow_create_mtr_acts_t create_mtr_acts;
	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
	mlx5_flow_create_policy_rules_t create_policy_rules;
	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
	mlx5_flow_create_def_policy_t create_def_policy;
	mlx5_flow_destroy_def_policy_t destroy_def_policy;
	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
	/* Counters and flow aging. */
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
	mlx5_flow_get_aged_flows_t get_aged_flows;
	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
	/* Indirect (shared) actions, synchronous API. */
	mlx5_flow_action_validate_t action_validate;
	mlx5_flow_action_create_t action_create;
	mlx5_flow_action_destroy_t action_destroy;
	mlx5_flow_action_update_t action_update;
	mlx5_flow_action_query_t action_query;
	mlx5_flow_action_query_update_t action_query_update;
	mlx5_flow_action_list_handle_create_t action_list_handle_create;
	mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
	mlx5_flow_sync_domain_t sync_domain;
	mlx5_flow_discover_priorities_t discover_priorities;
	/* Indirect flow items (e.g. flex item). */
	mlx5_flow_item_create_t item_create;
	mlx5_flow_item_release_t item_release;
	mlx5_flow_item_update_t item_update;
	/* Template (HWS) API: port configuration and templates. */
	mlx5_flow_info_get_t info_get;
	mlx5_flow_port_configure_t configure;
	mlx5_flow_pattern_validate_t pattern_validate;
	mlx5_flow_pattern_template_create_t pattern_template_create;
	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
	mlx5_flow_actions_validate_t actions_validate;
	mlx5_flow_actions_template_create_t actions_template_create;
	mlx5_flow_actions_template_destroy_t actions_template_destroy;
	mlx5_flow_table_create_t template_table_create;
	mlx5_flow_table_destroy_t template_table_destroy;
	mlx5_flow_group_set_miss_actions_t group_set_miss_actions;
	/* Template API: asynchronous (queue-based) rule operations. */
	mlx5_flow_async_flow_create_t async_flow_create;
	mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
	mlx5_flow_async_flow_update_t async_flow_update;
	mlx5_flow_async_flow_destroy_t async_flow_destroy;
	mlx5_flow_pull_t pull;
	mlx5_flow_push_t push;
	/* Template API: asynchronous indirect action operations. */
	mlx5_flow_async_action_handle_create_t async_action_create;
	mlx5_flow_async_action_handle_update_t async_action_update;
	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
	mlx5_flow_async_action_handle_query_t async_action_query;
	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
	mlx5_flow_async_action_list_handle_create_t
		async_action_list_handle_create;
	mlx5_flow_async_action_list_handle_destroy_t
		async_action_list_handle_destroy;
	mlx5_flow_action_list_handle_query_update_t
		action_list_handle_query_update;
	mlx5_flow_async_action_list_handle_query_update_t
		async_action_list_handle_query_update;
	/* Hash calculation and template table resize. */
	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
	mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
	mlx5_table_resize_t table_resize;
	mlx5_flow_update_resized_t flow_update_resized;
	table_resize_complete_t table_resize_complete;
};
2684 
2685 /* mlx5_flow.c */
2686 
2687 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
2688 void mlx5_flow_pop_thread_workspace(void);
2689 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
2690 
/* Attributes steering flow group to table translation. */
__extension__
struct flow_grp_info {
	/* Flow originates from an application request — TODO confirm. */
	uint64_t external:1;
	/* Flow belongs to the transfer (E-Switch) domain. */
	uint64_t transfer:1;
	/* FDB default rule is in effect — NOTE(review): confirm semantics. */
	uint64_t fdb_def_rule:1;
	/* force standard group translation */
	uint64_t std_tbl_fix:1;
	/* Levels of group scaling to skip in mlx5_flow_group_to_table(). */
	uint64_t skip_scale:2;
};
2700 
2701 static inline bool
2702 tunnel_use_standard_attr_group_translate
2703 		    (const struct rte_eth_dev *dev,
2704 		     const struct rte_flow_attr *attr,
2705 		     const struct mlx5_flow_tunnel *tunnel,
2706 		     enum mlx5_tof_rule_type tof_rule_type)
2707 {
2708 	bool verdict;
2709 
2710 	if (!is_tunnel_offload_active(dev))
2711 		/* no tunnel offload API */
2712 		verdict = true;
2713 	else if (tunnel) {
2714 		/*
2715 		 * OvS will use jump to group 0 in tunnel steer rule.
2716 		 * If tunnel steer rule starts from group 0 (attr.group == 0)
2717 		 * that 0 group must be translated with standard method.
2718 		 * attr.group == 0 in tunnel match rule translated with tunnel
2719 		 * method
2720 		 */
2721 		verdict = !attr->group &&
2722 			  is_flow_tunnel_steer_rule(tof_rule_type);
2723 	} else {
2724 		/*
2725 		 * non-tunnel group translation uses standard method for
2726 		 * root group only: attr.group == 0
2727 		 */
2728 		verdict = !attr->group;
2729 	}
2730 
2731 	return verdict;
2732 }
2733 
/**
 * Get DV flow aso meter by index.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] idx
 *   mlx5 flow ASO meter index in the container. For pool-managed
 *   meters the index is decremented before use, i.e. external
 *   indices start at 1 — TODO confirm against allocators.
 *
 * @return
 *   Pointer to the ASO meter.
 */
static inline struct mlx5_aso_mtr *
mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
{
	struct mlx5_aso_mtr_pool *pool;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;

	/* Bulk-allocated meters are addressed directly, no pool lookup. */
	if (priv->mtr_bulk.aso)
		return priv->mtr_bulk.aso + idx;
	/* Decrease to original index. */
	idx--;
	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
	/* The pools array may be resized concurrently; read it under lock. */
	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
2764 
2765 static __rte_always_inline const struct rte_flow_item *
2766 mlx5_find_end_item(const struct rte_flow_item *item)
2767 {
2768 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
2769 	return item;
2770 }
2771 
2772 static __rte_always_inline bool
2773 mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
2774 {
2775 	struct rte_flow_item_integrity test = *item;
2776 	test.l3_ok = 0;
2777 	test.l4_ok = 0;
2778 	test.ipv4_csum_ok = 0;
2779 	test.l4_csum_ok = 0;
2780 	return (test.value == 0);
2781 }
2782 
/*
 * Get ASO CT action by device and index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Index to the ASO CT action. Decremented before use, i.e. external
 *   indices appear to start at 1 — TODO confirm against allocator.
 *
 * @return
 *   The specified ASO CT action pointer.
 */
static inline struct mlx5_aso_ct_action *
flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_pool *pool;

	/* Convert to the 0-based internal index. */
	idx--;
	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
	/* Bit operation AND could be used. */
	/* Pools array may be resized concurrently; read it under lock. */
	rte_rwlock_read_lock(&mng->resize_rwl);
	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
	rte_rwlock_read_unlock(&mng->resize_rwl);
	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
}
2809 
2810 /*
2811  * Get ASO CT action by owner & index.
2812  *
2813  * @param[in] dev
2814  *   Pointer to the Ethernet device structure.
2815  * @param[in] idx
2816  *   Index to the ASO CT action and owner port combination.
2817  *
2818  * @return
2819  *   The specified ASO CT action pointer.
2820  */
2821 static inline struct mlx5_aso_ct_action *
2822 flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
2823 {
2824 	struct mlx5_priv *priv = dev->data->dev_private;
2825 	struct mlx5_aso_ct_action *ct;
2826 	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
2827 	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
2828 
2829 	if (owner == PORT_ID(priv)) {
2830 		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
2831 	} else {
2832 		struct rte_eth_dev *owndev = &rte_eth_devices[owner];
2833 
2834 		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
2835 		if (dev->data->dev_started != 1)
2836 			return NULL;
2837 		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
2838 		if (ct->peer != PORT_ID(priv))
2839 			return NULL;
2840 	}
2841 	return ct;
2842 }
2843 
2844 static inline uint16_t
2845 mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
2846 {
2847 	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
2848 		return RTE_ETHER_TYPE_TEB;
2849 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2850 		return RTE_ETHER_TYPE_IPV4;
2851 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2852 		return RTE_ETHER_TYPE_IPV6;
2853 	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
2854 		return RTE_ETHER_TYPE_MPLS;
2855 	return 0;
2856 }
2857 
2858 int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2859 			 struct rte_flow_error *error);
2860 
2861 /*
2862  * Convert rte_mtr_color to mlx5 color.
2863  *
2864  * @param[in] rcol
2865  *   rte_mtr_color.
2866  *
2867  * @return
2868  *   mlx5 color.
2869  */
2870 static inline int
2871 rte_col_2_mlx5_col(enum rte_color rcol)
2872 {
2873 	switch (rcol) {
2874 	case RTE_COLOR_GREEN:
2875 		return MLX5_FLOW_COLOR_GREEN;
2876 	case RTE_COLOR_YELLOW:
2877 		return MLX5_FLOW_COLOR_YELLOW;
2878 	case RTE_COLOR_RED:
2879 		return MLX5_FLOW_COLOR_RED;
2880 	default:
2881 		break;
2882 	}
2883 	return MLX5_FLOW_COLOR_UNDEFINED;
2884 }
2885 
2886 /**
2887  * Indicates whether flow source vport is representor port.
2888  *
2889  * @param[in] priv
2890  *   Pointer to device private context structure.
2891  * @param[in] act_priv
2892  *   Pointer to actual device private context structure if have.
2893  *
2894  * @return
2895  *   True when the flow source vport is representor port, false otherwise.
2896  */
2897 static inline bool
2898 flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
2899 {
2900 	MLX5_ASSERT(priv);
2901 	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
2902 		 (act_priv->representor_id != UINT16_MAX));
2903 }
2904 
/* All types of Ethernet patterns used in control flow rules.
 * Values index the first dimension of mlx5_flow_hw_ctrl_rx::tables;
 * keep _MAX last.
 */
enum mlx5_flow_ctrl_rx_eth_pattern_type {
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
	/* _VLAN variants add a VLAN item to the base pattern. */
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
	/* Number of pattern types; array bound, not a valid pattern. */
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
};
2919 
/* All types of RSS actions used in control flow rules.
 * Values index mlx5_flow_hw_ctrl_rx::rss and the second dimension of
 * mlx5_flow_hw_ctrl_rx::tables; keep _MAX last.
 */
enum mlx5_flow_ctrl_rx_expanded_rss_type {
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP = 0,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP,
	/* Number of RSS types; array bound, not a valid RSS type. */
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
};
2931 
/**
 * Contains pattern template, template table and its attributes for a single
 * combination of Ethernet pattern and RSS action. Used to create control flow rules
 * with HWS.
 */
struct mlx5_flow_hw_ctrl_rx_table {
	/* Attributes used to create the template table. */
	struct rte_flow_template_table_attr attr;
	/* Pattern template for this Ethernet pattern. */
	struct rte_flow_pattern_template *pt;
	/* Template table for this pattern/RSS combination. */
	struct rte_flow_template_table *tbl;
};
2942 
/* Contains all templates required to create control flow rules with HWS. */
struct mlx5_flow_hw_ctrl_rx {
	/* One actions template per expanded RSS type. */
	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
	/* One table entry per (Ethernet pattern, RSS type) combination. */
	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
};
2949 
/* Contains all templates required for control flow rules in FDB with HWS. */
struct mlx5_flow_hw_ctrl_fdb {
	/* E-Switch SQ-miss rules: root and non-root tables. */
	struct rte_flow_pattern_template *esw_mgr_items_tmpl;
	struct rte_flow_actions_template *regc_jump_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
	struct rte_flow_pattern_template *regc_sq_items_tmpl;
	struct rte_flow_actions_template *port_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
	/* FDB group-zero jump rules. */
	struct rte_flow_pattern_template *port_items_tmpl;
	struct rte_flow_actions_template *jump_one_actions_tmpl;
	struct rte_flow_template_table *hw_esw_zero_tbl;
	/* Tx metadata copy rules. */
	struct rte_flow_pattern_template *tx_meta_items_tmpl;
	struct rte_flow_actions_template *tx_meta_actions_tmpl;
	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
	/* LACP Rx rules. */
	struct rte_flow_pattern_template *lacp_rx_items_tmpl;
	struct rte_flow_actions_template *lacp_rx_actions_tmpl;
	struct rte_flow_template_table *hw_lacp_rx_tbl;
};
2968 
/*
 * Flag bits selecting which classes of control flow rules to create;
 * presumably consumed via the @p flags argument of
 * mlx5_flow_hw_ctrl_flows() below — confirm at call sites.
 */
#define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
#define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
#define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
#define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
#define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
#define MLX5_CTRL_DMAC           (RTE_BIT32(5))
#define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))

/* Create control flow rules on the port according to MLX5_CTRL_* flags. */
int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
2978 
2979 /** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
2980 int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2981 
2982 /** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
2983 int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2984 
2985 /** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
2986 int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
2987 				      const struct rte_ether_addr *addr,
2988 				      const uint16_t vid);
2989 
2990 /** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
2991 int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
2992 				      const struct rte_ether_addr *addr,
2993 				      const uint16_t vid);
2994 
2995 /** Destroy a control flow rule registered on port level control flow rule type. */
2996 void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry);
2997 
2998 /** Create a control flow rule for matching unicast DMAC (HWS). */
2999 int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3000 
3001 /** Destroy a control flow rule for matching unicast DMAC (HWS). */
3002 int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3003 
3004 /** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
3005 int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
3006 				     const struct rte_ether_addr *addr,
3007 				     const uint16_t vlan);
3008 
3009 /** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */
3010 int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
3011 					     const struct rte_ether_addr *addr,
3012 					     const uint16_t vlan);
3013 
3014 void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
3015 
3016 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
3017 			     const struct mlx5_flow_tunnel *tunnel,
3018 			     uint32_t group, uint32_t *table,
3019 			     const struct flow_grp_info *flags,
3020 			     struct rte_flow_error *error);
3021 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
3022 				     int tunnel, uint64_t layer_types,
3023 				     uint64_t hash_fields);
3024 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
3025 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
3026 				   uint32_t subpriority);
3027 uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
3028 					const struct rte_flow_attr *attr);
3029 uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
3030 				   const struct rte_flow_attr *attr,
3031 				   uint32_t subpriority, bool external);
3032 uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
3033 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
3034 				     enum mlx5_feature_name feature,
3035 				     uint32_t id,
3036 				     struct rte_flow_error *error);
3037 const struct rte_flow_action *mlx5_flow_find_action
3038 					(const struct rte_flow_action *actions,
3039 					 enum rte_flow_action_type action);
3040 int mlx5_validate_action_rss(struct rte_eth_dev *dev,
3041 			     const struct rte_flow_action *action,
3042 			     struct rte_flow_error *error);
3043 
3044 struct mlx5_hw_encap_decap_action*
3045 mlx5_reformat_action_create(struct rte_eth_dev *dev,
3046 			    const struct rte_flow_indir_action_conf *conf,
3047 			    const struct rte_flow_action *encap_action,
3048 			    const struct rte_flow_action *decap_action,
3049 			    struct rte_flow_error *error);
3050 int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
3051 				 struct rte_flow_action_list_handle *handle,
3052 				 struct rte_flow_error *error);
3053 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
3054 				    const struct rte_flow_attr *attr,
3055 				    struct rte_flow_error *error);
3056 int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
3057 				   bool is_root,
3058 				   const struct rte_flow_attr *attr,
3059 				   struct rte_flow_error *error);
3060 int mlx5_flow_validate_action_flag(uint64_t action_flags,
3061 				   const struct rte_flow_attr *attr,
3062 				   struct rte_flow_error *error);
3063 int mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
3064 				   const struct rte_flow_action *action,
3065 				   uint64_t action_flags,
3066 				   const struct rte_flow_attr *attr,
3067 				   struct rte_flow_error *error);
3068 int mlx5_flow_validate_target_queue(struct rte_eth_dev *dev,
3069 				    const struct rte_flow_action *action,
3070 				    struct rte_flow_error *error);
3071 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
3072 				    uint64_t action_flags,
3073 				    struct rte_eth_dev *dev,
3074 				    const struct rte_flow_attr *attr,
3075 				    struct rte_flow_error *error);
3076 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
3077 				  uint64_t action_flags,
3078 				  struct rte_eth_dev *dev,
3079 				  const struct rte_flow_attr *attr,
3080 				  uint64_t item_flags,
3081 				  struct rte_flow_error *error);
3082 int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
3083 				const struct rte_flow_attr *attr,
3084 				struct rte_flow_error *error);
3085 int flow_validate_modify_field_level
3086 			(const struct rte_flow_field_data *data,
3087 			 struct rte_flow_error *error);
3088 int
3089 mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3090 				      uint64_t action_flags,
3091 				      const struct rte_flow_action *action,
3092 				      const struct rte_flow_attr *attr,
3093 				      struct rte_flow_error *error);
3094 int
3095 mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3096 				   uint64_t action_flags,
3097 				   const struct rte_flow_action *action,
3098 				   const uint64_t item_flags,
3099 				   const struct rte_flow_attr *attr,
3100 				   struct rte_flow_error *error);
3101 int
3102 mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3103 				    uint64_t action_flags,
3104 				    uint64_t item_flags,
3105 				    bool root,
3106 				    struct rte_flow_error *error);
3107 int
3108 mlx5_flow_dv_validate_action_raw_encap_decap
3109 	(struct rte_eth_dev *dev,
3110 	 const struct rte_flow_action_raw_decap *decap,
3111 	 const struct rte_flow_action_raw_encap *encap,
3112 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
3113 	 int *actions_n, const struct rte_flow_action *action,
3114 	 uint64_t item_flags, struct rte_flow_error *error);
3115 int mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
3116 			      const struct rte_flow_item *item,
3117 			      const uint8_t *mask,
3118 			      const uint8_t *nic_mask,
3119 			      unsigned int size,
3120 			      bool range_accepted,
3121 			      struct rte_flow_error *error);
3122 int mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev,
3123 				const struct rte_flow_item *item,
3124 				uint64_t item_flags, bool ext_vlan_sup,
3125 				struct rte_flow_error *error);
3126 int
3127 mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item,
3128 				uint64_t item_flags,
3129 				struct rte_eth_dev *dev,
3130 				struct rte_flow_error *error);
3131 int
3132 mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
3133 				const struct rte_flow_item *item,
3134 				uint64_t item_flags,
3135 				uint64_t last_item,
3136 				uint16_t ether_type,
3137 				const struct rte_flow_item_ipv4 *acc_mask,
3138 				struct rte_flow_error *error);
3139 int
3140 mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
3141 			       const struct rte_flow_item *item,
3142 			       uint64_t item_flags,
3143 			       struct rte_flow_error *error);
3144 int
3145 mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev,
3146 				   const struct rte_flow_item *item,
3147 				   uint64_t last_item,
3148 				   const struct rte_flow_item *gtp_item,
3149 				   bool root, struct rte_flow_error *error);
3150 int
3151 mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
3152 				  const struct rte_flow_item *item,
3153 				  uint64_t *item_flags,
3154 				  struct rte_flow_error *error);
3155 int mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
3156 				const struct rte_flow_item *item,
3157 				uint64_t item_flags,
3158 				uint8_t target_protocol,
3159 				struct rte_flow_error *error);
3160 int mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
3161 				    const struct rte_flow_item *item,
3162 				    uint64_t item_flags,
3163 				    const struct rte_flow_item *gre_item,
3164 				    struct rte_flow_error *error);
3165 int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
3166 				       const struct rte_flow_item *item,
3167 				       uint64_t item_flags,
3168 				       const struct rte_flow_attr *attr,
3169 				       const struct rte_flow_item *gre_item,
3170 				       struct rte_flow_error *error);
3171 int mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev,
3172 				 const struct rte_flow_item *item,
3173 				 uint64_t item_flags,
3174 				 uint64_t last_item,
3175 				 uint16_t ether_type,
3176 				 const struct rte_flow_item_ipv4 *acc_mask,
3177 				 bool range_accepted,
3178 				 struct rte_flow_error *error);
3179 int mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
3180 				 const struct rte_flow_item *item,
3181 				 uint64_t item_flags,
3182 				 uint64_t last_item,
3183 				 uint16_t ether_type,
3184 				 const struct rte_flow_item_ipv6 *acc_mask,
3185 				 struct rte_flow_error *error);
3186 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
3187 				 const struct rte_flow_item *item,
3188 				 uint64_t item_flags,
3189 				 uint64_t prev_layer,
3190 				 struct rte_flow_error *error);
3191 int mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev,
3192 				const struct rte_flow_item *item,
3193 				uint64_t item_flags,
3194 				uint8_t target_protocol,
3195 				const struct rte_flow_item_tcp *flow_mask,
3196 				struct rte_flow_error *error);
3197 int mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
3198 				const struct rte_flow_item *item,
3199 				uint64_t item_flags,
3200 				uint8_t target_protocol,
3201 				struct rte_flow_error *error);
3202 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
3203 				 uint64_t item_flags,
3204 				 struct rte_eth_dev *dev,
3205 				 struct rte_flow_error *error);
3206 int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
3207 				  uint16_t udp_dport,
3208 				  const struct rte_flow_item *item,
3209 				  uint64_t item_flags,
3210 				  bool root,
3211 				  struct rte_flow_error *error);
3212 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
3213 				      uint64_t item_flags,
3214 				      struct rte_eth_dev *dev,
3215 				      struct rte_flow_error *error);
3216 int mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev,
3217 				 const struct rte_flow_item *item,
3218 				 uint64_t item_flags,
3219 				 uint8_t target_protocol,
3220 				 struct rte_flow_error *error);
3221 int mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
3222 				  const struct rte_flow_item *item,
3223 				  uint64_t item_flags,
3224 				  uint8_t target_protocol,
3225 				  struct rte_flow_error *error);
3226 int mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
3227 				       const struct rte_flow_item *item,
3228 				       uint64_t item_flags,
3229 				       uint8_t target_protocol,
3230 				       struct rte_flow_error *error);
3231 int mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev,
3232 				  const struct rte_flow_item *item,
3233 				  uint64_t item_flags,
3234 				  uint8_t target_protocol,
3235 				  struct rte_flow_error *error);
3236 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
3237 				   uint64_t item_flags,
3238 				   struct rte_eth_dev *dev,
3239 				   struct rte_flow_error *error);
3240 int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
3241 				   uint64_t last_item,
3242 				   const struct rte_flow_item *geneve_item,
3243 				   struct rte_eth_dev *dev,
3244 				   struct rte_flow_error *error);
3245 int mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
3246 				  const struct rte_flow_item *item,
3247 				  uint64_t item_flags,
3248 				  uint64_t last_item,
3249 				  uint16_t ether_type,
3250 				  const struct rte_flow_item_ecpri *acc_mask,
3251 				  struct rte_flow_error *error);
3252 int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
3253 				const struct rte_flow_item *item,
3254 				struct rte_flow_error *error);
3255 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
3256 			      struct mlx5_flow_meter_info *fm,
3257 			      uint32_t mtr_idx,
3258 			      uint8_t domain_bitmap);
3259 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
3260 			       struct mlx5_flow_meter_info *fm);
3261 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
3262 struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
3263 		(struct rte_eth_dev *dev,
3264 		struct mlx5_flow_meter_policy *mtr_policy,
3265 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
3266 void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
3267 		struct mlx5_flow_meter_policy *mtr_policy);
3268 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
3269 int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
3270 int mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev);
3271 int mlx5_action_handle_attach(struct rte_eth_dev *dev);
3272 int mlx5_action_handle_detach(struct rte_eth_dev *dev);
3273 int mlx5_action_handle_flush(struct rte_eth_dev *dev);
3274 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
3275 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
3276 
3277 struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
3278 int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3279 			 void *cb_ctx);
3280 void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3281 struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
3282 					     struct mlx5_list_entry *oentry,
3283 					     void *entry_ctx);
3284 void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3285 struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3286 		uint32_t table_level, uint8_t egress, uint8_t transfer,
3287 		bool external, const struct mlx5_flow_tunnel *tunnel,
3288 		uint32_t group_id, uint8_t dummy,
3289 		uint32_t table_id, struct rte_flow_error *error);
3290 int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
3291 				 struct mlx5_flow_tbl_resource *tbl);
3292 
3293 struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
3294 int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3295 			 void *cb_ctx);
3296 void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3297 struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
3298 					     struct mlx5_list_entry *oentry,
3299 					     void *cb_ctx);
3300 void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3301 
3302 int flow_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3303 			    void *cb_ctx);
3304 struct mlx5_list_entry *flow_modify_create_cb(void *tool_ctx, void *ctx);
3305 void flow_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3306 struct mlx5_list_entry *flow_modify_clone_cb(void *tool_ctx,
3307 						struct mlx5_list_entry *oentry,
3308 						void *ctx);
3309 void flow_modify_clone_free_cb(void *tool_ctx,
3310 				  struct mlx5_list_entry *entry);
3311 
3312 struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
3313 int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3314 			  void *cb_ctx);
3315 void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3316 struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
3317 					      struct mlx5_list_entry *entry,
3318 					      void *ctx);
3319 void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3320 
3321 int flow_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3322 				 void *cb_ctx);
3323 struct mlx5_list_entry *flow_encap_decap_create_cb(void *tool_ctx,
3324 						      void *cb_ctx);
3325 void flow_encap_decap_remove_cb(void *tool_ctx,
3326 				   struct mlx5_list_entry *entry);
3327 struct mlx5_list_entry *flow_encap_decap_clone_cb(void *tool_ctx,
3328 						  struct mlx5_list_entry *entry,
3329 						  void *cb_ctx);
3330 void flow_encap_decap_clone_free_cb(void *tool_ctx,
3331 				       struct mlx5_list_entry *entry);
3332 int __flow_encap_decap_resource_register
3333 			(struct rte_eth_dev *dev,
3334 			 struct mlx5_flow_dv_encap_decap_resource *resource,
3335 			 bool is_root,
3336 			 struct mlx5_flow_dv_encap_decap_resource **encap_decap,
3337 			 struct rte_flow_error *error);
3338 int __flow_modify_hdr_resource_register
3339 			(struct rte_eth_dev *dev,
3340 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
3341 			 struct mlx5_flow_dv_modify_hdr_resource **modify,
3342 			 struct rte_flow_error *error);
3343 int flow_encap_decap_resource_release(struct rte_eth_dev *dev,
3344 				     uint32_t encap_decap_idx);
3345 int flow_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3346 			     void *ctx);
3347 struct mlx5_list_entry *flow_matcher_create_cb(void *tool_ctx, void *ctx);
3348 void flow_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3349 struct mlx5_list_entry *flow_matcher_clone_cb(void *tool_ctx __rte_unused,
3350 			 struct mlx5_list_entry *entry, void *cb_ctx);
3351 void flow_matcher_clone_free_cb(void *tool_ctx __rte_unused,
3352 			     struct mlx5_list_entry *entry);
3353 int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3354 			     void *cb_ctx);
3355 struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
3356 void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3357 struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
3358 				struct mlx5_list_entry *entry, void *cb_ctx);
3359 void flow_dv_port_id_clone_free_cb(void *tool_ctx,
3360 				   struct mlx5_list_entry *entry);
3361 
3362 int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3363 			       void *cb_ctx);
3364 struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
3365 						    void *cb_ctx);
3366 void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3367 struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
3368 				 struct mlx5_list_entry *entry, void *cb_ctx);
3369 void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
3370 				     struct mlx5_list_entry *entry);
3371 
3372 int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3373 			    void *cb_ctx);
3374 struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
3375 void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3376 struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
3377 				 struct mlx5_list_entry *entry, void *cb_ctx);
3378 void flow_dv_sample_clone_free_cb(void *tool_ctx,
3379 				  struct mlx5_list_entry *entry);
3380 
3381 int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3382 				void *cb_ctx);
3383 struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
3384 						     void *cb_ctx);
3385 void flow_dv_dest_array_remove_cb(void *tool_ctx,
3386 				  struct mlx5_list_entry *entry);
3387 struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
3388 				   struct mlx5_list_entry *entry, void *cb_ctx);
3389 void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
3390 				      struct mlx5_list_entry *entry);
3391 void flow_dv_hashfields_set(uint64_t item_flags,
3392 			    struct mlx5_flow_rss_desc *rss_desc,
3393 			    uint64_t *hash_fields);
3394 void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
3395 					uint64_t *hash_field);
3396 uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
3397 					const uint64_t hash_fields);
3398 int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3399 		     const struct rte_flow_item items[],
3400 		     const struct rte_flow_action actions[],
3401 		     bool external, int hairpin, struct rte_flow_error *error);
3402 
3403 struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
3404 void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3405 int flow_hw_grp_match_cb(void *tool_ctx,
3406 			 struct mlx5_list_entry *entry,
3407 			 void *cb_ctx);
3408 struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
3409 					     struct mlx5_list_entry *oentry,
3410 					     void *cb_ctx);
3411 void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3412 
3413 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
3414 						    uint32_t age_idx);
3415 
3416 void flow_release_workspace(void *data);
3417 int mlx5_flow_os_init_workspace_once(void);
3418 void *mlx5_flow_os_get_specific_workspace(void);
3419 int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
3420 void mlx5_flow_os_release_workspace(void);
3421 uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
3422 void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
3423 int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
3424 			const struct rte_flow_action *actions[RTE_COLORS],
3425 			struct rte_flow_attr *attr,
3426 			bool *is_rss,
3427 			uint8_t *domain_bitmap,
3428 			uint8_t *policy_mode,
3429 			struct rte_mtr_error *error);
3430 void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
3431 		      struct mlx5_flow_meter_policy *mtr_policy);
3432 int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
3433 		      struct mlx5_flow_meter_policy *mtr_policy,
3434 		      const struct rte_flow_action *actions[RTE_COLORS],
3435 		      struct rte_flow_attr *attr,
3436 		      struct rte_mtr_error *error);
3437 int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
3438 			     struct mlx5_flow_meter_policy *mtr_policy);
3439 void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
3440 			     struct mlx5_flow_meter_policy *mtr_policy);
3441 int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
3442 void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
3443 void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
3444 		       struct mlx5_flow_handle *dev_handle);
3445 const struct mlx5_flow_tunnel *
3446 mlx5_get_tof(const struct rte_flow_item *items,
3447 	     const struct rte_flow_action *actions,
3448 	     enum mlx5_tof_rule_type *rule_type);
3449 void
3450 flow_hw_resource_release(struct rte_eth_dev *dev);
3451 int
3452 mlx5_geneve_tlv_options_destroy(struct mlx5_geneve_tlv_options *options,
3453 				struct mlx5_physical_device *phdev);
3454 int
3455 mlx5_geneve_tlv_options_check_busy(struct mlx5_priv *priv);
3456 void
3457 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
3458 int flow_dv_action_validate(struct rte_eth_dev *dev,
3459 			    const struct rte_flow_indir_action_conf *conf,
3460 			    const struct rte_flow_action *action,
3461 			    struct rte_flow_error *err);
3462 struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
3463 		      const struct rte_flow_indir_action_conf *conf,
3464 		      const struct rte_flow_action *action,
3465 		      struct rte_flow_error *err);
3466 int flow_dv_action_destroy(struct rte_eth_dev *dev,
3467 			   struct rte_flow_action_handle *handle,
3468 			   struct rte_flow_error *error);
3469 int flow_dv_action_update(struct rte_eth_dev *dev,
3470 			  struct rte_flow_action_handle *handle,
3471 			  const void *update,
3472 			  struct rte_flow_error *err);
3473 int flow_dv_action_query(struct rte_eth_dev *dev,
3474 			 const struct rte_flow_action_handle *handle,
3475 			 void *data,
3476 			 struct rte_flow_error *error);
3477 size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
3478 int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3479 			   size_t *size, struct rte_flow_error *error);
3480 void mlx5_flow_field_id_to_modify_info
3481 		(const struct rte_flow_field_data *data,
3482 		 struct field_modify_info *info, uint32_t *mask,
3483 		 uint32_t width, struct rte_eth_dev *dev,
3484 		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
3485 int flow_dv_convert_modify_action(struct rte_flow_item *item,
3486 			      struct field_modify_info *field,
3487 			      struct field_modify_info *dest,
3488 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
3489 			      uint32_t type, struct rte_flow_error *error);
3490 
3491 #define MLX5_PF_VPORT_ID 0
3492 #define MLX5_ECPF_VPORT_ID 0xFFFE
3493 
3494 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
3495 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
3496 				const struct rte_flow_item *item,
3497 				uint16_t *vport_id,
3498 				bool *all_ports,
3499 				struct rte_flow_error *error);
3500 
3501 int flow_dv_translate_items_hws(const struct rte_flow_item *items,
3502 				struct mlx5_flow_attr *attr, void *key,
3503 				uint32_t key_type, uint64_t *item_flags,
3504 				uint8_t *match_criteria,
3505 				struct rte_flow_error *error);
3506 
3507 int __flow_dv_translate_items_hws(const struct rte_flow_item *items,
3508 				struct mlx5_flow_attr *attr, void *key,
3509 				uint32_t key_type, uint64_t *item_flags,
3510 				uint8_t *match_criteria,
3511 				bool nt_flow,
3512 				struct rte_flow_error *error);
3513 
3514 int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
3515 				  uint16_t *proxy_port_id,
3516 				  struct rte_flow_error *error);
3517 int flow_null_get_aged_flows(struct rte_eth_dev *dev,
3518 		    void **context,
3519 		    uint32_t nb_contexts,
3520 		    struct rte_flow_error *error);
3521 uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
3522 void flow_null_counter_free(struct rte_eth_dev *dev,
3523 			uint32_t counter);
3524 int flow_null_counter_query(struct rte_eth_dev *dev,
3525 			uint32_t counter,
3526 			bool clear,
3527 		    uint64_t *pkts,
3528 			uint64_t *bytes,
3529 			void **action);
3530 
3531 int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
3532 
3533 int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
3534 					 uint32_t sqn, bool external);
3535 int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
3536 					  uint32_t sqn);
3537 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
3538 int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
3539 int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
3540 int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
3541 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
3542 		const struct rte_flow_actions_template_attr *attr,
3543 		const struct rte_flow_action actions[],
3544 		const struct rte_flow_action masks[],
3545 		struct rte_flow_error *error);
3546 int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
3547 		const struct rte_flow_pattern_template_attr *attr,
3548 		const struct rte_flow_item items[],
3549 		struct rte_flow_error *error);
3550 int flow_hw_table_update(struct rte_eth_dev *dev,
3551 			 struct rte_flow_error *error);
3552 int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
3553 			   enum rte_flow_field_id field, int inherit,
3554 			   const struct rte_flow_attr *attr,
3555 			   struct rte_flow_error *error);
3556 uintptr_t flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3557 				const struct rte_flow_attr *attr,
3558 				const struct rte_flow_item items[],
3559 				const struct rte_flow_action actions[],
3560 				bool external, struct rte_flow_error *error);
3561 void flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3562 				uintptr_t flow_idx);
3563 
3564 static __rte_always_inline int
3565 flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
3566 {
3567 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3568 	uint16_t port;
3569 
3570 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3571 		struct mlx5_priv *priv;
3572 		struct mlx5_hca_flex_attr *attr;
3573 		struct mlx5_devx_match_sample_info_query_attr *info;
3574 
3575 		priv = rte_eth_devices[port].data->dev_private;
3576 		attr = &priv->sh->cdev->config.hca_attr.flex;
3577 		if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) {
3578 			info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0];
3579 			if (priv->sh->srh_flex_parser.flex.mapnum)
3580 				return info->sample_dw_data * sizeof(uint32_t);
3581 			else
3582 				return UINT32_MAX;
3583 		}
3584 	}
3585 #endif
3586 	return UINT32_MAX;
3587 }
3588 
3589 static __rte_always_inline uint8_t
3590 flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
3591 {
3592 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3593 	uint16_t port;
3594 	struct mlx5_priv *priv;
3595 
3596 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3597 		priv = rte_eth_devices[port].data->dev_private;
3598 		if (priv->dr_ctx == dr_ctx)
3599 			return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
3600 	}
3601 #else
3602 	RTE_SET_USED(dr_ctx);
3603 #endif
3604 	return 0;
3605 }
3606 
3607 static __rte_always_inline uint16_t
3608 flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
3609 {
3610 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3611 	uint16_t port;
3612 	struct mlx5_priv *priv;
3613 	struct mlx5_flex_parser_devx *fp;
3614 
3615 	if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
3616 		return 0;
3617 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3618 		priv = rte_eth_devices[port].data->dev_private;
3619 		if (priv->dr_ctx == dr_ctx) {
3620 			fp = priv->sh->srh_flex_parser.flex.devx_fp;
3621 			return fp->sample_info[idx].modify_field_id;
3622 		}
3623 	}
3624 #else
3625 	RTE_SET_USED(dr_ctx);
3626 	RTE_SET_USED(idx);
3627 #endif
3628 	return 0;
3629 }
3630 void
3631 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
3632 #ifdef HAVE_MLX5_HWS_SUPPORT
3633 
3634 #define MLX5_REPR_STC_MEMORY_LOG 11
3635 
3636 struct mlx5_mirror;
3637 void
3638 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
3639 void
3640 mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
3641 			     struct mlx5_indirect_list *ptr);
3642 void
3643 mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
3644 			    struct mlx5_indirect_list *reformat);
3645 int
3646 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3647 		    const struct rte_flow_attr *attr,
3648 		    const struct rte_flow_item items[],
3649 		    const struct rte_flow_action actions[],
3650 		    uint64_t item_flags, uint64_t action_flags, bool external,
3651 		    struct rte_flow_hw **flow, struct rte_flow_error *error);
3652 void
3653 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow);
3654 void
3655 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3656 		     uintptr_t flow_idx);
3657 const struct rte_flow_action_rss *
3658 flow_nta_locate_rss(struct rte_eth_dev *dev,
3659 		    const struct rte_flow_action actions[],
3660 		    struct rte_flow_error *error);
3661 struct rte_flow_hw *
3662 flow_nta_handle_rss(struct rte_eth_dev *dev,
3663 		    const struct rte_flow_attr *attr,
3664 		    const struct rte_flow_item items[],
3665 		    const struct rte_flow_action actions[],
3666 		    const struct rte_flow_action_rss *rss_conf,
3667 		    uint64_t item_flags, uint64_t action_flags,
3668 		    bool external, enum mlx5_flow_type flow_type,
3669 		    struct rte_flow_error *error);
3670 
3671 extern const struct rte_flow_action_raw_decap empty_decap;
3672 extern const struct rte_flow_item_ipv6 nic_ipv6_mask;
3673 extern const struct rte_flow_item_tcp nic_tcp_mask;
3674 
3675 /* mlx5_nta_split.c */
3676 int
3677 mlx5_flow_nta_split_metadata(struct rte_eth_dev *dev,
3678 			     const struct rte_flow_attr *attr,
3679 			     const struct rte_flow_action actions[],
3680 			     const struct rte_flow_action *qrss,
3681 			     uint64_t action_flags,
3682 			     int actions_n,
3683 			     bool external,
3684 			     struct mlx5_flow_hw_split_resource *res,
3685 			     struct rte_flow_error *error);
3686 void
3687 mlx5_flow_nta_split_resource_free(struct rte_eth_dev *dev,
3688 				  struct mlx5_flow_hw_split_resource *res);
3689 struct mlx5_list_entry *
3690 flow_nta_mreg_create_cb(void *tool_ctx, void *cb_ctx);
3691 void
3692 flow_nta_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3693 void
3694 mlx5_flow_nta_del_copy_action(struct rte_eth_dev *dev, uint32_t idx);
3695 void
3696 mlx5_flow_nta_del_default_copy_action(struct rte_eth_dev *dev);
3697 int
3698 mlx5_flow_nta_add_default_copy_action(struct rte_eth_dev *dev,
3699 				      struct rte_flow_error *error);
3700 int
3701 mlx5_flow_nta_update_copy_table(struct rte_eth_dev *dev,
3702 				uint32_t *idx,
3703 				const struct rte_flow_action *mark,
3704 				uint64_t action_flags,
3705 				struct rte_flow_error *error);
3706 
3707 #endif
3708 #endif /* RTE_PMD_MLX5_FLOW_H_ */
3709