xref: /dpdk/drivers/net/mlx5/mlx5_flow.h (revision 37dda90ee15b7098bc48356868a87d34f727eecc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
7 
8 #include <stdalign.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 
13 #include <rte_alarm.h>
14 #include <rte_mtr.h>
15 
16 #include <mlx5_glue.h>
17 #include <mlx5_prm.h>
18 
19 #include "mlx5.h"
20 #include "rte_pmd_mlx5.h"
21 #include "hws/mlx5dr.h"
22 #include "mlx5_tx.h"
23 
24 /* E-Switch Manager port, used for rte_flow_item_port_id. */
25 #define MLX5_PORT_ESW_MGR UINT32_MAX
26 
27 /* E-Switch Manager port, used for rte_flow_item_ethdev. */
28 #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX
29 
/* Private rte flow items. */
enum mlx5_rte_flow_item_type {
	/*
	 * Start from INT_MIN so PMD-private item types can never collide
	 * with the public (non-negative) rte_flow_item_type values.
	 */
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ITEM_TYPE_TAG, /* Match on a metadata register. */
	MLX5_RTE_FLOW_ITEM_TYPE_SQ, /* Match on the source send queue. */
	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
	MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
};
38 
/* Private (internal) rte flow actions. */
enum mlx5_rte_flow_action_type {
	/* INT_MIN offset keeps these clear of public rte_flow_action_type. */
	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ACTION_TYPE_TAG, /* Write a metadata register. */
	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, /* Copy between metadata registers. */
	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
	MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	MLX5_RTE_FLOW_ACTION_TYPE_AGE,
	MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
	MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
	MLX5_RTE_FLOW_ACTION_TYPE_RSS,
	MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
};
53 
/* Private (internal) Field IDs for MODIFY_FIELD action. */
enum mlx5_rte_flow_field_id {
	/* INT_MIN offset keeps these clear of public rte_flow_field_id values. */
	MLX5_RTE_FLOW_FIELD_END = INT_MIN,
	MLX5_RTE_FLOW_FIELD_META_REG, /* Metadata register as MODIFY_FIELD target/source. */
};
59 
/*
 * Indirect action handles are plain 32-bit cookies: the action type
 * (enum mlx5_indirect_type) lives in the top bits (31:29) and the
 * per-type object index in the low 29 bits.
 */
#define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29

/* Extract the mlx5_indirect_type tag from an indirect action handle. */
#define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \
	(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)

/* Extract the object index (low 29 bits) from an indirect action handle. */
#define MLX5_INDIRECT_ACTION_IDX_GET(handle) \
	(((uint32_t)(uintptr_t)(handle)) & \
	 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
68 
/*
 * Type tag stored in bits 31:29 of an indirect action handle;
 * see MLX5_INDIRECT_ACTION_TYPE_GET()/MLX5_INDIRECT_ACTION_IDX_GET().
 * All values must fit into 3 bits.
 */
enum mlx5_indirect_type {
	MLX5_INDIRECT_ACTION_TYPE_RSS,
	MLX5_INDIRECT_ACTION_TYPE_AGE,
	MLX5_INDIRECT_ACTION_TYPE_COUNT,
	MLX5_INDIRECT_ACTION_TYPE_CT,
	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
};
77 
/* At most 16 ports are currently supported; up to 32M (2^25) CT actions each. */
#define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10

#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25
#define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)

/*
 * When SW steering flow engine is used, the CT action handles are encoded in a following way:
 * - bits 31:29 - type
 * - bits 28:25 - port index of the action owner
 * - bits 24:0 - action index
 *
 * Note: (index) is not masked here - the caller must keep it within 25 bits
 * or it will corrupt the owner/type fields.
 */
#define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
	((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
	 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
	  MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))

/* Recover the owner port index (bits 28:25) from a SWS CT handle. */
#define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
	(((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
	 MLX5_INDIRECT_ACT_CT_OWNER_MASK)

/* Recover the action index (bits 24:0) from a SWS CT handle. */
#define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
	((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))

/*
 * When HW steering flow engine is used, the CT action handles are encoded in a following way:
 * - bits 31:29 - type
 * - bits 28:0 - action index
 */
#define MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(index) \
	((struct rte_flow_action_handle *)(uintptr_t) \
	 ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (index)))
110 
/* Discriminator for indirect action list objects; see struct mlx5_indirect_list. */
enum mlx5_indirect_list_type {
	MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
	MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
	MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
	MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,
};
117 
118 /**
119  * Base type for indirect list type.
120  */
121 struct mlx5_indirect_list {
122 	/* Indirect list type. */
123 	enum mlx5_indirect_list_type type;
124 	/* Optional storage list entry */
125 	LIST_ENTRY(mlx5_indirect_list) entry;
126 };
127 
128 static __rte_always_inline void
129 mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
130 {
131 	LIST_HEAD(, mlx5_indirect_list) *h = head;
132 
133 	LIST_INSERT_HEAD(h, elem, entry);
134 }
135 
136 static __rte_always_inline void
137 mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
138 {
139 	if (elem->entry.le_prev)
140 		LIST_REMOVE(elem, entry);
141 }
142 
143 static __rte_always_inline enum mlx5_indirect_list_type
144 mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
145 {
146 	return ((const struct mlx5_indirect_list *)obj)->type;
147 }
148 
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
	enum modify_reg id; /* Metadata register to match. */
	uint32_t data; /* Value to match against. */
};

/* Modify selected register. */
struct mlx5_rte_flow_action_set_tag {
	enum modify_reg id; /* Metadata register to write. */
	uint8_t offset; /* Bit offset within the register. */
	uint8_t length; /* Number of bits to write. */
	uint32_t data; /* Value to set. */
};

/* Copy one metadata register into another. */
struct mlx5_flow_action_copy_mreg {
	enum modify_reg dst;
	enum modify_reg src;
};

/* Matches on source queue. */
struct mlx5_rte_flow_item_sq {
	uint32_t queue; /* DevX SQ number */
#ifdef RTE_ARCH_64
	/* NOTE(review): presumably pads the item to 8 bytes on 64-bit - confirm. */
	uint32_t reserved;
#endif
};
175 
176 /* Map from registers to modify fields. */
177 extern enum mlx5_modification_field reg_to_field[];
178 extern const size_t mlx5_mod_reg_size;
179 
180 static __rte_always_inline enum mlx5_modification_field
181 mlx5_convert_reg_to_field(enum modify_reg reg)
182 {
183 	MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size);
184 	return reg_to_field[reg];
185 }
186 
/*
 * Feature name to allocate metadata register.
 * Identifies which feature is requesting a metadata register so the
 * allocator can map features onto the available registers.
 */
enum mlx5_feature_name {
	MLX5_HAIRPIN_RX,
	MLX5_HAIRPIN_TX,
	MLX5_METADATA_RX,
	MLX5_METADATA_TX,
	MLX5_METADATA_FDB,
	MLX5_FLOW_MARK,
	MLX5_APP_TAG,
	MLX5_COPY_MARK,
	MLX5_MTR_COLOR,
	MLX5_MTR_ID,
	MLX5_ASO_FLOW_HIT,
	MLX5_ASO_CONNTRACK,
	MLX5_SAMPLE_ID,
};
203 
204 /* Default queue number. */
205 #define MLX5_RSSQ_DEFAULT_NUM 16
206 
207 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
208 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
209 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
210 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
211 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
212 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
213 
214 /* Pattern inner Layer bits. */
215 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
216 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
217 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
218 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
219 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
220 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
221 
222 /* Pattern tunnel Layer bits. */
223 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
224 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
225 #define MLX5_FLOW_LAYER_GRE (1u << 14)
226 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
227 /* List of tunnel Layer bits continued below. */
228 
229 /* General pattern items bits. */
230 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
231 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
232 #define MLX5_FLOW_ITEM_TAG (1u << 18)
233 #define MLX5_FLOW_ITEM_MARK (1u << 19)
234 
235 /* Pattern MISC bits. */
236 #define MLX5_FLOW_LAYER_ICMP (1u << 20)
237 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
238 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
239 
240 /* Pattern tunnel Layer bits (continued). */
241 #define MLX5_FLOW_LAYER_IPIP (1u << 23)
242 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
243 #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
244 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
245 
246 /* Queue items. */
247 #define MLX5_FLOW_ITEM_SQ (1u << 27)
248 
249 /* Pattern tunnel Layer bits (continued). */
250 #define MLX5_FLOW_LAYER_GTP (1u << 28)
251 
252 /* Pattern eCPRI Layer bit. */
253 #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
254 
255 /* IPv6 Fragment Extension Header bit. */
256 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
257 #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
258 
259 /* Pattern tunnel Layer bits (continued). */
260 #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
261 #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
262 
263 /* INTEGRITY item bits */
264 #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
265 #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
266 #define MLX5_FLOW_ITEM_INTEGRITY \
267 	(MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
268 
269 /* Conntrack item. */
270 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
271 
272 /* Flex item */
273 #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
274 #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
275 #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
276 
277 #define MLX5_FLOW_ITEM_FLEX \
278 	(MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX | \
279 	MLX5_FLOW_ITEM_FLEX_TUNNEL)
280 
281 /* ESP item */
282 #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
283 
284 /* Port Representor/Represented Port item */
285 #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
286 #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
287 
288 /* Meter color item */
289 #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
290 #define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 45)
291 
292 
293 /* IPv6 routing extension item */
294 #define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
295 #define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
296 
/*
 * Remaining item flag bits. Spelled with UINT64_C(1) (instead of the
 * mixed 1ull used before) for consistency with the other 64-bit item
 * flags above; values are unchanged.
 */

/* Aggregated affinity item */
#define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49)

/* IB BTH ITEM */
#define MLX5_FLOW_ITEM_IB_BTH (UINT64_C(1) << 51)

/* PTYPE ITEM */
#define MLX5_FLOW_ITEM_PTYPE (UINT64_C(1) << 52)

/* NSH ITEM */
#define MLX5_FLOW_ITEM_NSH (UINT64_C(1) << 53)

/* COMPARE ITEM */
#define MLX5_FLOW_ITEM_COMPARE (UINT64_C(1) << 54)

/* Random ITEM */
#define MLX5_FLOW_ITEM_RANDOM (UINT64_C(1) << 55)
314 
315 /* Outer Masks. */
316 #define MLX5_FLOW_LAYER_OUTER_L3 \
317 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
318 #define MLX5_FLOW_LAYER_OUTER_L4 \
319 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
320 #define MLX5_FLOW_LAYER_OUTER \
321 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
322 	 MLX5_FLOW_LAYER_OUTER_L4)
323 
324 /* Tunnel Masks. */
325 #define MLX5_FLOW_LAYER_TUNNEL \
326 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
327 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
328 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
329 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
330 	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
331 
332 /* Inner Masks. */
333 #define MLX5_FLOW_LAYER_INNER_L3 \
334 	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
335 #define MLX5_FLOW_LAYER_INNER_L4 \
336 	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
337 #define MLX5_FLOW_LAYER_INNER \
338 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
339 	 MLX5_FLOW_LAYER_INNER_L4)
340 
341 /* Layer Masks. */
342 #define MLX5_FLOW_LAYER_L2 \
343 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
344 #define MLX5_FLOW_LAYER_L3_IPV4 \
345 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
346 #define MLX5_FLOW_LAYER_L3_IPV6 \
347 	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
348 #define MLX5_FLOW_LAYER_L3 \
349 	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
350 #define MLX5_FLOW_LAYER_L4 \
351 	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
352 
353 /* Actions */
354 #define MLX5_FLOW_ACTION_DROP (1ull << 0)
355 #define MLX5_FLOW_ACTION_QUEUE (1ull << 1)
356 #define MLX5_FLOW_ACTION_RSS (1ull << 2)
357 #define MLX5_FLOW_ACTION_FLAG (1ull << 3)
358 #define MLX5_FLOW_ACTION_MARK (1ull << 4)
359 #define MLX5_FLOW_ACTION_COUNT (1ull << 5)
360 #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6)
361 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7)
362 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8)
363 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9)
364 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10)
365 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11)
366 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12)
367 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13)
368 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14)
369 #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15)
370 #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16)
371 #define MLX5_FLOW_ACTION_JUMP (1ull << 17)
372 #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18)
373 #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19)
374 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20)
375 #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21)
376 #define MLX5_FLOW_ACTION_ENCAP (1ull << 22)
377 #define MLX5_FLOW_ACTION_DECAP (1ull << 23)
378 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24)
379 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25)
380 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26)
381 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27)
382 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
383 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
384 #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
385 #define MLX5_FLOW_ACTION_METER (1ull << 31)
386 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
387 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
388 #define MLX5_FLOW_ACTION_AGE (1ull << 34)
389 #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
390 #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
391 #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
392 #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
393 #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
394 #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
395 #define MLX5_FLOW_ACTION_CT (1ull << 41)
396 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
397 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
398 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
399 #define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
400 #define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47)
401 #define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48)
402 #define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49)
403 #define MLX5_FLOW_ACTION_NAT64 (1ull << 50)
404 #define MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX (1ull << 51)
405 
406 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
407 	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
408 
409 #define MLX5_FLOW_FATE_ACTIONS \
410 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
411 	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
412 	 MLX5_FLOW_ACTION_DEFAULT_MISS | \
413 	 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
414 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
415 	 MLX5_FLOW_ACTION_PORT_REPRESENTOR | \
416 	 MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX)
417 
418 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
419 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
420 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
421 	 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
422 	 MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX)
423 
424 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
425 				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
426 				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
427 				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
428 				      MLX5_FLOW_ACTION_SET_TP_SRC | \
429 				      MLX5_FLOW_ACTION_SET_TP_DST | \
430 				      MLX5_FLOW_ACTION_SET_TTL | \
431 				      MLX5_FLOW_ACTION_DEC_TTL | \
432 				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
433 				      MLX5_FLOW_ACTION_SET_MAC_DST | \
434 				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
435 				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
436 				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
437 				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
438 				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
439 				      MLX5_FLOW_ACTION_SET_TAG | \
440 				      MLX5_FLOW_ACTION_MARK_EXT | \
441 				      MLX5_FLOW_ACTION_SET_META | \
442 				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
443 				      MLX5_FLOW_ACTION_SET_IPV6_DSCP | \
444 				      MLX5_FLOW_ACTION_MODIFY_FIELD)
445 
446 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
447 				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
448 
449 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
450 
451 #ifndef IPPROTO_MPLS
452 #define IPPROTO_MPLS 137
453 #endif
454 
455 #define MLX5_IPV6_HDR_ECN_MASK 0x3
456 #define MLX5_IPV6_HDR_DSCP_SHIFT 2
457 
458 /* UDP port number for MPLS */
459 #define MLX5_UDP_PORT_MPLS 6635
460 
461 /* UDP port numbers for VxLAN. */
462 #define MLX5_UDP_PORT_VXLAN 4789
463 #define MLX5_UDP_PORT_VXLAN_GPE 4790
464 
465 /* UDP port numbers for RoCEv2. */
466 #define MLX5_UDP_PORT_ROCEv2 4791
467 
468 /* UDP port numbers for GENEVE. */
469 #define MLX5_UDP_PORT_GENEVE 6081
470 
471 /* Lowest priority indicator. */
472 #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
473 
474 /*
 * Max priority for ingress/egress flow groups
 * greater than 0 and for any transfer flow group.
 * From user configuration: 0 - 21843.
478  */
479 #define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)
480 
481 /*
482  * Number of sub priorities.
483  * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
 * matching on the NIC (firmware dependent) L4 must have the highest priority
485  * followed by L3 and ending with L2.
486  */
487 #define MLX5_PRIORITY_MAP_L2 2
488 #define MLX5_PRIORITY_MAP_L3 1
489 #define MLX5_PRIORITY_MAP_L4 0
490 #define MLX5_PRIORITY_MAP_MAX 3
491 
492 /* Valid layer type for IPV4 RSS. */
493 #define MLX5_IPV4_LAYER_TYPES \
494 	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
495 	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
496 	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
497 
498 /* Valid L4 RSS types */
499 #define MLX5_L4_RSS_TYPES (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
500 
501 /* IBV hash source bits  for IPV4. */
502 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
503 
504 /* Valid layer type for IPV6 RSS. */
505 #define MLX5_IPV6_LAYER_TYPES \
506 	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
507 	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
508 	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
509 
510 /* IBV hash source bits  for IPV6. */
511 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
512 
513 /* IBV hash bits for L3 SRC. */
514 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
515 
516 /* IBV hash bits for L3 DST. */
517 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
518 
519 /* IBV hash bits for TCP. */
520 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
521 			      IBV_RX_HASH_DST_PORT_TCP)
522 
523 /* IBV hash bits for UDP. */
524 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
525 			      IBV_RX_HASH_DST_PORT_UDP)
526 
527 /* IBV hash bits for L4 SRC. */
528 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
529 				 IBV_RX_HASH_SRC_PORT_UDP)
530 
531 /* IBV hash bits for L4 DST. */
532 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
533 				 IBV_RX_HASH_DST_PORT_UDP)
534 
535 /* Geneve header first 16Bit */
536 #define MLX5_GENEVE_VER_MASK 0x3
537 #define MLX5_GENEVE_VER_SHIFT 14
538 #define MLX5_GENEVE_VER_VAL(a) \
539 		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
540 #define MLX5_GENEVE_OPTLEN_MASK 0x3F
541 #define MLX5_GENEVE_OPTLEN_SHIFT 8
542 #define MLX5_GENEVE_OPTLEN_VAL(a) \
543 	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
544 #define MLX5_GENEVE_OAMF_MASK 0x1
545 #define MLX5_GENEVE_OAMF_SHIFT 7
546 #define MLX5_GENEVE_OAMF_VAL(a) \
547 		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
548 #define MLX5_GENEVE_CRITO_MASK 0x1
549 #define MLX5_GENEVE_CRITO_SHIFT 6
550 #define MLX5_GENEVE_CRITO_VAL(a) \
551 		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
552 #define MLX5_GENEVE_RSVD_MASK 0x3F
553 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
554 /*
555  * The length of the Geneve options fields, expressed in four byte multiples,
556  * not including the eight byte fixed tunnel.
557  */
558 #define MLX5_GENEVE_OPT_LEN_0 14
559 #define MLX5_GENEVE_OPT_LEN_1 63
560 
561 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
562 					  sizeof(struct rte_ipv4_hdr))
563 /* GTP extension header flag. */
564 #define MLX5_GTP_EXT_HEADER_FLAG 4
565 
566 /* GTP extension header PDU type shift. */
567 #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)
568 
569 /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
570 #define MLX5_IPV4_FRAG_OFFSET_MASK \
571 		(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)
572 
573 /* Specific item's fields can accept a range of values (using spec and last). */
574 #define MLX5_ITEM_RANGE_NOT_ACCEPTED	false
575 #define MLX5_ITEM_RANGE_ACCEPTED	true
576 
577 /* Software header modify action numbers of a flow. */
578 #define MLX5_ACT_NUM_MDF_IPV4		1
579 #define MLX5_ACT_NUM_MDF_IPV6		4
580 #define MLX5_ACT_NUM_MDF_MAC		2
581 #define MLX5_ACT_NUM_MDF_VID		1
582 #define MLX5_ACT_NUM_MDF_PORT		1
583 #define MLX5_ACT_NUM_MDF_TTL		1
584 #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
585 #define MLX5_ACT_NUM_MDF_TCPSEQ		1
586 #define MLX5_ACT_NUM_MDF_TCPACK		1
587 #define MLX5_ACT_NUM_SET_REG		1
588 #define MLX5_ACT_NUM_SET_TAG		1
589 #define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
590 #define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
591 #define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
592 #define MLX5_ACT_NUM_SET_DSCP		1
593 
594 /* Maximum number of fields to modify in MODIFY_FIELD */
595 #define MLX5_ACT_MAX_MOD_FIELDS 5
596 
597 /* Syndrome bits definition for connection tracking. */
598 #define MLX5_CT_SYNDROME_VALID		(0x0 << 6)
599 #define MLX5_CT_SYNDROME_INVALID	(0x1 << 6)
600 #define MLX5_CT_SYNDROME_TRAP		(0x2 << 6)
601 #define MLX5_CT_SYNDROME_STATE_CHANGE	(0x1 << 1)
602 #define MLX5_CT_SYNDROME_BAD_PACKET	(0x1 << 0)
603 
/* Flow driver (steering engine) type. */
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV, /* Direct Verbs (SW steering). */
	MLX5_FLOW_TYPE_VERBS,
	MLX5_FLOW_TYPE_HW, /* HW steering. */
	MLX5_FLOW_TYPE_MAX,
};

/* Fate action type. */
enum mlx5_flow_fate_type {
	MLX5_FLOW_FATE_NONE, /* Egress flow. */
	MLX5_FLOW_FATE_QUEUE,
	MLX5_FLOW_FATE_JUMP,
	MLX5_FLOW_FATE_PORT_ID,
	MLX5_FLOW_FATE_DROP,
	MLX5_FLOW_FATE_DEFAULT_MISS,
	MLX5_FLOW_FATE_SHARED_RSS,
	MLX5_FLOW_FATE_MTR,
	MLX5_FLOW_FATE_SEND_TO_KERNEL,
	/* Must stay last; stored in the 4-bit fate_action field of
	 * struct mlx5_flow_handle.
	 */
	MLX5_FLOW_FATE_MAX,
};
625 
/* Matcher PRM representation */
struct mlx5_flow_dv_match_params {
	size_t size;
	/**< Size of match value. Do NOT split size and key! */
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
	/**< Matcher value. This value is used as the mask or as a key. */
};

/* Matcher structure. */
struct mlx5_flow_dv_matcher {
	struct mlx5_list_entry entry; /**< Pointer to the next element. */
	/* Exactly one union member is valid, selected by the flow engine in use. */
	union {
		struct mlx5_flow_tbl_resource *tbl;
		/**< Pointer to the table(group) the matcher associated with for DV flow. */
		struct mlx5_flow_group *group;
		/* Group of this matcher for HWS non template flow. */
	};
	void *matcher_object; /**< Pointer to DV matcher */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};
648 
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
	struct mlx5_list_entry entry;
	/* Pointer to next element. */
	uint32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Encap/decap action object. */
	/* NOTE(review): buf/size presumably hold the raw reformat header
	 * and its valid length - confirm against users of this struct.
	 */
	uint8_t buf[MLX5_ENCAP_MAX_LEN];
	size_t size;
	uint8_t reformat_type;
	uint8_t ft_type;
	uint64_t flags; /**< Flags for RDMA API. */
	uint32_t idx; /**< Index for the index memory pool. */
};

/* Tag resource structure. */
struct mlx5_flow_dv_tag_resource {
	struct mlx5_list_entry entry;
	/**< hash list entry for tag resource, tag value as the key. */
	void *action;
	/**< Tag action object. */
	uint32_t refcnt; /**< Reference counter. */
	uint32_t idx; /**< Index for the index memory pool. */
	uint32_t tag_id; /**< Tag ID. */
};
674 
/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
	struct mlx5_list_entry entry;
	void *action; /**< Modify header action object. */
	uint32_t idx; /**< Index for the index memory pool. */
	uint64_t flags; /**< Flags for RDMA API(HWS only). */
	/* Key area for hash list matching: */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint8_t actions_num; /**< Number of modification actions. */
	bool root; /**< Whether action is in root table. */
	/* Flexible array member: allocation is sized per actions_num. */
	struct mlx5_modification_cmd actions[];
	/**< Modification actions. */
} __rte_packed;

/* Modify resource key of the hash organization. */
union mlx5_flow_modify_hdr_key {
	struct {
		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
		uint32_t actions_num:5;	/**< Number of modification actions. */
		uint32_t group:19;	/**< Flow group id. */
		uint32_t cksum;		/**< Actions check sum. */
	};
	uint64_t v64;			/**< full 64bits value of key */
};

/* Jump action resource structure. */
struct mlx5_flow_dv_jump_tbl_resource {
	void *action; /**< Pointer to the rdma core action. */
};

/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
	struct mlx5_list_entry entry;
	void *action; /**< Action object. */
	uint32_t port_id; /**< Port ID value. */
	uint32_t idx; /**< Indexed pool memory index. */
};
712 
/* Push VLAN action resource structure */
struct mlx5_flow_dv_push_vlan_action_resource {
	struct mlx5_list_entry entry; /* Cache entry. */
	void *action; /**< Action object. */
	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
	rte_be32_t vlan_tag; /**< VLAN tag value. */
	uint32_t idx; /**< Indexed pool memory index. */
};

/* Metadata register copy table entry. */
struct mlx5_flow_mreg_copy_resource {
	/*
	 * Hash list entry for copy table.
	 *  - Key is 32/64-bit MARK action ID.
	 *  - MUST be the first entry.
	 */
	struct mlx5_list_entry hlist_ent;
	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
	/* List entry for device flows. */
	uint32_t idx; /* Indexed pool memory index. */
	uint32_t mark_id; /* MARK action ID this entry copies for. */
	/* One member valid per flow engine: rix_flow (SWS) or hw_flow (HWS). */
	union {
		uint32_t rix_flow; /* Built flow for copy. */
		uintptr_t hw_flow;
	};
};

/* Table tunnel parameter. */
struct mlx5_flow_tbl_tunnel_prm {
	const struct mlx5_flow_tunnel *tunnel;
	uint32_t group_id;
	bool external;
};
746 
/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
	struct mlx5_list_entry entry;
	/**< hash list entry, 64-bits key inside. */
	struct mlx5_flow_tbl_resource tbl;
	/**< flow table resource. */
	struct mlx5_list *matchers;
	/**< matchers' header associated with the flow table. */
	struct mlx5_flow_dv_jump_tbl_resource jump;
	/**< jump resource, at most one for each table created. */
	uint32_t idx; /**< index for the indexed mempool. */
	/**< tunnel offload */
	const struct mlx5_flow_tunnel *tunnel;
	uint32_t group_id;
	/* The following bit-fields pack into a single 32-bit word:
	 * 5 flag bits + 22-bit id + 5 reserved bits.
	 */
	uint32_t external:1;
	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
	uint32_t is_egress:1; /**< Egress table. */
	uint32_t is_transfer:1; /**< Transfer table. */
	uint32_t dummy:1; /**<  DR table. */
	uint32_t id:22; /**< Table ID. */
	uint32_t reserve:5; /**< Reserved to future using. */
	uint32_t level; /**< Table level. */
};

/* Sub rdma-core actions list. */
struct mlx5_flow_sub_actions_list {
	uint32_t actions_num; /**< Number of sample actions. */
	uint64_t action_flags; /* MLX5_FLOW_ACTION_* bits for actions below. */
	void *dr_queue_action;
	void *dr_tag_action;
	void *dr_cnt_action;
	void *dr_port_id_action;
	void *dr_encap_action;
	void *dr_jump_action;
};

/* Sample sub-actions resource list. */
struct mlx5_flow_sub_actions_idx {
	uint32_t rix_hrxq; /**< Hash Rx queue object index. */
	uint32_t rix_tag; /**< Index to the tag action. */
	uint32_t rix_port_id_action; /**< Index to port ID action resource. */
	uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
	uint32_t rix_jump; /**< Index to the jump action resource. */
};
791 
/* Sample action resource structure. */
struct mlx5_flow_dv_sample_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	union {
		void *verbs_action; /**< Verbs sample action object. */
		void **sub_actions; /**< Sample sub-action array. */
	};
	struct rte_eth_dev *dev; /**< Device registers the action. */
	uint32_t idx; /**< Sample object index. */
	uint8_t ft_type; /**< Flow Table Type. */
	uint32_t ft_id; /**< Flow Table Level. */
	uint32_t ratio;   /**< Sample Ratio. */
	uint64_t set_action; /**< Restore reg_c0 value. */
	void *normal_path_tbl; /**< Flow Table pointer. */
	struct mlx5_flow_sub_actions_idx sample_idx;
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act;
	/**< Action resources. */
};

#define MLX5_MAX_DEST_NUM	2

/* Destination array action resource structure. */
struct mlx5_flow_dv_dest_array_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	uint32_t idx; /**< Destination array action object index. */
	uint8_t ft_type; /**< Flow Table Type. */
	uint8_t num_of_dest; /**< Number of destination actions. */
	struct rte_eth_dev *dev; /**< Device registers the action. */
	void *action; /**< Pointer to the rdma core action. */
	struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
	/**< Action resources. */
};
827 
/* PMD flow priority for tunnel */
#define MLX5_TUNNEL_PRIO_GET(rss_desc) \
	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)


/** Device flow handle structure for DV mode only. */
struct mlx5_flow_handle_dv {
	/* Flow DV api: */
	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/**< Pointer to modify header resource in cache. */
	uint32_t rix_encap_decap;
	/**< Index to encap/decap resource in cache. */
	uint32_t rix_push_vlan;
	/**< Index to push VLAN action resource in cache. */
	uint32_t rix_tag;
	/**< Index to the tag action. */
	uint32_t rix_sample;
	/**< Index to sample action resource in cache. */
	uint32_t rix_dest_array;
	/**< Index to destination array resource in cache. */
} __rte_packed;

/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
	SILIST_ENTRY(uint32_t)next;
	/**< Index to next device flow handle. */
	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	void *drv_flow; /**< pointer to driver flow object. */
	/* The three bit-fields below pack into one 32-bit word (27+1+4). */
	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
	uint32_t fate_action:4; /**< Fate action type, enum mlx5_flow_fate_type. */
	/* Valid union member is selected by fate_action. */
	union {
		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
		uint32_t rix_jump; /**< Index to the jump action resource. */
		uint32_t rix_port_id_action;
		/**< Index to port ID action resource. */
		uint32_t rix_fate;
		/**< Generic value indicates the fate action. */
		uint32_t rix_default_fate;
		/**< Indicates default miss fate action. */
		uint32_t rix_srss;
		/**< Indicates shared RSS fate action. */
	};
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	struct mlx5_flow_handle_dv dvh;
#endif
	uint8_t flex_item; /**< referenced Flex Item bitmask. */
} __rte_packed;
879 
/*
 * Size of the Verbs-only part of the device flow handle structure.
 * Do not use the DV-only structure (dvh) in Verbs mode - no DV flow
 * attributes will be accessed, so its size is subtracted.
 * Macro offsetof() could also be used here.
 */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
#define MLX5_FLOW_HANDLE_VERBS_SIZE \
	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
#else
#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
#endif
891 
/** Device flow structure only for DV flow creation. */
struct mlx5_flow_dv_workspace {
	uint32_t group; /**< The group index. */
	uint32_t table_id; /**< Flow table identifier. */
	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
	int actions_n; /**< Number of valid entries in actions[]. */
	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
	/**< Pointer to push VLAN action resource in cache. */
	struct mlx5_flow_dv_tag_resource *tag_resource;
	/**< Pointer to the tag action. */
	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
	/**< Pointer to port ID action resource. */
	struct mlx5_flow_dv_jump_tbl_resource *jump;
	/**< Pointer to the jump action resource. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_sample_resource *sample_res;
	/**< Pointer to the sample action resource. */
	struct mlx5_flow_dv_dest_array_resource *dest_array_res;
	/**< Pointer to the destination array resource. */
};
916 
917 #ifdef HAVE_INFINIBAND_VERBS_H
/*
 * Maximal Verbs flow specifications & actions size.
 * Some elements are mutually exclusive, but enough space should be allocated.
 * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
 *               2. One tunnel header (exception: GRE + MPLS),
 *                  SPEC length: GRE == tunnel.
 * Actions: 1. 1 Mark OR Flag.
 *          2. 1 Drop (if any).
 *          3. No limitation for counters, but it makes no sense to support too
 *             many counters in a single device flow.
 */
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
/* With MPLS support GRE and MPLS specs may coexist in one tunnel flow. */
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_gre) + \
			sizeof(struct ibv_flow_spec_mpls)) \
		)
#else
/* Without MPLS a single generic tunnel spec is enough. */
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_tunnel)) \
		)
#endif

#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
/* Tag + drop + up to 4 counter actions when counter sets are supported. */
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) + \
			sizeof(struct ibv_flow_spec_counter_action) * 4 \
		)
#else
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) \
		)
#endif

/* Total buffer size reserved for Verbs specs plus actions of one flow. */
#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
966 
/** Device flow structure only for Verbs flow creation. */
struct mlx5_flow_verbs_workspace {
	unsigned int size; /**< Size of the attribute. */
	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
	/**< Specifications & actions buffer of verbs flow
	 * (sized by MLX5_VERBS_MAX_SPEC_ACT_SIZE).
	 */
};
974 #endif /* HAVE_INFINIBAND_VERBS_H */
975 
976 #define MLX5_SCALE_FLOW_GROUP_BIT 0
977 #define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1
978 
979 /** Maximal number of device sub-flows supported. */
980 #define MLX5_NUM_MAX_DEV_FLOWS 64
981 
/**
 * Tunnel offload rule types.
 */
enum mlx5_tof_rule_type {
	MLX5_TUNNEL_OFFLOAD_NONE = 0, /**< Not a tunnel offload rule. */
	MLX5_TUNNEL_OFFLOAD_SET_RULE, /**< Tunnel set (steer) rule. */
	MLX5_TUNNEL_OFFLOAD_MATCH_RULE, /**< Tunnel match rule. */
	MLX5_TUNNEL_OFFLOAD_MISS_RULE, /**< Tunnel miss rule. */
};
991 
/** Device flow structure. */
__extension__
struct mlx5_flow {
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint32_t flow_idx; /**< The memory pool index to the main flow. */
	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
	uint64_t act_flags;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	bool external; /**< true if the flow is created external to PMD. */
	uint8_t ingress:1; /**< 1 if the flow is ingress. */
	uint8_t skip_scale:2;
	uint8_t symmetric_hash_function:1;
	/**
	 * skip_scale is a 2-bit map controlling flow-group scaling
	 * (bits indexed by MLX5_SCALE_FLOW_GROUP_BIT and
	 * MLX5_SCALE_JUMP_FLOW_GROUP_BIT):
	 * bit 0 set - skip scaling the original flow group;
	 * bit 1 set - skip scaling the jump flow group when a jump
	 * action is present.
	 * 00: scale both groups (default).
	 * 01: skip scaling the flow group, scale the jump action group.
	 * 10: scale the flow group, skip scaling the jump action group.
	 * 11: skip scaling both the flow group and the jump group.
	 */
	union {
		/* Per-driver translation workspace. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
		struct mlx5_flow_dv_workspace dv;
#endif
#ifdef HAVE_INFINIBAND_VERBS_H
		struct mlx5_flow_verbs_workspace verbs;
#endif
	};
	struct mlx5_flow_handle *handle; /**< Device flow handle. */
	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
	const struct mlx5_flow_tunnel *tunnel; /**< Tunnel context, if any. */
	enum mlx5_tof_rule_type tof_type; /**< Tunnel offload rule type. */
};
1030 
1031 /* Flow meter state. */
1032 #define MLX5_FLOW_METER_DISABLE 0
1033 #define MLX5_FLOW_METER_ENABLE 1
1034 
1035 #define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
1036 #define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
1037 
1038 #define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES
1039 
1040 #define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
	struct mlx5_flow_meter_info fm;
	/* Must be the first member in the struct. */
	TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
	/**< Link to the next flow meter structure. */
	uint32_t idx;
	/* Index to meter object. */
};
1050 
1051 #define MLX5_MAX_TUNNELS 256
1052 #define MLX5_TNL_MISS_RULE_PRIORITY 3
1053 #define MLX5_TNL_MISS_FDB_JUMP_GRP  0x1234faac
1054 
1055 /*
1056  * When tunnel offload is active, all JUMP group ids are converted
1057  * using the same method. That conversion is applied both to tunnel and
1058  * regular rule types.
 * Group ids used in tunnel rules are relative to its tunnel (!).
1060  * Application can create number of steer rules, using the same
1061  * tunnel, with different group id in each rule.
1062  * Each tunnel stores its groups internally in PMD tunnel object.
1063  * Groups used in regular rules do not belong to any tunnel and are stored
1064  * in tunnel hub.
1065  */
1066 
/* PMD tunnel offload object; see the group id conversion note above. */
struct mlx5_flow_tunnel {
	LIST_ENTRY(mlx5_flow_tunnel) chain;
	struct rte_flow_tunnel app_tunnel;	/** app tunnel copy */
	uint32_t tunnel_id;			/** unique tunnel ID */
	RTE_ATOMIC(uint32_t) refctn;		/** reference counter (sic) */
	struct rte_flow_action action;		/** cached tunnel action */
	struct rte_flow_item item;		/** cached tunnel item */
	struct mlx5_hlist *groups;		/** tunnel groups */
};
1076 
/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
	/* Tunnels list.
	 * Access to the list MUST be MT protected (taken under sl below).
	 */
	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
	/* Protects access to the tunnels list. */
	rte_spinlock_t sl;
	struct mlx5_hlist *groups;		/** non tunnel groups */
};
1087 
/* Convert jump group to flow table ID in tunnel rules. */
struct tunnel_tbl_entry {
	struct mlx5_list_entry hash; /* Hash list entry. */
	uint32_t flow_table; /* Flow table the group maps to. */
	uint32_t tunnel_id; /* Owning tunnel ID. */
	uint32_t group; /* Jump group ID. */
};
1095 
/* Encode a tunnel group id into a flow table id by setting bit 16. */
static inline uint32_t
tunnel_id_to_flow_tbl(uint32_t id)
{
	const uint32_t tunnel_tbl_bit = UINT32_C(1) << 16;

	return id | tunnel_tbl_bit;
}
1101 
/* Decode a tunnel flow table id back to its group id by clearing bit 16. */
static inline uint32_t
tunnel_flow_tbl_to_id(uint32_t flow_tbl)
{
	const uint32_t tunnel_tbl_bit = UINT32_C(1) << 16;

	return flow_tbl & ~tunnel_tbl_bit;
}
1107 
/* 64-bit hash key combining tunnel ID and group for group conversion. */
union tunnel_tbl_key {
	uint64_t val; /* Combined key. */
	struct {
		uint32_t tunnel_id; /* Tunnel ID. */
		uint32_t group; /* Group ID. */
	};
};
1115 
1116 static inline struct mlx5_flow_tunnel_hub *
1117 mlx5_tunnel_hub(struct rte_eth_dev *dev)
1118 {
1119 	struct mlx5_priv *priv = dev->data->dev_private;
1120 	return priv->sh->tunnel_hub;
1121 }
1122 
/*
 * Check whether tunnel offload is active on the device.
 * Always false when built without DV support.
 */
static inline bool
is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	const struct mlx5_priv *priv = dev->data->dev_private;
	return !!priv->sh->config.dv_miss_info;
#else
	RTE_SET_USED(dev);
	return false;
#endif
}
1134 
1135 static inline bool
1136 is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
1137 {
1138 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
1139 }
1140 
1141 static inline bool
1142 is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
1143 {
1144 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
1145 }
1146 
/*
 * Return the tunnel object carried by the first action.
 * NOTE(review): assumes actions[0] is the PMD-private tunnel action whose
 * conf points to a struct mlx5_flow_tunnel - confirm at call sites.
 */
static inline const struct mlx5_flow_tunnel *
flow_actions_to_tunnel(const struct rte_flow_action actions[])
{
	return actions[0].conf;
}
1152 
/*
 * Return the tunnel object carried by the first pattern item.
 * NOTE(review): assumes items[0] is the PMD-private tunnel item whose
 * spec points to a struct mlx5_flow_tunnel - confirm at call sites.
 */
static inline const struct mlx5_flow_tunnel *
flow_items_to_tunnel(const struct rte_flow_item items[])
{
	return items[0].spec;
}
1158 
1159 /**
1160  * Gets the tag array given for RTE_FLOW_FIELD_TAG type.
1161  *
1162  * In old API the value was provided in "level" field, but in new API
1163  * it is provided in "tag_array" field. Since encapsulation level is not
1164  * relevant for metadata, the tag array can be still provided in "level"
1165  * for backwards compatibility.
1166  *
1167  * @param[in] data
1168  *   Pointer to tag modify data structure.
1169  *
1170  * @return
1171  *   Tag array index.
1172  */
1173 static inline uint8_t
1174 flow_tag_index_get(const struct rte_flow_field_data *data)
1175 {
1176 	return data->tag_index ? data->tag_index : data->level;
1177 }
1178 
1179 /**
1180  * Fetch 1, 2, 3 or 4 byte field from the byte array
1181  * and return as unsigned integer in host-endian format.
1182  *
1183  * @param[in] data
1184  *   Pointer to data array.
1185  * @param[in] size
1186  *   Size of field to extract.
1187  *
1188  * @return
1189  *   converted field in host endian format.
1190  */
1191 static inline uint32_t
1192 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
1193 {
1194 	uint32_t ret;
1195 
1196 	switch (size) {
1197 	case 1:
1198 		ret = *data;
1199 		break;
1200 	case 2:
1201 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1202 		break;
1203 	case 3:
1204 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1205 		ret = (ret << 8) | *(data + sizeof(uint16_t));
1206 		break;
1207 	case 4:
1208 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
1209 		break;
1210 	default:
1211 		MLX5_ASSERT(false);
1212 		ret = 0;
1213 		break;
1214 	}
1215 	return ret;
1216 }
1217 
1218 static inline bool
1219 flow_modify_field_support_tag_array(enum rte_flow_field_id field)
1220 {
1221 	switch ((int)field) {
1222 	case RTE_FLOW_FIELD_TAG:
1223 	case RTE_FLOW_FIELD_MPLS:
1224 	case MLX5_RTE_FLOW_FIELD_META_REG:
1225 		return true;
1226 	default:
1227 		break;
1228 	}
1229 	return false;
1230 }
1231 
/* Description of one protocol header field for modify-field translation. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* PRM modification field id. */
	uint32_t shift; /* Field bit shift. */
	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
};
1239 
/* HW steering flow attributes. */
struct mlx5_flow_attr {
	uint32_t port_id; /* Port index. */
	uint32_t group; /* Flow group. */
	uint32_t priority; /* Original Priority. */
	/* RSS level, used by priority adjustment. */
	uint32_t rss_level;
	/* Action flags, used by priority adjustment. */
	uint32_t act_flags;
	uint32_t tbl_type; /* Flow table type. */
};
1251 
/* Flow structure. */
struct rte_flow {
	uint32_t dev_handles;
	/**< Device flow handles that are part of the flow. */
	uint32_t type:2; /**< Flow type. */
	uint32_t drv_type:2; /**< Driver type. */
	uint32_t tunnel:1; /**< 1 for tunnel offload flows. */
	uint32_t meter:24; /**< Holds flow meter id. */
	uint32_t indirect_type:2; /**< Indirect action type. */
	uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
	uint32_t rix_mreg_copy;
	/**< Index to metadata register copy table resource. */
	uint32_t counter; /**< Holds flow counter. */
	uint32_t tunnel_id;  /**< Tunnel id */
	union {
		uint32_t age; /**< Holds ASO age bit index. */
		uint32_t ct; /**< Holds ASO CT index. */
	};
	uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;
1272 
1273 /*
1274  * HWS COUNTER ID's layout
1275  *       3                   2                   1                   0
1276  *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
1277  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1278  *    |  T  |     | D |                                               |
1279  *    ~  Y  |     | C |                    IDX                        ~
1280  *    |  P  |     | S |                                               |
1281  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1282  *
1283  *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
1284  *    Bit 25:24 = DCS index
 *    Bit 23:00 = IDX within the DCS bulk this counter belongs to.
1286  */
1287 typedef uint32_t cnt_id_t;
1288 
1289 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1290 
/* Ongoing HWS flow operation types (see rte_flow_hw.operation_type). */
enum {
	MLX5_FLOW_HW_FLOW_OP_TYPE_NONE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE,
};
1300 
/* Flags marking which optional HWS flow fields are in use (rte_flow_hw.flags). */
enum {
	MLX5_FLOW_HW_FLOW_FLAG_CNT_ID = RTE_BIT32(0),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP = RTE_BIT32(1),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ = RTE_BIT32(2),
	MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX = RTE_BIT32(3),
	MLX5_FLOW_HW_FLOW_FLAG_MTR_ID = RTE_BIT32(4),
	MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR = RTE_BIT32(5),
	MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW = RTE_BIT32(6),
};
1310 
/* Union of all MLX5_FLOW_HW_FLOW_FLAG_* bits. */
#define MLX5_FLOW_HW_FLOW_FLAGS_ALL ( \
		MLX5_FLOW_HW_FLOW_FLAG_CNT_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ | \
		MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX | \
		MLX5_FLOW_HW_FLOW_FLAG_MTR_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR | \
		MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW \
	)
1320 
1321 #ifdef PEDANTIC
1322 #pragma GCC diagnostic ignored "-Wpedantic"
1323 #endif
1324 
1325 #define MLX5_DR_RULE_SIZE 72
1326 
1327 SLIST_HEAD(mlx5_nta_rss_flow_head, rte_flow_hw);
1328 
/** HWS non template flow data. */
struct rte_flow_nt2hws {
	/** BWC rule pointer. */
	struct mlx5dr_bwc_rule *nt_rule;
	/** The matcher for non template api. */
	struct mlx5_flow_dv_matcher *matcher;
	/** Auxiliary data stored per flow. */
	struct rte_flow_hw_aux *flow_aux;
	/** Modify header pointer. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/** Chain NTA flows. */
	SLIST_ENTRY(rte_flow_hw) next;
	/** Encap/decap index. */
	uint32_t rix_encap_decap;
	/** Metadata register copy table resource index. */
	uint32_t rix_mreg_copy;
	/** Presumably marks a chained flow (name keeps a historical typo). */
	uint8_t chaned_flow;
};
1346 
/** HWS flow struct. */
struct rte_flow_hw {
	union {
		/** The table the flow is allocated from. */
		struct rte_flow_template_table *table;
		/** Data needed for non template flows. */
		struct rte_flow_nt2hws *nt2hws;
	};
	/** Application's private data passed to enqueued flow operation. */
	void *user_data;
	union {
		/** Jump action. */
		struct mlx5_hw_jump_action *jump;
		/** TIR action. */
		struct mlx5_hrxq *hrxq;
	};
	/** Flow index from indexed pool. */
	uint32_t idx;
	/** Resource index from indexed pool. */
	uint32_t res_idx;
	/** HWS flow rule index passed to mlx5dr. */
	uint32_t rule_idx;
	/** Which flow fields (inline or in auxiliary struct) are used. */
	uint32_t flags;
	/** COUNT action index. */
	cnt_id_t cnt_id;
	/** Ongoing flow operation type. */
	uint8_t operation_type;
	/** Index of pattern template this flow is based on. */
	uint8_t mt_idx;
	/** Equals true if it is non template rule. */
	bool nt_rule;
	/**
	 * Padding for alignment to 56 bytes.
	 * Since mlx5dr rule is 72 bytes, whole flow is contained within 128 B (2 cache lines).
	 * This space is reserved for future additions to flow struct.
	 */
	uint8_t padding[9];
	/** HWS layer data struct. */
	uint8_t rule[];
};
1388 
/** Auxiliary per-flow data fields that may change on flow update. */
struct rte_flow_hw_aux_fields {
	/** AGE action index. */
	uint32_t age_idx;
	/** Direct meter (METER or METER_MARK) action index. */
	uint32_t mtr_id;
};
1396 
/** Auxiliary data stored per flow, kept out of the main flow structure. */
struct rte_flow_hw_aux {
	/** Auxiliary fields associated with the original flow. */
	struct rte_flow_hw_aux_fields orig;
	/** Auxiliary fields associated with the updated flow. */
	struct rte_flow_hw_aux_fields upd;
	/** Index of resizable matcher associated with this flow. */
	uint8_t matcher_selector;
	/** Placeholder flow struct used during flow rule update operation. */
	struct rte_flow_hw upd_flow;
};
1408 
1409 #ifdef PEDANTIC
1410 #pragma GCC diagnostic error "-Wpedantic"
1411 #endif
1412 
struct mlx5_action_construct_data;
/* Callback translating an indirect-list action into DR rule actions. */
typedef int
(*indirect_list_callback_t)(struct rte_eth_dev *,
			    const struct mlx5_action_construct_data *,
			    const struct rte_flow_action *,
			    struct mlx5dr_rule_action *);
1419 
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
	LIST_ENTRY(mlx5_action_construct_data) next;
	/* Ensure the action types are matched. */
	int type;
	uint32_t idx;  /* Data index. */
	uint16_t action_src; /* rte_flow_action src offset. */
	uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
	indirect_list_callback_t indirect_list_cb;
	/* Per-action-type private data. */
	union {
		struct {
			/* Expected type of indirection action. */
			enum rte_flow_action_type expected_type;
		} indirect;
		struct {
			/* encap data len. */
			uint16_t len;
		} encap;
		struct {
			/* Modify header action offset in pattern. */
			uint16_t mhdr_cmds_off;
			/* Offset in pattern after modify header actions. */
			uint16_t mhdr_cmds_end;
			/*
			 * True if this action is masked and does not need to
			 * be generated.
			 */
			bool shared;
			/*
			 * Modified field definitions in dst field (SET, ADD)
			 * or src field (COPY).
			 */
			struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
			/* Modified field definitions in dst field (COPY). */
			struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
			/*
			 * Masks applied to field values to generate
			 * PRM actions.
			 */
			uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
			/* Copy of action passed to the action template. */
			struct rte_flow_action_modify_field action;
		} modify_header;
		struct {
			bool symmetric_hash_function; /* Symmetric RSS hash */
			uint64_t types; /* RSS hash types. */
			uint32_t level; /* RSS level. */
			uint32_t idx; /* Shared action index. */
		} shared_rss;
		struct {
			cnt_id_t id; /* Shared counter id. */
		} shared_counter;
		struct {
			/* IPv6 extension push data len. */
			uint16_t len;
		} ipv6_ext;
		struct {
			uint32_t id; /* Shared meter id. */
			uint32_t conf_masked:1; /* Meter configuration is masked. */
		} shared_meter;
	};
};
1482 
1483 #define MAX_GENEVE_OPTIONS_RESOURCES 7
1484 
/* GENEVE TLV options manager structure. */
struct mlx5_geneve_tlv_options_mng {
	uint8_t nb_options; /* Number of options inside the template. */
	struct {
		uint8_t opt_type; /* GENEVE option type. */
		uint16_t opt_class; /* GENEVE option class. */
	} options[MAX_GENEVE_OPTIONS_RESOURCES];
};
1493 
/* Flow pattern (item) template struct. */
struct rte_flow_pattern_template {
	LIST_ENTRY(rte_flow_pattern_template) next;
	/* Template attributes. */
	struct rte_flow_pattern_template_attr attr;
	struct mlx5dr_match_template *mt; /* mlx5 match template. */
	uint64_t item_flags; /* Item layer flags. */
	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
	/*
	 * If true, then rule pattern should be prepended with
	 * represented_port pattern item.
	 */
	bool implicit_port;
	/*
	 * If true, then rule pattern should be prepended with
	 * tag pattern item for representor matching.
	 */
	bool implicit_tag;
	/* Manages all GENEVE TLV options used by this pattern template. */
	struct mlx5_geneve_tlv_options_mng geneve_opt_mng;
	uint8_t flex_item; /* flex item index. */
	/* Items on which this pattern template is based on. */
	struct rte_flow_item *items;
};
1519 
/* Flow action template struct. */
struct rte_flow_actions_template {
	LIST_ENTRY(rte_flow_actions_template) next;
	/* Template attributes. */
	struct rte_flow_actions_template_attr attr;
	struct rte_flow_action *actions; /* Cached flow actions. */
	struct rte_flow_action *orig_actions; /* Original flow actions. */
	struct rte_flow_action *masks; /* Cached action masks. */
	struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
	uint64_t action_flags; /* Bit-map of all valid actions in template. */
	uint16_t dr_actions_num; /* Amount of DR rules actions. */
	uint16_t actions_num; /* Amount of flow actions. */
	uint16_t *dr_off; /* DR action offset for given rte action offset. */
	uint16_t *src_off; /* RTE action displacement from app. template. */
	uint16_t reformat_off; /* Offset of DR reformat action. */
	uint16_t mhdr_off; /* Offset of DR modify header action. */
	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
	uint8_t flex_item; /* flex item index. */
};
1540 
/* Jump action struct: both root-table and HW-steering variants. */
struct mlx5_hw_jump_action {
	/* Action jump from root. */
	struct mlx5dr_action *root_action;
	/* HW steering jump action. */
	struct mlx5dr_action *hws_action;
};
1548 
/* Encap decap action struct. */
struct mlx5_hw_encap_decap_action {
	struct mlx5_indirect_list indirect; /* Indirect-list header. */
	enum mlx5dr_action_type action_type;
	struct mlx5dr_action *action; /* Action object. */
	/* Is header_reformat action shared across flows in table. */
	uint32_t shared:1;
	/* Uses the table multi-pattern context (mlx5_tbl_multi_pattern_ctx). */
	uint32_t multi_pattern:1;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data. */
};
1560 
/* Push remove action struct (IPv6 routing extension push/remove). */
struct mlx5_hw_push_remove_action {
	struct mlx5dr_action *action; /* Action object. */
	/* Is push_remove action shared across flows in table. */
	uint8_t shared;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data. */
};
1569 
/* Modify field action struct. */
struct mlx5_hw_modify_header_action {
	/* Reference to DR action. */
	struct mlx5dr_action *action;
	/* Modify header action position in action rule table. */
	uint16_t pos;
	/* Is MODIFY_HEADER action shared across flows in table. */
	uint32_t shared:1;
	/* Uses the table multi-pattern context (mlx5_tbl_multi_pattern_ctx). */
	uint32_t multi_pattern:1;
	/* Amount of modification commands stored in the precompiled buffer. */
	uint32_t mhdr_cmds_num;
	/* Precompiled modification commands. */
	struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
};
1584 
1585 /* The maximum actions support in the flow. */
1586 #define MLX5_HW_MAX_ACTS 16
1587 
/* DR action set struct: actions translated from one action template. */
struct mlx5_hw_actions {
	/* Dynamic action list. */
	LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
	struct mlx5_hw_jump_action *jump; /* Jump action. */
	struct mlx5_hrxq *tir; /* TIR action. */
	struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
	/* Encap/Decap action. */
	struct mlx5_hw_encap_decap_action *encap_decap;
	uint16_t encap_decap_pos; /* Encap/Decap action position. */
	/* Push/remove action. */
	struct mlx5_hw_push_remove_action *push_remove;
	uint16_t push_remove_pos; /* Push/remove action position. */
	uint32_t mark:1; /* Indicate the mark action. */
	cnt_id_t cnt_id; /* Counter id. */
	uint32_t mtr_id; /* Meter id. */
	/* Translated DR action array from action template. */
	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
};
1607 
/* mlx5 action template struct: a template with its translated actions. */
struct mlx5_hw_action_template {
	/* Action template pointer. */
	struct rte_flow_actions_template *action_template;
	struct mlx5_hw_actions acts; /* Template actions. */
};
1614 
/* mlx5 flow group struct. */
struct mlx5_flow_group {
	struct mlx5_list_entry entry; /* Cache list entry. */
	LIST_ENTRY(mlx5_flow_group) next;
	struct rte_eth_dev *dev; /* Reference to corresponding device. */
	struct mlx5dr_table *tbl; /* HWS table object. */
	struct mlx5_hw_jump_action jump; /* Jump action. */
	struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */
	enum mlx5dr_table_type type; /* Table type. */
	uint32_t group_id; /* Group id. */
	uint32_t idx; /* Group memory index. */
	/* List of all matchers created for this group in non template api. */
	struct mlx5_list *matchers;
};
1629 
1630 
1631 #define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 32
1632 #define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
1633 
1634 #define MLX5_MULTIPATTERN_ENCAP_NUM 5
1635 #define MLX5_MAX_TABLE_RESIZE_NUM 64
1636 
/* One pre-allocated segment of multi-pattern action resources. */
struct mlx5_multi_pattern_segment {
	/*
	 * Modify Header Argument Objects number allocated for action in that
	 * segment.
	 * Capacity is always power of 2.
	 */
	uint32_t capacity;
	/* Base index of the segment (see mlx5_multi_pattern_segment_find). */
	uint32_t head_index;
	struct mlx5dr_action *mhdr_action; /* Shared modify-header action. */
	struct mlx5dr_action *reformat_action[MLX5_MULTIPATTERN_ENCAP_NUM];
};
1648 
/* Per-table multi-pattern context of shared reformat/modify-header actions. */
struct mlx5_tbl_multi_pattern_ctx {
	struct {
		uint32_t elements_num;
		struct mlx5dr_action_reformat_header reformat_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
		/**
		 * insert_header structure is larger than reformat_header.
		 * Enclosing these structures in a union would cause a gap
		 * between reformat_hdr array elements.
		 * mlx5dr_action_create_reformat() expects adjacent array elements.
		 */
		struct mlx5dr_action_insert_header insert_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];

	struct {
		uint32_t elements_num;
		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} mh;
	struct mlx5_multi_pattern_segment segments[MLX5_MAX_TABLE_RESIZE_NUM];
};
1668 
/* Mark the multi-pattern context active by seeding the first segment. */
static __rte_always_inline void
mlx5_multi_pattern_activate(struct mlx5_tbl_multi_pattern_ctx *mpctx)
{
	mpctx->segments[0].head_index = 1;
}
1674 
/* Check whether the multi-pattern context has been activated. */
static __rte_always_inline bool
mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
{
	return mpctx->segments[0].head_index == 1;
}
1680 
/* Creation-time configuration of a template table. */
struct mlx5_flow_template_table_cfg {
	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
	bool external; /* True if created by flow API, false if table is internal to PMD. */
};
1685 
/* Matcher with its jump action; a resizable table keeps two entries. */
struct mlx5_matcher_info {
	struct mlx5dr_matcher *matcher; /* Template matcher. */
	struct mlx5dr_action *jump; /* Jump to matcher action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
1691 
/* Cache-aligned per-queue buffer of precalculated DR rule actions. */
struct __rte_cache_aligned mlx5_dr_rule_action_container {
	struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];
};
1695 
/* Template table: groups pattern/action templates and their matchers. */
struct rte_flow_template_table {
	LIST_ENTRY(rte_flow_template_table) next;
	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
	struct mlx5_matcher_info matcher_info[2];
	uint32_t matcher_selector; /* Index of the active entry in matcher_info. */
	rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables. */
	/* Item templates bind to the table. */
	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
	/* Action templates bind to the table. */
	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
	struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */
	struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
	struct mlx5_flow_template_table_cfg cfg;
	uint32_t type; /* Flow table type RX/TX/FDB. */
	uint8_t nb_item_templates; /* Item template number. */
	uint8_t nb_action_templates; /* Action template number. */
	uint32_t refcnt; /* Table reference counter. */
	struct mlx5_tbl_multi_pattern_ctx mpctx;
	struct mlx5dr_matcher_attr matcher_attr;
	/**
	 * Variable length array of containers containing precalculated templates of DR actions
	 * arrays. This array is allocated at template table creation time and contains
	 * one container per each queue, per each actions template.
	 * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair.
	 * Each container will provide a local "queue buffer" to work on for flow creation
	 * operations when using a given actions template.
	 */
	struct mlx5_dr_rule_action_container rule_acts[];
};
1726 
1727 static __rte_always_inline struct mlx5dr_matcher *
1728 mlx5_table_matcher(const struct rte_flow_template_table *table)
1729 {
1730 	return table->matcher_info[table->matcher_selector].matcher;
1731 }
1732 
1733 static __rte_always_inline struct mlx5_multi_pattern_segment *
1734 mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
1735 				uint32_t flow_resource_ix)
1736 {
1737 	int i;
1738 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
1739 
1740 	if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr)))
1741 		return &mpctx->segments[0];
1742 	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
1743 		uint32_t limit = mpctx->segments[i].head_index +
1744 				 mpctx->segments[i].capacity;
1745 
1746 		if (flow_resource_ix < limit)
1747 			return &mpctx->segments[i];
1748 	}
1749 	return NULL;
1750 }
1751 
1752 /*
1753  * Convert metadata or tag to the actual register.
1754  * META: Fixed C_1 for FDB mode, REG_A for NIC TX and REG_B for NIC RX.
1755  * TAG: C_x expect meter color reg and the reserved ones.
1756  */
1757 static __rte_always_inline int
1758 flow_hw_get_reg_id_by_domain(struct rte_eth_dev *dev,
1759 			     enum rte_flow_item_type type,
1760 			     enum mlx5dr_table_type domain_type, uint32_t id)
1761 {
1762 	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
1763 	struct mlx5_dev_registers *reg = &sh->registers;
1764 
1765 	switch (type) {
1766 	case RTE_FLOW_ITEM_TYPE_META:
1767 		if (sh->config.dv_esw_en &&
1768 		    sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
1769 			return REG_C_1;
1770 		}
1771 		/*
1772 		 * On root table - PMD allows only egress META matching, thus
1773 		 * REG_A matching is sufficient.
1774 		 *
1775 		 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
1776 		 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
1777 		 * However, current FW does not implement REG_B case right now, so
1778 		 * REG_B case is return explicitly by this function for NIC RX.
1779 		 */
1780 		if (domain_type == MLX5DR_TABLE_TYPE_NIC_RX)
1781 			return REG_B;
1782 		return REG_A;
1783 	case RTE_FLOW_ITEM_TYPE_CONNTRACK:
1784 	case RTE_FLOW_ITEM_TYPE_METER_COLOR:
1785 		return reg->aso_reg;
1786 	case RTE_FLOW_ITEM_TYPE_TAG:
1787 		if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1788 			return REG_C_3;
1789 		MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
1790 		return reg->hw_avl_tags[id];
1791 	default:
1792 		return REG_NON;
1793 	}
1794 }
1795 
1796 static __rte_always_inline int
1797 flow_hw_get_reg_id_from_ctx(void *dr_ctx, enum rte_flow_item_type type,
1798 			    enum mlx5dr_table_type domain_type, uint32_t id)
1799 {
1800 	uint16_t port;
1801 
1802 	MLX5_ETH_FOREACH_DEV(port, NULL) {
1803 		struct mlx5_priv *priv;
1804 
1805 		priv = rte_eth_devices[port].data->dev_private;
1806 		if (priv->dr_ctx == dr_ctx)
1807 			return flow_hw_get_reg_id_by_domain(&rte_eth_devices[port],
1808 							    type, domain_type, id);
1809 	}
1810 	return REG_NON;
1811 }
1812 
1813 #endif
1814 
1815 /*
1816  * Define list of valid combinations of RX Hash fields
1817  * (see enum ibv_rx_hash_fields).
1818  */
1819 #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
1820 #define MLX5_RSS_HASH_IPV4_TCP \
1821 	(MLX5_RSS_HASH_IPV4 | \
1822 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1823 #define MLX5_RSS_HASH_IPV4_UDP \
1824 	(MLX5_RSS_HASH_IPV4 | \
1825 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1826 #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
1827 #define MLX5_RSS_HASH_IPV6_TCP \
1828 	(MLX5_RSS_HASH_IPV6 | \
1829 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1830 #define MLX5_RSS_HASH_IPV6_UDP \
1831 	(MLX5_RSS_HASH_IPV6 | \
1832 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1833 #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
1834 #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
1835 #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
1836 #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
1837 #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
1838 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
1839 #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
1840 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
1841 #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
1842 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
1843 #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
1844 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
1845 #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
1846 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
1847 #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
1848 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
1849 #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
1850 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
1851 #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
1852 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
1853 
1854 #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
1855 #define IBV_RX_HASH_IPSEC_SPI (1U << 8)
1856 #endif
1857 
1858 #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
1859 #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
1860 				MLX5_RSS_HASH_ESP_SPI)
1861 #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
1862 				MLX5_RSS_HASH_ESP_SPI)
1863 #define MLX5_RSS_HASH_NONE 0ULL
1864 
1865 #define MLX5_RSS_IS_SYMM(func) \
1866 		(((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \
1867 		 ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT))
1868 
1869 /* extract next protocol type from Ethernet & VLAN headers */
1870 #define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
1871 	(_prt) = ((const struct _s *)(_itm)->mask)->_m;       \
1872 	(_prt) &= ((const struct _s *)(_itm)->spec)->_m;      \
1873 	(_prt) = rte_be_to_cpu_16((_prt));                    \
1874 } while (0)
1875 
/*
 * Array of valid combinations of RX Hash fields for RSS.
 * NOTE: entry order is significant - mlx5_shared_action_rss::hrxq[] is
 * indexed by position in this array; do not reorder entries.
 */
static const uint64_t mlx5_rss_hash_fields[] = {
	MLX5_RSS_HASH_IPV4,
	MLX5_RSS_HASH_IPV4_TCP,
	MLX5_RSS_HASH_IPV4_UDP,
	MLX5_RSS_HASH_IPV4_ESP,
	MLX5_RSS_HASH_IPV6,
	MLX5_RSS_HASH_IPV6_TCP,
	MLX5_RSS_HASH_IPV6_UDP,
	MLX5_RSS_HASH_IPV6_ESP,
	MLX5_RSS_HASH_ESP_SPI,
	MLX5_RSS_HASH_NONE,
};
1889 
/*
 * Shared (indirect) RSS action structure.
 * Reference-counted; instances are linked by index via ILIST_ENTRY.
 */
struct mlx5_shared_action_rss {
	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	struct mlx5_ind_table_obj *ind_tbl;
	/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
	uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
	/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields. */
	rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};
1902 
/*
 * PMD definition of the opaque indirect action handle.
 * The 32-bit id encodes the action type in its upper bits
 * (see MLX5_INDIRECT_ACTION_TYPE_OFFSET / MLX5_INDIRECT_ACTION_TYPE_GET).
 */
struct rte_flow_action_handle {
	uint32_t id;
};
1906 
/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
	/* If creating another flow in same thread, push new as stack. */
	struct mlx5_flow_workspace *prev;
	struct mlx5_flow_workspace *next;
	/* NOTE(review): looks like a to-be-released list link - confirm. */
	struct mlx5_flow_workspace *gc;
	uint32_t inuse; /* can't create new flow with current. */
	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
	struct mlx5_flow_rss_desc rss_desc; /* RSS descriptor. */
	uint32_t flow_idx; /* Intermediate device flow index. */
	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
	struct mlx5_flow_meter_policy *policy;
	/* The meter policy used by meter in flow. */
	struct mlx5_flow_meter_policy *final_policy;
	/* The final policy when meter policy is hierarchy. */
	uint32_t skip_matcher_reg:1;
	/* Indicates if need to skip matcher register in translate. */
	uint32_t mark:1; /* Indicates if flow contains mark action. */
	uint32_t vport_meta_tag; /* Used for vport index match. */
};
1927 
/* Matcher translate type. */
enum MLX5_SET_MATCHER {
	MLX5_SET_MATCHER_SW_V = 1 << 0,
	MLX5_SET_MATCHER_SW_M = 1 << 1,
	MLX5_SET_MATCHER_HS_V = 1 << 2,
	MLX5_SET_MATCHER_HS_M = 1 << 3,
};

/* Masks combining the flags by mode (SW/HS) and by part (V/M). */
#define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
#define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
#define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
#define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)
1940 
/* Flow matcher workspace intermediate data. */
struct mlx5_dv_matcher_workspace {
	uint8_t priority; /* Flow priority. */
	uint64_t last_item; /* Last item in pattern. */
	uint64_t item_flags; /* Flow item pattern flags. */
	uint64_t action_flags; /* Flow action flags. */
	bool external; /* External flow or not. */
	uint32_t vlan_tag:12; /* Flow item VLAN tag. */
	uint8_t next_protocol; /* Tunnel next protocol. */
	uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
	uint32_t group; /* Flow group. */
	uint16_t udp_dport; /* Flow item UDP port. */
	const struct rte_flow_attr *attr; /* Flow attribute. */
	struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
	const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
	const struct rte_flow_item *gre_item; /* Flow GRE item. */
	const struct rte_flow_item *integrity_items[2];
	/* Inner/outer integrity items. */
};
1959 
/* Context shared between subflows when a flow is split internally. */
struct mlx5_flow_split_info {
	uint32_t external:1;
	/**< True if flow is created by request external to PMD. */
	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
	uint32_t flow_idx; /**< This memory pool index to the flow. */
	uint32_t table_id; /**< Flow table identifier. */
	uint64_t prefix_layers; /**< Prefix subflow layers. */
};
1969 
/* One part (prefix or suffix) of a split HWS flow description. */
struct mlx5_flow_hw_partial_resource {
	const struct rte_flow_attr *attr;
	const struct rte_flow_item *items;
	const struct rte_flow_action *actions;
};
1975 
/* Resources produced when an HWS flow is split into prefix and suffix. */
struct mlx5_flow_hw_split_resource {
	struct mlx5_flow_hw_partial_resource prefix;
	struct mlx5_flow_hw_partial_resource suffix;
	void *buf_start; /* start address of continuous buffer. */
	uint32_t flow_idx; /* This memory pool index to the flow. */
};
1982 
/*
 * Location of a field in the FW header layout: a DW offset plus a mask
 * of the relevant bits inside that DW.
 * Used for GENEVE TLV option data (see mlx5_get_geneve_hl_data()).
 */
struct mlx5_hl_data {
	uint8_t dw_offset;
	uint32_t dw_mask;
};
1987 
1988 extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
1989 
1990 /*
1991  * Get sqn for given tx_queue.
1992  * Used in HWS rule creation.
1993  */
1994 static __rte_always_inline int
1995 flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
1996 {
1997 	struct mlx5_txq_ctrl *txq;
1998 	struct mlx5_external_q *ext_txq;
1999 
2000 	/* Means Tx queue is PF0. */
2001 	if (tx_queue == UINT16_MAX) {
2002 		*sqn = 0;
2003 		return 0;
2004 	}
2005 	if (mlx5_is_external_txq(dev, tx_queue)) {
2006 		ext_txq = mlx5_ext_txq_get(dev, tx_queue);
2007 		*sqn = ext_txq->hw_id;
2008 		return 0;
2009 	}
2010 	txq = mlx5_txq_get(dev, tx_queue);
2011 	if (unlikely(!txq))
2012 		return -ENOENT;
2013 	*sqn = mlx5_txq_get_sqn(txq);
2014 	mlx5_txq_release(dev, tx_queue);
2015 	return 0;
2016 }
2017 
2018 /*
2019  * Convert sqn for given rte_eth_dev port.
2020  * Used in HWS rule creation.
2021  */
2022 static __rte_always_inline int
2023 flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
2024 {
2025 	if (port_id >= RTE_MAX_ETHPORTS)
2026 		return -EINVAL;
2027 	return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
2028 }
2029 
2030 /*
2031  * Get given rte_eth_dev port_id.
2032  * Used in HWS rule creation.
2033  */
2034 static __rte_always_inline uint16_t
2035 flow_hw_get_port_id(void *dr_ctx)
2036 {
2037 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2038 	uint16_t port_id;
2039 
2040 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2041 		struct mlx5_priv *priv;
2042 
2043 		priv = rte_eth_devices[port_id].data->dev_private;
2044 		if (priv->dr_ctx == dr_ctx)
2045 			return port_id;
2046 	}
2047 #else
2048 	RTE_SET_USED(dr_ctx);
2049 #endif
2050 	return UINT16_MAX;
2051 }
2052 
2053 /*
2054  * Get given eswitch manager id.
2055  * Used in HWS match with port creation.
2056  */
2057 static __rte_always_inline const struct flow_hw_port_info *
2058 flow_hw_get_esw_mgr_id(void *dr_ctx)
2059 {
2060 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2061 	uint16_t port_id;
2062 
2063 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2064 		struct mlx5_priv *priv;
2065 
2066 		priv = rte_eth_devices[port_id].data->dev_private;
2067 		if (priv->dr_ctx == dr_ctx)
2068 			return &priv->sh->dev_cap.esw_info;
2069 	}
2070 #else
2071 	RTE_SET_USED(dr_ctx);
2072 #endif
2073 	return NULL;
2074 }
2075 
2076 /*
2077  * Get metadata match tag and mask for given rte_eth_dev port.
2078  * Used in HWS rule creation.
2079  */
2080 static __rte_always_inline const struct flow_hw_port_info *
2081 flow_hw_conv_port_id(void *ctx, const uint16_t port_id)
2082 {
2083 	struct flow_hw_port_info *port_info;
2084 
2085 	if (port_id == UINT16_MAX && ctx)
2086 		return flow_hw_get_esw_mgr_id(ctx);
2087 
2088 	if (port_id >= RTE_MAX_ETHPORTS)
2089 		return NULL;
2090 	port_info = &mlx5_flow_hw_port_infos[port_id];
2091 	return !!port_info->regc_mask ? port_info : NULL;
2092 }
2093 
2094 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2095 /*
2096  * Get metadata match tag and mask for the uplink port represented
2097  * by given IB context. Used in HWS context creation.
2098  */
2099 static __rte_always_inline const struct flow_hw_port_info *
2100 flow_hw_get_wire_port(struct ibv_context *ibctx)
2101 {
2102 	struct ibv_device *ibdev = ibctx->device;
2103 	uint16_t port_id;
2104 
2105 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2106 		const struct mlx5_priv *priv =
2107 				rte_eth_devices[port_id].data->dev_private;
2108 
2109 		if (priv && priv->master) {
2110 			struct ibv_context *port_ibctx = priv->sh->cdev->ctx;
2111 
2112 			if (port_ibctx->device == ibdev)
2113 				return flow_hw_conv_port_id(priv->dr_ctx, port_id);
2114 		}
2115 	}
2116 	return NULL;
2117 }
2118 #endif
2119 
/*
 * Resolve item type to a metadata register without a specific domain
 * (passes MLX5DR_TABLE_TYPE_MAX - presumably "no domain"; confirm).
 * Returns REG_NON when DV/HWS support is not compiled in.
 */
static __rte_always_inline int
flow_hw_get_reg_id(struct rte_eth_dev *dev,
		   enum rte_flow_item_type type, uint32_t id)
{
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	return flow_hw_get_reg_id_by_domain(dev, type,
					    MLX5DR_TABLE_TYPE_MAX, id);
#else
	RTE_SET_USED(dev);
	RTE_SET_USED(type);
	RTE_SET_USED(id);
	return REG_NON;
#endif
}
2134 
2135 static __rte_always_inline int
2136 flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val)
2137 {
2138 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2139 	uint32_t port;
2140 
2141 	MLX5_ETH_FOREACH_DEV(port, NULL) {
2142 		struct mlx5_priv *priv;
2143 		priv = rte_eth_devices[port].data->dev_private;
2144 
2145 		if (priv->dr_ctx == dr_ctx) {
2146 			*port_val = port;
2147 			return 0;
2148 		}
2149 	}
2150 #else
2151 	RTE_SET_USED(dr_ctx);
2152 	RTE_SET_USED(port_val);
2153 #endif
2154 	return -EINVAL;
2155 }
2156 
2157 /**
2158  * Get GENEVE TLV option FW information according type and class.
2159  *
2160  * @param[in] dr_ctx
2161  *   Pointer to HW steering DR context.
2162  * @param[in] type
2163  *   GENEVE TLV option type.
2164  * @param[in] class
2165  *   GENEVE TLV option class.
2166  * @param[out] hl_ok_bit
2167  *   Pointer to header layout structure describing OK bit FW information.
2168  * @param[out] num_of_dws
2169  *   Pointer to fill inside the size of 'hl_dws' array.
2170  * @param[out] hl_dws
2171  *   Pointer to header layout array describing data DWs FW information.
2172  * @param[out] ok_bit_on_class
2173  *   Pointer to an indicator whether OK bit includes class along with type.
2174  *
2175  * @return
2176  *   0 on success, negative errno otherwise and rte_errno is set.
2177  */
2178 int
2179 mlx5_get_geneve_hl_data(const void *dr_ctx, uint8_t type, uint16_t class,
2180 			struct mlx5_hl_data ** const hl_ok_bit,
2181 			uint8_t *num_of_dws,
2182 			struct mlx5_hl_data ** const hl_dws,
2183 			bool *ok_bit_on_class);
2184 
2185 /**
2186  * Get modify field ID for single DW inside configured GENEVE TLV option.
2187  *
2188  * @param[in] dr_ctx
2189  *   Pointer to HW steering DR context.
2190  * @param[in] type
2191  *   GENEVE TLV option type.
2192  * @param[in] class
2193  *   GENEVE TLV option class.
2194  * @param[in] dw_offset
2195  *   Offset of DW inside the option.
2196  *
2197  * @return
2198  *   Modify field ID on success, negative errno otherwise and rte_errno is set.
2199  */
2200 int
2201 mlx5_get_geneve_option_modify_field_id(const void *dr_ctx, uint8_t type,
2202 				       uint16_t class, uint8_t dw_offset);
2203 
2204 void *
2205 mlx5_geneve_tlv_parser_create(uint16_t port_id,
2206 			      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
2207 			      uint8_t nb_options);
2208 int mlx5_geneve_tlv_parser_destroy(void *handle);
2209 int mlx5_flow_geneve_tlv_option_validate(struct mlx5_priv *priv,
2210 					 const struct rte_flow_item *geneve_opt,
2211 					 struct rte_flow_error *error);
2212 int mlx5_geneve_opt_modi_field_get(struct mlx5_priv *priv,
2213 				   const struct rte_flow_field_data *data);
2214 
2215 struct mlx5_geneve_tlv_options_mng;
2216 int mlx5_geneve_tlv_option_register(struct mlx5_priv *priv,
2217 				    const struct rte_flow_item_geneve_opt *spec,
2218 				    struct mlx5_geneve_tlv_options_mng *mng);
2219 void mlx5_geneve_tlv_options_unregister(struct mlx5_priv *priv,
2220 					struct mlx5_geneve_tlv_options_mng *mng);
2221 
2222 void flow_hw_set_port_info(struct rte_eth_dev *dev);
2223 void flow_hw_clear_port_info(struct rte_eth_dev *dev);
2224 int flow_hw_create_vport_action(struct rte_eth_dev *dev);
2225 void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
2226 int
2227 flow_hw_init(struct rte_eth_dev *dev,
2228 	     struct rte_flow_error *error);
2229 
2230 typedef uintptr_t (*mlx5_flow_list_create_t)(struct rte_eth_dev *dev,
2231 					enum mlx5_flow_type type,
2232 					const struct rte_flow_attr *attr,
2233 					const struct rte_flow_item items[],
2234 					const struct rte_flow_action actions[],
2235 					bool external,
2236 					struct rte_flow_error *error);
2237 typedef void (*mlx5_flow_list_destroy_t)(struct rte_eth_dev *dev,
2238 					enum mlx5_flow_type type,
2239 					uintptr_t flow_idx);
2240 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
2241 				    const struct rte_flow_attr *attr,
2242 				    const struct rte_flow_item items[],
2243 				    const struct rte_flow_action actions[],
2244 				    bool external,
2245 				    int hairpin,
2246 				    struct rte_flow_error *error);
2247 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
2248 	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2249 	 const struct rte_flow_item items[],
2250 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
2251 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
2252 				     struct mlx5_flow *dev_flow,
2253 				     const struct rte_flow_attr *attr,
2254 				     const struct rte_flow_item items[],
2255 				     const struct rte_flow_action actions[],
2256 				     struct rte_flow_error *error);
2257 typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
2258 				 struct rte_flow_error *error);
2259 typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
2260 				   struct rte_flow *flow);
2261 typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
2262 				    struct rte_flow *flow);
2263 typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
2264 				 struct rte_flow *flow,
2265 				 const struct rte_flow_action *actions,
2266 				 void *data,
2267 				 struct rte_flow_error *error);
2268 typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
2269 					struct mlx5_flow_meter_info *fm,
2270 					uint32_t mtr_idx,
2271 					uint8_t domain_bitmap);
2272 typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
2273 				struct mlx5_flow_meter_info *fm);
2274 typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
2275 typedef struct mlx5_flow_meter_sub_policy *
2276 	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
2277 		(struct rte_eth_dev *dev,
2278 		struct mlx5_flow_meter_policy *mtr_policy,
2279 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
2280 typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
2281 		(struct rte_eth_dev *dev,
2282 		struct mlx5_flow_meter_info *fm,
2283 		int32_t src_port,
2284 		const struct rte_flow_item *item,
2285 		struct rte_flow_error *error);
2286 typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
2287 	(struct rte_eth_dev *dev,
2288 	struct mlx5_flow_meter_policy *mtr_policy);
2289 typedef uint32_t (*mlx5_flow_mtr_alloc_t)
2290 					    (struct rte_eth_dev *dev);
2291 typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
2292 						uint32_t mtr_idx);
2293 typedef uint32_t (*mlx5_flow_counter_alloc_t)
2294 				   (struct rte_eth_dev *dev);
2295 typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
2296 					 uint32_t cnt);
2297 typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
2298 					 uint32_t cnt,
2299 					 bool clear, uint64_t *pkts,
2300 					 uint64_t *bytes, void **action);
2301 typedef int (*mlx5_flow_get_aged_flows_t)
2302 					(struct rte_eth_dev *dev,
2303 					 void **context,
2304 					 uint32_t nb_contexts,
2305 					 struct rte_flow_error *error);
2306 typedef int (*mlx5_flow_get_q_aged_flows_t)
2307 					(struct rte_eth_dev *dev,
2308 					 uint32_t queue_id,
2309 					 void **context,
2310 					 uint32_t nb_contexts,
2311 					 struct rte_flow_error *error);
2312 typedef int (*mlx5_flow_action_validate_t)
2313 				(struct rte_eth_dev *dev,
2314 				 const struct rte_flow_indir_action_conf *conf,
2315 				 const struct rte_flow_action *action,
2316 				 struct rte_flow_error *error);
2317 typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
2318 				(struct rte_eth_dev *dev,
2319 				 const struct rte_flow_indir_action_conf *conf,
2320 				 const struct rte_flow_action *action,
2321 				 struct rte_flow_error *error);
2322 typedef int (*mlx5_flow_action_destroy_t)
2323 				(struct rte_eth_dev *dev,
2324 				 struct rte_flow_action_handle *action,
2325 				 struct rte_flow_error *error);
2326 typedef int (*mlx5_flow_action_update_t)
2327 			(struct rte_eth_dev *dev,
2328 			 struct rte_flow_action_handle *action,
2329 			 const void *update,
2330 			 struct rte_flow_error *error);
2331 typedef int (*mlx5_flow_action_query_t)
2332 			(struct rte_eth_dev *dev,
2333 			 const struct rte_flow_action_handle *action,
2334 			 void *data,
2335 			 struct rte_flow_error *error);
2336 typedef int (*mlx5_flow_action_query_update_t)
2337 			(struct rte_eth_dev *dev,
2338 			 struct rte_flow_action_handle *handle,
2339 			 const void *update, void *data,
2340 			 enum rte_flow_query_update_mode qu_mode,
2341 			 struct rte_flow_error *error);
2342 typedef struct rte_flow_action_list_handle *
2343 (*mlx5_flow_action_list_handle_create_t)
2344 			(struct rte_eth_dev *dev,
2345 			 const struct rte_flow_indir_action_conf *conf,
2346 			 const struct rte_flow_action *actions,
2347 			 struct rte_flow_error *error);
2348 typedef int
2349 (*mlx5_flow_action_list_handle_destroy_t)
2350 			(struct rte_eth_dev *dev,
2351 			 struct rte_flow_action_list_handle *handle,
2352 			 struct rte_flow_error *error);
2353 typedef int (*mlx5_flow_sync_domain_t)
2354 			(struct rte_eth_dev *dev,
2355 			 uint32_t domains,
2356 			 uint32_t flags);
2357 typedef int (*mlx5_flow_validate_mtr_acts_t)
2358 			(struct rte_eth_dev *dev,
2359 			 const struct rte_flow_action *actions[RTE_COLORS],
2360 			 struct rte_flow_attr *attr,
2361 			 bool *is_rss,
2362 			 uint8_t *domain_bitmap,
2363 			 uint8_t *policy_mode,
2364 			 struct rte_mtr_error *error);
2365 typedef int (*mlx5_flow_create_mtr_acts_t)
2366 			(struct rte_eth_dev *dev,
2367 		      struct mlx5_flow_meter_policy *mtr_policy,
2368 		      const struct rte_flow_action *actions[RTE_COLORS],
2369 		      struct rte_flow_attr *attr,
2370 		      struct rte_mtr_error *error);
2371 typedef void (*mlx5_flow_destroy_mtr_acts_t)
2372 			(struct rte_eth_dev *dev,
2373 		      struct mlx5_flow_meter_policy *mtr_policy);
2374 typedef int (*mlx5_flow_create_policy_rules_t)
2375 			(struct rte_eth_dev *dev,
2376 			  struct mlx5_flow_meter_policy *mtr_policy);
2377 typedef void (*mlx5_flow_destroy_policy_rules_t)
2378 			(struct rte_eth_dev *dev,
2379 			  struct mlx5_flow_meter_policy *mtr_policy);
2380 typedef int (*mlx5_flow_create_def_policy_t)
2381 			(struct rte_eth_dev *dev);
2382 typedef void (*mlx5_flow_destroy_def_policy_t)
2383 			(struct rte_eth_dev *dev);
2384 typedef int (*mlx5_flow_discover_priorities_t)
2385 			(struct rte_eth_dev *dev,
2386 			 const uint16_t *vprio, int vprio_n);
2387 typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
2388 			(struct rte_eth_dev *dev,
2389 			 const struct rte_flow_item_flex_conf *conf,
2390 			 struct rte_flow_error *error);
2391 typedef int (*mlx5_flow_item_release_t)
2392 			(struct rte_eth_dev *dev,
2393 			 const struct rte_flow_item_flex_handle *handle,
2394 			 struct rte_flow_error *error);
2395 typedef int (*mlx5_flow_item_update_t)
2396 			(struct rte_eth_dev *dev,
2397 			 const struct rte_flow_item_flex_handle *handle,
2398 			 const struct rte_flow_item_flex_conf *conf,
2399 			 struct rte_flow_error *error);
2400 typedef int (*mlx5_flow_info_get_t)
2401 			(struct rte_eth_dev *dev,
2402 			 struct rte_flow_port_info *port_info,
2403 			 struct rte_flow_queue_info *queue_info,
2404 			 struct rte_flow_error *error);
2405 typedef int (*mlx5_flow_port_configure_t)
2406 			(struct rte_eth_dev *dev,
2407 			 const struct rte_flow_port_attr *port_attr,
2408 			 uint16_t nb_queue,
2409 			 const struct rte_flow_queue_attr *queue_attr[],
2410 			 struct rte_flow_error *err);
2411 typedef int (*mlx5_flow_pattern_validate_t)
2412 			(struct rte_eth_dev *dev,
2413 			 const struct rte_flow_pattern_template_attr *attr,
2414 			 const struct rte_flow_item items[],
2415 			 uint64_t *item_flags,
2416 			 struct rte_flow_error *error);
2417 typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
2418 			(struct rte_eth_dev *dev,
2419 			 const struct rte_flow_pattern_template_attr *attr,
2420 			 const struct rte_flow_item items[],
2421 			 struct rte_flow_error *error);
2422 typedef int (*mlx5_flow_pattern_template_destroy_t)
2423 			(struct rte_eth_dev *dev,
2424 			 struct rte_flow_pattern_template *template,
2425 			 struct rte_flow_error *error);
2426 typedef int (*mlx5_flow_actions_validate_t)
2427 			(struct rte_eth_dev *dev,
2428 			 const struct rte_flow_actions_template_attr *attr,
2429 			 const struct rte_flow_action actions[],
2430 			 const struct rte_flow_action masks[],
2431 			 struct rte_flow_error *error);
2432 typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
2433 			(struct rte_eth_dev *dev,
2434 			 const struct rte_flow_actions_template_attr *attr,
2435 			 const struct rte_flow_action actions[],
2436 			 const struct rte_flow_action masks[],
2437 			 struct rte_flow_error *error);
2438 typedef int (*mlx5_flow_actions_template_destroy_t)
2439 			(struct rte_eth_dev *dev,
2440 			 struct rte_flow_actions_template *template,
2441 			 struct rte_flow_error *error);
2442 typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
2443 		(struct rte_eth_dev *dev,
2444 		 const struct rte_flow_template_table_attr *attr,
2445 		 struct rte_flow_pattern_template *item_templates[],
2446 		 uint8_t nb_item_templates,
2447 		 struct rte_flow_actions_template *action_templates[],
2448 		 uint8_t nb_action_templates,
2449 		 struct rte_flow_error *error);
2450 typedef int (*mlx5_flow_table_destroy_t)
2451 			(struct rte_eth_dev *dev,
2452 			 struct rte_flow_template_table *table,
2453 			 struct rte_flow_error *error);
2454 typedef int (*mlx5_flow_group_set_miss_actions_t)
2455 			(struct rte_eth_dev *dev,
2456 			 uint32_t group_id,
2457 			 const struct rte_flow_group_attr *attr,
2458 			 const struct rte_flow_action actions[],
2459 			 struct rte_flow_error *error);
2460 typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
2461 			(struct rte_eth_dev *dev,
2462 			 uint32_t queue,
2463 			 const struct rte_flow_op_attr *attr,
2464 			 struct rte_flow_template_table *table,
2465 			 const struct rte_flow_item items[],
2466 			 uint8_t pattern_template_index,
2467 			 const struct rte_flow_action actions[],
2468 			 uint8_t action_template_index,
2469 			 void *user_data,
2470 			 struct rte_flow_error *error);
2471 typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
2472 			(struct rte_eth_dev *dev,
2473 			 uint32_t queue,
2474 			 const struct rte_flow_op_attr *attr,
2475 			 struct rte_flow_template_table *table,
2476 			 uint32_t rule_index,
2477 			 const struct rte_flow_action actions[],
2478 			 uint8_t action_template_index,
2479 			 void *user_data,
2480 			 struct rte_flow_error *error);
2481 typedef int (*mlx5_flow_async_flow_update_t)
2482 			(struct rte_eth_dev *dev,
2483 			 uint32_t queue,
2484 			 const struct rte_flow_op_attr *attr,
2485 			 struct rte_flow *flow,
2486 			 const struct rte_flow_action actions[],
2487 			 uint8_t action_template_index,
2488 			 void *user_data,
2489 			 struct rte_flow_error *error);
2490 typedef int (*mlx5_flow_async_flow_destroy_t)
2491 			(struct rte_eth_dev *dev,
2492 			 uint32_t queue,
2493 			 const struct rte_flow_op_attr *attr,
2494 			 struct rte_flow *flow,
2495 			 void *user_data,
2496 			 struct rte_flow_error *error);
2497 typedef int (*mlx5_flow_pull_t)
2498 			(struct rte_eth_dev *dev,
2499 			 uint32_t queue,
2500 			 struct rte_flow_op_result res[],
2501 			 uint16_t n_res,
2502 			 struct rte_flow_error *error);
2503 typedef int (*mlx5_flow_push_t)
2504 			(struct rte_eth_dev *dev,
2505 			 uint32_t queue,
2506 			 struct rte_flow_error *error);
2507 
/*
 * Driver callback: enqueue creation of an indirect action on a flow queue
 * (asynchronous template API).
 */
typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *action,
			 void *user_data,
			 struct rte_flow_error *error);

/* Driver callback: enqueue an update of an existing indirect action. */
typedef int (*mlx5_flow_async_action_handle_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 const void *update,
			 void *user_data,
			 struct rte_flow_error *error);
/*
 * Driver callback: enqueue a combined query/update of an indirect action;
 * @p qu_mode selects the ordering of the two operations.
 */
typedef int (*mlx5_flow_async_action_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_handle *action_handle,
			 const void *update, void *data,
			 enum rte_flow_query_update_mode qu_mode,
			 void *user_data, struct rte_flow_error *error);
/* Driver callback: enqueue a query of an indirect action. */
typedef int (*mlx5_flow_async_action_handle_query_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 void *user_data,
			 struct rte_flow_error *error);

/* Driver callback: enqueue destruction of an indirect action. */
typedef int (*mlx5_flow_async_action_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 void *user_data,
			 struct rte_flow_error *error);
/*
 * Driver callback: enqueue creation of an indirect action list
 * (END-terminated @p actions array).
 */
typedef struct rte_flow_action_list_handle *
(*mlx5_flow_async_action_list_handle_create_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *actions,
			 void *user_data, struct rte_flow_error *error);
/* Driver callback: enqueue destruction of an indirect action list. */
typedef int
(*mlx5_flow_async_action_list_handle_destroy_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_list_handle *action_handle,
			 void *user_data, struct rte_flow_error *error);
/* Driver callback: synchronous query/update of an indirect action list. */
typedef int
(*mlx5_flow_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			struct rte_flow_error *error);
/* Driver callback: asynchronous query/update of an indirect action list. */
typedef int
(*mlx5_flow_async_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			const struct rte_flow_op_attr *attr,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			void *user_data, struct rte_flow_error *error);
/*
 * Driver callback: compute the matcher hash of @p pattern for the given
 * template table and pattern template index.
 */
typedef int
(*mlx5_flow_calc_table_hash_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_template_table *table,
			 const struct rte_flow_item pattern[],
			 uint8_t pattern_template_index,
			 uint32_t *hash, struct rte_flow_error *error);
/* Driver callback: compute the hash placed in @p dest_field on encap. */
typedef int
(*mlx5_flow_calc_encap_hash_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item pattern[],
			 enum rte_flow_encap_hash_field dest_field,
			 uint8_t *hash,
			 struct rte_flow_error *error);
/* Driver callback: start resizing a template table to @p nb_rules. */
typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
				   struct rte_flow_template_table *table,
				   uint32_t nb_rules, struct rte_flow_error *error);
/* Driver callback: move a rule into the resized table. */
typedef int (*mlx5_flow_update_resized_t)
			(struct rte_eth_dev *dev, uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow *rule, void *user_data,
			 struct rte_flow_error *error);
/* Driver callback: finalize a template table resize. */
typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
				       struct rte_flow_template_table *table,
				       struct rte_flow_error *error);
2601 
/*
 * Table of driver callbacks implementing the flow API for one flow engine
 * (Verbs, DV or HWS). Members left NULL are not supported by that engine.
 */
struct mlx5_flow_driver_ops {
	/* Classic flow rule lifecycle. */
	mlx5_flow_list_create_t list_create;
	mlx5_flow_list_destroy_t list_destroy;
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	/* Meter tables, policies and hierarchy. */
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
	mlx5_flow_mtr_alloc_t create_meter;
	mlx5_flow_mtr_free_t free_meter;
	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
	mlx5_flow_create_mtr_acts_t create_mtr_acts;
	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
	mlx5_flow_create_policy_rules_t create_policy_rules;
	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
	mlx5_flow_create_def_policy_t create_def_policy;
	mlx5_flow_destroy_def_policy_t destroy_def_policy;
	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
	/* Counters and flow aging. */
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
	mlx5_flow_get_aged_flows_t get_aged_flows;
	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
	/* Indirect (shared) actions, synchronous API. */
	mlx5_flow_action_validate_t action_validate;
	mlx5_flow_action_create_t action_create;
	mlx5_flow_action_destroy_t action_destroy;
	mlx5_flow_action_update_t action_update;
	mlx5_flow_action_query_t action_query;
	mlx5_flow_action_query_update_t action_query_update;
	mlx5_flow_action_list_handle_create_t action_list_handle_create;
	mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
	mlx5_flow_sync_domain_t sync_domain;
	mlx5_flow_discover_priorities_t discover_priorities;
	/* Indirect flow items. */
	mlx5_flow_item_create_t item_create;
	mlx5_flow_item_release_t item_release;
	mlx5_flow_item_update_t item_update;
	/* Template API: port configuration and template management (HWS). */
	mlx5_flow_info_get_t info_get;
	mlx5_flow_port_configure_t configure;
	mlx5_flow_pattern_validate_t pattern_validate;
	mlx5_flow_pattern_template_create_t pattern_template_create;
	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
	mlx5_flow_actions_validate_t actions_validate;
	mlx5_flow_actions_template_create_t actions_template_create;
	mlx5_flow_actions_template_destroy_t actions_template_destroy;
	mlx5_flow_table_create_t template_table_create;
	mlx5_flow_table_destroy_t template_table_destroy;
	mlx5_flow_group_set_miss_actions_t group_set_miss_actions;
	/* Template API: asynchronous (queue-based) flow rule operations. */
	mlx5_flow_async_flow_create_t async_flow_create;
	mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
	mlx5_flow_async_flow_update_t async_flow_update;
	mlx5_flow_async_flow_destroy_t async_flow_destroy;
	mlx5_flow_pull_t pull;
	mlx5_flow_push_t push;
	/* Template API: asynchronous indirect action operations. */
	mlx5_flow_async_action_handle_create_t async_action_create;
	mlx5_flow_async_action_handle_update_t async_action_update;
	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
	mlx5_flow_async_action_handle_query_t async_action_query;
	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
	mlx5_flow_async_action_list_handle_create_t
		async_action_list_handle_create;
	mlx5_flow_async_action_list_handle_destroy_t
		async_action_list_handle_destroy;
	mlx5_flow_action_list_handle_query_update_t
		action_list_handle_query_update;
	mlx5_flow_async_action_list_handle_query_update_t
		async_action_list_handle_query_update;
	/* Hash calculation and template table resize helpers. */
	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
	mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
	mlx5_table_resize_t table_resize;
	mlx5_flow_update_resized_t flow_update_resized;
	table_resize_complete_t table_resize_complete;
};
2681 
2682 /* mlx5_flow.c */
2683 
2684 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
2685 void mlx5_flow_pop_thread_workspace(void);
2686 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
2687 
/* Context flags controlling how a flow group number is translated. */
__extension__
struct flow_grp_info {
	/* Rule originates from an application request - TODO confirm. */
	uint64_t external:1;
	/* Rule belongs to the transfer (E-Switch) domain. */
	uint64_t transfer:1;
	/* Default FDB rules are in effect - TODO confirm semantics. */
	uint64_t fdb_def_rule:1;
	/* force standard group translation */
	uint64_t std_tbl_fix:1;
	/* 2-bit count of scale jumps to skip during translation. */
	uint64_t skip_scale:2;
};
2697 
2698 static inline bool
2699 tunnel_use_standard_attr_group_translate
2700 		    (const struct rte_eth_dev *dev,
2701 		     const struct rte_flow_attr *attr,
2702 		     const struct mlx5_flow_tunnel *tunnel,
2703 		     enum mlx5_tof_rule_type tof_rule_type)
2704 {
2705 	bool verdict;
2706 
2707 	if (!is_tunnel_offload_active(dev))
2708 		/* no tunnel offload API */
2709 		verdict = true;
2710 	else if (tunnel) {
2711 		/*
2712 		 * OvS will use jump to group 0 in tunnel steer rule.
2713 		 * If tunnel steer rule starts from group 0 (attr.group == 0)
2714 		 * that 0 group must be translated with standard method.
2715 		 * attr.group == 0 in tunnel match rule translated with tunnel
2716 		 * method
2717 		 */
2718 		verdict = !attr->group &&
2719 			  is_flow_tunnel_steer_rule(tof_rule_type);
2720 	} else {
2721 		/*
2722 		 * non-tunnel group translation uses standard method for
2723 		 * root group only: attr.group == 0
2724 		 */
2725 		verdict = !attr->group;
2726 	}
2727 
2728 	return verdict;
2729 }
2730 
2731 /**
2732  * Get DV flow aso meter by index.
2733  *
2734  * @param[in] dev
2735  *   Pointer to the Ethernet device structure.
2736  * @param[in] idx
2737  *   mlx5 flow aso meter index in the container.
2738  * @param[out] ppool
2739  *   mlx5 flow aso meter pool in the container,
2740  *
2741  * @return
2742  *   Pointer to the aso meter, NULL otherwise.
2743  */
2744 static inline struct mlx5_aso_mtr *
2745 mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
2746 {
2747 	struct mlx5_aso_mtr_pool *pool;
2748 	struct mlx5_aso_mtr_pools_mng *pools_mng =
2749 				&priv->sh->mtrmng->pools_mng;
2750 
2751 	if (priv->mtr_bulk.aso)
2752 		return priv->mtr_bulk.aso + idx;
2753 	/* Decrease to original index. */
2754 	idx--;
2755 	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
2756 	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
2757 	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
2758 	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
2759 	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
2760 }
2761 
2762 static __rte_always_inline const struct rte_flow_item *
2763 mlx5_find_end_item(const struct rte_flow_item *item)
2764 {
2765 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
2766 	return item;
2767 }
2768 
2769 static __rte_always_inline bool
2770 mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
2771 {
2772 	struct rte_flow_item_integrity test = *item;
2773 	test.l3_ok = 0;
2774 	test.l4_ok = 0;
2775 	test.ipv4_csum_ok = 0;
2776 	test.l4_csum_ok = 0;
2777 	return (test.value == 0);
2778 }
2779 
2780 /*
2781  * Get ASO CT action by device and index.
2782  *
2783  * @param[in] dev
2784  *   Pointer to the Ethernet device structure.
2785  * @param[in] idx
2786  *   Index to the ASO CT action.
2787  *
2788  * @return
2789  *   The specified ASO CT action pointer.
2790  */
2791 static inline struct mlx5_aso_ct_action *
2792 flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
2793 {
2794 	struct mlx5_priv *priv = dev->data->dev_private;
2795 	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
2796 	struct mlx5_aso_ct_pool *pool;
2797 
2798 	idx--;
2799 	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
2800 	/* Bit operation AND could be used. */
2801 	rte_rwlock_read_lock(&mng->resize_rwl);
2802 	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
2803 	rte_rwlock_read_unlock(&mng->resize_rwl);
2804 	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
2805 }
2806 
2807 /*
2808  * Get ASO CT action by owner & index.
2809  *
2810  * @param[in] dev
2811  *   Pointer to the Ethernet device structure.
2812  * @param[in] idx
2813  *   Index to the ASO CT action and owner port combination.
2814  *
2815  * @return
2816  *   The specified ASO CT action pointer.
2817  */
2818 static inline struct mlx5_aso_ct_action *
2819 flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
2820 {
2821 	struct mlx5_priv *priv = dev->data->dev_private;
2822 	struct mlx5_aso_ct_action *ct;
2823 	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
2824 	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
2825 
2826 	if (owner == PORT_ID(priv)) {
2827 		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
2828 	} else {
2829 		struct rte_eth_dev *owndev = &rte_eth_devices[owner];
2830 
2831 		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
2832 		if (dev->data->dev_started != 1)
2833 			return NULL;
2834 		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
2835 		if (ct->peer != PORT_ID(priv))
2836 			return NULL;
2837 	}
2838 	return ct;
2839 }
2840 
2841 static inline uint16_t
2842 mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
2843 {
2844 	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
2845 		return RTE_ETHER_TYPE_TEB;
2846 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2847 		return RTE_ETHER_TYPE_IPV4;
2848 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2849 		return RTE_ETHER_TYPE_IPV6;
2850 	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
2851 		return RTE_ETHER_TYPE_MPLS;
2852 	return 0;
2853 }
2854 
2855 int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2856 			 struct rte_flow_error *error);
2857 
2858 /*
2859  * Convert rte_mtr_color to mlx5 color.
2860  *
2861  * @param[in] rcol
2862  *   rte_mtr_color.
2863  *
2864  * @return
2865  *   mlx5 color.
2866  */
2867 static inline int
2868 rte_col_2_mlx5_col(enum rte_color rcol)
2869 {
2870 	switch (rcol) {
2871 	case RTE_COLOR_GREEN:
2872 		return MLX5_FLOW_COLOR_GREEN;
2873 	case RTE_COLOR_YELLOW:
2874 		return MLX5_FLOW_COLOR_YELLOW;
2875 	case RTE_COLOR_RED:
2876 		return MLX5_FLOW_COLOR_RED;
2877 	default:
2878 		break;
2879 	}
2880 	return MLX5_FLOW_COLOR_UNDEFINED;
2881 }
2882 
2883 /**
2884  * Indicates whether flow source vport is representor port.
2885  *
2886  * @param[in] priv
2887  *   Pointer to device private context structure.
2888  * @param[in] act_priv
2889  *   Pointer to actual device private context structure if have.
2890  *
2891  * @return
2892  *   True when the flow source vport is representor port, false otherwise.
2893  */
2894 static inline bool
2895 flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
2896 {
2897 	MLX5_ASSERT(priv);
2898 	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
2899 		 (act_priv->representor_id != UINT16_MAX));
2900 }
2901 
/* All types of Ethernet patterns used in control flow rules. */
enum mlx5_flow_ctrl_rx_eth_pattern_type {
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
	/* Number of pattern types; used as array dimension - keep last. */
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
};
2916 
/* All types of RSS actions used in control flow rules. */
enum mlx5_flow_ctrl_rx_expanded_rss_type {
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP = 0,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP,
	/* Number of expanded RSS types; used as array dimension - keep last. */
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
};
2928 
2929 /**
2930  * Contains pattern template, template table and its attributes for a single
2931  * combination of Ethernet pattern and RSS action. Used to create control flow rules
2932  * with HWS.
2933  */
2934 struct mlx5_flow_hw_ctrl_rx_table {
2935 	struct rte_flow_template_table_attr attr;
2936 	struct rte_flow_pattern_template *pt;
2937 	struct rte_flow_template_table *tbl;
2938 };
2939 
/* Contains all templates required to create control flow rules with HWS. */
struct mlx5_flow_hw_ctrl_rx {
	/* One actions template per expanded RSS type. */
	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
	/* Tables indexed by [Ethernet pattern type][expanded RSS type]. */
	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
};
2946 
/* Contains all templates required for control flow rules in FDB with HWS. */
struct mlx5_flow_hw_ctrl_fdb {
	/* E-Switch manager SQ miss rules - root table. */
	struct rte_flow_pattern_template *esw_mgr_items_tmpl;
	struct rte_flow_actions_template *regc_jump_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
	/* SQ miss rules - non-root table matching on REG_C/SQ. */
	struct rte_flow_pattern_template *regc_sq_items_tmpl;
	struct rte_flow_actions_template *port_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
	/* Default jump from FDB group 0 to group 1. */
	struct rte_flow_pattern_template *port_items_tmpl;
	struct rte_flow_actions_template *jump_one_actions_tmpl;
	struct rte_flow_template_table *hw_esw_zero_tbl;
	/* Tx metadata copy rules. */
	struct rte_flow_pattern_template *tx_meta_items_tmpl;
	struct rte_flow_actions_template *tx_meta_actions_tmpl;
	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
	/* LACP Rx rules. */
	struct rte_flow_pattern_template *lacp_rx_items_tmpl;
	struct rte_flow_actions_template *lacp_rx_actions_tmpl;
	struct rte_flow_template_table *hw_lacp_rx_tbl;
};
2965 
2966 #define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
2967 #define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
2968 #define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
2969 #define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
2970 #define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
2971 #define MLX5_CTRL_DMAC           (RTE_BIT32(5))
2972 #define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))
2973 
2974 int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
2975 
/** Create a control flow rule for matching unicast DMAC (Verbs and DV). */
2977 int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2978 
/** Destroy a control flow rule for matching unicast DMAC (Verbs and DV). */
2980 int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2981 
2982 /** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
2983 int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
2984 				      const struct rte_ether_addr *addr,
2985 				      const uint16_t vid);
2986 
2987 /** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
2988 int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
2989 				      const struct rte_ether_addr *addr,
2990 				      const uint16_t vid);
2991 
2992 /** Destroy a control flow rule registered on port level control flow rule type. */
2993 void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry);
2994 
2995 /** Create a control flow rule for matching unicast DMAC (HWS). */
2996 int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
2997 
2998 /** Destroy a control flow rule for matching unicast DMAC (HWS). */
2999 int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3000 
3001 /** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
3002 int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
3003 				     const struct rte_ether_addr *addr,
3004 				     const uint16_t vlan);
3005 
3006 /** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */
3007 int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
3008 					     const struct rte_ether_addr *addr,
3009 					     const uint16_t vlan);
3010 
3011 void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
3012 
3013 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
3014 			     const struct mlx5_flow_tunnel *tunnel,
3015 			     uint32_t group, uint32_t *table,
3016 			     const struct flow_grp_info *flags,
3017 			     struct rte_flow_error *error);
3018 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
3019 				     int tunnel, uint64_t layer_types,
3020 				     uint64_t hash_fields);
3021 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
3022 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
3023 				   uint32_t subpriority);
3024 uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
3025 					const struct rte_flow_attr *attr);
3026 uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
3027 				   const struct rte_flow_attr *attr,
3028 				   uint32_t subpriority, bool external);
3029 uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
3030 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
3031 				     enum mlx5_feature_name feature,
3032 				     uint32_t id,
3033 				     struct rte_flow_error *error);
3034 const struct rte_flow_action *mlx5_flow_find_action
3035 					(const struct rte_flow_action *actions,
3036 					 enum rte_flow_action_type action);
3037 int mlx5_validate_action_rss(struct rte_eth_dev *dev,
3038 			     const struct rte_flow_action *action,
3039 			     struct rte_flow_error *error);
3040 
3041 struct mlx5_hw_encap_decap_action*
3042 mlx5_reformat_action_create(struct rte_eth_dev *dev,
3043 			    const struct rte_flow_indir_action_conf *conf,
3044 			    const struct rte_flow_action *encap_action,
3045 			    const struct rte_flow_action *decap_action,
3046 			    struct rte_flow_error *error);
3047 int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
3048 				 struct rte_flow_action_list_handle *handle,
3049 				 struct rte_flow_error *error);
3050 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
3051 				    const struct rte_flow_attr *attr,
3052 				    struct rte_flow_error *error);
3053 int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
3054 				   bool is_root,
3055 				   const struct rte_flow_attr *attr,
3056 				   struct rte_flow_error *error);
3057 int mlx5_flow_validate_action_flag(uint64_t action_flags,
3058 				   const struct rte_flow_attr *attr,
3059 				   struct rte_flow_error *error);
3060 int mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
3061 				   const struct rte_flow_action *action,
3062 				   uint64_t action_flags,
3063 				   const struct rte_flow_attr *attr,
3064 				   struct rte_flow_error *error);
3065 int mlx5_flow_validate_target_queue(struct rte_eth_dev *dev,
3066 				    const struct rte_flow_action *action,
3067 				    struct rte_flow_error *error);
3068 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
3069 				    uint64_t action_flags,
3070 				    struct rte_eth_dev *dev,
3071 				    const struct rte_flow_attr *attr,
3072 				    struct rte_flow_error *error);
3073 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
3074 				  uint64_t action_flags,
3075 				  struct rte_eth_dev *dev,
3076 				  const struct rte_flow_attr *attr,
3077 				  uint64_t item_flags,
3078 				  struct rte_flow_error *error);
3079 int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
3080 				const struct rte_flow_attr *attr,
3081 				struct rte_flow_error *error);
3082 int flow_validate_modify_field_level
3083 			(const struct rte_flow_field_data *data,
3084 			 struct rte_flow_error *error);
3085 int
3086 mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3087 				      uint64_t action_flags,
3088 				      const struct rte_flow_action *action,
3089 				      const struct rte_flow_attr *attr,
3090 				      struct rte_flow_error *error);
3091 int
3092 mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3093 				   uint64_t action_flags,
3094 				   const struct rte_flow_action *action,
3095 				   const uint64_t item_flags,
3096 				   const struct rte_flow_attr *attr,
3097 				   struct rte_flow_error *error);
3098 int
3099 mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3100 				    uint64_t action_flags,
3101 				    uint64_t item_flags,
3102 				    bool root,
3103 				    struct rte_flow_error *error);
3104 int
3105 mlx5_flow_dv_validate_action_raw_encap_decap
3106 	(struct rte_eth_dev *dev,
3107 	 const struct rte_flow_action_raw_decap *decap,
3108 	 const struct rte_flow_action_raw_encap *encap,
3109 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
3110 	 int *actions_n, const struct rte_flow_action *action,
3111 	 uint64_t item_flags, struct rte_flow_error *error);
3112 int mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
3113 			      const struct rte_flow_item *item,
3114 			      const uint8_t *mask,
3115 			      const uint8_t *nic_mask,
3116 			      unsigned int size,
3117 			      bool range_accepted,
3118 			      struct rte_flow_error *error);
3119 int mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev,
3120 				const struct rte_flow_item *item,
3121 				uint64_t item_flags, bool ext_vlan_sup,
3122 				struct rte_flow_error *error);
3123 int
3124 mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item,
3125 				uint64_t item_flags,
3126 				struct rte_eth_dev *dev,
3127 				struct rte_flow_error *error);
3128 int
3129 mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
3130 				const struct rte_flow_item *item,
3131 				uint64_t item_flags,
3132 				uint64_t last_item,
3133 				uint16_t ether_type,
3134 				const struct rte_flow_item_ipv4 *acc_mask,
3135 				struct rte_flow_error *error);
3136 int
3137 mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
3138 			       const struct rte_flow_item *item,
3139 			       uint64_t item_flags,
3140 			       struct rte_flow_error *error);
3141 int
3142 mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev,
3143 				   const struct rte_flow_item *item,
3144 				   uint64_t last_item,
3145 				   const struct rte_flow_item *gtp_item,
3146 				   bool root, struct rte_flow_error *error);
3147 int
3148 mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
3149 				  const struct rte_flow_item *item,
3150 				  uint64_t *item_flags,
3151 				  struct rte_flow_error *error);
3152 int mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
3153 				const struct rte_flow_item *item,
3154 				uint64_t item_flags,
3155 				uint8_t target_protocol,
3156 				struct rte_flow_error *error);
3157 int mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
3158 				    const struct rte_flow_item *item,
3159 				    uint64_t item_flags,
3160 				    const struct rte_flow_item *gre_item,
3161 				    struct rte_flow_error *error);
3162 int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
3163 				       const struct rte_flow_item *item,
3164 				       uint64_t item_flags,
3165 				       const struct rte_flow_attr *attr,
3166 				       const struct rte_flow_item *gre_item,
3167 				       struct rte_flow_error *error);
3168 int mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev,
3169 				 const struct rte_flow_item *item,
3170 				 uint64_t item_flags,
3171 				 uint64_t last_item,
3172 				 uint16_t ether_type,
3173 				 const struct rte_flow_item_ipv4 *acc_mask,
3174 				 bool range_accepted,
3175 				 struct rte_flow_error *error);
3176 int mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
3177 				 const struct rte_flow_item *item,
3178 				 uint64_t item_flags,
3179 				 uint64_t last_item,
3180 				 uint16_t ether_type,
3181 				 const struct rte_flow_item_ipv6 *acc_mask,
3182 				 struct rte_flow_error *error);
3183 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
3184 				 const struct rte_flow_item *item,
3185 				 uint64_t item_flags,
3186 				 uint64_t prev_layer,
3187 				 struct rte_flow_error *error);
3188 int mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev,
3189 				const struct rte_flow_item *item,
3190 				uint64_t item_flags,
3191 				uint8_t target_protocol,
3192 				const struct rte_flow_item_tcp *flow_mask,
3193 				struct rte_flow_error *error);
3194 int mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
3195 				const struct rte_flow_item *item,
3196 				uint64_t item_flags,
3197 				uint8_t target_protocol,
3198 				struct rte_flow_error *error);
3199 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
3200 				 uint64_t item_flags,
3201 				 struct rte_eth_dev *dev,
3202 				 struct rte_flow_error *error);
3203 int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
3204 				  uint16_t udp_dport,
3205 				  const struct rte_flow_item *item,
3206 				  uint64_t item_flags,
3207 				  bool root,
3208 				  struct rte_flow_error *error);
3209 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
3210 				      uint64_t item_flags,
3211 				      struct rte_eth_dev *dev,
3212 				      struct rte_flow_error *error);
3213 int mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev,
3214 				 const struct rte_flow_item *item,
3215 				 uint64_t item_flags,
3216 				 uint8_t target_protocol,
3217 				 struct rte_flow_error *error);
3218 int mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
3219 				  const struct rte_flow_item *item,
3220 				  uint64_t item_flags,
3221 				  uint8_t target_protocol,
3222 				  struct rte_flow_error *error);
3223 int mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
3224 				       const struct rte_flow_item *item,
3225 				       uint64_t item_flags,
3226 				       uint8_t target_protocol,
3227 				       struct rte_flow_error *error);
3228 int mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev,
3229 				  const struct rte_flow_item *item,
3230 				  uint64_t item_flags,
3231 				  uint8_t target_protocol,
3232 				  struct rte_flow_error *error);
3233 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
3234 				   uint64_t item_flags,
3235 				   struct rte_eth_dev *dev,
3236 				   struct rte_flow_error *error);
3237 int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
3238 				   uint64_t last_item,
3239 				   const struct rte_flow_item *geneve_item,
3240 				   struct rte_eth_dev *dev,
3241 				   struct rte_flow_error *error);
3242 int mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
3243 				  const struct rte_flow_item *item,
3244 				  uint64_t item_flags,
3245 				  uint64_t last_item,
3246 				  uint16_t ether_type,
3247 				  const struct rte_flow_item_ecpri *acc_mask,
3248 				  struct rte_flow_error *error);
3249 int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
3250 				const struct rte_flow_item *item,
3251 				struct rte_flow_error *error);
3252 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
3253 			      struct mlx5_flow_meter_info *fm,
3254 			      uint32_t mtr_idx,
3255 			      uint8_t domain_bitmap);
3256 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
3257 			       struct mlx5_flow_meter_info *fm);
3258 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
3259 struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
3260 		(struct rte_eth_dev *dev,
3261 		struct mlx5_flow_meter_policy *mtr_policy,
3262 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
3263 void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
3264 		struct mlx5_flow_meter_policy *mtr_policy);
3265 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
3266 int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
3267 int mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev);
3268 int mlx5_action_handle_attach(struct rte_eth_dev *dev);
3269 int mlx5_action_handle_detach(struct rte_eth_dev *dev);
3270 int mlx5_action_handle_flush(struct rte_eth_dev *dev);
3271 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
3272 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
3273 
3274 struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
3275 int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3276 			 void *cb_ctx);
3277 void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3278 struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
3279 					     struct mlx5_list_entry *oentry,
3280 					     void *entry_ctx);
3281 void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3282 struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3283 		uint32_t table_level, uint8_t egress, uint8_t transfer,
3284 		bool external, const struct mlx5_flow_tunnel *tunnel,
3285 		uint32_t group_id, uint8_t dummy,
3286 		uint32_t table_id, struct rte_flow_error *error);
3287 int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
3288 				 struct mlx5_flow_tbl_resource *tbl);
3289 
3290 struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
3291 int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3292 			 void *cb_ctx);
3293 void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3294 struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
3295 					     struct mlx5_list_entry *oentry,
3296 					     void *cb_ctx);
3297 void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3298 
3299 int flow_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3300 			    void *cb_ctx);
3301 struct mlx5_list_entry *flow_modify_create_cb(void *tool_ctx, void *ctx);
3302 void flow_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3303 struct mlx5_list_entry *flow_modify_clone_cb(void *tool_ctx,
3304 						struct mlx5_list_entry *oentry,
3305 						void *ctx);
3306 void flow_modify_clone_free_cb(void *tool_ctx,
3307 				  struct mlx5_list_entry *entry);
3308 
3309 struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
3310 int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3311 			  void *cb_ctx);
3312 void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3313 struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
3314 					      struct mlx5_list_entry *entry,
3315 					      void *ctx);
3316 void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3317 
3318 int flow_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3319 				 void *cb_ctx);
3320 struct mlx5_list_entry *flow_encap_decap_create_cb(void *tool_ctx,
3321 						      void *cb_ctx);
3322 void flow_encap_decap_remove_cb(void *tool_ctx,
3323 				   struct mlx5_list_entry *entry);
3324 struct mlx5_list_entry *flow_encap_decap_clone_cb(void *tool_ctx,
3325 						  struct mlx5_list_entry *entry,
3326 						  void *cb_ctx);
3327 void flow_encap_decap_clone_free_cb(void *tool_ctx,
3328 				       struct mlx5_list_entry *entry);
3329 int __flow_encap_decap_resource_register
3330 			(struct rte_eth_dev *dev,
3331 			 struct mlx5_flow_dv_encap_decap_resource *resource,
3332 			 bool is_root,
3333 			 struct mlx5_flow_dv_encap_decap_resource **encap_decap,
3334 			 struct rte_flow_error *error);
3335 int __flow_modify_hdr_resource_register
3336 			(struct rte_eth_dev *dev,
3337 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
3338 			 struct mlx5_flow_dv_modify_hdr_resource **modify,
3339 			 struct rte_flow_error *error);
3340 int flow_encap_decap_resource_release(struct rte_eth_dev *dev,
3341 				     uint32_t encap_decap_idx);
3342 int flow_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3343 			     void *ctx);
3344 struct mlx5_list_entry *flow_matcher_create_cb(void *tool_ctx, void *ctx);
3345 void flow_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3346 struct mlx5_list_entry *flow_matcher_clone_cb(void *tool_ctx __rte_unused,
3347 			 struct mlx5_list_entry *entry, void *cb_ctx);
3348 void flow_matcher_clone_free_cb(void *tool_ctx __rte_unused,
3349 			     struct mlx5_list_entry *entry);
3350 int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3351 			     void *cb_ctx);
3352 struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
3353 void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3354 struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
3355 				struct mlx5_list_entry *entry, void *cb_ctx);
3356 void flow_dv_port_id_clone_free_cb(void *tool_ctx,
3357 				   struct mlx5_list_entry *entry);
3358 
3359 int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3360 			       void *cb_ctx);
3361 struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
3362 						    void *cb_ctx);
3363 void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3364 struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
3365 				 struct mlx5_list_entry *entry, void *cb_ctx);
3366 void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
3367 				     struct mlx5_list_entry *entry);
3368 
3369 int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3370 			    void *cb_ctx);
3371 struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
3372 void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3373 struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
3374 				 struct mlx5_list_entry *entry, void *cb_ctx);
3375 void flow_dv_sample_clone_free_cb(void *tool_ctx,
3376 				  struct mlx5_list_entry *entry);
3377 
3378 int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3379 				void *cb_ctx);
3380 struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
3381 						     void *cb_ctx);
3382 void flow_dv_dest_array_remove_cb(void *tool_ctx,
3383 				  struct mlx5_list_entry *entry);
3384 struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
3385 				   struct mlx5_list_entry *entry, void *cb_ctx);
3386 void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
3387 				      struct mlx5_list_entry *entry);
3388 void flow_dv_hashfields_set(uint64_t item_flags,
3389 			    struct mlx5_flow_rss_desc *rss_desc,
3390 			    uint64_t *hash_fields);
3391 void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
3392 					uint64_t *hash_field);
3393 uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
3394 					const uint64_t hash_fields);
3395 int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3396 		     const struct rte_flow_item items[],
3397 		     const struct rte_flow_action actions[],
3398 		     bool external, int hairpin, struct rte_flow_error *error);
3399 
3400 struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
3401 void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3402 int flow_hw_grp_match_cb(void *tool_ctx,
3403 			 struct mlx5_list_entry *entry,
3404 			 void *cb_ctx);
3405 struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
3406 					     struct mlx5_list_entry *oentry,
3407 					     void *cb_ctx);
3408 void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3409 
3410 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
3411 						    uint32_t age_idx);
3412 
3413 void flow_release_workspace(void *data);
3414 int mlx5_flow_os_init_workspace_once(void);
3415 void *mlx5_flow_os_get_specific_workspace(void);
3416 int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
3417 void mlx5_flow_os_release_workspace(void);
3418 uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
3419 void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
3420 int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
3421 			const struct rte_flow_action *actions[RTE_COLORS],
3422 			struct rte_flow_attr *attr,
3423 			bool *is_rss,
3424 			uint8_t *domain_bitmap,
3425 			uint8_t *policy_mode,
3426 			struct rte_mtr_error *error);
3427 void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
3428 		      struct mlx5_flow_meter_policy *mtr_policy);
3429 int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
3430 		      struct mlx5_flow_meter_policy *mtr_policy,
3431 		      const struct rte_flow_action *actions[RTE_COLORS],
3432 		      struct rte_flow_attr *attr,
3433 		      struct rte_mtr_error *error);
3434 int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
3435 			     struct mlx5_flow_meter_policy *mtr_policy);
3436 void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
3437 			     struct mlx5_flow_meter_policy *mtr_policy);
3438 int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
3439 void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
3440 void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
3441 		       struct mlx5_flow_handle *dev_handle);
3442 const struct mlx5_flow_tunnel *
3443 mlx5_get_tof(const struct rte_flow_item *items,
3444 	     const struct rte_flow_action *actions,
3445 	     enum mlx5_tof_rule_type *rule_type);
3446 void
3447 flow_hw_resource_release(struct rte_eth_dev *dev);
3448 int
3449 mlx5_geneve_tlv_options_destroy(struct mlx5_geneve_tlv_options *options,
3450 				struct mlx5_physical_device *phdev);
3451 int
3452 mlx5_geneve_tlv_options_check_busy(struct mlx5_priv *priv);
3453 void
3454 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
3455 int flow_dv_action_validate(struct rte_eth_dev *dev,
3456 			    const struct rte_flow_indir_action_conf *conf,
3457 			    const struct rte_flow_action *action,
3458 			    struct rte_flow_error *err);
3459 struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
3460 		      const struct rte_flow_indir_action_conf *conf,
3461 		      const struct rte_flow_action *action,
3462 		      struct rte_flow_error *err);
3463 int flow_dv_action_destroy(struct rte_eth_dev *dev,
3464 			   struct rte_flow_action_handle *handle,
3465 			   struct rte_flow_error *error);
3466 int flow_dv_action_update(struct rte_eth_dev *dev,
3467 			  struct rte_flow_action_handle *handle,
3468 			  const void *update,
3469 			  struct rte_flow_error *err);
3470 int flow_dv_action_query(struct rte_eth_dev *dev,
3471 			 const struct rte_flow_action_handle *handle,
3472 			 void *data,
3473 			 struct rte_flow_error *error);
3474 size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
3475 int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3476 			   size_t *size, struct rte_flow_error *error);
3477 void mlx5_flow_field_id_to_modify_info
3478 		(const struct rte_flow_field_data *data,
3479 		 struct field_modify_info *info, uint32_t *mask,
3480 		 uint32_t width, struct rte_eth_dev *dev,
3481 		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
3482 int flow_dv_convert_modify_action(struct rte_flow_item *item,
3483 			      struct field_modify_info *field,
3484 			      struct field_modify_info *dest,
3485 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
3486 			      uint32_t type, struct rte_flow_error *error);
3487 
3488 #define MLX5_PF_VPORT_ID 0
3489 #define MLX5_ECPF_VPORT_ID 0xFFFE
3490 
3491 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
3492 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
3493 				const struct rte_flow_item *item,
3494 				uint16_t *vport_id,
3495 				bool *all_ports,
3496 				struct rte_flow_error *error);
3497 
3498 int flow_dv_translate_items_hws(const struct rte_flow_item *items,
3499 				struct mlx5_flow_attr *attr, void *key,
3500 				uint32_t key_type, uint64_t *item_flags,
3501 				uint8_t *match_criteria,
3502 				struct rte_flow_error *error);
3503 
3504 int __flow_dv_translate_items_hws(const struct rte_flow_item *items,
3505 				struct mlx5_flow_attr *attr, void *key,
3506 				uint32_t key_type, uint64_t *item_flags,
3507 				uint8_t *match_criteria,
3508 				bool nt_flow,
3509 				struct rte_flow_error *error);
3510 
3511 int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
3512 				  uint16_t *proxy_port_id,
3513 				  struct rte_flow_error *error);
3514 int flow_null_get_aged_flows(struct rte_eth_dev *dev,
3515 		    void **context,
3516 		    uint32_t nb_contexts,
3517 		    struct rte_flow_error *error);
3518 uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
3519 void flow_null_counter_free(struct rte_eth_dev *dev,
3520 			uint32_t counter);
3521 int flow_null_counter_query(struct rte_eth_dev *dev,
3522 			uint32_t counter,
3523 			bool clear,
3524 		    uint64_t *pkts,
3525 			uint64_t *bytes,
3526 			void **action);
3527 
3528 int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
3529 
3530 int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
3531 					 uint32_t sqn, bool external);
3532 int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
3533 					  uint32_t sqn);
3534 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
3535 int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
3536 int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
3537 int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
3538 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
3539 		const struct rte_flow_actions_template_attr *attr,
3540 		const struct rte_flow_action actions[],
3541 		const struct rte_flow_action masks[],
3542 		struct rte_flow_error *error);
3543 int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
3544 		const struct rte_flow_pattern_template_attr *attr,
3545 		const struct rte_flow_item items[],
3546 		struct rte_flow_error *error);
3547 int flow_hw_table_update(struct rte_eth_dev *dev,
3548 			 struct rte_flow_error *error);
3549 int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
3550 			   enum rte_flow_field_id field, int inherit,
3551 			   const struct rte_flow_attr *attr,
3552 			   struct rte_flow_error *error);
3553 uintptr_t flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3554 				const struct rte_flow_attr *attr,
3555 				const struct rte_flow_item items[],
3556 				const struct rte_flow_action actions[],
3557 				bool external, struct rte_flow_error *error);
3558 void flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3559 				uintptr_t flow_idx);
3560 
/**
 * Find the byte offset of the SRv6 (segment routing header) flex parser
 * sample for the port owning the given HW steering context.
 *
 * Scans every probed Ethernet device for the port whose mlx5 private data
 * holds @p dr_ctx and whose HCA reports support for querying match sample
 * info; for that port, converts the first sample's dword offset to bytes.
 *
 * @param dr_ctx
 *   mlx5dr (HW steering) context to match. Tagged __rte_unused because the
 *   parameter is only referenced when HAVE_IBV_FLOW_DV_SUPPORT is defined.
 * @return
 *   Byte offset of the first flex parser sample, or UINT32_MAX when no port
 *   matches, the parser has no mapped samples (mapnum == 0), or DV flow
 *   support is compiled out.
 *
 * NOTE(review): return type is int while the sentinel is UINT32_MAX (becomes
 * -1 after conversion) and the offset is a size_t product truncated to int.
 * Callers comparing against UINT32_MAX rely on the usual arithmetic
 * conversions making -1 == UINT32_MAX true; a uint32_t return type would be
 * cleaner — confirm against call sites before changing.
 */
static __rte_always_inline int
flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint16_t port;

	MLX5_ETH_FOREACH_DEV(port, NULL) {
		struct mlx5_priv *priv;
		struct mlx5_hca_flex_attr *attr;
		struct mlx5_devx_match_sample_info_query_attr *info;

		priv = rte_eth_devices[port].data->dev_private;
		attr = &priv->sh->cdev->config.hca_attr.flex;
		/* Match both the owning context and HCA query capability. */
		if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) {
			info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0];
			/* Dword index -> byte offset; UINT32_MAX if unmapped. */
			if (priv->sh->srh_flex_parser.flex.mapnum)
				return info->sample_dw_data * sizeof(uint32_t);
			else
				return UINT32_MAX;
		}
	}
#endif
	return UINT32_MAX;
}
3585 
3586 static __rte_always_inline uint8_t
3587 flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
3588 {
3589 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3590 	uint16_t port;
3591 	struct mlx5_priv *priv;
3592 
3593 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3594 		priv = rte_eth_devices[port].data->dev_private;
3595 		if (priv->dr_ctx == dr_ctx)
3596 			return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
3597 	}
3598 #else
3599 	RTE_SET_USED(dr_ctx);
3600 #endif
3601 	return 0;
3602 }
3603 
3604 static __rte_always_inline uint16_t
3605 flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
3606 {
3607 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3608 	uint16_t port;
3609 	struct mlx5_priv *priv;
3610 	struct mlx5_flex_parser_devx *fp;
3611 
3612 	if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
3613 		return 0;
3614 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3615 		priv = rte_eth_devices[port].data->dev_private;
3616 		if (priv->dr_ctx == dr_ctx) {
3617 			fp = priv->sh->srh_flex_parser.flex.devx_fp;
3618 			return fp->sample_info[idx].modify_field_id;
3619 		}
3620 	}
3621 #else
3622 	RTE_SET_USED(dr_ctx);
3623 	RTE_SET_USED(idx);
3624 #endif
3625 	return 0;
3626 }
3627 void
3628 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
3629 #ifdef HAVE_MLX5_HWS_SUPPORT
3630 
3631 #define MLX5_REPR_STC_MEMORY_LOG 11
3632 
3633 struct mlx5_mirror;
3634 void
3635 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
3636 void
3637 mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
3638 			     struct mlx5_indirect_list *ptr);
3639 void
3640 mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
3641 			    struct mlx5_indirect_list *reformat);
3642 int
3643 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3644 		    const struct rte_flow_attr *attr,
3645 		    const struct rte_flow_item items[],
3646 		    const struct rte_flow_action actions[],
3647 		    uint64_t item_flags, uint64_t action_flags, bool external,
3648 		    struct rte_flow_hw **flow, struct rte_flow_error *error);
3649 void
3650 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow);
3651 void
3652 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3653 		     uintptr_t flow_idx);
3654 const struct rte_flow_action_rss *
3655 flow_nta_locate_rss(struct rte_eth_dev *dev,
3656 		    const struct rte_flow_action actions[],
3657 		    struct rte_flow_error *error);
3658 struct rte_flow_hw *
3659 flow_nta_handle_rss(struct rte_eth_dev *dev,
3660 		    const struct rte_flow_attr *attr,
3661 		    const struct rte_flow_item items[],
3662 		    const struct rte_flow_action actions[],
3663 		    const struct rte_flow_action_rss *rss_conf,
3664 		    uint64_t item_flags, uint64_t action_flags,
3665 		    bool external, enum mlx5_flow_type flow_type,
3666 		    struct rte_flow_error *error);
3667 
3668 extern const struct rte_flow_action_raw_decap empty_decap;
3669 extern const struct rte_flow_item_ipv6 nic_ipv6_mask;
3670 extern const struct rte_flow_item_tcp nic_tcp_mask;
3671 
3672 /* mlx5_nta_split.c */
3673 int
3674 mlx5_flow_nta_split_metadata(struct rte_eth_dev *dev,
3675 			     const struct rte_flow_attr *attr,
3676 			     const struct rte_flow_action actions[],
3677 			     const struct rte_flow_action *qrss,
3678 			     uint64_t action_flags,
3679 			     int actions_n,
3680 			     bool external,
3681 			     struct mlx5_flow_hw_split_resource *res,
3682 			     struct rte_flow_error *error);
3683 void
3684 mlx5_flow_nta_split_resource_free(struct rte_eth_dev *dev,
3685 				  struct mlx5_flow_hw_split_resource *res);
3686 struct mlx5_list_entry *
3687 flow_nta_mreg_create_cb(void *tool_ctx, void *cb_ctx);
3688 void
3689 flow_nta_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3690 void
3691 mlx5_flow_nta_del_copy_action(struct rte_eth_dev *dev, uint32_t idx);
3692 void
3693 mlx5_flow_nta_del_default_copy_action(struct rte_eth_dev *dev);
3694 int
3695 mlx5_flow_nta_add_default_copy_action(struct rte_eth_dev *dev,
3696 				      struct rte_flow_error *error);
3697 int
3698 mlx5_flow_nta_update_copy_table(struct rte_eth_dev *dev,
3699 				uint32_t *idx,
3700 				const struct rte_flow_action *mark,
3701 				uint64_t action_flags,
3702 				struct rte_flow_error *error);
3703 
3704 #endif
3705 #endif /* RTE_PMD_MLX5_FLOW_H_ */
3706