xref: /dpdk/drivers/net/mlx5/mlx5_flow.h (revision 21a66096bb44a4468353782c36fc85913520dc6c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
7 
8 #include <stdalign.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 
13 #include <rte_alarm.h>
14 #include <rte_mtr.h>
15 
16 #include <mlx5_glue.h>
17 #include <mlx5_prm.h>
18 
19 #include "mlx5.h"
20 #include "rte_pmd_mlx5.h"
21 #include "hws/mlx5dr.h"
22 #include "mlx5_tx.h"
23 
24 /* E-Switch Manager port, used for rte_flow_item_port_id. */
25 #define MLX5_PORT_ESW_MGR UINT32_MAX
26 
27 /* E-Switch Manager port, used for rte_flow_item_ethdev. */
28 #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX
29 
/*
 * Private (PMD-internal) rte flow item types, kept negative (starting at
 * INT_MIN) to stay clear of the public RTE_FLOW_ITEM_TYPE_* values.
 */
enum mlx5_rte_flow_item_type {
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN, /* Internal end marker. */
	MLX5_RTE_FLOW_ITEM_TYPE_TAG, /* Match a metadata register, see mlx5_rte_flow_item_tag. */
	MLX5_RTE_FLOW_ITEM_TYPE_SQ, /* Match the source queue, see mlx5_rte_flow_item_sq. */
	MLX5_RTE_FLOW_ITEM_TYPE_VLAN, /* Internal VLAN item. */
	MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL, /* Tunnel-offload item. */
};
38 
/*
 * Private (PMD-internal) rte flow action types, kept negative (starting at
 * INT_MIN) to stay clear of the public RTE_FLOW_ACTION_TYPE_* values.
 */
enum mlx5_rte_flow_action_type {
	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN, /* Internal end marker. */
	MLX5_RTE_FLOW_ACTION_TYPE_TAG, /* Set a metadata register, see mlx5_rte_flow_action_set_tag. */
	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, /* Register-to-register copy, see mlx5_flow_action_copy_mreg. */
	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
	MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	MLX5_RTE_FLOW_ACTION_TYPE_AGE,
	MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
	MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
	MLX5_RTE_FLOW_ACTION_TYPE_RSS,
	MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
};
53 
/*
 * Private (internal) field IDs for the MODIFY_FIELD action, negative so
 * they cannot collide with the public rte_flow_field_id values.
 */
enum mlx5_rte_flow_field_id {
	MLX5_RTE_FLOW_FIELD_END = INT_MIN, /* Internal end marker. */
	MLX5_RTE_FLOW_FIELD_META_REG, /* Metadata register field. */
};
59 
/* Number of low bits reserved for the action index inside an indirect handle. */
#define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29

/* Extract the indirect action type (top 3 bits) from an action handle. */
#define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \
	(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)

/* Extract the indirect action index (low 29 bits) from an action handle. */
#define MLX5_INDIRECT_ACTION_IDX_GET(handle) \
	(((uint32_t)(uintptr_t)(handle)) & \
	 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
68 
/* Type values stored in the high bits of an indirect action handle. */
enum mlx5_indirect_type {
	MLX5_INDIRECT_ACTION_TYPE_RSS,
	MLX5_INDIRECT_ACTION_TYPE_AGE,
	MLX5_INDIRECT_ACTION_TYPE_COUNT,
	MLX5_INDIRECT_ACTION_TYPE_CT,
	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
};
77 
/* At most 16 ports are currently supported; each may own up to 32M CT actions. */
#define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10

#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25
#define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)

/*
 * When the SW steering flow engine is used, the CT action handles are
 * encoded in the following way:
 * - bits 31:29 - type
 * - bits 28:25 - port index of the action owner
 * - bits 24:0 - action index
 */
#define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
	((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
	 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
	  MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))

/* Decode the owner port index (bits 28:25) from a SW steering CT handle. */
#define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
	(((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
	 MLX5_INDIRECT_ACT_CT_OWNER_MASK)

/* Decode the action index (bits 24:0) from a SW steering CT handle. */
#define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
	((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))

/*
 * When the HW steering flow engine is used, the CT action handles are
 * encoded in the following way:
 * - bits 31:29 - type
 * - bits 28:0 - action index
 */
#define MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(index) \
	((struct rte_flow_action_handle *)(uintptr_t) \
	 ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (index)))
110 
/* Discriminator stored in every indirect action list object. */
enum mlx5_indirect_list_type {
	MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
	MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
	MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
	MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,
};
117 
/**
 * Base type for indirect list types. Concrete list objects presumably embed
 * this header first — the cast in mlx5_get_indirect_list_type() relies on it.
 */
struct mlx5_indirect_list {
	/* Indirect list type discriminator. */
	enum mlx5_indirect_list_type type;
	/* Optional storage list entry; le_prev stays NULL while unlinked. */
	LIST_ENTRY(mlx5_indirect_list) entry;
};
127 
128 static __rte_always_inline void
129 mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
130 {
131 	LIST_HEAD(, mlx5_indirect_list) *h = head;
132 
133 	LIST_INSERT_HEAD(h, elem, entry);
134 }
135 
136 static __rte_always_inline void
137 mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
138 {
139 	if (elem->entry.le_prev)
140 		LIST_REMOVE(elem, entry);
141 }
142 
143 static __rte_always_inline enum mlx5_indirect_list_type
144 mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
145 {
146 	return ((const struct mlx5_indirect_list *)obj)->type;
147 }
148 
/* Matches on a selected metadata register (MLX5_RTE_FLOW_ITEM_TYPE_TAG). */
struct mlx5_rte_flow_item_tag {
	enum modify_reg id; /* Register to match on. */
	uint32_t data; /* Value to match. */
};
154 
/* Modifies a selected metadata register (MLX5_RTE_FLOW_ACTION_TYPE_TAG). */
struct mlx5_rte_flow_action_set_tag {
	enum modify_reg id; /* Destination register. */
	uint8_t offset; /* Bit offset within the register. */
	uint8_t length; /* Number of bits to write. */
	uint32_t data; /* Value to set. */
};
162 
/* Register-to-register copy (MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG). */
struct mlx5_flow_action_copy_mreg {
	enum modify_reg dst; /* Destination register. */
	enum modify_reg src; /* Source register. */
};
167 
/* Matches on the source queue (MLX5_RTE_FLOW_ITEM_TYPE_SQ). */
struct mlx5_rte_flow_item_sq {
	uint32_t queue; /* DevX SQ number */
#ifdef RTE_ARCH_64
	uint32_t reserved; /* NOTE(review): looks like explicit 64-bit padding — confirm. */
#endif
};
175 
176 /* Map from registers to modify fields. */
177 extern enum mlx5_modification_field reg_to_field[];
178 extern const size_t mlx5_mod_reg_size;
179 
/*
 * Translate a metadata register enum into the corresponding PRM
 * modification field ID via the reg_to_field[] lookup table.
 */
static __rte_always_inline enum mlx5_modification_field
mlx5_convert_reg_to_field(enum modify_reg reg)
{
	/* An out-of-range register would read past the end of the table. */
	MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size);
	return reg_to_field[reg];
}
186 
/* Feature names used when allocating a metadata register for the feature. */
enum mlx5_feature_name {
	MLX5_HAIRPIN_RX,
	MLX5_HAIRPIN_TX,
	MLX5_METADATA_RX,
	MLX5_METADATA_TX,
	MLX5_METADATA_FDB,
	MLX5_FLOW_MARK,
	MLX5_APP_TAG,
	MLX5_COPY_MARK,
	MLX5_MTR_COLOR,
	MLX5_MTR_ID,
	MLX5_ASO_FLOW_HIT,
	MLX5_ASO_CONNTRACK,
	MLX5_SAMPLE_ID,
};
203 
204 /* Default queue number. */
205 #define MLX5_RSSQ_DEFAULT_NUM 16
206 
207 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
208 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
209 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
210 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
211 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
212 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
213 
214 /* Pattern inner Layer bits. */
215 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
216 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
217 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
218 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
219 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
220 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
221 
222 /* Pattern tunnel Layer bits. */
223 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
224 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
225 #define MLX5_FLOW_LAYER_GRE (1u << 14)
226 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
227 /* List of tunnel Layer bits continued below. */
228 
229 /* General pattern items bits. */
230 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
231 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
232 #define MLX5_FLOW_ITEM_TAG (1u << 18)
233 #define MLX5_FLOW_ITEM_MARK (1u << 19)
234 
235 /* Pattern MISC bits. */
236 #define MLX5_FLOW_LAYER_ICMP (1u << 20)
237 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
238 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
239 
240 /* Pattern tunnel Layer bits (continued). */
241 #define MLX5_FLOW_LAYER_IPIP (1u << 23)
242 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
243 #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
244 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
245 
246 /* Queue items. */
247 #define MLX5_FLOW_ITEM_SQ (1u << 27)
248 
249 /* Pattern tunnel Layer bits (continued). */
250 #define MLX5_FLOW_LAYER_GTP (1u << 28)
251 
252 /* Pattern eCPRI Layer bit. */
253 #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
254 
255 /* IPv6 Fragment Extension Header bit. */
256 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
257 #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
258 
259 /* Pattern tunnel Layer bits (continued). */
260 #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
261 #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
262 
263 /* INTEGRITY item bits */
264 #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
265 #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
266 #define MLX5_FLOW_ITEM_INTEGRITY \
267 	(MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
268 
269 /* Conntrack item. */
270 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
271 
272 /* Flex item */
273 #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
274 #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
275 #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
276 
277 #define MLX5_FLOW_ITEM_FLEX \
278 	(MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX | \
279 	MLX5_FLOW_ITEM_FLEX_TUNNEL)
280 
281 /* ESP item */
282 #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
283 
284 /* Port Representor/Represented Port item */
285 #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
286 #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
287 
/* Meter color item */
#define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
/*
 * Quota item. Moved to the unused bit 43: it previously shared bit 45 with
 * MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT, so any flow carrying a quota item
 * was indistinguishable from one carrying an outer IPv6 routing extension.
 */
#define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 43)

/* IPv6 routing extension item */
#define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
#define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
296 
297 /* Aggregated affinity item */
298 #define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49)
299 
300 /* IB BTH ITEM. */
301 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
302 
303 /* PTYPE ITEM */
304 #define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
305 
306 /* NSH ITEM */
307 #define MLX5_FLOW_ITEM_NSH (1ull << 53)
308 
309 /* COMPARE ITEM */
310 #define MLX5_FLOW_ITEM_COMPARE (1ull << 54)
311 
312 /* Random ITEM */
313 #define MLX5_FLOW_ITEM_RANDOM (1ull << 55)
314 
315 /* Outer Masks. */
316 #define MLX5_FLOW_LAYER_OUTER_L3 \
317 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
318 #define MLX5_FLOW_LAYER_OUTER_L4 \
319 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
320 #define MLX5_FLOW_LAYER_OUTER \
321 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
322 	 MLX5_FLOW_LAYER_OUTER_L4)
323 
324 /* Tunnel Masks. */
325 #define MLX5_FLOW_LAYER_TUNNEL \
326 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
327 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
328 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
329 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
330 	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
331 
332 /* Inner Masks. */
333 #define MLX5_FLOW_LAYER_INNER_L3 \
334 	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
335 #define MLX5_FLOW_LAYER_INNER_L4 \
336 	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
337 #define MLX5_FLOW_LAYER_INNER \
338 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
339 	 MLX5_FLOW_LAYER_INNER_L4)
340 
341 /* Layer Masks. */
342 #define MLX5_FLOW_LAYER_L2 \
343 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
344 #define MLX5_FLOW_LAYER_L3_IPV4 \
345 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
346 #define MLX5_FLOW_LAYER_L3_IPV6 \
347 	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
348 #define MLX5_FLOW_LAYER_L3 \
349 	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
350 #define MLX5_FLOW_LAYER_L4 \
351 	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
352 
353 /* Actions */
354 #define MLX5_FLOW_ACTION_DROP (1ull << 0)
355 #define MLX5_FLOW_ACTION_QUEUE (1ull << 1)
356 #define MLX5_FLOW_ACTION_RSS (1ull << 2)
357 #define MLX5_FLOW_ACTION_FLAG (1ull << 3)
358 #define MLX5_FLOW_ACTION_MARK (1ull << 4)
359 #define MLX5_FLOW_ACTION_COUNT (1ull << 5)
360 #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6)
361 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7)
362 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8)
363 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9)
364 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10)
365 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11)
366 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12)
367 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13)
368 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14)
369 #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15)
370 #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16)
371 #define MLX5_FLOW_ACTION_JUMP (1ull << 17)
372 #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18)
373 #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19)
374 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20)
375 #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21)
376 #define MLX5_FLOW_ACTION_ENCAP (1ull << 22)
377 #define MLX5_FLOW_ACTION_DECAP (1ull << 23)
378 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24)
379 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25)
380 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26)
381 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27)
382 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
383 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
384 #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
385 #define MLX5_FLOW_ACTION_METER (1ull << 31)
386 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
387 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
388 #define MLX5_FLOW_ACTION_AGE (1ull << 34)
389 #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
390 #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
391 #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
392 #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
393 #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
394 #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
395 #define MLX5_FLOW_ACTION_CT (1ull << 41)
396 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
397 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
398 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
399 #define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
400 #define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47)
401 #define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48)
402 #define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49)
403 #define MLX5_FLOW_ACTION_NAT64 (1ull << 50)
404 #define MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX (1ull << 51)
405 
406 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
407 	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
408 
409 #define MLX5_FLOW_FATE_ACTIONS \
410 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
411 	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
412 	 MLX5_FLOW_ACTION_DEFAULT_MISS | \
413 	 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
414 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
415 	 MLX5_FLOW_ACTION_PORT_REPRESENTOR | \
416 	 MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX)
417 
418 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
419 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
420 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
421 	 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
422 	 MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX)
423 
424 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
425 				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
426 				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
427 				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
428 				      MLX5_FLOW_ACTION_SET_TP_SRC | \
429 				      MLX5_FLOW_ACTION_SET_TP_DST | \
430 				      MLX5_FLOW_ACTION_SET_TTL | \
431 				      MLX5_FLOW_ACTION_DEC_TTL | \
432 				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
433 				      MLX5_FLOW_ACTION_SET_MAC_DST | \
434 				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
435 				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
436 				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
437 				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
438 				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
439 				      MLX5_FLOW_ACTION_SET_TAG | \
440 				      MLX5_FLOW_ACTION_MARK_EXT | \
441 				      MLX5_FLOW_ACTION_SET_META | \
442 				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
443 				      MLX5_FLOW_ACTION_SET_IPV6_DSCP | \
444 				      MLX5_FLOW_ACTION_MODIFY_FIELD)
445 
446 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
447 				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
448 
449 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
450 
451 #ifndef IPPROTO_MPLS
452 #define IPPROTO_MPLS 137
453 #endif
454 
455 #define MLX5_IPV6_HDR_ECN_MASK 0x3
456 #define MLX5_IPV6_HDR_DSCP_SHIFT 2
457 
458 /* UDP port number for MPLS */
459 #define MLX5_UDP_PORT_MPLS 6635
460 
461 /* UDP port numbers for VxLAN. */
462 #define MLX5_UDP_PORT_VXLAN 4789
463 #define MLX5_UDP_PORT_VXLAN_GPE 4790
464 
465 /* UDP port numbers for RoCEv2. */
466 #define MLX5_UDP_PORT_ROCEv2 4791
467 
468 /* UDP port numbers for GENEVE. */
469 #define MLX5_UDP_PORT_GENEVE 6081
470 
471 /* Lowest priority indicator. */
472 #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
473 
/*
 * Max priority for ingress/egress flow groups
 * greater than 0 and for any transfer flow group.
 * From user configuration: 0 - 21843.
 */
479 #define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)
480 
/*
 * Number of sub priorities.
 * For each kind of pattern matching, i.e. L2, L3, L4, to get correct
 * matching on the NIC (firmware dependent), L4 must have the highest
 * priority, followed by L3 and ending with L2.
 */
487 #define MLX5_PRIORITY_MAP_L2 2
488 #define MLX5_PRIORITY_MAP_L3 1
489 #define MLX5_PRIORITY_MAP_L4 0
490 #define MLX5_PRIORITY_MAP_MAX 3
491 
492 /* Valid layer type for IPV4 RSS. */
493 #define MLX5_IPV4_LAYER_TYPES \
494 	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
495 	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
496 	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
497 
498 /* Valid L4 RSS types */
499 #define MLX5_L4_RSS_TYPES (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
500 
501 /* IBV hash source bits  for IPV4. */
502 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
503 
504 /* Valid layer type for IPV6 RSS. */
505 #define MLX5_IPV6_LAYER_TYPES \
506 	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
507 	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
508 	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
509 
510 /* IBV hash source bits  for IPV6. */
511 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
512 
513 /* IBV hash bits for L3 SRC. */
514 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
515 
516 /* IBV hash bits for L3 DST. */
517 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
518 
519 /* IBV hash bits for TCP. */
520 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
521 			      IBV_RX_HASH_DST_PORT_TCP)
522 
523 /* IBV hash bits for UDP. */
524 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
525 			      IBV_RX_HASH_DST_PORT_UDP)
526 
527 /* IBV hash bits for L4 SRC. */
528 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
529 				 IBV_RX_HASH_SRC_PORT_UDP)
530 
531 /* IBV hash bits for L4 DST. */
532 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
533 				 IBV_RX_HASH_DST_PORT_UDP)
534 
535 /* Geneve header first 16Bit */
536 #define MLX5_GENEVE_VER_MASK 0x3
537 #define MLX5_GENEVE_VER_SHIFT 14
538 #define MLX5_GENEVE_VER_VAL(a) \
539 		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
540 #define MLX5_GENEVE_OPTLEN_MASK 0x3F
541 #define MLX5_GENEVE_OPTLEN_SHIFT 8
542 #define MLX5_GENEVE_OPTLEN_VAL(a) \
543 	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
544 #define MLX5_GENEVE_OAMF_MASK 0x1
545 #define MLX5_GENEVE_OAMF_SHIFT 7
546 #define MLX5_GENEVE_OAMF_VAL(a) \
547 		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
548 #define MLX5_GENEVE_CRITO_MASK 0x1
549 #define MLX5_GENEVE_CRITO_SHIFT 6
550 #define MLX5_GENEVE_CRITO_VAL(a) \
551 		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
552 #define MLX5_GENEVE_RSVD_MASK 0x3F
553 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
/*
 * The length of the Geneve options fields, expressed in four byte multiples,
 * not including the eight-byte fixed tunnel header.
 */
558 #define MLX5_GENEVE_OPT_LEN_0 14
559 #define MLX5_GENEVE_OPT_LEN_1 63
560 
561 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
562 					  sizeof(struct rte_ipv4_hdr))
563 /* GTP extension header flag. */
564 #define MLX5_GTP_EXT_HEADER_FLAG 4
565 
566 /* GTP extension header PDU type shift. */
567 #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)
568 
569 /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
570 #define MLX5_IPV4_FRAG_OFFSET_MASK \
571 		(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)
572 
573 /* Specific item's fields can accept a range of values (using spec and last). */
574 #define MLX5_ITEM_RANGE_NOT_ACCEPTED	false
575 #define MLX5_ITEM_RANGE_ACCEPTED	true
576 
577 /* Software header modify action numbers of a flow. */
578 #define MLX5_ACT_NUM_MDF_IPV4		1
579 #define MLX5_ACT_NUM_MDF_IPV6		4
580 #define MLX5_ACT_NUM_MDF_MAC		2
581 #define MLX5_ACT_NUM_MDF_VID		1
582 #define MLX5_ACT_NUM_MDF_PORT		1
583 #define MLX5_ACT_NUM_MDF_TTL		1
584 #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
585 #define MLX5_ACT_NUM_MDF_TCPSEQ		1
586 #define MLX5_ACT_NUM_MDF_TCPACK		1
587 #define MLX5_ACT_NUM_SET_REG		1
588 #define MLX5_ACT_NUM_SET_TAG		1
589 #define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
590 #define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
591 #define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
592 #define MLX5_ACT_NUM_SET_DSCP		1
593 
594 /* Maximum number of fields to modify in MODIFY_FIELD */
595 #define MLX5_ACT_MAX_MOD_FIELDS 5
596 
597 /* Syndrome bits definition for connection tracking. */
598 #define MLX5_CT_SYNDROME_VALID		(0x0 << 6)
599 #define MLX5_CT_SYNDROME_INVALID	(0x1 << 6)
600 #define MLX5_CT_SYNDROME_TRAP		(0x2 << 6)
601 #define MLX5_CT_SYNDROME_STATE_CHANGE	(0x1 << 1)
602 #define MLX5_CT_SYNDROME_BAD_PACKET	(0x1 << 0)
603 
/* Flow driver (steering engine) types; MIN/MAX bound the valid range. */
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV, /* Direct Verbs engine. */
	MLX5_FLOW_TYPE_VERBS, /* Legacy Verbs engine. */
	MLX5_FLOW_TYPE_HW, /* Hardware steering engine. */
	MLX5_FLOW_TYPE_MAX,
};
611 
/* Fate action type, stored in mlx5_flow_handle.fate_action (4-bit field). */
enum mlx5_flow_fate_type {
	MLX5_FLOW_FATE_NONE, /* Egress flow. */
	MLX5_FLOW_FATE_QUEUE,
	MLX5_FLOW_FATE_JUMP,
	MLX5_FLOW_FATE_PORT_ID,
	MLX5_FLOW_FATE_DROP,
	MLX5_FLOW_FATE_DEFAULT_MISS,
	MLX5_FLOW_FATE_SHARED_RSS,
	MLX5_FLOW_FATE_MTR,
	MLX5_FLOW_FATE_SEND_TO_KERNEL,
	MLX5_FLOW_FATE_MAX, /* Must stay last and fit in the 4-bit field. */
};
625 
/* Matcher PRM representation: raw fte_match_param buffer plus its valid size. */
struct mlx5_flow_dv_match_params {
	size_t size;
	/**< Size of match value. Do NOT split size and key! */
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
	/**< Matcher value. This value is used as the mask or as a key. */
};
633 
/* Matcher structure, cached per table via the embedded list entry. */
struct mlx5_flow_dv_matcher {
	struct mlx5_list_entry entry; /**< Pointer to the next element. */
	union {
		struct mlx5_flow_tbl_resource *tbl;
		/**< Pointer to the table(group) the matcher associated with for DV flow. */
		struct mlx5_flow_group *group;
		/* Group of this matcher for HWS non template flow. */
	};
	void *matcher_object; /**< Pointer to DV matcher */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};
648 
649 #define MLX5_PUSH_MAX_LEN 128
650 #define MLX5_ENCAP_MAX_LEN 132
651 
/* Encap/decap resource structure, reference counted and cached. */
struct mlx5_flow_dv_encap_decap_resource {
	struct mlx5_list_entry entry;
	/* Pointer to next element. */
	uint32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Encap/decap action object. */
	uint8_t buf[MLX5_ENCAP_MAX_LEN]; /**< Raw reformat header data. */
	size_t size; /**< Valid bytes in buf. */
	uint8_t reformat_type;
	uint8_t ft_type;
	uint64_t flags; /**< Flags for RDMA API. */
	uint32_t idx; /**< Index for the index memory pool. */
};
666 
/* Tag resource structure, reference counted and cached by tag value. */
struct mlx5_flow_dv_tag_resource {
	struct mlx5_list_entry entry;
	/**< hash list entry for tag resource, tag value as the key. */
	void *action;
	/**< Tag action object. */
	uint32_t refcnt; /**< Reference counter. */
	uint32_t idx; /**< Index for the index memory pool. */
	uint32_t tag_id; /**< Tag ID. */
};
677 
/*
 * Modify-header resource. The fields from ft_type onward form the hash key,
 * followed by a flexible array of modification commands.
 */
struct mlx5_flow_dv_modify_hdr_resource {
	struct mlx5_list_entry entry;
	void *action; /**< Modify header action object. */
	uint32_t idx;
	uint64_t flags; /**< Flags for RDMA API(HWS only). */
	/* Key area for hash list matching: */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint8_t actions_num; /**< Number of modification actions. */
	bool root; /**< Whether action is in root table. */
	struct mlx5_modification_cmd actions[];
	/**< Modification actions. */
} __rte_packed;
691 
/* Modify resource key of the hash organization (bit-fields overlay a u64). */
union mlx5_flow_modify_hdr_key {
	struct {
		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
		uint32_t actions_num:5;	/**< Number of modification actions. */
		uint32_t group:19;	/**< Flow group id. */
		uint32_t cksum;		/**< Actions check sum. */
	};
	uint64_t v64;			/**< full 64bits value of key */
};
702 
/* Jump action resource structure. */
struct mlx5_flow_dv_jump_tbl_resource {
	void *action; /**< Pointer to the rdma core action. */
};
707 
/* Port ID action resource structure, cached per port ID. */
struct mlx5_flow_dv_port_id_action_resource {
	struct mlx5_list_entry entry;
	void *action; /**< Action object. */
	uint32_t port_id; /**< Port ID value. */
	uint32_t idx; /**< Indexed pool memory index. */
};
715 
/* Push VLAN action resource structure, cached per (table type, tag). */
struct mlx5_flow_dv_push_vlan_action_resource {
	struct mlx5_list_entry entry; /* Cache entry. */
	void *action; /**< Action object. */
	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
	rte_be32_t vlan_tag; /**< VLAN tag value (big endian). */
	uint32_t idx; /**< Indexed pool memory index. */
};
724 
/* Metadata register copy table entry. */
struct mlx5_flow_mreg_copy_resource {
	/*
	 * Hash list entry for copy table.
	 *  - Key is 32/64-bit MARK action ID.
	 *  - MUST be the first entry.
	 */
	struct mlx5_list_entry hlist_ent;
	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
	/* List entry for device flows. */
	uint32_t idx; /* Indexed pool memory index. */
	uint32_t mark_id; /* MARK action ID this entry copies for. */
	union {
		uint32_t rix_flow; /* Built flow for copy. */
		uintptr_t hw_flow; /* HW steering flow handle alternative. */
	};
};
742 
/* Table tunnel parameters, used when creating a flow table. */
struct mlx5_flow_tbl_tunnel_prm {
	const struct mlx5_flow_tunnel *tunnel; /* Associated tunnel, if any. */
	uint32_t group_id; /* Flow group ID. */
	bool external; /* Table created on behalf of an application. */
};
749 
/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
	struct mlx5_list_entry entry;
	/**< hash list entry, 64-bits key inside. */
	struct mlx5_flow_tbl_resource tbl;
	/**< flow table resource. */
	struct mlx5_list *matchers;
	/**< matchers' header associated with the flow table. */
	struct mlx5_flow_dv_jump_tbl_resource jump;
	/**< jump resource, at most one for each table created. */
	uint32_t idx; /**< index for the indexed mempool. */
	/* Tunnel offload attributes: */
	const struct mlx5_flow_tunnel *tunnel;
	uint32_t group_id;
	uint32_t external:1;
	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
	uint32_t is_egress:1; /**< Egress table. */
	uint32_t is_transfer:1; /**< Transfer table. */
	uint32_t dummy:1; /**< DR table. */
	uint32_t id:22; /**< Table ID. */
	uint32_t reserve:5; /**< Reserved for future use. */
	uint32_t level; /**< Table level. */
};
773 
/* Sub rdma-core actions list, used by sample/mirror destinations. */
struct mlx5_flow_sub_actions_list {
	uint32_t actions_num; /**< Number of sample actions. */
	uint64_t action_flags; /* MLX5_FLOW_ACTION_* bits of the sub-actions. */
	void *dr_queue_action;
	void *dr_tag_action;
	void *dr_cnt_action;
	void *dr_port_id_action;
	void *dr_encap_action;
	void *dr_jump_action;
};
785 
/* Sample sub-actions resource list (indexed-pool indices, 0 = unused). */
struct mlx5_flow_sub_actions_idx {
	uint32_t rix_hrxq; /**< Hash Rx queue object index. */
	uint32_t rix_tag; /**< Index to the tag action. */
	uint32_t rix_port_id_action; /**< Index to port ID action resource. */
	uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
	uint32_t rix_jump; /**< Index to the jump action resource. */
};
794 
/* Sample action resource structure, cached per configuration. */
struct mlx5_flow_dv_sample_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	union {
		void *verbs_action; /**< Verbs sample action object. */
		void **sub_actions; /**< Sample sub-action array. */
	};
	struct rte_eth_dev *dev; /**< Device registers the action. */
	uint32_t idx; /**< Sample object index. */
	uint8_t ft_type; /**< Flow table type. */
	uint32_t ft_id; /**< Flow table level. */
	uint32_t ratio;   /**< Sample ratio. */
	uint64_t set_action; /**< Restore reg_c0 value. */
	void *normal_path_tbl; /**< Flow table pointer. */
	struct mlx5_flow_sub_actions_idx sample_idx;
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act;
	/**< Action resources. */
};
814 
815 #define MLX5_MAX_DEST_NUM	2
816 
/* Destination array action resource structure (up to MLX5_MAX_DEST_NUM). */
struct mlx5_flow_dv_dest_array_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	uint32_t idx; /**< Destination array action object index. */
	uint8_t ft_type; /**< Flow table type. */
	uint8_t num_of_dest; /**< Number of destination actions. */
	struct rte_eth_dev *dev; /**< Device registers the action. */
	void *action; /**< Pointer to the rdma core action. */
	struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
	/**< Action resources. */
};
830 
831 /* PMD flow priority for tunnel */
832 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
833 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
834 
835 
/** Device flow handle structure for DV mode only; packed to keep handles small. */
struct mlx5_flow_handle_dv {
	/* Flow DV api: */
	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/**< Pointer to modify header resource in cache. */
	uint32_t rix_encap_decap;
	/**< Index to encap/decap resource in cache. */
	uint32_t rix_push_vlan;
	/**< Index to push VLAN action resource in cache. */
	uint32_t rix_tag;
	/**< Index to the tag action. */
	uint32_t rix_sample;
	/**< Index to sample action resource in cache. */
	uint32_t rix_dest_array;
	/**< Index to destination array resource in cache. */
} __rte_packed;
853 
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
	SILIST_ENTRY(uint32_t)next;
	/**< Index to next device flow handle. */
	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	void *drv_flow; /**< pointer to driver flow object. */
	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
	uint32_t fate_action:4; /**< Fate action type, see mlx5_flow_fate_type. */
	union {
		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
		uint32_t rix_jump; /**< Index to the jump action resource. */
		uint32_t rix_port_id_action;
		/**< Index to port ID action resource. */
		uint32_t rix_fate;
		/**< Generic value indicates the fate action. */
		uint32_t rix_default_fate;
		/**< Indicates default miss fate action. */
		uint32_t rix_srss;
		/**< Indicates shared RSS fate action. */
	};
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	struct mlx5_flow_handle_dv dvh;
#endif
	uint8_t flex_item; /**< referenced Flex Item bitmask. */
} __rte_packed;
882 
883 /*
884  * Size for Verbs device flow handle structure only. Do not use the DV only
885  * structure in Verbs. No DV flows attributes will be accessed.
886  * Macro offsetof() could also be used here.
887  */
888 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
889 #define MLX5_FLOW_HANDLE_VERBS_SIZE \
890 	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
891 #else
892 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
893 #endif
894 
/** Device flow structure only for DV flow creation. */
struct mlx5_flow_dv_workspace {
	uint32_t group; /**< The group index. */
	uint32_t table_id; /**< Flow table identifier. */
	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
	int actions_n; /**< Number of entries used in actions[]. */
	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
	/**< Pointer to push VLAN action resource in cache. */
	struct mlx5_flow_dv_tag_resource *tag_resource;
	/**< Pointer to the tag action. */
	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
	/**< Pointer to port ID action resource. */
	struct mlx5_flow_dv_jump_tbl_resource *jump;
	/**< Pointer to the jump action resource. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_sample_resource *sample_res;
	/**< Pointer to the sample action resource. */
	struct mlx5_flow_dv_dest_array_resource *dest_array_res;
	/**< Pointer to the destination array resource. */
};
919 
#ifdef HAVE_INFINIBAND_VERBS_H
/*
 * Maximal Verbs flow specifications & actions size.
 * Some elements are mutually exclusive, but enough space should be allocated.
 * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
 *               2. One tunnel header (exception: GRE + MPLS),
 *                  SPEC length: GRE == tunnel.
 * Actions: 1. 1 Mark OR Flag.
 *          2. 1 Drop (if any).
 *          3. No limitation for counters, but it makes no sense to support too
 *             many counters in a single device flow.
 */
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_gre) + \
			sizeof(struct ibv_flow_spec_mpls)) \
		)
#else
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_tunnel)) \
		)
#endif

#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) + \
			sizeof(struct ibv_flow_spec_counter_action) * 4 \
		)
#else
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) \
		)
#endif

/* Total buffer size reserved for both specifications and actions. */
#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)

/** Device flow structure only for Verbs flow creation. */
struct mlx5_flow_verbs_workspace {
	unsigned int size; /**< Size of the attribute. */
	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
	/**< Specifications & actions buffer of verbs flow. */
};
#endif /* HAVE_INFINIBAND_VERBS_H */
978 
/* Bit positions for the mlx5_flow.skip_scale bit-field (see struct mlx5_flow). */
#define MLX5_SCALE_FLOW_GROUP_BIT 0
#define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1

/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 64

/**
 * Tunnel offload rule types.
 */
enum mlx5_tof_rule_type {
	MLX5_TUNNEL_OFFLOAD_NONE = 0,
	MLX5_TUNNEL_OFFLOAD_SET_RULE,
	MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
	MLX5_TUNNEL_OFFLOAD_MISS_RULE,
};
994 
/** Device flow structure. */
__extension__
struct mlx5_flow {
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint32_t flow_idx; /**< The memory pool index to the main flow. */
	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
	uint64_t act_flags;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	bool external; /**< true if the flow is created external to PMD. */
	uint8_t ingress:1; /**< 1 if the flow is ingress. */
	uint8_t skip_scale:2;
	/**
	 * Each Bit be set to 1 if Skip the scale the flow group with factor.
	 * If bit0 be set to 1, then skip the scale the original flow group;
	 * If bit1 be set to 1, then skip the scale the jump flow group if
	 * having jump action.
	 * 00: Enable scale in a flow, default value.
	 * 01: Skip scale the flow group with factor, enable scale the group
	 * of jump action.
	 * 10: Enable scale the group with factor, skip scale the group of
	 * jump action.
	 * 11: Skip scale the table with factor both for flow group and jump
	 * group.
	 */
	uint8_t symmetric_hash_function:1; /**< 1 if symmetric RSS hash. */
	union {
		/* Per-driver workspace; only one member is used per build. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
		struct mlx5_flow_dv_workspace dv;
#endif
#ifdef HAVE_INFINIBAND_VERBS_H
		struct mlx5_flow_verbs_workspace verbs;
#endif
	};
	struct mlx5_flow_handle *handle; /**< Device flow handle. */
	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
	const struct mlx5_flow_tunnel *tunnel; /**< Tunnel used, if any. */
	enum mlx5_tof_rule_type tof_type; /**< Tunnel offload rule type. */
};
1033 
/* Flow meter state. */
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1

/* ASO WQE/CQE polling: response delay and maximum poll iterations. */
#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u

/* Connection tracking reuses the meter CQE polling budget. */
#define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES

/* NOTE(review): presumably the mantissa width of the meter rate encoding — confirm. */
#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
	struct mlx5_flow_meter_info fm;
	/* Must be the first in struct. */
	TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t idx;
	/* Index to meter object. */
};
1053 
#define MLX5_MAX_TUNNELS 256
#define MLX5_TNL_MISS_RULE_PRIORITY 3
#define MLX5_TNL_MISS_FDB_JUMP_GRP  0x1234faac

/*
 * When tunnel offload is active, all JUMP group ids are converted
 * using the same method. That conversion is applied both to tunnel and
 * regular rule types.
 * Group ids used in tunnel rules are relative to its tunnel (!).
 * Application can create a number of steer rules, using the same
 * tunnel, with different group id in each rule.
 * Each tunnel stores its groups internally in PMD tunnel object.
 * Groups used in regular rules do not belong to any tunnel and are stored
 * in tunnel hub.
 */

struct mlx5_flow_tunnel {
	LIST_ENTRY(mlx5_flow_tunnel) chain;
	struct rte_flow_tunnel app_tunnel;	/** app tunnel copy */
	uint32_t tunnel_id;			/** unique tunnel ID */
	RTE_ATOMIC(uint32_t) refctn;		/** reference counter */
	struct rte_flow_action action;
	struct rte_flow_item item;
	struct mlx5_hlist *groups;		/** tunnel groups */
};

/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
	/* Tunnels list
	 * Access to the list MUST be MT protected
	 */
	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
	 /* protect access to the tunnels list */
	rte_spinlock_t sl;
	struct mlx5_hlist *groups;		/** non tunnel groups */
};

/* convert jump group to flow table ID in tunnel rules */
struct tunnel_tbl_entry {
	struct mlx5_list_entry hash;
	uint32_t flow_table;
	uint32_t tunnel_id;
	uint32_t group;
};
1098 
/* Mark a tunnel id as a flow table id by setting bit 16. */
static inline uint32_t
tunnel_id_to_flow_tbl(uint32_t id)
{
	const uint32_t tunnel_mark = UINT32_C(1) << 16;

	return id | tunnel_mark;
}
1104 
/* Strip the tunnel table marker (bit 16) to recover the tunnel id. */
static inline uint32_t
tunnel_flow_tbl_to_id(uint32_t flow_tbl)
{
	const uint32_t tunnel_mark = UINT32_C(1) << 16;

	return flow_tbl & ~tunnel_mark;
}
1110 
/* 64-bit hash key combining a tunnel id with a group id. */
union tunnel_tbl_key {
	uint64_t val; /* Whole key, used for hashing/comparison. */
	struct {
		uint32_t tunnel_id;
		uint32_t group;
	};
};
1118 
1119 static inline struct mlx5_flow_tunnel_hub *
1120 mlx5_tunnel_hub(struct rte_eth_dev *dev)
1121 {
1122 	struct mlx5_priv *priv = dev->data->dev_private;
1123 	return priv->sh->tunnel_hub;
1124 }
1125 
/* Tunnel offload is active only when DV support is compiled in and dv_miss_info is set. */
static inline bool
is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	const struct mlx5_priv *priv = dev->data->dev_private;

	return priv->sh->config.dv_miss_info != 0;
#else
	RTE_SET_USED(dev);
	return false;
#endif
}
1137 
1138 static inline bool
1139 is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
1140 {
1141 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
1142 }
1143 
1144 static inline bool
1145 is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
1146 {
1147 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
1148 }
1149 
1150 static inline const struct mlx5_flow_tunnel *
1151 flow_actions_to_tunnel(const struct rte_flow_action actions[])
1152 {
1153 	return actions[0].conf;
1154 }
1155 
1156 static inline const struct mlx5_flow_tunnel *
1157 flow_items_to_tunnel(const struct rte_flow_item items[])
1158 {
1159 	return items[0].spec;
1160 }
1161 
1162 /**
1163  * Gets the tag array given for RTE_FLOW_FIELD_TAG type.
1164  *
1165  * In old API the value was provided in "level" field, but in new API
1166  * it is provided in "tag_array" field. Since encapsulation level is not
1167  * relevant for metadata, the tag array can be still provided in "level"
1168  * for backwards compatibility.
1169  *
1170  * @param[in] data
1171  *   Pointer to tag modify data structure.
1172  *
1173  * @return
1174  *   Tag array index.
1175  */
1176 static inline uint8_t
1177 flow_tag_index_get(const struct rte_flow_field_data *data)
1178 {
1179 	return data->tag_index ? data->tag_index : data->level;
1180 }
1181 
/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * Bytes are accumulated MSB-first, i.e. the field is read as a
 * big-endian (network order) value of the given width.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract, must be in [1, 4].
 *
 * @return
 *   Converted field in host endian format, 0 on invalid size.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t val = 0;
	uint32_t i;

	MLX5_ASSERT(size >= 1 && size <= 4);
	if (size < 1 || size > 4)
		return 0;
	for (i = 0; i < size; ++i)
		val = (val << 8) | data[i];
	return val;
}
1220 
1221 static inline bool
1222 flow_modify_field_support_tag_array(enum rte_flow_field_id field)
1223 {
1224 	switch ((int)field) {
1225 	case RTE_FLOW_FIELD_TAG:
1226 	case RTE_FLOW_FIELD_MPLS:
1227 	case MLX5_RTE_FLOW_FIELD_META_REG:
1228 		return true;
1229 	default:
1230 		break;
1231 	}
1232 	return false;
1233 }
1234 
/* Describes one sub-field of a modify-header operation. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* PRM modification field id. */
	uint32_t shift; /* Bit shift within the field. */
	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
};

/* HW steering flow attributes. */
struct mlx5_flow_attr {
	uint32_t port_id; /* Port index. */
	uint32_t group; /* Flow group. */
	uint32_t priority; /* Original Priority. */
	/* rss level, used by priority adjustment. */
	uint32_t rss_level;
	/* Action flags, used by priority adjustment. */
	uint32_t act_flags;
	uint32_t tbl_type; /* Flow table type. */
};
1254 
/* Flow structure. */
struct rte_flow {
	uint32_t dev_handles;
	/**< Device flow handles that are part of the flow. */
	uint32_t type:2; /**< Flow type. */
	uint32_t drv_type:2; /**< Driver type. */
	uint32_t tunnel:1; /**< 1 if the flow references a tunnel (see tunnel_id). */
	uint32_t meter:24; /**< Holds flow meter id. */
	uint32_t indirect_type:2; /**< Indirect action type. */
	uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
	uint32_t rix_mreg_copy;
	/**< Index to metadata register copy table resource. */
	uint32_t counter; /**< Holds flow counter. */
	uint32_t tunnel_id;  /**< Tunnel id */
	union {
		uint32_t age; /**< Holds ASO age bit index. */
		uint32_t ct; /**< Holds ASO CT index. */
	};
	uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;

/*
 * HWS COUNTER ID's layout
 *       3                   2                   1                   0
 *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |  T  |     | D |                                               |
 *    ~  Y  |     | C |                    IDX                        ~
 *    |  P  |     | S |                                               |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 *    Bit 28:26 = unused
 *    Bit 25:24 = DCS index
 *    Bit 23:00 = IDX in this counter belonged DCS bulk.
 */
typedef uint32_t cnt_id_t; /* HWS counter id, layout described above. */
1291 
1292 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1293 
/* Asynchronous flow operation types, tracked per HWS flow. */
enum {
	MLX5_FLOW_HW_FLOW_OP_TYPE_NONE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE,
};

/* Per-flow flags telling which optional flow fields are in use. */
enum {
	MLX5_FLOW_HW_FLOW_FLAG_CNT_ID = RTE_BIT32(0),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP = RTE_BIT32(1),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ = RTE_BIT32(2),
	MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX = RTE_BIT32(3),
	MLX5_FLOW_HW_FLOW_FLAG_MTR_ID = RTE_BIT32(4),
	MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR = RTE_BIT32(5),
	MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW = RTE_BIT32(6),
};

/* Mask covering every flag defined above. */
#define MLX5_FLOW_HW_FLOW_FLAGS_ALL ( \
		MLX5_FLOW_HW_FLOW_FLAG_CNT_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ | \
		MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX | \
		MLX5_FLOW_HW_FLOW_FLAG_MTR_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR | \
		MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW \
	)
1323 
1324 #ifdef PEDANTIC
1325 #pragma GCC diagnostic ignored "-Wpedantic"
1326 #endif
1327 
/* Size in bytes of the trailing mlx5dr rule data in struct rte_flow_hw. */
#define MLX5_DR_RULE_SIZE 72

/* Singly-linked list head of HWS flows, presumably for chaining NTA RSS sub-flows. */
SLIST_HEAD(mlx5_nta_rss_flow_head, rte_flow_hw);
1331 
/** HWS non template flow data. */
struct rte_flow_nt2hws {
	/** BWC rule pointer. */
	struct mlx5dr_bwc_rule *nt_rule;
	/** The matcher for non template api. */
	struct mlx5_flow_dv_matcher *matcher;
	/** Auxiliary data stored per flow. */
	struct rte_flow_hw_aux *flow_aux;
	/** Modify header pointer. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/** Chain NTA flows. */
	SLIST_ENTRY(rte_flow_hw) next;
	/** Encap/decap index. */
	uint32_t rix_encap_decap;
	/** Index to metadata register copy table resource. */
	uint32_t rix_mreg_copy;
	/** NOTE(review): presumably flags a chained ("chaned") NTA flow — confirm. */
	uint8_t chaned_flow;
};
1349 
/** HWS flow struct. */
struct rte_flow_hw {
	union {
		/** The table flow allocated from. */
		struct rte_flow_template_table *table;
		/** Data needed for non template flows. */
		struct rte_flow_nt2hws *nt2hws;
	};
	/** Application's private data passed to enqueued flow operation. */
	void *user_data;
	union {
		/** Jump action. */
		struct mlx5_hw_jump_action *jump;
		/** TIR action. */
		struct mlx5_hrxq *hrxq;
	};
	/** Flow index from indexed pool. */
	uint32_t idx;
	/** Resource index from indexed pool. */
	uint32_t res_idx;
	/** HWS flow rule index passed to mlx5dr. */
	uint32_t rule_idx;
	/** Which flow fields (inline or in auxiliary struct) are used. */
	uint32_t flags;
	/** COUNT action index. */
	cnt_id_t cnt_id;
	/** Ongoing flow operation type. */
	uint8_t operation_type;
	/** Index of pattern template this flow is based on. */
	uint8_t mt_idx;
	/** Equals true if it is non template rule. */
	bool nt_rule;
	/**
	 * Padding for alignment to 56 bytes.
	 * Since mlx5dr rule is 72 bytes, whole flow is contained within 128 B (2 cache lines).
	 * This space is reserved for future additions to flow struct.
	 */
	uint8_t padding[9];
	/** HWS layer data struct. */
	uint8_t rule[];
};
1391 
/** Auxiliary data fields that are updatable. */
struct rte_flow_hw_aux_fields {
	/** AGE action index. */
	uint32_t age_idx;
	/** Direct meter (METER or METER_MARK) action index. */
	uint32_t mtr_id;
};

/** Auxiliary data stored per flow which is not required to be stored in main flow structure. */
struct rte_flow_hw_aux {
	/** Auxiliary fields associated with the original flow. */
	struct rte_flow_hw_aux_fields orig;
	/** Auxiliary fields associated with the updated flow. */
	struct rte_flow_hw_aux_fields upd;
	/** Index of resizable matcher associated with this flow. */
	uint8_t matcher_selector;
	/** Placeholder flow struct used during flow rule update operation. */
	struct rte_flow_hw upd_flow;
};
1411 
1412 #ifdef PEDANTIC
1413 #pragma GCC diagnostic error "-Wpedantic"
1414 #endif
1415 
/* Forward declaration, full definition below. */
struct mlx5_action_construct_data;
/* Callback translating an INDIRECT_LIST flow action into DR rule actions. */
typedef int
(*indirect_list_callback_t)(struct rte_eth_dev *,
			    const struct mlx5_action_construct_data *,
			    const struct rte_flow_action *,
			    struct mlx5dr_rule_action *);

/* Maximum number of precompiled modify-header commands (2 * max fields + 1). */
#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
1424 
/** Container for flow action data constructed during flow rule creation. */
struct mlx5_flow_hw_action_params {
	/** Array of constructed modify header commands. */
	struct mlx5_modification_cmd mhdr_cmd[MLX5_MHDR_MAX_CMD];
	/** Constructed encap/decap data buffer. */
	uint8_t encap_data[MLX5_ENCAP_MAX_LEN];
	/** Constructed IPv6 routing data buffer. */
	uint8_t ipv6_push_data[MLX5_PUSH_MAX_LEN];
};

/** Container for dynamically generated flow items used during flow rule creation. */
struct mlx5_flow_hw_pattern_params {
	/** Array of dynamically generated flow items. */
	struct rte_flow_item items[MLX5_HW_MAX_ITEMS];
	/** Temporary REPRESENTED_PORT item generated by PMD. */
	struct rte_flow_item_ethdev port_spec;
	/** Temporary TAG item generated by PMD. */
	struct rte_flow_item_tag tag_spec;
};
1444 
/* Per-action data collected while translating rte_flow actions to DR actions. */
struct mlx5_action_construct_data {
	LIST_ENTRY(mlx5_action_construct_data) next;
	/* Action type; used to ensure the action types are matched. */
	int type;
	uint32_t idx;  /* Data index. */
	uint16_t action_src; /* rte_flow_action src offset. */
	uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
	indirect_list_callback_t indirect_list_cb;
	union {
		struct {
			/* Expected type of indirection action. */
			enum rte_flow_action_type expected_type;
		} indirect;
		struct {
			/* encap data len. */
			uint16_t len;
		} encap;
		struct {
			/* Modify header action offset in pattern. */
			uint16_t mhdr_cmds_off;
			/* Offset in pattern after modify header actions. */
			uint16_t mhdr_cmds_end;
			/*
			 * True if this action is masked and does not need to
			 * be generated.
			 */
			bool shared;
			/*
			 * Modified field definitions in dst field (SET, ADD)
			 * or src field (COPY).
			 */
			struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
			/* Modified field definitions in dst field (COPY). */
			struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
			/*
			 * Masks applied to field values to generate
			 * PRM actions.
			 */
			uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
			/* Copy of action passed to the action template. */
			struct rte_flow_action_modify_field action;
		} modify_header;
		struct {
			bool symmetric_hash_function; /* Symmetric RSS hash */
			uint64_t types; /* RSS hash types. */
			uint32_t level; /* RSS level. */
			uint32_t idx; /* Shared action index. */
		} shared_rss;
		struct {
			cnt_id_t id; /* Shared counter id. */
		} shared_counter;
		struct {
			/* IPv6 extension push data len. */
			uint16_t len;
		} ipv6_ext;
		struct {
			uint32_t id; /* Shared meter id. */
			uint32_t conf_masked:1;
		} shared_meter;
	};
};
1507 
/* Maximum GENEVE TLV option resources trackable per template. */
#define MAX_GENEVE_OPTIONS_RESOURCES 7

/* GENEVE TLV options manager structure. */
struct mlx5_geneve_tlv_options_mng {
	uint8_t nb_options; /* Number of options inside the template. */
	struct {
		uint8_t opt_type;
		uint16_t opt_class;
	} options[MAX_GENEVE_OPTIONS_RESOURCES];
};
1518 
/* Flow item template struct. */
struct rte_flow_pattern_template {
	LIST_ENTRY(rte_flow_pattern_template) next;
	/* Template attributes. */
	struct rte_flow_pattern_template_attr attr;
	struct mlx5dr_match_template *mt; /* mlx5 match template. */
	uint64_t item_flags; /* Item layer flags. */
	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
	/*
	 * If true, then rule pattern should be prepended with
	 * represented_port pattern item.
	 */
	bool implicit_port;
	/*
	 * If true, then rule pattern should be prepended with
	 * tag pattern item for representor matching.
	 */
	bool implicit_tag;
	/* Manages all GENEVE TLV options used by this pattern template. */
	struct mlx5_geneve_tlv_options_mng geneve_opt_mng;
	uint8_t flex_item; /* Flex item index bitmask. */
	/* Items on which this pattern template is based on. */
	struct rte_flow_item *items;
};
1544 
/* Flow action template struct. */
struct rte_flow_actions_template {
	LIST_ENTRY(rte_flow_actions_template) next;
	/* Template attributes. */
	struct rte_flow_actions_template_attr attr;
	struct rte_flow_action *actions; /* Cached flow actions. */
	struct rte_flow_action *orig_actions; /* Original flow actions. */
	struct rte_flow_action *masks; /* Cached action masks. */
	struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
	uint64_t action_flags; /* Bit-map of all valid action in template. */
	uint16_t dr_actions_num; /* Amount of DR rules actions. */
	uint16_t actions_num; /* Amount of flow actions. */
	uint16_t *dr_off; /* DR action offset for given rte action offset. */
	uint16_t *src_off; /* RTE action displacement from app. template */
	uint16_t reformat_off; /* Offset of DR reformat action. */
	uint16_t mhdr_off; /* Offset of DR modify header action. */
	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
	uint8_t flex_item; /* Flex item index bitmask. */
};
1565 
/* Jump action struct. */
struct mlx5_hw_jump_action {
	/* Action jump from root. */
	struct mlx5dr_action *root_action;
	/* HW steering jump action. */
	struct mlx5dr_action *hws_action;
};

/* Encap decap action struct. */
struct mlx5_hw_encap_decap_action {
	struct mlx5_indirect_list indirect;
	enum mlx5dr_action_type action_type;
	struct mlx5dr_action *action; /* Action object. */
	/* Is header_reformat action shared across flows in table. */
	uint32_t shared:1;
	uint32_t multi_pattern:1;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data. */
};

/* Push remove action struct. */
struct mlx5_hw_push_remove_action {
	struct mlx5dr_action *action; /* Action object. */
	/* Is push_remove action shared across flows in table. */
	uint8_t shared;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data. */
};

/* Modify field action struct. */
struct mlx5_hw_modify_header_action {
	/* Reference to DR action */
	struct mlx5dr_action *action;
	/* Modify header action position in action rule table. */
	uint16_t pos;
	/* Is MODIFY_HEADER action shared across flows in table. */
	uint32_t shared:1;
	uint32_t multi_pattern:1;
	/* Amount of modification commands stored in the precompiled buffer. */
	uint32_t mhdr_cmds_num;
	/* Precompiled modification commands. */
	struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
};
1609 
/* The maximum number of actions supported in a single flow. */
#define MLX5_HW_MAX_ACTS 16

/* DR action set struct. */
struct mlx5_hw_actions {
	/* Dynamic action list. */
	LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
	struct mlx5_hw_jump_action *jump; /* Jump action. */
	struct mlx5_hrxq *tir; /* TIR action. */
	struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
	/* Encap/Decap action. */
	struct mlx5_hw_encap_decap_action *encap_decap;
	uint16_t encap_decap_pos; /* Encap/Decap action position. */
	/* Push/remove action. */
	struct mlx5_hw_push_remove_action *push_remove;
	uint16_t push_remove_pos; /* Push/remove action position. */
	uint32_t mark:1; /* Indicate the mark action. */
	cnt_id_t cnt_id; /* Counter id. */
	uint32_t mtr_id; /* Meter id. */
	/* Translated DR action array from action template. */
	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
};
1632 
/* mlx5 action template struct. */
struct mlx5_hw_action_template {
	/* Action template pointer. */
	struct rte_flow_actions_template *action_template;
	struct mlx5_hw_actions acts; /* Template actions. */
};

/* mlx5 flow group struct. */
struct mlx5_flow_group {
	struct mlx5_list_entry entry;
	LIST_ENTRY(mlx5_flow_group) next;
	struct rte_eth_dev *dev; /* Reference to corresponding device. */
	struct mlx5dr_table *tbl; /* HWS table object. */
	struct mlx5_hw_jump_action jump; /* Jump action. */
	struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */
	enum mlx5dr_table_type type; /* Table type. */
	uint32_t group_id; /* Group id. */
	uint32_t idx; /* Group memory index. */
	/* List of all matchers created for this group in non template api. */
	struct mlx5_list *matchers;
};
1654 
1655 
/* Maximum pattern/action templates a template table can bind. */
#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 32
#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32

/* Number of encap reformat variants tracked per multi-pattern context. */
#define MLX5_MULTIPATTERN_ENCAP_NUM 5
/* Maximum number of resize segments of a template table. */
#define MLX5_MAX_TABLE_RESIZE_NUM 64

struct mlx5_multi_pattern_segment {
	/*
	 * Modify Header Argument Objects number allocated for action in that
	 * segment.
	 * Capacity is always power of 2.
	 */
	uint32_t capacity;
	uint32_t head_index;
	struct mlx5dr_action *mhdr_action;
	struct mlx5dr_action *reformat_action[MLX5_MULTIPATTERN_ENCAP_NUM];
};
1673 
/* Multi-pattern context of a template table. */
struct mlx5_tbl_multi_pattern_ctx {
	struct {
		uint32_t elements_num;
		struct mlx5dr_action_reformat_header reformat_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
		/**
		 * insert_header structure is larger than reformat_header.
		 * Enclosing these structures with union will cause a gap
		 * between reformat_hdr array elements.
		 * mlx5dr_action_create_reformat() expects adjacent array elements.
		 */
		struct mlx5dr_action_insert_header insert_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];

	struct {
		uint32_t elements_num;
		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} mh;
	struct mlx5_multi_pattern_segment segments[MLX5_MAX_TABLE_RESIZE_NUM];
};
1693 
1694 static __rte_always_inline void
1695 mlx5_multi_pattern_activate(struct mlx5_tbl_multi_pattern_ctx *mpctx)
1696 {
1697 	mpctx->segments[0].head_index = 1;
1698 }
1699 
1700 static __rte_always_inline bool
1701 mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
1702 {
1703 	return mpctx->segments[0].head_index == 1;
1704 }
1705 
/* Table creation parameters, plus whether the table came from the flow API. */
struct mlx5_flow_template_table_cfg {
	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
	bool external; /* True if created by flow API, false if table is internal to PMD. */
};

/* One of the (up to two) matchers backing a resizable template table. */
struct mlx5_matcher_info {
	struct mlx5dr_matcher *matcher; /* Template matcher. */
	struct mlx5dr_action *jump; /* Jump to matcher action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};

/* Cache-aligned buffer of precalculated DR actions (see rule_acts below). */
struct __rte_cache_aligned mlx5_dr_rule_action_container {
	struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];
};
1720 
/* HWS flow template table. */
struct rte_flow_template_table {
	LIST_ENTRY(rte_flow_template_table) next;
	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
	struct mlx5_matcher_info matcher_info[2];
	uint32_t matcher_selector; /* Index of the active matcher in matcher_info[]. */
	rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables. */
	/* Item templates bind to the table. */
	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
	/* Action templates bind to the table. */
	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
	struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */
	struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
	struct mlx5_flow_template_table_cfg cfg;
	uint32_t type; /* Flow table type RX/TX/FDB. */
	uint8_t nb_item_templates; /* Item template number. */
	uint8_t nb_action_templates; /* Action template number. */
	uint32_t refcnt; /* Table reference counter. */
	struct mlx5_tbl_multi_pattern_ctx mpctx;
	struct mlx5dr_matcher_attr matcher_attr;
	/**
	 * Variable length array of containers containing precalculated templates of DR actions
	 * arrays. This array is allocated at template table creation time and contains
	 * one container per each queue, per each actions template.
	 * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair.
	 * Each container will provide a local "queue buffer" to work on for flow creation
	 * operations when using a given actions template.
	 */
	struct mlx5_dr_rule_action_container rule_acts[];
};
1751 
1752 static __rte_always_inline struct mlx5dr_matcher *
1753 mlx5_table_matcher(const struct rte_flow_template_table *table)
1754 {
1755 	return table->matcher_info[table->matcher_selector].matcher;
1756 }
1757 
1758 static __rte_always_inline struct mlx5_multi_pattern_segment *
1759 mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
1760 				uint32_t flow_resource_ix)
1761 {
1762 	int i;
1763 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
1764 
1765 	if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr)))
1766 		return &mpctx->segments[0];
1767 	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
1768 		uint32_t limit = mpctx->segments[i].head_index +
1769 				 mpctx->segments[i].capacity;
1770 
1771 		if (flow_resource_ix < limit)
1772 			return &mpctx->segments[i];
1773 	}
1774 	return NULL;
1775 }
1776 
1777 /*
1778  * Convert metadata or tag to the actual register.
1779  * META: Fixed C_1 for FDB mode, REG_A for NIC TX and REG_B for NIC RX.
1780  * TAG: C_x expect meter color reg and the reserved ones.
1781  */
static __rte_always_inline int
flow_hw_get_reg_id_by_domain(struct rte_eth_dev *dev,
			     enum rte_flow_item_type type,
			     enum mlx5dr_table_type domain_type, uint32_t id)
{
	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
	struct mlx5_dev_registers *reg = &sh->registers;

	switch (type) {
	case RTE_FLOW_ITEM_TYPE_META:
		/* E-Switch with 32-bit HWS metadata pins META to REG_C_1. */
		if (sh->config.dv_esw_en &&
		    sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
			return REG_C_1;
		}
		/*
		 * On root table - PMD allows only egress META matching, thus
		 * REG_A matching is sufficient.
		 *
		 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
		 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
		 * However, current FW does not implement REG_B case right now, so
		 * REG_B case is return explicitly by this function for NIC RX.
		 */
		if (domain_type == MLX5DR_TABLE_TYPE_NIC_RX)
			return REG_B;
		return REG_A;
	case RTE_FLOW_ITEM_TYPE_CONNTRACK:
	case RTE_FLOW_ITEM_TYPE_METER_COLOR:
		/* Conntrack and meter color share the device's ASO register. */
		return reg->aso_reg;
	case RTE_FLOW_ITEM_TYPE_TAG:
		/* The linear-hash tag index is hard-wired to REG_C_3. */
		if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
			return REG_C_3;
		MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
		return reg->hw_avl_tags[id];
	default:
		return REG_NON;
	}
}
1820 
1821 static __rte_always_inline int
1822 flow_hw_get_reg_id_from_ctx(void *dr_ctx, enum rte_flow_item_type type,
1823 			    enum mlx5dr_table_type domain_type, uint32_t id)
1824 {
1825 	uint16_t port;
1826 
1827 	MLX5_ETH_FOREACH_DEV(port, NULL) {
1828 		struct mlx5_priv *priv;
1829 
1830 		priv = rte_eth_devices[port].data->dev_private;
1831 		if (priv->dr_ctx == dr_ctx)
1832 			return flow_hw_get_reg_id_by_domain(&rte_eth_devices[port],
1833 							    type, domain_type, id);
1834 	}
1835 	return REG_NON;
1836 }
1837 
1838 #endif
1839 
1840 /*
1841  * Define list of valid combinations of RX Hash fields
1842  * (see enum ibv_rx_hash_fields).
1843  */
1844 #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
1845 #define MLX5_RSS_HASH_IPV4_TCP \
1846 	(MLX5_RSS_HASH_IPV4 | \
1847 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1848 #define MLX5_RSS_HASH_IPV4_UDP \
1849 	(MLX5_RSS_HASH_IPV4 | \
1850 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1851 #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
1852 #define MLX5_RSS_HASH_IPV6_TCP \
1853 	(MLX5_RSS_HASH_IPV6 | \
1854 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1855 #define MLX5_RSS_HASH_IPV6_UDP \
1856 	(MLX5_RSS_HASH_IPV6 | \
1857 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1858 #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
1859 #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
1860 #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
1861 #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
1862 #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
1863 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
1864 #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
1865 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
1866 #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
1867 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
1868 #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
1869 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
1870 #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
1871 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
1872 #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
1873 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
1874 #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
1875 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
1876 #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
1877 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
1878 
1879 #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
1880 #define IBV_RX_HASH_IPSEC_SPI (1U << 8)
1881 #endif
1882 
1883 #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
1884 #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
1885 				MLX5_RSS_HASH_ESP_SPI)
1886 #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
1887 				MLX5_RSS_HASH_ESP_SPI)
1888 #define MLX5_RSS_HASH_NONE 0ULL
1889 
1890 #define MLX5_RSS_IS_SYMM(func) \
1891 		(((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \
1892 		 ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT))
1893 
1894 /* extract next protocol type from Ethernet & VLAN headers */
1895 #define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
1896 	(_prt) = ((const struct _s *)(_itm)->mask)->_m;       \
1897 	(_prt) &= ((const struct _s *)(_itm)->spec)->_m;      \
1898 	(_prt) = rte_be_to_cpu_16((_prt));                    \
1899 } while (0)
1900 
/*
 * Array of valid combinations of RX Hash fields for RSS.
 * Keep the order stable: mlx5_shared_action_rss::hrxq entries are
 * mapped to positions in this array.
 */
static const uint64_t mlx5_rss_hash_fields[] = {
	MLX5_RSS_HASH_IPV4,
	MLX5_RSS_HASH_IPV4_TCP,
	MLX5_RSS_HASH_IPV4_UDP,
	MLX5_RSS_HASH_IPV4_ESP,
	MLX5_RSS_HASH_IPV6,
	MLX5_RSS_HASH_IPV6_TCP,
	MLX5_RSS_HASH_IPV6_UDP,
	MLX5_RSS_HASH_IPV6_ESP,
	MLX5_RSS_HASH_ESP_SPI,
	MLX5_RSS_HASH_NONE,
};
1914 
1915 /* Shared RSS action structure */
1916 struct mlx5_shared_action_rss {
1917 	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
1918 	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
1919 	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
1920 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
1921 	struct mlx5_ind_table_obj *ind_tbl;
1922 	/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
1923 	uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
1924 	/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
1925 	rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
1926 };
1927 
1928 struct rte_flow_action_handle {
1929 	uint32_t id;
1930 };
1931 
/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
	/* If creating another flow in same thread, push new as stack. */
	struct mlx5_flow_workspace *prev;
	struct mlx5_flow_workspace *next;
	/* NOTE(review): presumably a garbage-collection chain — confirm. */
	struct mlx5_flow_workspace *gc;
	uint32_t inuse; /* can't create new flow with current. */
	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
	struct mlx5_flow_rss_desc rss_desc;
	uint32_t flow_idx; /* Intermediate device flow index. */
	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
	struct mlx5_flow_meter_policy *policy;
	/* The meter policy used by meter in flow. */
	struct mlx5_flow_meter_policy *final_policy;
	/* The final policy when meter policy is hierarchy. */
	uint32_t skip_matcher_reg:1;
	/* Indicates if need to skip matcher register in translate. */
	uint32_t mark:1; /* Indicates if flow contains mark action. */
	uint32_t vport_meta_tag; /* Used for vport index match. */
};
1952 
/*
 * Matcher translate type. Bits combine orthogonally: SW vs. HS path,
 * value (V) vs. mask (M) half — see the MLX5_SET_MATCHER_* group macros.
 */
enum MLX5_SET_MATCHER {
	MLX5_SET_MATCHER_SW_V = 1 << 0, /* SW path, value. */
	MLX5_SET_MATCHER_SW_M = 1 << 1, /* SW path, mask. */
	MLX5_SET_MATCHER_HS_V = 1 << 2, /* HS path, value. */
	MLX5_SET_MATCHER_HS_M = 1 << 3, /* HS path, mask. */
};
1960 
1961 #define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
1962 #define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
1963 #define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
1964 #define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)
1965 
1966 /* Flow matcher workspace intermediate data. */
1967 struct mlx5_dv_matcher_workspace {
1968 	uint8_t priority; /* Flow priority. */
1969 	uint64_t last_item; /* Last item in pattern. */
1970 	uint64_t item_flags; /* Flow item pattern flags. */
1971 	uint64_t action_flags; /* Flow action flags. */
1972 	bool external; /* External flow or not. */
1973 	uint32_t vlan_tag:12; /* Flow item VLAN tag. */
1974 	uint8_t next_protocol; /* Tunnel next protocol */
1975 	uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
1976 	uint32_t group; /* Flow group. */
1977 	uint16_t udp_dport; /* Flow item UDP port. */
1978 	const struct rte_flow_attr *attr; /* Flow attribute. */
1979 	struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
1980 	const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
1981 	const struct rte_flow_item *gre_item; /* Flow GRE item. */
1982 	const struct rte_flow_item *integrity_items[2];
1983 };
1984 
1985 struct mlx5_flow_split_info {
1986 	uint32_t external:1;
1987 	/**< True if flow is created by request external to PMD. */
1988 	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
1989 	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
1990 	uint32_t flow_idx; /**< This memory pool index to the flow. */
1991 	uint32_t table_id; /**< Flow table identifier. */
1992 	uint64_t prefix_layers; /**< Prefix subflow layers. */
1993 };
1994 
1995 struct mlx5_flow_hw_partial_resource {
1996 	const struct rte_flow_attr *attr;
1997 	const struct rte_flow_item *items;
1998 	const struct rte_flow_action *actions;
1999 };
2000 
2001 struct mlx5_flow_hw_split_resource {
2002 	struct mlx5_flow_hw_partial_resource prefix;
2003 	struct mlx5_flow_hw_partial_resource suffix;
2004 	void *buf_start; /* start address of continuous buffer. */
2005 	uint32_t flow_idx; /* This memory pool index to the flow. */
2006 };
2007 
2008 struct mlx5_hl_data {
2009 	uint8_t dw_offset;
2010 	uint32_t dw_mask;
2011 };
2012 
2013 extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
2014 
2015 /*
2016  * Get sqn for given tx_queue.
2017  * Used in HWS rule creation.
2018  */
2019 static __rte_always_inline int
2020 flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
2021 {
2022 	struct mlx5_txq_ctrl *txq;
2023 	struct mlx5_external_q *ext_txq;
2024 
2025 	/* Means Tx queue is PF0. */
2026 	if (tx_queue == UINT16_MAX) {
2027 		*sqn = 0;
2028 		return 0;
2029 	}
2030 	if (mlx5_is_external_txq(dev, tx_queue)) {
2031 		ext_txq = mlx5_ext_txq_get(dev, tx_queue);
2032 		*sqn = ext_txq->hw_id;
2033 		return 0;
2034 	}
2035 	txq = mlx5_txq_get(dev, tx_queue);
2036 	if (unlikely(!txq))
2037 		return -ENOENT;
2038 	*sqn = mlx5_txq_get_sqn(txq);
2039 	mlx5_txq_release(dev, tx_queue);
2040 	return 0;
2041 }
2042 
2043 /*
2044  * Convert sqn for given rte_eth_dev port.
2045  * Used in HWS rule creation.
2046  */
2047 static __rte_always_inline int
2048 flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
2049 {
2050 	if (port_id >= RTE_MAX_ETHPORTS)
2051 		return -EINVAL;
2052 	return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
2053 }
2054 
2055 /*
2056  * Get given rte_eth_dev port_id.
2057  * Used in HWS rule creation.
2058  */
2059 static __rte_always_inline uint16_t
2060 flow_hw_get_port_id(void *dr_ctx)
2061 {
2062 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2063 	uint16_t port_id;
2064 
2065 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2066 		struct mlx5_priv *priv;
2067 
2068 		priv = rte_eth_devices[port_id].data->dev_private;
2069 		if (priv->dr_ctx == dr_ctx)
2070 			return port_id;
2071 	}
2072 #else
2073 	RTE_SET_USED(dr_ctx);
2074 #endif
2075 	return UINT16_MAX;
2076 }
2077 
2078 /*
2079  * Get given eswitch manager id.
2080  * Used in HWS match with port creation.
2081  */
2082 static __rte_always_inline const struct flow_hw_port_info *
2083 flow_hw_get_esw_mgr_id(void *dr_ctx)
2084 {
2085 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2086 	uint16_t port_id;
2087 
2088 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2089 		struct mlx5_priv *priv;
2090 
2091 		priv = rte_eth_devices[port_id].data->dev_private;
2092 		if (priv->dr_ctx == dr_ctx)
2093 			return &priv->sh->dev_cap.esw_info;
2094 	}
2095 #else
2096 	RTE_SET_USED(dr_ctx);
2097 #endif
2098 	return NULL;
2099 }
2100 
2101 /*
2102  * Get metadata match tag and mask for given rte_eth_dev port.
2103  * Used in HWS rule creation.
2104  */
2105 static __rte_always_inline const struct flow_hw_port_info *
2106 flow_hw_conv_port_id(void *ctx, const uint16_t port_id)
2107 {
2108 	struct flow_hw_port_info *port_info;
2109 
2110 	if (port_id == UINT16_MAX && ctx)
2111 		return flow_hw_get_esw_mgr_id(ctx);
2112 
2113 	if (port_id >= RTE_MAX_ETHPORTS)
2114 		return NULL;
2115 	port_info = &mlx5_flow_hw_port_infos[port_id];
2116 	return !!port_info->regc_mask ? port_info : NULL;
2117 }
2118 
2119 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2120 /*
2121  * Get metadata match tag and mask for the uplink port represented
2122  * by given IB context. Used in HWS context creation.
2123  */
2124 static __rte_always_inline const struct flow_hw_port_info *
2125 flow_hw_get_wire_port(struct ibv_context *ibctx)
2126 {
2127 	struct ibv_device *ibdev = ibctx->device;
2128 	uint16_t port_id;
2129 
2130 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2131 		const struct mlx5_priv *priv =
2132 				rte_eth_devices[port_id].data->dev_private;
2133 
2134 		if (priv && priv->master) {
2135 			struct ibv_context *port_ibctx = priv->sh->cdev->ctx;
2136 
2137 			if (port_ibctx->device == ibdev)
2138 				return flow_hw_conv_port_id(priv->dr_ctx, port_id);
2139 		}
2140 	}
2141 	return NULL;
2142 }
2143 #endif
2144 
2145 static __rte_always_inline int
2146 flow_hw_get_reg_id(struct rte_eth_dev *dev,
2147 		   enum rte_flow_item_type type, uint32_t id)
2148 {
2149 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2150 	return flow_hw_get_reg_id_by_domain(dev, type,
2151 					    MLX5DR_TABLE_TYPE_MAX, id);
2152 #else
2153 	RTE_SET_USED(dev);
2154 	RTE_SET_USED(type);
2155 	RTE_SET_USED(id);
2156 	return REG_NON;
2157 #endif
2158 }
2159 
2160 static __rte_always_inline int
2161 flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val)
2162 {
2163 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2164 	uint32_t port;
2165 
2166 	MLX5_ETH_FOREACH_DEV(port, NULL) {
2167 		struct mlx5_priv *priv;
2168 		priv = rte_eth_devices[port].data->dev_private;
2169 
2170 		if (priv->dr_ctx == dr_ctx) {
2171 			*port_val = port;
2172 			return 0;
2173 		}
2174 	}
2175 #else
2176 	RTE_SET_USED(dr_ctx);
2177 	RTE_SET_USED(port_val);
2178 #endif
2179 	return -EINVAL;
2180 }
2181 
2182 /**
2183  * Get GENEVE TLV option FW information according type and class.
2184  *
2185  * @param[in] dr_ctx
2186  *   Pointer to HW steering DR context.
2187  * @param[in] type
2188  *   GENEVE TLV option type.
2189  * @param[in] class
2190  *   GENEVE TLV option class.
2191  * @param[out] hl_ok_bit
2192  *   Pointer to header layout structure describing OK bit FW information.
2193  * @param[out] num_of_dws
2194  *   Pointer to fill inside the size of 'hl_dws' array.
2195  * @param[out] hl_dws
2196  *   Pointer to header layout array describing data DWs FW information.
2197  * @param[out] ok_bit_on_class
2198  *   Pointer to an indicator whether OK bit includes class along with type.
2199  *
2200  * @return
2201  *   0 on success, negative errno otherwise and rte_errno is set.
2202  */
2203 int
2204 mlx5_get_geneve_hl_data(const void *dr_ctx, uint8_t type, uint16_t class,
2205 			struct mlx5_hl_data ** const hl_ok_bit,
2206 			uint8_t *num_of_dws,
2207 			struct mlx5_hl_data ** const hl_dws,
2208 			bool *ok_bit_on_class);
2209 
2210 /**
2211  * Get modify field ID for single DW inside configured GENEVE TLV option.
2212  *
2213  * @param[in] dr_ctx
2214  *   Pointer to HW steering DR context.
2215  * @param[in] type
2216  *   GENEVE TLV option type.
2217  * @param[in] class
2218  *   GENEVE TLV option class.
2219  * @param[in] dw_offset
2220  *   Offset of DW inside the option.
2221  *
2222  * @return
2223  *   Modify field ID on success, negative errno otherwise and rte_errno is set.
2224  */
2225 int
2226 mlx5_get_geneve_option_modify_field_id(const void *dr_ctx, uint8_t type,
2227 				       uint16_t class, uint8_t dw_offset);
2228 
2229 void *
2230 mlx5_geneve_tlv_parser_create(uint16_t port_id,
2231 			      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
2232 			      uint8_t nb_options);
2233 int mlx5_geneve_tlv_parser_destroy(void *handle);
2234 int mlx5_flow_geneve_tlv_option_validate(struct mlx5_priv *priv,
2235 					 const struct rte_flow_item *geneve_opt,
2236 					 struct rte_flow_error *error);
2237 int mlx5_geneve_opt_modi_field_get(struct mlx5_priv *priv,
2238 				   const struct rte_flow_field_data *data);
2239 
2240 struct mlx5_geneve_tlv_options_mng;
2241 int mlx5_geneve_tlv_option_register(struct mlx5_priv *priv,
2242 				    const struct rte_flow_item_geneve_opt *spec,
2243 				    struct mlx5_geneve_tlv_options_mng *mng);
2244 void mlx5_geneve_tlv_options_unregister(struct mlx5_priv *priv,
2245 					struct mlx5_geneve_tlv_options_mng *mng);
2246 
2247 void flow_hw_set_port_info(struct rte_eth_dev *dev);
2248 void flow_hw_clear_port_info(struct rte_eth_dev *dev);
2249 int flow_hw_create_vport_action(struct rte_eth_dev *dev);
2250 void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
2251 int
2252 flow_hw_init(struct rte_eth_dev *dev,
2253 	     struct rte_flow_error *error);
2254 
2255 typedef uintptr_t (*mlx5_flow_list_create_t)(struct rte_eth_dev *dev,
2256 					enum mlx5_flow_type type,
2257 					const struct rte_flow_attr *attr,
2258 					const struct rte_flow_item items[],
2259 					const struct rte_flow_action actions[],
2260 					bool external,
2261 					struct rte_flow_error *error);
2262 typedef void (*mlx5_flow_list_destroy_t)(struct rte_eth_dev *dev,
2263 					enum mlx5_flow_type type,
2264 					uintptr_t flow_idx);
2265 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
2266 				    const struct rte_flow_attr *attr,
2267 				    const struct rte_flow_item items[],
2268 				    const struct rte_flow_action actions[],
2269 				    bool external,
2270 				    int hairpin,
2271 				    struct rte_flow_error *error);
2272 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
2273 	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2274 	 const struct rte_flow_item items[],
2275 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
2276 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
2277 				     struct mlx5_flow *dev_flow,
2278 				     const struct rte_flow_attr *attr,
2279 				     const struct rte_flow_item items[],
2280 				     const struct rte_flow_action actions[],
2281 				     struct rte_flow_error *error);
2282 typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
2283 				 struct rte_flow_error *error);
2284 typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
2285 				   struct rte_flow *flow);
2286 typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
2287 				    struct rte_flow *flow);
2288 typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
2289 				 struct rte_flow *flow,
2290 				 const struct rte_flow_action *actions,
2291 				 void *data,
2292 				 struct rte_flow_error *error);
2293 typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
2294 					struct mlx5_flow_meter_info *fm,
2295 					uint32_t mtr_idx,
2296 					uint8_t domain_bitmap);
2297 typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
2298 				struct mlx5_flow_meter_info *fm);
2299 typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
2300 typedef struct mlx5_flow_meter_sub_policy *
2301 	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
2302 		(struct rte_eth_dev *dev,
2303 		struct mlx5_flow_meter_policy *mtr_policy,
2304 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
2305 typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
2306 		(struct rte_eth_dev *dev,
2307 		struct mlx5_flow_meter_info *fm,
2308 		int32_t src_port,
2309 		const struct rte_flow_item *item,
2310 		struct rte_flow_error *error);
2311 typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
2312 	(struct rte_eth_dev *dev,
2313 	struct mlx5_flow_meter_policy *mtr_policy);
2314 typedef uint32_t (*mlx5_flow_mtr_alloc_t)
2315 					    (struct rte_eth_dev *dev);
2316 typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
2317 						uint32_t mtr_idx);
2318 typedef uint32_t (*mlx5_flow_counter_alloc_t)
2319 				   (struct rte_eth_dev *dev);
2320 typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
2321 					 uint32_t cnt);
2322 typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
2323 					 uint32_t cnt,
2324 					 bool clear, uint64_t *pkts,
2325 					 uint64_t *bytes, void **action);
2326 typedef int (*mlx5_flow_get_aged_flows_t)
2327 					(struct rte_eth_dev *dev,
2328 					 void **context,
2329 					 uint32_t nb_contexts,
2330 					 struct rte_flow_error *error);
2331 typedef int (*mlx5_flow_get_q_aged_flows_t)
2332 					(struct rte_eth_dev *dev,
2333 					 uint32_t queue_id,
2334 					 void **context,
2335 					 uint32_t nb_contexts,
2336 					 struct rte_flow_error *error);
2337 typedef int (*mlx5_flow_action_validate_t)
2338 				(struct rte_eth_dev *dev,
2339 				 const struct rte_flow_indir_action_conf *conf,
2340 				 const struct rte_flow_action *action,
2341 				 struct rte_flow_error *error);
2342 typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
2343 				(struct rte_eth_dev *dev,
2344 				 const struct rte_flow_indir_action_conf *conf,
2345 				 const struct rte_flow_action *action,
2346 				 struct rte_flow_error *error);
2347 typedef int (*mlx5_flow_action_destroy_t)
2348 				(struct rte_eth_dev *dev,
2349 				 struct rte_flow_action_handle *action,
2350 				 struct rte_flow_error *error);
2351 typedef int (*mlx5_flow_action_update_t)
2352 			(struct rte_eth_dev *dev,
2353 			 struct rte_flow_action_handle *action,
2354 			 const void *update,
2355 			 struct rte_flow_error *error);
2356 typedef int (*mlx5_flow_action_query_t)
2357 			(struct rte_eth_dev *dev,
2358 			 const struct rte_flow_action_handle *action,
2359 			 void *data,
2360 			 struct rte_flow_error *error);
2361 typedef int (*mlx5_flow_action_query_update_t)
2362 			(struct rte_eth_dev *dev,
2363 			 struct rte_flow_action_handle *handle,
2364 			 const void *update, void *data,
2365 			 enum rte_flow_query_update_mode qu_mode,
2366 			 struct rte_flow_error *error);
2367 typedef struct rte_flow_action_list_handle *
2368 (*mlx5_flow_action_list_handle_create_t)
2369 			(struct rte_eth_dev *dev,
2370 			 const struct rte_flow_indir_action_conf *conf,
2371 			 const struct rte_flow_action *actions,
2372 			 struct rte_flow_error *error);
2373 typedef int
2374 (*mlx5_flow_action_list_handle_destroy_t)
2375 			(struct rte_eth_dev *dev,
2376 			 struct rte_flow_action_list_handle *handle,
2377 			 struct rte_flow_error *error);
2378 typedef int (*mlx5_flow_sync_domain_t)
2379 			(struct rte_eth_dev *dev,
2380 			 uint32_t domains,
2381 			 uint32_t flags);
2382 typedef int (*mlx5_flow_validate_mtr_acts_t)
2383 			(struct rte_eth_dev *dev,
2384 			 const struct rte_flow_action *actions[RTE_COLORS],
2385 			 struct rte_flow_attr *attr,
2386 			 bool *is_rss,
2387 			 uint8_t *domain_bitmap,
2388 			 uint8_t *policy_mode,
2389 			 struct rte_mtr_error *error);
2390 typedef int (*mlx5_flow_create_mtr_acts_t)
2391 			(struct rte_eth_dev *dev,
2392 		      struct mlx5_flow_meter_policy *mtr_policy,
2393 		      const struct rte_flow_action *actions[RTE_COLORS],
2394 		      struct rte_flow_attr *attr,
2395 		      struct rte_mtr_error *error);
2396 typedef void (*mlx5_flow_destroy_mtr_acts_t)
2397 			(struct rte_eth_dev *dev,
2398 		      struct mlx5_flow_meter_policy *mtr_policy);
2399 typedef int (*mlx5_flow_create_policy_rules_t)
2400 			(struct rte_eth_dev *dev,
2401 			  struct mlx5_flow_meter_policy *mtr_policy);
2402 typedef void (*mlx5_flow_destroy_policy_rules_t)
2403 			(struct rte_eth_dev *dev,
2404 			  struct mlx5_flow_meter_policy *mtr_policy);
2405 typedef int (*mlx5_flow_create_def_policy_t)
2406 			(struct rte_eth_dev *dev);
2407 typedef void (*mlx5_flow_destroy_def_policy_t)
2408 			(struct rte_eth_dev *dev);
2409 typedef int (*mlx5_flow_discover_priorities_t)
2410 			(struct rte_eth_dev *dev,
2411 			 const uint16_t *vprio, int vprio_n);
2412 typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
2413 			(struct rte_eth_dev *dev,
2414 			 const struct rte_flow_item_flex_conf *conf,
2415 			 struct rte_flow_error *error);
2416 typedef int (*mlx5_flow_item_release_t)
2417 			(struct rte_eth_dev *dev,
2418 			 const struct rte_flow_item_flex_handle *handle,
2419 			 struct rte_flow_error *error);
2420 typedef int (*mlx5_flow_item_update_t)
2421 			(struct rte_eth_dev *dev,
2422 			 const struct rte_flow_item_flex_handle *handle,
2423 			 const struct rte_flow_item_flex_conf *conf,
2424 			 struct rte_flow_error *error);
2425 typedef int (*mlx5_flow_info_get_t)
2426 			(struct rte_eth_dev *dev,
2427 			 struct rte_flow_port_info *port_info,
2428 			 struct rte_flow_queue_info *queue_info,
2429 			 struct rte_flow_error *error);
2430 typedef int (*mlx5_flow_port_configure_t)
2431 			(struct rte_eth_dev *dev,
2432 			 const struct rte_flow_port_attr *port_attr,
2433 			 uint16_t nb_queue,
2434 			 const struct rte_flow_queue_attr *queue_attr[],
2435 			 struct rte_flow_error *err);
2436 typedef int (*mlx5_flow_pattern_validate_t)
2437 			(struct rte_eth_dev *dev,
2438 			 const struct rte_flow_pattern_template_attr *attr,
2439 			 const struct rte_flow_item items[],
2440 			 uint64_t *item_flags,
2441 			 struct rte_flow_error *error);
2442 typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
2443 			(struct rte_eth_dev *dev,
2444 			 const struct rte_flow_pattern_template_attr *attr,
2445 			 const struct rte_flow_item items[],
2446 			 struct rte_flow_error *error);
2447 typedef int (*mlx5_flow_pattern_template_destroy_t)
2448 			(struct rte_eth_dev *dev,
2449 			 struct rte_flow_pattern_template *template,
2450 			 struct rte_flow_error *error);
2451 typedef int (*mlx5_flow_actions_validate_t)
2452 			(struct rte_eth_dev *dev,
2453 			 const struct rte_flow_actions_template_attr *attr,
2454 			 const struct rte_flow_action actions[],
2455 			 const struct rte_flow_action masks[],
2456 			 struct rte_flow_error *error);
2457 typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
2458 			(struct rte_eth_dev *dev,
2459 			 const struct rte_flow_actions_template_attr *attr,
2460 			 const struct rte_flow_action actions[],
2461 			 const struct rte_flow_action masks[],
2462 			 struct rte_flow_error *error);
2463 typedef int (*mlx5_flow_actions_template_destroy_t)
2464 			(struct rte_eth_dev *dev,
2465 			 struct rte_flow_actions_template *template,
2466 			 struct rte_flow_error *error);
2467 typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
2468 		(struct rte_eth_dev *dev,
2469 		 const struct rte_flow_template_table_attr *attr,
2470 		 struct rte_flow_pattern_template *item_templates[],
2471 		 uint8_t nb_item_templates,
2472 		 struct rte_flow_actions_template *action_templates[],
2473 		 uint8_t nb_action_templates,
2474 		 struct rte_flow_error *error);
2475 typedef int (*mlx5_flow_table_destroy_t)
2476 			(struct rte_eth_dev *dev,
2477 			 struct rte_flow_template_table *table,
2478 			 struct rte_flow_error *error);
2479 typedef int (*mlx5_flow_group_set_miss_actions_t)
2480 			(struct rte_eth_dev *dev,
2481 			 uint32_t group_id,
2482 			 const struct rte_flow_group_attr *attr,
2483 			 const struct rte_flow_action actions[],
2484 			 struct rte_flow_error *error);
2485 typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
2486 			(struct rte_eth_dev *dev,
2487 			 uint32_t queue,
2488 			 const struct rte_flow_op_attr *attr,
2489 			 struct rte_flow_template_table *table,
2490 			 const struct rte_flow_item items[],
2491 			 uint8_t pattern_template_index,
2492 			 const struct rte_flow_action actions[],
2493 			 uint8_t action_template_index,
2494 			 void *user_data,
2495 			 struct rte_flow_error *error);
2496 typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
2497 			(struct rte_eth_dev *dev,
2498 			 uint32_t queue,
2499 			 const struct rte_flow_op_attr *attr,
2500 			 struct rte_flow_template_table *table,
2501 			 uint32_t rule_index,
2502 			 const struct rte_flow_action actions[],
2503 			 uint8_t action_template_index,
2504 			 void *user_data,
2505 			 struct rte_flow_error *error);
2506 typedef int (*mlx5_flow_async_flow_update_t)
2507 			(struct rte_eth_dev *dev,
2508 			 uint32_t queue,
2509 			 const struct rte_flow_op_attr *attr,
2510 			 struct rte_flow *flow,
2511 			 const struct rte_flow_action actions[],
2512 			 uint8_t action_template_index,
2513 			 void *user_data,
2514 			 struct rte_flow_error *error);
2515 typedef int (*mlx5_flow_async_flow_destroy_t)
2516 			(struct rte_eth_dev *dev,
2517 			 uint32_t queue,
2518 			 const struct rte_flow_op_attr *attr,
2519 			 struct rte_flow *flow,
2520 			 void *user_data,
2521 			 struct rte_flow_error *error);
2522 typedef int (*mlx5_flow_pull_t)
2523 			(struct rte_eth_dev *dev,
2524 			 uint32_t queue,
2525 			 struct rte_flow_op_result res[],
2526 			 uint16_t n_res,
2527 			 struct rte_flow_error *error);
2528 typedef int (*mlx5_flow_push_t)
2529 			(struct rte_eth_dev *dev,
2530 			 uint32_t queue,
2531 			 struct rte_flow_error *error);
2532 
/* Enqueue creation of an indirect action on the given queue. */
typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *action,
			 void *user_data,
			 struct rte_flow_error *error);

/* Enqueue an update of an existing indirect action. */
typedef int (*mlx5_flow_async_action_handle_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 const void *update,
			 void *user_data,
			 struct rte_flow_error *error);
/* Enqueue a combined query-and-update of an indirect action. */
typedef int (*mlx5_flow_async_action_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_handle *action_handle,
			 const void *update, void *data,
			 enum rte_flow_query_update_mode qu_mode,
			 void *user_data, struct rte_flow_error *error);
/* Enqueue a query of an indirect action. */
typedef int (*mlx5_flow_async_action_handle_query_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 void *user_data,
			 struct rte_flow_error *error);
2565 
/* Enqueue destruction of an indirect action. */
typedef int (*mlx5_flow_async_action_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 void *user_data,
			 struct rte_flow_error *error);
/* Enqueue creation of an indirect action list. */
typedef struct rte_flow_action_list_handle *
(*mlx5_flow_async_action_list_handle_create_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *actions,
			 void *user_data, struct rte_flow_error *error);
/* Enqueue destruction of an indirect action list. */
typedef int
(*mlx5_flow_async_action_list_handle_destroy_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_list_handle *action_handle,
			 void *user_data, struct rte_flow_error *error);
/* Synchronous query-and-update of an indirect action list. */
typedef int
(*mlx5_flow_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			struct rte_flow_error *error);
/* Enqueued (asynchronous) query-and-update of an indirect action list. */
typedef int
(*mlx5_flow_async_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			const struct rte_flow_op_attr *attr,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			void *user_data, struct rte_flow_error *error);
/* Compute the matcher hash of a pattern for a template table. */
typedef int
(*mlx5_flow_calc_table_hash_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_template_table *table,
			 const struct rte_flow_item pattern[],
			 uint8_t pattern_template_index,
			 uint32_t *hash, struct rte_flow_error *error);
/* Compute the hash of a pattern targeting the given encap hash field. */
typedef int
(*mlx5_flow_calc_encap_hash_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item pattern[],
			 enum rte_flow_encap_hash_field dest_field,
			 uint8_t *hash,
			 struct rte_flow_error *error);
/* Resize a template table to hold at least nb_rules rules. */
typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
				   struct rte_flow_template_table *table,
				   uint32_t nb_rules, struct rte_flow_error *error);
/* Enqueue an update of a flow rule after its table was resized. */
typedef int (*mlx5_flow_update_resized_t)
			(struct rte_eth_dev *dev, uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow *rule, void *user_data,
			 struct rte_flow_error *error);
/* Finalize a previously started template table resize. */
typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
				       struct rte_flow_template_table *table,
				       struct rte_flow_error *error);
2626 
/*
 * Flow engine callback table.
 * NOTE(review): presumably one instance is filled per steering mode
 * (Verbs/DV/HWS) — confirm against the flow driver sources.
 */
struct mlx5_flow_driver_ops {
	/* Flow rule lifecycle: validate/prepare/translate/apply/destroy. */
	mlx5_flow_list_create_t list_create;
	mlx5_flow_list_destroy_t list_destroy;
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	/* Meter tables, meter objects and meter policy handling. */
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
	mlx5_flow_mtr_alloc_t create_meter;
	mlx5_flow_mtr_free_t free_meter;
	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
	mlx5_flow_create_mtr_acts_t create_mtr_acts;
	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
	mlx5_flow_create_policy_rules_t create_policy_rules;
	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
	mlx5_flow_create_def_policy_t create_def_policy;
	mlx5_flow_destroy_def_policy_t destroy_def_policy;
	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
	/* Counters and flow aging. */
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
	mlx5_flow_get_aged_flows_t get_aged_flows;
	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
	/* Indirect (shared) actions and items. */
	mlx5_flow_action_validate_t action_validate;
	mlx5_flow_action_create_t action_create;
	mlx5_flow_action_destroy_t action_destroy;
	mlx5_flow_action_update_t action_update;
	mlx5_flow_action_query_t action_query;
	mlx5_flow_action_query_update_t action_query_update;
	mlx5_flow_action_list_handle_create_t action_list_handle_create;
	mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
	mlx5_flow_sync_domain_t sync_domain;
	mlx5_flow_discover_priorities_t discover_priorities;
	mlx5_flow_item_create_t item_create;
	mlx5_flow_item_release_t item_release;
	mlx5_flow_item_update_t item_update;
	/* Template-based (HWS) configuration and queue-based async ops. */
	mlx5_flow_info_get_t info_get;
	mlx5_flow_port_configure_t configure;
	mlx5_flow_pattern_validate_t pattern_validate;
	mlx5_flow_pattern_template_create_t pattern_template_create;
	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
	mlx5_flow_actions_validate_t actions_validate;
	mlx5_flow_actions_template_create_t actions_template_create;
	mlx5_flow_actions_template_destroy_t actions_template_destroy;
	mlx5_flow_table_create_t template_table_create;
	mlx5_flow_table_destroy_t template_table_destroy;
	mlx5_flow_group_set_miss_actions_t group_set_miss_actions;
	mlx5_flow_async_flow_create_t async_flow_create;
	mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
	mlx5_flow_async_flow_update_t async_flow_update;
	mlx5_flow_async_flow_destroy_t async_flow_destroy;
	mlx5_flow_pull_t pull;
	mlx5_flow_push_t push;
	mlx5_flow_async_action_handle_create_t async_action_create;
	mlx5_flow_async_action_handle_update_t async_action_update;
	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
	mlx5_flow_async_action_handle_query_t async_action_query;
	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
	mlx5_flow_async_action_list_handle_create_t
		async_action_list_handle_create;
	mlx5_flow_async_action_list_handle_destroy_t
		async_action_list_handle_destroy;
	mlx5_flow_action_list_handle_query_update_t
		action_list_handle_query_update;
	mlx5_flow_async_action_list_handle_query_update_t
		async_action_list_handle_query_update;
	/* Matcher hash calculation and live template table resize. */
	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
	mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
	mlx5_table_resize_t table_resize;
	mlx5_flow_update_resized_t flow_update_resized;
	table_resize_complete_t table_resize_complete;
};
2706 
2707 /* mlx5_flow.c */
2708 
2709 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
2710 void mlx5_flow_pop_thread_workspace(void);
2711 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
2712 
/* Attributes controlling flow group-to-table translation. */
__extension__
struct flow_grp_info {
	/* Rule comes from the application (external) API - TODO confirm. */
	uint64_t external:1;
	/* Rule carries the transfer (E-Switch) attribute. */
	uint64_t transfer:1;
	/* FDB default rule is enabled - NOTE(review): inferred from name. */
	uint64_t fdb_def_rule:1;
	/* force standard group translation */
	uint64_t std_tbl_fix:1;
	/* Number of group scaling levels to skip. */
	uint64_t skip_scale:2;
};
2722 
2723 static inline bool
2724 tunnel_use_standard_attr_group_translate
2725 		    (const struct rte_eth_dev *dev,
2726 		     const struct rte_flow_attr *attr,
2727 		     const struct mlx5_flow_tunnel *tunnel,
2728 		     enum mlx5_tof_rule_type tof_rule_type)
2729 {
2730 	bool verdict;
2731 
2732 	if (!is_tunnel_offload_active(dev))
2733 		/* no tunnel offload API */
2734 		verdict = true;
2735 	else if (tunnel) {
2736 		/*
2737 		 * OvS will use jump to group 0 in tunnel steer rule.
2738 		 * If tunnel steer rule starts from group 0 (attr.group == 0)
2739 		 * that 0 group must be translated with standard method.
2740 		 * attr.group == 0 in tunnel match rule translated with tunnel
2741 		 * method
2742 		 */
2743 		verdict = !attr->group &&
2744 			  is_flow_tunnel_steer_rule(tof_rule_type);
2745 	} else {
2746 		/*
2747 		 * non-tunnel group translation uses standard method for
2748 		 * root group only: attr.group == 0
2749 		 */
2750 		verdict = !attr->group;
2751 	}
2752 
2753 	return verdict;
2754 }
2755 
2756 /**
2757  * Get DV flow aso meter by index.
2758  *
2759  * @param[in] dev
2760  *   Pointer to the Ethernet device structure.
2761  * @param[in] idx
2762  *   mlx5 flow aso meter index in the container.
2763  * @param[out] ppool
2764  *   mlx5 flow aso meter pool in the container,
2765  *
2766  * @return
2767  *   Pointer to the aso meter, NULL otherwise.
2768  */
2769 static inline struct mlx5_aso_mtr *
2770 mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
2771 {
2772 	struct mlx5_aso_mtr_pool *pool;
2773 	struct mlx5_aso_mtr_pools_mng *pools_mng =
2774 				&priv->sh->mtrmng->pools_mng;
2775 
2776 	if (priv->mtr_bulk.aso)
2777 		return priv->mtr_bulk.aso + idx;
2778 	/* Decrease to original index. */
2779 	idx--;
2780 	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
2781 	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
2782 	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
2783 	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
2784 	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
2785 }
2786 
2787 static __rte_always_inline const struct rte_flow_item *
2788 mlx5_find_end_item(const struct rte_flow_item *item)
2789 {
2790 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
2791 	return item;
2792 }
2793 
2794 static __rte_always_inline bool
2795 mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
2796 {
2797 	struct rte_flow_item_integrity test = *item;
2798 	test.l3_ok = 0;
2799 	test.l4_ok = 0;
2800 	test.ipv4_csum_ok = 0;
2801 	test.l4_csum_ok = 0;
2802 	return (test.value == 0);
2803 }
2804 
2805 /*
2806  * Get ASO CT action by device and index.
2807  *
2808  * @param[in] dev
2809  *   Pointer to the Ethernet device structure.
2810  * @param[in] idx
2811  *   Index to the ASO CT action.
2812  *
2813  * @return
2814  *   The specified ASO CT action pointer.
2815  */
2816 static inline struct mlx5_aso_ct_action *
2817 flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
2818 {
2819 	struct mlx5_priv *priv = dev->data->dev_private;
2820 	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
2821 	struct mlx5_aso_ct_pool *pool;
2822 
2823 	idx--;
2824 	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
2825 	/* Bit operation AND could be used. */
2826 	rte_rwlock_read_lock(&mng->resize_rwl);
2827 	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
2828 	rte_rwlock_read_unlock(&mng->resize_rwl);
2829 	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
2830 }
2831 
2832 /*
2833  * Get ASO CT action by owner & index.
2834  *
2835  * @param[in] dev
2836  *   Pointer to the Ethernet device structure.
2837  * @param[in] idx
2838  *   Index to the ASO CT action and owner port combination.
2839  *
2840  * @return
2841  *   The specified ASO CT action pointer.
2842  */
2843 static inline struct mlx5_aso_ct_action *
2844 flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
2845 {
2846 	struct mlx5_priv *priv = dev->data->dev_private;
2847 	struct mlx5_aso_ct_action *ct;
2848 	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
2849 	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
2850 
2851 	if (owner == PORT_ID(priv)) {
2852 		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
2853 	} else {
2854 		struct rte_eth_dev *owndev = &rte_eth_devices[owner];
2855 
2856 		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
2857 		if (dev->data->dev_started != 1)
2858 			return NULL;
2859 		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
2860 		if (ct->peer != PORT_ID(priv))
2861 			return NULL;
2862 	}
2863 	return ct;
2864 }
2865 
2866 static inline uint16_t
2867 mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
2868 {
2869 	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
2870 		return RTE_ETHER_TYPE_TEB;
2871 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2872 		return RTE_ETHER_TYPE_IPV4;
2873 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2874 		return RTE_ETHER_TYPE_IPV6;
2875 	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
2876 		return RTE_ETHER_TYPE_MPLS;
2877 	return 0;
2878 }
2879 
2880 int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2881 			 struct rte_flow_error *error);
2882 
2883 /*
2884  * Convert rte_mtr_color to mlx5 color.
2885  *
2886  * @param[in] rcol
2887  *   rte_mtr_color.
2888  *
2889  * @return
2890  *   mlx5 color.
2891  */
2892 static inline int
2893 rte_col_2_mlx5_col(enum rte_color rcol)
2894 {
2895 	switch (rcol) {
2896 	case RTE_COLOR_GREEN:
2897 		return MLX5_FLOW_COLOR_GREEN;
2898 	case RTE_COLOR_YELLOW:
2899 		return MLX5_FLOW_COLOR_YELLOW;
2900 	case RTE_COLOR_RED:
2901 		return MLX5_FLOW_COLOR_RED;
2902 	default:
2903 		break;
2904 	}
2905 	return MLX5_FLOW_COLOR_UNDEFINED;
2906 }
2907 
2908 /**
2909  * Indicates whether flow source vport is representor port.
2910  *
2911  * @param[in] priv
2912  *   Pointer to device private context structure.
2913  * @param[in] act_priv
2914  *   Pointer to actual device private context structure if have.
2915  *
2916  * @return
2917  *   True when the flow source vport is representor port, false otherwise.
2918  */
2919 static inline bool
2920 flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
2921 {
2922 	MLX5_ASSERT(priv);
2923 	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
2924 		 (act_priv->representor_id != UINT16_MAX));
2925 }
2926 
2927 /* All types of Ethernet patterns used in control flow rules. */
2928 enum mlx5_flow_ctrl_rx_eth_pattern_type {
2929 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
2930 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
2931 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
2932 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
2933 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
2934 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
2935 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
2936 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
2937 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
2938 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
2939 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
2940 };
2941 
2942 /* All types of RSS actions used in control flow rules. */
2943 enum mlx5_flow_ctrl_rx_expanded_rss_type {
2944 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0,
2945 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
2946 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
2947 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
2948 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
2949 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP,
2950 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
2951 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
2952 };
2953 
2954 /**
2955  * Contains pattern template, template table and its attributes for a single
2956  * combination of Ethernet pattern and RSS action. Used to create control flow rules
2957  * with HWS.
2958  */
2959 struct mlx5_flow_hw_ctrl_rx_table {
2960 	struct rte_flow_template_table_attr attr;
2961 	struct rte_flow_pattern_template *pt;
2962 	struct rte_flow_template_table *tbl;
2963 };
2964 
/* Contains all templates required to create control flow rules with HWS. */
struct mlx5_flow_hw_ctrl_rx {
	/* One actions template per expanded RSS action type. */
	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
	/* One table entry per (Ethernet pattern, RSS type) combination. */
	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
};
2971 
/* Contains all templates required for control flow rules in FDB with HWS. */
struct mlx5_flow_hw_ctrl_fdb {
	/* E-Switch SQ miss rules (root and non-root tables). */
	struct rte_flow_pattern_template *esw_mgr_items_tmpl;
	struct rte_flow_actions_template *regc_jump_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
	struct rte_flow_pattern_template *regc_sq_items_tmpl;
	struct rte_flow_actions_template *port_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
	/* Default jump rules from FDB group 0. */
	struct rte_flow_pattern_template *port_items_tmpl;
	struct rte_flow_actions_template *jump_one_actions_tmpl;
	struct rte_flow_template_table *hw_esw_zero_tbl;
	/* Tx metadata copy rules. */
	struct rte_flow_pattern_template *tx_meta_items_tmpl;
	struct rte_flow_actions_template *tx_meta_actions_tmpl;
	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
	/* LACP Rx rules. */
	struct rte_flow_pattern_template *lacp_rx_items_tmpl;
	struct rte_flow_actions_template *lacp_rx_actions_tmpl;
	struct rte_flow_template_table *hw_lacp_rx_tbl;
};
2990 
/*
 * Selection flags for control flow rules; used as the flags bitmap of
 * mlx5_flow_hw_ctrl_flows().
 */
#define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
#define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
#define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
#define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
#define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
#define MLX5_CTRL_DMAC           (RTE_BIT32(5))
#define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))
2998 
2999 int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
3000 
/** Create a control flow rule for matching unicast DMAC (Verbs and DV). */
3002 int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3003 
/** Destroy a control flow rule for matching unicast DMAC (Verbs and DV). */
3005 int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3006 
3007 /** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
3008 int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
3009 				      const struct rte_ether_addr *addr,
3010 				      const uint16_t vid);
3011 
3012 /** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
3013 int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
3014 				      const struct rte_ether_addr *addr,
3015 				      const uint16_t vid);
3016 
3017 /** Destroy a control flow rule registered on port level control flow rule type. */
3018 void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry);
3019 
3020 /** Create a control flow rule for matching unicast DMAC (HWS). */
3021 int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3022 
3023 /** Destroy a control flow rule for matching unicast DMAC (HWS). */
3024 int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
3025 
3026 /** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
3027 int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
3028 				     const struct rte_ether_addr *addr,
3029 				     const uint16_t vlan);
3030 
3031 /** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */
3032 int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
3033 					     const struct rte_ether_addr *addr,
3034 					     const uint16_t vlan);
3035 
3036 void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
3037 
3038 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
3039 			     const struct mlx5_flow_tunnel *tunnel,
3040 			     uint32_t group, uint32_t *table,
3041 			     const struct flow_grp_info *flags,
3042 			     struct rte_flow_error *error);
3043 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
3044 				     int tunnel, uint64_t layer_types,
3045 				     uint64_t hash_fields);
3046 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
3047 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
3048 				   uint32_t subpriority);
3049 uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
3050 					const struct rte_flow_attr *attr);
3051 uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
3052 				   const struct rte_flow_attr *attr,
3053 				   uint32_t subpriority, bool external);
3054 uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
3055 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
3056 				     enum mlx5_feature_name feature,
3057 				     uint32_t id,
3058 				     struct rte_flow_error *error);
3059 const struct rte_flow_action *mlx5_flow_find_action
3060 					(const struct rte_flow_action *actions,
3061 					 enum rte_flow_action_type action);
3062 int mlx5_validate_action_rss(struct rte_eth_dev *dev,
3063 			     const struct rte_flow_action *action,
3064 			     struct rte_flow_error *error);
3065 
3066 struct mlx5_hw_encap_decap_action*
3067 mlx5_reformat_action_create(struct rte_eth_dev *dev,
3068 			    const struct rte_flow_indir_action_conf *conf,
3069 			    const struct rte_flow_action *encap_action,
3070 			    const struct rte_flow_action *decap_action,
3071 			    struct rte_flow_error *error);
3072 int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
3073 				 struct rte_flow_action_list_handle *handle,
3074 				 struct rte_flow_error *error);
3075 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
3076 				    const struct rte_flow_attr *attr,
3077 				    struct rte_flow_error *error);
3078 int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
3079 				   bool is_root,
3080 				   const struct rte_flow_attr *attr,
3081 				   struct rte_flow_error *error);
3082 int mlx5_flow_validate_action_flag(uint64_t action_flags,
3083 				   const struct rte_flow_attr *attr,
3084 				   struct rte_flow_error *error);
3085 int mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
3086 				   const struct rte_flow_action *action,
3087 				   uint64_t action_flags,
3088 				   const struct rte_flow_attr *attr,
3089 				   struct rte_flow_error *error);
3090 int mlx5_flow_validate_target_queue(struct rte_eth_dev *dev,
3091 				    const struct rte_flow_action *action,
3092 				    struct rte_flow_error *error);
3093 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
3094 				    uint64_t action_flags,
3095 				    struct rte_eth_dev *dev,
3096 				    const struct rte_flow_attr *attr,
3097 				    struct rte_flow_error *error);
3098 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
3099 				  uint64_t action_flags,
3100 				  struct rte_eth_dev *dev,
3101 				  const struct rte_flow_attr *attr,
3102 				  uint64_t item_flags,
3103 				  struct rte_flow_error *error);
3104 int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
3105 				const struct rte_flow_attr *attr,
3106 				struct rte_flow_error *error);
3107 int flow_validate_modify_field_level
3108 			(const struct rte_flow_field_data *data,
3109 			 struct rte_flow_error *error);
3110 int
3111 mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3112 				      uint64_t action_flags,
3113 				      const struct rte_flow_action *action,
3114 				      const struct rte_flow_attr *attr,
3115 				      struct rte_flow_error *error);
3116 int
3117 mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3118 				   uint64_t action_flags,
3119 				   const struct rte_flow_action *action,
3120 				   const uint64_t item_flags,
3121 				   const struct rte_flow_attr *attr,
3122 				   struct rte_flow_error *error);
3123 int
3124 mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3125 				    uint64_t action_flags,
3126 				    uint64_t item_flags,
3127 				    bool root,
3128 				    struct rte_flow_error *error);
3129 int
3130 mlx5_flow_dv_validate_action_raw_encap_decap
3131 	(struct rte_eth_dev *dev,
3132 	 const struct rte_flow_action_raw_decap *decap,
3133 	 const struct rte_flow_action_raw_encap *encap,
3134 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
3135 	 int *actions_n, const struct rte_flow_action *action,
3136 	 uint64_t item_flags, struct rte_flow_error *error);
3137 int mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
3138 			      const struct rte_flow_item *item,
3139 			      const uint8_t *mask,
3140 			      const uint8_t *nic_mask,
3141 			      unsigned int size,
3142 			      bool range_accepted,
3143 			      struct rte_flow_error *error);
3144 int mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev,
3145 				const struct rte_flow_item *item,
3146 				uint64_t item_flags, bool ext_vlan_sup,
3147 				struct rte_flow_error *error);
3148 int
3149 mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item,
3150 				uint64_t item_flags,
3151 				struct rte_eth_dev *dev,
3152 				struct rte_flow_error *error);
3153 int
3154 mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
3155 				const struct rte_flow_item *item,
3156 				uint64_t item_flags,
3157 				uint64_t last_item,
3158 				uint16_t ether_type,
3159 				const struct rte_flow_item_ipv4 *acc_mask,
3160 				struct rte_flow_error *error);
3161 int
3162 mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
3163 			       const struct rte_flow_item *item,
3164 			       uint64_t item_flags,
3165 			       struct rte_flow_error *error);
3166 int
3167 mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev,
3168 				   const struct rte_flow_item *item,
3169 				   uint64_t last_item,
3170 				   const struct rte_flow_item *gtp_item,
3171 				   bool root, struct rte_flow_error *error);
3172 int
3173 mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
3174 				  const struct rte_flow_item *item,
3175 				  uint64_t *item_flags,
3176 				  struct rte_flow_error *error);
3177 int mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
3178 				const struct rte_flow_item *item,
3179 				uint64_t item_flags,
3180 				uint8_t target_protocol,
3181 				struct rte_flow_error *error);
3182 int mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
3183 				    const struct rte_flow_item *item,
3184 				    uint64_t item_flags,
3185 				    const struct rte_flow_item *gre_item,
3186 				    struct rte_flow_error *error);
3187 int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
3188 				       const struct rte_flow_item *item,
3189 				       uint64_t item_flags,
3190 				       const struct rte_flow_attr *attr,
3191 				       const struct rte_flow_item *gre_item,
3192 				       struct rte_flow_error *error);
3193 int mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev,
3194 				 const struct rte_flow_item *item,
3195 				 uint64_t item_flags,
3196 				 uint64_t last_item,
3197 				 uint16_t ether_type,
3198 				 const struct rte_flow_item_ipv4 *acc_mask,
3199 				 bool range_accepted,
3200 				 struct rte_flow_error *error);
3201 int mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
3202 				 const struct rte_flow_item *item,
3203 				 uint64_t item_flags,
3204 				 uint64_t last_item,
3205 				 uint16_t ether_type,
3206 				 const struct rte_flow_item_ipv6 *acc_mask,
3207 				 struct rte_flow_error *error);
3208 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
3209 				 const struct rte_flow_item *item,
3210 				 uint64_t item_flags,
3211 				 uint64_t prev_layer,
3212 				 struct rte_flow_error *error);
3213 int mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev,
3214 				const struct rte_flow_item *item,
3215 				uint64_t item_flags,
3216 				uint8_t target_protocol,
3217 				const struct rte_flow_item_tcp *flow_mask,
3218 				struct rte_flow_error *error);
3219 int mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
3220 				const struct rte_flow_item *item,
3221 				uint64_t item_flags,
3222 				uint8_t target_protocol,
3223 				struct rte_flow_error *error);
3224 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
3225 				 uint64_t item_flags,
3226 				 struct rte_eth_dev *dev,
3227 				 struct rte_flow_error *error);
3228 int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
3229 				  uint16_t udp_dport,
3230 				  const struct rte_flow_item *item,
3231 				  uint64_t item_flags,
3232 				  bool root,
3233 				  struct rte_flow_error *error);
3234 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
3235 				      uint64_t item_flags,
3236 				      struct rte_eth_dev *dev,
3237 				      struct rte_flow_error *error);
3238 int mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev,
3239 				 const struct rte_flow_item *item,
3240 				 uint64_t item_flags,
3241 				 uint8_t target_protocol,
3242 				 struct rte_flow_error *error);
3243 int mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
3244 				  const struct rte_flow_item *item,
3245 				  uint64_t item_flags,
3246 				  uint8_t target_protocol,
3247 				  struct rte_flow_error *error);
3248 int mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
3249 				       const struct rte_flow_item *item,
3250 				       uint64_t item_flags,
3251 				       uint8_t target_protocol,
3252 				       struct rte_flow_error *error);
3253 int mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev,
3254 				  const struct rte_flow_item *item,
3255 				  uint64_t item_flags,
3256 				  uint8_t target_protocol,
3257 				  struct rte_flow_error *error);
3258 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
3259 				   uint64_t item_flags,
3260 				   struct rte_eth_dev *dev,
3261 				   struct rte_flow_error *error);
3262 int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
3263 				   uint64_t last_item,
3264 				   const struct rte_flow_item *geneve_item,
3265 				   struct rte_eth_dev *dev,
3266 				   struct rte_flow_error *error);
3267 int mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
3268 				  const struct rte_flow_item *item,
3269 				  uint64_t item_flags,
3270 				  uint64_t last_item,
3271 				  uint16_t ether_type,
3272 				  const struct rte_flow_item_ecpri *acc_mask,
3273 				  struct rte_flow_error *error);
3274 int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
3275 				const struct rte_flow_item *item,
3276 				struct rte_flow_error *error);
3277 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
3278 			      struct mlx5_flow_meter_info *fm,
3279 			      uint32_t mtr_idx,
3280 			      uint8_t domain_bitmap);
3281 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
3282 			       struct mlx5_flow_meter_info *fm);
3283 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
3284 struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
3285 		(struct rte_eth_dev *dev,
3286 		struct mlx5_flow_meter_policy *mtr_policy,
3287 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
3288 void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
3289 		struct mlx5_flow_meter_policy *mtr_policy);
3290 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
3291 int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
3292 int mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev);
3293 int mlx5_action_handle_attach(struct rte_eth_dev *dev);
3294 int mlx5_action_handle_detach(struct rte_eth_dev *dev);
3295 int mlx5_action_handle_flush(struct rte_eth_dev *dev);
3296 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
3297 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
3298 
3299 struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
3300 int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3301 			 void *cb_ctx);
3302 void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3303 struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
3304 					     struct mlx5_list_entry *oentry,
3305 					     void *entry_ctx);
3306 void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3307 struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3308 		uint32_t table_level, uint8_t egress, uint8_t transfer,
3309 		bool external, const struct mlx5_flow_tunnel *tunnel,
3310 		uint32_t group_id, uint8_t dummy,
3311 		uint32_t table_id, struct rte_flow_error *error);
3312 int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
3313 				 struct mlx5_flow_tbl_resource *tbl);
3314 
3315 struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
3316 int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3317 			 void *cb_ctx);
3318 void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3319 struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
3320 					     struct mlx5_list_entry *oentry,
3321 					     void *cb_ctx);
3322 void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3323 
3324 int flow_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3325 			    void *cb_ctx);
3326 struct mlx5_list_entry *flow_modify_create_cb(void *tool_ctx, void *ctx);
3327 void flow_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3328 struct mlx5_list_entry *flow_modify_clone_cb(void *tool_ctx,
3329 						struct mlx5_list_entry *oentry,
3330 						void *ctx);
3331 void flow_modify_clone_free_cb(void *tool_ctx,
3332 				  struct mlx5_list_entry *entry);
3333 
3334 struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
3335 int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3336 			  void *cb_ctx);
3337 void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3338 struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
3339 					      struct mlx5_list_entry *entry,
3340 					      void *ctx);
3341 void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3342 
3343 int flow_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3344 				 void *cb_ctx);
3345 struct mlx5_list_entry *flow_encap_decap_create_cb(void *tool_ctx,
3346 						      void *cb_ctx);
3347 void flow_encap_decap_remove_cb(void *tool_ctx,
3348 				   struct mlx5_list_entry *entry);
3349 struct mlx5_list_entry *flow_encap_decap_clone_cb(void *tool_ctx,
3350 						  struct mlx5_list_entry *entry,
3351 						  void *cb_ctx);
3352 void flow_encap_decap_clone_free_cb(void *tool_ctx,
3353 				       struct mlx5_list_entry *entry);
3354 int __flow_encap_decap_resource_register
3355 			(struct rte_eth_dev *dev,
3356 			 struct mlx5_flow_dv_encap_decap_resource *resource,
3357 			 bool is_root,
3358 			 struct mlx5_flow_dv_encap_decap_resource **encap_decap,
3359 			 struct rte_flow_error *error);
3360 int __flow_modify_hdr_resource_register
3361 			(struct rte_eth_dev *dev,
3362 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
3363 			 struct mlx5_flow_dv_modify_hdr_resource **modify,
3364 			 struct rte_flow_error *error);
3365 int flow_encap_decap_resource_release(struct rte_eth_dev *dev,
3366 				     uint32_t encap_decap_idx);
3367 int flow_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3368 			     void *ctx);
3369 struct mlx5_list_entry *flow_matcher_create_cb(void *tool_ctx, void *ctx);
3370 void flow_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3371 struct mlx5_list_entry *flow_matcher_clone_cb(void *tool_ctx __rte_unused,
3372 			 struct mlx5_list_entry *entry, void *cb_ctx);
3373 void flow_matcher_clone_free_cb(void *tool_ctx __rte_unused,
3374 			     struct mlx5_list_entry *entry);
3375 int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3376 			     void *cb_ctx);
3377 struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
3378 void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3379 struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
3380 				struct mlx5_list_entry *entry, void *cb_ctx);
3381 void flow_dv_port_id_clone_free_cb(void *tool_ctx,
3382 				   struct mlx5_list_entry *entry);
3383 
3384 int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3385 			       void *cb_ctx);
3386 struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
3387 						    void *cb_ctx);
3388 void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3389 struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
3390 				 struct mlx5_list_entry *entry, void *cb_ctx);
3391 void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
3392 				     struct mlx5_list_entry *entry);
3393 
3394 int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3395 			    void *cb_ctx);
3396 struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
3397 void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3398 struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
3399 				 struct mlx5_list_entry *entry, void *cb_ctx);
3400 void flow_dv_sample_clone_free_cb(void *tool_ctx,
3401 				  struct mlx5_list_entry *entry);
3402 
3403 int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3404 				void *cb_ctx);
3405 struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
3406 						     void *cb_ctx);
3407 void flow_dv_dest_array_remove_cb(void *tool_ctx,
3408 				  struct mlx5_list_entry *entry);
3409 struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
3410 				   struct mlx5_list_entry *entry, void *cb_ctx);
3411 void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
3412 				      struct mlx5_list_entry *entry);
3413 void flow_dv_hashfields_set(uint64_t item_flags,
3414 			    struct mlx5_flow_rss_desc *rss_desc,
3415 			    uint64_t *hash_fields);
3416 void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
3417 					uint64_t *hash_field);
3418 uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
3419 					const uint64_t hash_fields);
3420 int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3421 		     const struct rte_flow_item items[],
3422 		     const struct rte_flow_action actions[],
3423 		     bool external, int hairpin, struct rte_flow_error *error);
3424 
3425 struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
3426 void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3427 int flow_hw_grp_match_cb(void *tool_ctx,
3428 			 struct mlx5_list_entry *entry,
3429 			 void *cb_ctx);
3430 struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
3431 					     struct mlx5_list_entry *oentry,
3432 					     void *cb_ctx);
3433 void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3434 
3435 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
3436 						    uint32_t age_idx);
3437 
3438 void flow_release_workspace(void *data);
3439 int mlx5_flow_os_init_workspace_once(void);
3440 void *mlx5_flow_os_get_specific_workspace(void);
3441 int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
3442 void mlx5_flow_os_release_workspace(void);
3443 uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
3444 void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
3445 int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
3446 			const struct rte_flow_action *actions[RTE_COLORS],
3447 			struct rte_flow_attr *attr,
3448 			bool *is_rss,
3449 			uint8_t *domain_bitmap,
3450 			uint8_t *policy_mode,
3451 			struct rte_mtr_error *error);
3452 void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
3453 		      struct mlx5_flow_meter_policy *mtr_policy);
3454 int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
3455 		      struct mlx5_flow_meter_policy *mtr_policy,
3456 		      const struct rte_flow_action *actions[RTE_COLORS],
3457 		      struct rte_flow_attr *attr,
3458 		      struct rte_mtr_error *error);
3459 int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
3460 			     struct mlx5_flow_meter_policy *mtr_policy);
3461 void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
3462 			     struct mlx5_flow_meter_policy *mtr_policy);
3463 int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
3464 void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
3465 void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
3466 		       struct mlx5_flow_handle *dev_handle);
3467 const struct mlx5_flow_tunnel *
3468 mlx5_get_tof(const struct rte_flow_item *items,
3469 	     const struct rte_flow_action *actions,
3470 	     enum mlx5_tof_rule_type *rule_type);
3471 void
3472 flow_hw_resource_release(struct rte_eth_dev *dev);
3473 int
3474 mlx5_geneve_tlv_options_destroy(struct mlx5_geneve_tlv_options *options,
3475 				struct mlx5_physical_device *phdev);
3476 int
3477 mlx5_geneve_tlv_options_check_busy(struct mlx5_priv *priv);
3478 void
3479 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
3480 int flow_dv_action_validate(struct rte_eth_dev *dev,
3481 			    const struct rte_flow_indir_action_conf *conf,
3482 			    const struct rte_flow_action *action,
3483 			    struct rte_flow_error *err);
3484 struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
3485 		      const struct rte_flow_indir_action_conf *conf,
3486 		      const struct rte_flow_action *action,
3487 		      struct rte_flow_error *err);
3488 int flow_dv_action_destroy(struct rte_eth_dev *dev,
3489 			   struct rte_flow_action_handle *handle,
3490 			   struct rte_flow_error *error);
3491 int flow_dv_action_update(struct rte_eth_dev *dev,
3492 			  struct rte_flow_action_handle *handle,
3493 			  const void *update,
3494 			  struct rte_flow_error *err);
3495 int flow_dv_action_query(struct rte_eth_dev *dev,
3496 			 const struct rte_flow_action_handle *handle,
3497 			 void *data,
3498 			 struct rte_flow_error *error);
3499 size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
3500 int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3501 			   size_t *size, struct rte_flow_error *error);
3502 void mlx5_flow_field_id_to_modify_info
3503 		(const struct rte_flow_field_data *data,
3504 		 struct field_modify_info *info, uint32_t *mask,
3505 		 uint32_t width, struct rte_eth_dev *dev,
3506 		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
3507 int flow_dv_convert_modify_action(struct rte_flow_item *item,
3508 			      struct field_modify_info *field,
3509 			      struct field_modify_info *dest,
3510 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
3511 			      uint32_t type, struct rte_flow_error *error);
3512 
/* E-Switch vport ID of the Physical Function. */
#define MLX5_PF_VPORT_ID 0
/* E-Switch vport ID reserved for the ECPF (embedded CPU function). */
#define MLX5_ECPF_VPORT_ID 0xFFFE
3515 
3516 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
3517 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
3518 				const struct rte_flow_item *item,
3519 				uint16_t *vport_id,
3520 				bool *all_ports,
3521 				struct rte_flow_error *error);
3522 
3523 int flow_dv_translate_items_hws(const struct rte_flow_item *items,
3524 				struct mlx5_flow_attr *attr, void *key,
3525 				uint32_t key_type, uint64_t *item_flags,
3526 				uint8_t *match_criteria,
3527 				struct rte_flow_error *error);
3528 
3529 int __flow_dv_translate_items_hws(const struct rte_flow_item *items,
3530 				struct mlx5_flow_attr *attr, void *key,
3531 				uint32_t key_type, uint64_t *item_flags,
3532 				uint8_t *match_criteria,
3533 				bool nt_flow,
3534 				struct rte_flow_error *error);
3535 
3536 int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
3537 				  uint16_t *proxy_port_id,
3538 				  struct rte_flow_error *error);
/*
 * NOTE(review): flow_null_* look like stub ("null" engine) flow ops —
 * presumably no-op fallbacks when no real flow engine is active; confirm
 * against their definitions before relying on return semantics.
 */
int flow_null_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error);
uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
void flow_null_counter_free(struct rte_eth_dev *dev,
			uint32_t counter);
int flow_null_counter_query(struct rte_eth_dev *dev,
			uint32_t counter,
			bool clear,
		    uint64_t *pkts,
			uint64_t *bytes,
			void **action);
3552 
/* Driver-internal (control) flow rules for the HW steering engine. */
int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);

/* Creation/destruction of per-SQ and default E-Switch control rules. */
int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
					 uint32_t sqn, bool external);
int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
					  uint32_t sqn);
int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
3563 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
3564 		const struct rte_flow_actions_template_attr *attr,
3565 		const struct rte_flow_action actions[],
3566 		const struct rte_flow_action masks[],
3567 		struct rte_flow_error *error);
3568 int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
3569 		const struct rte_flow_pattern_template_attr *attr,
3570 		const struct rte_flow_item items[],
3571 		struct rte_flow_error *error);
3572 int flow_hw_table_update(struct rte_eth_dev *dev,
3573 			 struct rte_flow_error *error);
3574 int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
3575 			   enum rte_flow_field_id field, int inherit,
3576 			   const struct rte_flow_attr *attr,
3577 			   struct rte_flow_error *error);
3578 uintptr_t flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3579 				const struct rte_flow_attr *attr,
3580 				const struct rte_flow_item items[],
3581 				const struct rte_flow_action actions[],
3582 				bool external, struct rte_flow_error *error);
3583 void flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3584 				uintptr_t flow_idx);
3585 
3586 static __rte_always_inline int
3587 flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
3588 {
3589 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3590 	uint16_t port;
3591 
3592 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3593 		struct mlx5_priv *priv;
3594 		struct mlx5_hca_flex_attr *attr;
3595 		struct mlx5_devx_match_sample_info_query_attr *info;
3596 
3597 		priv = rte_eth_devices[port].data->dev_private;
3598 		attr = &priv->sh->cdev->config.hca_attr.flex;
3599 		if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) {
3600 			info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0];
3601 			if (priv->sh->srh_flex_parser.flex.mapnum)
3602 				return info->sample_dw_data * sizeof(uint32_t);
3603 			else
3604 				return UINT32_MAX;
3605 		}
3606 	}
3607 #endif
3608 	return UINT32_MAX;
3609 }
3610 
3611 static __rte_always_inline uint8_t
3612 flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
3613 {
3614 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3615 	uint16_t port;
3616 	struct mlx5_priv *priv;
3617 
3618 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3619 		priv = rte_eth_devices[port].data->dev_private;
3620 		if (priv->dr_ctx == dr_ctx)
3621 			return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
3622 	}
3623 #else
3624 	RTE_SET_USED(dr_ctx);
3625 #endif
3626 	return 0;
3627 }
3628 
3629 static __rte_always_inline uint16_t
3630 flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
3631 {
3632 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3633 	uint16_t port;
3634 	struct mlx5_priv *priv;
3635 	struct mlx5_flex_parser_devx *fp;
3636 
3637 	if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
3638 		return 0;
3639 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3640 		priv = rte_eth_devices[port].data->dev_private;
3641 		if (priv->dr_ctx == dr_ctx) {
3642 			fp = priv->sh->srh_flex_parser.flex.devx_fp;
3643 			return fp->sample_info[idx].modify_field_id;
3644 		}
3645 	}
3646 #else
3647 	RTE_SET_USED(dr_ctx);
3648 	RTE_SET_USED(idx);
3649 #endif
3650 	return 0;
3651 }
3652 void
3653 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
3654 #ifdef HAVE_MLX5_HWS_SUPPORT
3655 struct mlx5_mirror;
3656 void
3657 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
3658 void
3659 mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
3660 			     struct mlx5_indirect_list *ptr);
3661 void
3662 mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
3663 			    struct mlx5_indirect_list *reformat);
3664 int
3665 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3666 		    const struct rte_flow_attr *attr,
3667 		    const struct rte_flow_item items[],
3668 		    const struct rte_flow_action actions[],
3669 		    uint64_t item_flags, uint64_t action_flags, bool external,
3670 		    struct rte_flow_hw **flow, struct rte_flow_error *error);
3671 void
3672 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow);
3673 void
3674 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3675 		     uintptr_t flow_idx);
3676 const struct rte_flow_action_rss *
3677 flow_nta_locate_rss(struct rte_eth_dev *dev,
3678 		    const struct rte_flow_action actions[],
3679 		    struct rte_flow_error *error);
3680 struct rte_flow_hw *
3681 flow_nta_handle_rss(struct rte_eth_dev *dev,
3682 		    const struct rte_flow_attr *attr,
3683 		    const struct rte_flow_item items[],
3684 		    const struct rte_flow_action actions[],
3685 		    const struct rte_flow_action_rss *rss_conf,
3686 		    uint64_t item_flags, uint64_t action_flags,
3687 		    bool external, enum mlx5_flow_type flow_type,
3688 		    struct rte_flow_error *error);
3689 
/* Shared constant masks/actions (defined once in the flow implementation). */
extern const struct rte_flow_action_raw_decap empty_decap;
extern const struct rte_flow_item_ipv6 nic_ipv6_mask;
extern const struct rte_flow_item_tcp nic_tcp_mask;
3693 
3694 /* mlx5_nta_split.c */
3695 int
3696 mlx5_flow_nta_split_metadata(struct rte_eth_dev *dev,
3697 			     const struct rte_flow_attr *attr,
3698 			     const struct rte_flow_action actions[],
3699 			     const struct rte_flow_action *qrss,
3700 			     uint64_t action_flags,
3701 			     int actions_n,
3702 			     bool external,
3703 			     struct mlx5_flow_hw_split_resource *res,
3704 			     struct rte_flow_error *error);
3705 void
3706 mlx5_flow_nta_split_resource_free(struct rte_eth_dev *dev,
3707 				  struct mlx5_flow_hw_split_resource *res);
3708 struct mlx5_list_entry *
3709 flow_nta_mreg_create_cb(void *tool_ctx, void *cb_ctx);
3710 void
3711 flow_nta_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3712 void
3713 mlx5_flow_nta_del_copy_action(struct rte_eth_dev *dev, uint32_t idx);
3714 void
3715 mlx5_flow_nta_del_default_copy_action(struct rte_eth_dev *dev);
3716 int
3717 mlx5_flow_nta_add_default_copy_action(struct rte_eth_dev *dev,
3718 				      struct rte_flow_error *error);
3719 int
3720 mlx5_flow_nta_update_copy_table(struct rte_eth_dev *dev,
3721 				uint32_t *idx,
3722 				const struct rte_flow_action *mark,
3723 				uint64_t action_flags,
3724 				struct rte_flow_error *error);
3725 
3726 #endif
3727 #endif /* RTE_PMD_MLX5_FLOW_H_ */
3728