xref: /dpdk/drivers/net/mlx5/mlx5_flow.h (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_FLOW_H_
6 #define RTE_PMD_MLX5_FLOW_H_
7 
8 #include <stdalign.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 
13 #include <rte_alarm.h>
14 #include <rte_mtr.h>
15 
16 #include <mlx5_glue.h>
17 #include <mlx5_prm.h>
18 
19 #include "mlx5.h"
20 #include "rte_pmd_mlx5.h"
21 #include "hws/mlx5dr.h"
22 #include "mlx5_tx.h"
23 
/*
 * Sentinel port id for the E-Switch Manager port, used for
 * rte_flow_item_port_id (32-bit id space).
 */
#define MLX5_PORT_ESW_MGR UINT32_MAX

/*
 * Sentinel port id for the E-Switch Manager port, used for
 * rte_flow_item_ethdev (16-bit id space).
 */
#define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX
29 
/* Private rte flow items. */
enum mlx5_rte_flow_item_type {
	/* Starting at INT_MIN keeps these internal values disjoint from
	 * the non-negative public RTE_FLOW_ITEM_TYPE_* values.
	 */
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
	MLX5_RTE_FLOW_ITEM_TYPE_SQ,
	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
	MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
};
38 
/* Private (internal) rte flow actions. */
enum mlx5_rte_flow_action_type {
	/* Starting at INT_MIN keeps these internal values disjoint from
	 * the non-negative public RTE_FLOW_ACTION_TYPE_* values.
	 */
	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
	MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	MLX5_RTE_FLOW_ACTION_TYPE_AGE,
	MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
	MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
	MLX5_RTE_FLOW_ACTION_TYPE_RSS,
	MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
};
53 
/* Private (internal) Field IDs for MODIFY_FIELD action. */
enum mlx5_rte_flow_field_id {
	/* Negative range avoids clashes with public RTE_FLOW_FIELD_* ids. */
	MLX5_RTE_FLOW_FIELD_END = INT_MIN,
	MLX5_RTE_FLOW_FIELD_META_REG,
};
59 
/*
 * An indirect action handle encodes the mlx5_indirect_type in its top
 * bits (31:29) and the action index in the low 29 bits.
 */
#define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29

/* Extract the mlx5_indirect_type from an action handle. */
#define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \
	(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)

/* Extract the action index (low 29 bits) from an action handle. */
#define MLX5_INDIRECT_ACTION_IDX_GET(handle) \
	(((uint32_t)(uintptr_t)(handle)) & \
	 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
68 
/* Indirect action type, stored in the top bits of an action handle. */
enum mlx5_indirect_type {
	MLX5_INDIRECT_ACTION_TYPE_RSS,
	MLX5_INDIRECT_ACTION_TYPE_AGE,
	MLX5_INDIRECT_ACTION_TYPE_COUNT,
	MLX5_INDIRECT_ACTION_TYPE_CT,
	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
};
77 
/* Currently at most 16 ports are supported, so 32M actions per port fit. */
#define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10

#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25
#define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)

/*
 * When the SW steering flow engine is used, the CT action handles are
 * encoded in the following way:
 * - bits 31:29 - type
 * - bits 28:25 - port index of the action owner
 * - bits 24:0 - action index
 */
#define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
	((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
	 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
	  MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))

/* Extract the owner port index (bits 28:25) from a SWS CT handle. */
#define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
	(((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
	 MLX5_INDIRECT_ACT_CT_OWNER_MASK)

/* Extract the action index (bits 24:0) from a SWS CT handle. */
#define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
	((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))

/*
 * When the HW steering flow engine is used, the CT action handles are
 * encoded in the following way (no owner field):
 * - bits 31:29 - type
 * - bits 28:0 - action index
 */
#define MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(index) \
	((struct rte_flow_action_handle *)(uintptr_t) \
	 ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (index)))
110 
/* Type discriminator for indirect action lists. */
enum mlx5_indirect_list_type {
	MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
	MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
	MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
	MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,
};
117 
/**
 * Base type for indirect list type.
 * An rte_flow_action_list_handle is cast to this type to classify it
 * (see mlx5_get_indirect_list_type()).
 */
struct mlx5_indirect_list {
	/* Indirect list type. */
	enum mlx5_indirect_list_type type;
	/* Optional storage list entry */
	LIST_ENTRY(mlx5_indirect_list) entry;
};
127 
128 static __rte_always_inline void
129 mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
130 {
131 	LIST_HEAD(, mlx5_indirect_list) *h = head;
132 
133 	LIST_INSERT_HEAD(h, elem, entry);
134 }
135 
136 static __rte_always_inline void
137 mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
138 {
139 	if (elem->entry.le_prev)
140 		LIST_REMOVE(elem, entry);
141 }
142 
143 static __rte_always_inline enum mlx5_indirect_list_type
144 mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
145 {
146 	return ((const struct mlx5_indirect_list *)obj)->type;
147 }
148 
/* Matches on a selected metadata register (private TAG item spec). */
struct mlx5_rte_flow_item_tag {
	enum modify_reg id; /* Register to match on. */
	uint32_t data;      /* Value to match. */
};
154 
/* Modify a selected metadata register (private TAG action conf). */
struct mlx5_rte_flow_action_set_tag {
	enum modify_reg id; /* Register to modify. */
	uint8_t offset;     /* Bit offset inside the register. */
	uint8_t length;     /* Number of bits to write. */
	uint32_t data;      /* Value to write. */
};
162 
/* Copy the content of one metadata register into another. */
struct mlx5_flow_action_copy_mreg {
	enum modify_reg dst; /* Destination register. */
	enum modify_reg src; /* Source register. */
};
167 
/* Matches on source queue (private SQ item spec). */
struct mlx5_rte_flow_item_sq {
	uint32_t queue; /* DevX SQ number */
};
172 
/* Map from registers to modify fields; defined in a .c file. */
extern enum mlx5_modification_field reg_to_field[];
extern const size_t mlx5_mod_reg_size;

/* Translate a metadata register id to its PRM modification field id. */
static __rte_always_inline enum mlx5_modification_field
mlx5_convert_reg_to_field(enum modify_reg reg)
{
	/* Guard against reading past the end of reg_to_field[]. */
	MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size);
	return reg_to_field[reg];
}
183 
/* Feature name used when allocating a metadata register for it. */
enum mlx5_feature_name {
	MLX5_HAIRPIN_RX,
	MLX5_HAIRPIN_TX,
	MLX5_METADATA_RX,
	MLX5_METADATA_TX,
	MLX5_METADATA_FDB,
	MLX5_FLOW_MARK,
	MLX5_APP_TAG,
	MLX5_COPY_MARK,
	MLX5_MTR_COLOR,
	MLX5_MTR_ID,
	MLX5_ASO_FLOW_HIT,
	MLX5_ASO_CONNTRACK,
	MLX5_SAMPLE_ID,
};
200 
/* Default queue number. */
#define MLX5_RSSQ_DEFAULT_NUM 16

/*
 * The MLX5_FLOW_LAYER_* / MLX5_FLOW_ITEM_* defines below are bits of a
 * single 64-bit field recording which pattern items a flow contains.
 */

/* Pattern outer Layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)

/* Pattern inner Layer bits. */
#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)

/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)
/* List of tunnel Layer bits continued below. */

/* General pattern items bits. */
#define MLX5_FLOW_ITEM_METADATA (1u << 16)
#define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
#define MLX5_FLOW_ITEM_TAG (1u << 18)
#define MLX5_FLOW_ITEM_MARK (1u << 19)

/* Pattern MISC bits. */
#define MLX5_FLOW_LAYER_ICMP (1u << 20)
#define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
#define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_IPIP (1u << 23)
#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
#define MLX5_FLOW_LAYER_NVGRE (1u << 25)
#define MLX5_FLOW_LAYER_GENEVE (1u << 26)

/* Queue items. */
#define MLX5_FLOW_ITEM_SQ (1u << 27)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_GTP (1u << 28)

/* Pattern eCPRI Layer bit. */
#define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)

/* IPv6 Fragment Extension Header bit. */
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)

/* INTEGRITY item bits */
#define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
#define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
/* Either integrity scope. */
#define MLX5_FLOW_ITEM_INTEGRITY \
	(MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)

/* Conntrack item. */
#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)

/* Flex item */
#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)

/* Any Flex item scope. */
#define MLX5_FLOW_ITEM_FLEX \
	(MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX | \
	MLX5_FLOW_ITEM_FLEX_TUNNEL)

/* ESP item */
#define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)

/* Port Representor/Represented Port item */
#define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
#define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
284 
/* Meter color item */
#define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
/*
 * Quota item.
 * NOTE(review): this previously used bit 45, which collided with
 * MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT below; both bits live in the
 * same 64-bit item_flags field, so a flow with one item would also look
 * like it had the other. Moved to the otherwise-unused bit 47.
 */
#define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 47)

/* IPv6 routing extension item */
#define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
#define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)

/* Aggregated affinity item */
#define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49)
296 
/* InfiniBand BTH item. */
#define MLX5_FLOW_ITEM_IB_BTH (UINT64_C(1) << 51)

/* Packet type (PTYPE) item. */
#define MLX5_FLOW_ITEM_PTYPE (UINT64_C(1) << 52)

/* NSH item. */
#define MLX5_FLOW_ITEM_NSH (UINT64_C(1) << 53)

/* Compare item. */
#define MLX5_FLOW_ITEM_COMPARE (UINT64_C(1) << 54)

/* Random item. */
#define MLX5_FLOW_ITEM_RANDOM (UINT64_C(1) << 55)
311 
/* Convenience masks built from the layer bits above. */

/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_LAYER_OUTER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
#define MLX5_FLOW_LAYER_OUTER \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
	 MLX5_FLOW_LAYER_OUTER_L4)

/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
	 MLX5_FLOW_ITEM_FLEX_TUNNEL)

/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_INNER_L4 \
	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
#define MLX5_FLOW_LAYER_INNER \
	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
	 MLX5_FLOW_LAYER_INNER_L4)

/* Layer Masks (inner and outer combined). */
#define MLX5_FLOW_LAYER_L2 \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
#define MLX5_FLOW_LAYER_L3_IPV4 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
#define MLX5_FLOW_LAYER_L3_IPV6 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_L3 \
	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
#define MLX5_FLOW_LAYER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
349 
/*
 * Actions. Bits of a 64-bit field recording which actions a flow uses;
 * this bit space is independent of the item bit space above.
 */
#define MLX5_FLOW_ACTION_DROP (1ull << 0)
#define MLX5_FLOW_ACTION_QUEUE (1ull << 1)
#define MLX5_FLOW_ACTION_RSS (1ull << 2)
#define MLX5_FLOW_ACTION_FLAG (1ull << 3)
#define MLX5_FLOW_ACTION_MARK (1ull << 4)
#define MLX5_FLOW_ACTION_COUNT (1ull << 5)
#define MLX5_FLOW_ACTION_PORT_ID (1ull << 6)
#define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7)
#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10)
#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11)
#define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12)
#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13)
#define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14)
#define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15)
#define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16)
#define MLX5_FLOW_ACTION_JUMP (1ull << 17)
#define MLX5_FLOW_ACTION_SET_TTL (1ull << 18)
#define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19)
#define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20)
#define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21)
#define MLX5_FLOW_ACTION_ENCAP (1ull << 22)
#define MLX5_FLOW_ACTION_DECAP (1ull << 23)
#define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24)
#define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25)
#define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26)
#define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27)
#define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
#define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
#define MLX5_FLOW_ACTION_SET_META (1ull << 30)
#define MLX5_FLOW_ACTION_METER (1ull << 31)
#define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
#define MLX5_FLOW_ACTION_AGE (1ull << 34)
#define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
#define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
#define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
#define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
#define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
#define MLX5_FLOW_ACTION_CT (1ull << 41)
#define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
#define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
#define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
/* NOTE(review): bit 45 appears unused in this file — confirm before reuse. */
#define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
#define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47)
#define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48)
#define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49)
#define MLX5_FLOW_ACTION_NAT64 (1ull << 50)

/* Actions permitted alongside DROP. */
#define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
404 
/* Terminating (fate) actions. */
#define MLX5_FLOW_FATE_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
	 MLX5_FLOW_ACTION_DEFAULT_MISS | \
	 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
	 MLX5_FLOW_ACTION_PORT_REPRESENTOR)

/* Terminating actions allowed on E-Switch (transfer) flows. */
#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
	 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)

/* Actions implemented via packet header modification. */
#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
				      MLX5_FLOW_ACTION_SET_TP_SRC | \
				      MLX5_FLOW_ACTION_SET_TP_DST | \
				      MLX5_FLOW_ACTION_SET_TTL | \
				      MLX5_FLOW_ACTION_DEC_TTL | \
				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
				      MLX5_FLOW_ACTION_SET_MAC_DST | \
				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
				      MLX5_FLOW_ACTION_SET_TAG | \
				      MLX5_FLOW_ACTION_MARK_EXT | \
				      MLX5_FLOW_ACTION_SET_META | \
				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
				      MLX5_FLOW_ACTION_SET_IPV6_DSCP | \
				      MLX5_FLOW_ACTION_MODIFY_FIELD)

/* VLAN push/pop actions. */
#define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
				MLX5_FLOW_ACTION_OF_PUSH_VLAN)

/* Encapsulation/decapsulation actions. */
#define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
444 
/* MPLS-in-IP protocol number; not defined by every libc. */
#ifndef IPPROTO_MPLS
#define IPPROTO_MPLS 137
#endif

/* IPv6 traffic-class sub-fields. */
#define MLX5_IPV6_HDR_ECN_MASK 0x3
#define MLX5_IPV6_HDR_DSCP_SHIFT 2

/* UDP port number for MPLS */
#define MLX5_UDP_PORT_MPLS 6635

/* UDP port numbers for VxLAN. */
#define MLX5_UDP_PORT_VXLAN 4789
#define MLX5_UDP_PORT_VXLAN_GPE 4790

/* UDP port numbers for RoCEv2. */
#define MLX5_UDP_PORT_ROCEv2 4791

/* UDP port numbers for GENEVE. */
#define MLX5_UDP_PORT_GENEVE 6081

/* Lowest priority indicator. */
#define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)

/*
 * Max priority for ingress/egress flow groups
 * greater than 0 and for any transfer flow group.
 * From user configuration: 0 - 21843.
 */
#define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)

/*
 * Number of sub priorities.
 * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
 * matching on the NIC (firmware dependent) L4 must have the higher priority
 * followed by L3 and ending with L2.
 */
#define MLX5_PRIORITY_MAP_L2 2
#define MLX5_PRIORITY_MAP_L3 1
#define MLX5_PRIORITY_MAP_L4 0
#define MLX5_PRIORITY_MAP_MAX 3
485 
/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)

/* Valid L4 RSS types */
#define MLX5_L4_RSS_TYPES (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)

/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)

/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)

/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)

/* IBV hash bits for L3 SRC. */
#define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)

/* IBV hash bits for L3 DST. */
#define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)

/* IBV hash bits for TCP. */
#define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
			      IBV_RX_HASH_DST_PORT_TCP)

/* IBV hash bits for UDP. */
#define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
			      IBV_RX_HASH_DST_PORT_UDP)

/* IBV hash bits for L4 SRC. */
#define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
				 IBV_RX_HASH_SRC_PORT_UDP)

/* IBV hash bits for L4 DST. */
#define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
				 IBV_RX_HASH_DST_PORT_UDP)
528 
/* Extractors for the first 16 bits of the Geneve header. */
#define MLX5_GENEVE_VER_MASK 0x3
#define MLX5_GENEVE_VER_SHIFT 14
#define MLX5_GENEVE_VER_VAL(a) \
		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
#define MLX5_GENEVE_OPTLEN_MASK 0x3F
#define MLX5_GENEVE_OPTLEN_SHIFT 8
#define MLX5_GENEVE_OPTLEN_VAL(a) \
	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
#define MLX5_GENEVE_OAMF_MASK 0x1
#define MLX5_GENEVE_OAMF_SHIFT 7
#define MLX5_GENEVE_OAMF_VAL(a) \
		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
#define MLX5_GENEVE_CRITO_MASK 0x1
#define MLX5_GENEVE_CRITO_SHIFT 6
#define MLX5_GENEVE_CRITO_VAL(a) \
		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
#define MLX5_GENEVE_RSVD_MASK 0x3F
#define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
/*
 * The length of the Geneve options fields, expressed in four byte multiples,
 * not including the eight byte fixed tunnel header.
 */
#define MLX5_GENEVE_OPT_LEN_0 14
#define MLX5_GENEVE_OPT_LEN_1 63
554 
/* Minimum header size used to decide whether a buffer is an encapsulation. */
#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
					  sizeof(struct rte_ipv4_hdr))
/* GTP extension header flag. */
#define MLX5_GTP_EXT_HEADER_FLAG 4

/* GTP extension header PDU type shift. */
#define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)

/* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
#define MLX5_IPV4_FRAG_OFFSET_MASK \
		(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)

/* Specific item's fields can accept a range of values (using spec and last). */
#define MLX5_ITEM_RANGE_NOT_ACCEPTED	false
#define MLX5_ITEM_RANGE_ACCEPTED	true
570 
/*
 * Software header modify action numbers of a flow: how many PRM
 * modification commands each logical set/copy action expands to.
 */
#define MLX5_ACT_NUM_MDF_IPV4		1
#define MLX5_ACT_NUM_MDF_IPV6		4
#define MLX5_ACT_NUM_MDF_MAC		2
#define MLX5_ACT_NUM_MDF_VID		1
#define MLX5_ACT_NUM_MDF_PORT		1
#define MLX5_ACT_NUM_MDF_TTL		1
#define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
#define MLX5_ACT_NUM_MDF_TCPSEQ		1
#define MLX5_ACT_NUM_MDF_TCPACK		1
#define MLX5_ACT_NUM_SET_REG		1
#define MLX5_ACT_NUM_SET_TAG		1
#define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
#define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
#define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
#define MLX5_ACT_NUM_SET_DSCP		1

/* Maximum number of fields to modify in MODIFY_FIELD */
#define MLX5_ACT_MAX_MOD_FIELDS 5
590 
/* Syndrome bits definition for connection tracking. */
#define MLX5_CT_SYNDROME_VALID		(0x0 << 6)
#define MLX5_CT_SYNDROME_INVALID	(0x1 << 6)
#define MLX5_CT_SYNDROME_TRAP		(0x2 << 6)
#define MLX5_CT_SYNDROME_STATE_CHANGE	(0x1 << 1)
#define MLX5_CT_SYNDROME_BAD_PACKET	(0x1 << 0)
597 
/* Flow driver (steering back-end) type. MIN/MAX are range sentinels. */
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV,
	MLX5_FLOW_TYPE_VERBS,
	MLX5_FLOW_TYPE_HW,
	MLX5_FLOW_TYPE_MAX,
};
605 
/* Fate action type. MLX5_FLOW_FATE_MAX is a range sentinel. */
enum mlx5_flow_fate_type {
	MLX5_FLOW_FATE_NONE, /* Egress flow. */
	MLX5_FLOW_FATE_QUEUE,
	MLX5_FLOW_FATE_JUMP,
	MLX5_FLOW_FATE_PORT_ID,
	MLX5_FLOW_FATE_DROP,
	MLX5_FLOW_FATE_DEFAULT_MISS,
	MLX5_FLOW_FATE_SHARED_RSS,
	MLX5_FLOW_FATE_MTR,
	MLX5_FLOW_FATE_SEND_TO_KERNEL,
	MLX5_FLOW_FATE_MAX,
};
619 
/* Matcher PRM representation */
struct mlx5_flow_dv_match_params {
	size_t size;
	/**< Size of match value. Do NOT split size and key! */
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
	/**< Matcher value. This value is used as the mask or as a key. */
};
627 
/* Matcher structure. */
struct mlx5_flow_dv_matcher {
	struct mlx5_list_entry entry; /**< Pointer to the next element. */
	/* Exactly one union member is valid, depending on the flow engine. */
	union {
		struct mlx5_flow_tbl_resource *tbl;
		/**< Pointer to the table(group) the matcher associated with for DV flow. */
		struct mlx5_flow_group *group;
		/* Group of this matcher for HWS non template flow. */
	};
	void *matcher_object; /**< Pointer to DV matcher */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};
642 
/* Maximum byte lengths of push and encap reformat data. */
#define MLX5_PUSH_MAX_LEN 128
#define MLX5_ENCAP_MAX_LEN 132

/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
	struct mlx5_list_entry entry;
	/* Pointer to next element. */
	uint32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Encap/decap action object. */
	uint8_t buf[MLX5_ENCAP_MAX_LEN]; /**< Raw reformat data. */
	size_t size; /**< Used bytes in buf. */
	uint8_t reformat_type;
	uint8_t ft_type;
	uint64_t flags; /**< Flags for RDMA API. */
	uint32_t idx; /**< Index for the index memory pool. */
};
660 
/* Tag resource structure. */
struct mlx5_flow_dv_tag_resource {
	struct mlx5_list_entry entry;
	/**< hash list entry for tag resource, tag value as the key. */
	void *action;
	/**< Tag action object. */
	uint32_t refcnt; /**< Reference counter. */
	uint32_t idx; /**< Index for the index memory pool. */
	uint32_t tag_id; /**< Tag ID. */
};
671 
/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
	struct mlx5_list_entry entry;
	void *action; /**< Modify header action object. */
	uint32_t idx;
	uint64_t flags; /**< Flags for RDMA API(HWS only). */
	/* Key area for hash list matching: */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint8_t actions_num; /**< Number of modification actions. */
	bool root; /**< Whether action is in root table. */
	/* Flexible array; allocated with actions_num trailing commands. */
	struct mlx5_modification_cmd actions[];
	/**< Modification actions. */
} __rte_packed;
685 
/* Modify resource key of the hash organization. */
union mlx5_flow_modify_hdr_key {
	struct {
		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
		uint32_t actions_num:5;	/**< Number of modification actions. */
		uint32_t group:19;	/**< Flow group id. */
		uint32_t cksum;		/**< Actions check sum. */
	};
	uint64_t v64;			/**< full 64bits value of key */
};
696 
/* Jump action resource structure. */
struct mlx5_flow_dv_jump_tbl_resource {
	void *action; /**< Pointer to the rdma core action. */
};
701 
/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
	struct mlx5_list_entry entry;
	void *action; /**< Action object. */
	uint32_t port_id; /**< Port ID value. */
	uint32_t idx; /**< Indexed pool memory index. */
};
709 
/* Push VLAN action resource structure */
struct mlx5_flow_dv_push_vlan_action_resource {
	struct mlx5_list_entry entry; /* Cache entry. */
	void *action; /**< Action object. */
	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
	rte_be32_t vlan_tag; /**< VLAN tag value (big endian). */
	uint32_t idx; /**< Indexed pool memory index. */
};
718 
/* Metadata register copy table entry. */
struct mlx5_flow_mreg_copy_resource {
	/*
	 * Hash list entry for copy table.
	 *  - Key is 32/64-bit MARK action ID.
	 *  - MUST be the first entry.
	 */
	struct mlx5_list_entry hlist_ent;
	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
	/* List entry for device flows. */
	uint32_t idx; /* Indexed pool memory index. */
	uint32_t rix_flow; /* Built flow for copy. */
	uint32_t mark_id; /* MARK id this copy entry serves. */
};
733 
/* Table tunnel parameter. */
struct mlx5_flow_tbl_tunnel_prm {
	const struct mlx5_flow_tunnel *tunnel; /* Owning tunnel, if any. */
	uint32_t group_id; /* Flow group id. */
	bool external; /* Table created on application request. */
};
740 
/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
	struct mlx5_list_entry entry;
	/**< hash list entry, 64-bits key inside. */
	struct mlx5_flow_tbl_resource tbl;
	/**< flow table resource. */
	struct mlx5_list *matchers;
	/**< matchers' header associated with the flow table. */
	struct mlx5_flow_dv_jump_tbl_resource jump;
	/**< jump resource, at most one for each table created. */
	uint32_t idx; /**< index for the indexed mempool. */
	/**< tunnel offload */
	const struct mlx5_flow_tunnel *tunnel;
	uint32_t group_id;
	uint32_t external:1;
	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
	uint32_t is_egress:1; /**< Egress table. */
	uint32_t is_transfer:1; /**< Transfer table. */
	uint32_t dummy:1; /**<  DR table. */
	uint32_t id:22; /**< Table ID. */
	uint32_t reserve:5; /**< Reserved to future using. */
	uint32_t level; /**< Table level. */
};
764 
/* Sub rdma-core actions list. */
struct mlx5_flow_sub_actions_list {
	uint32_t actions_num; /**< Number of sample actions. */
	uint64_t action_flags; /**< MLX5_FLOW_ACTION_* bits of the sub-actions. */
	void *dr_queue_action;
	void *dr_tag_action;
	void *dr_cnt_action;
	void *dr_port_id_action;
	void *dr_encap_action;
	void *dr_jump_action;
};
776 
/* Sample sub-actions resource list (indexed-pool counterparts). */
struct mlx5_flow_sub_actions_idx {
	uint32_t rix_hrxq; /**< Hash Rx queue object index. */
	uint32_t rix_tag; /**< Index to the tag action. */
	uint32_t rix_port_id_action; /**< Index to port ID action resource. */
	uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
	uint32_t rix_jump; /**< Index to the jump action resource. */
};
785 
/* Sample action resource structure. */
struct mlx5_flow_dv_sample_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	union {
		void *verbs_action; /**< Verbs sample action object. */
		void **sub_actions; /**< Sample sub-action array. */
	};
	struct rte_eth_dev *dev; /**< Device registers the action. */
	uint32_t idx; /**< Sample object index. */
	uint8_t ft_type; /**< Flow Table Type. */
	uint32_t ft_id; /**< Flow Table Level. */
	uint32_t ratio;   /**< Sample Ratio. */
	uint64_t set_action; /**< Restore reg_c0 value. */
	void *normal_path_tbl; /**< Flow Table pointer. */
	struct mlx5_flow_sub_actions_idx sample_idx;
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act;
	/**< Action resources. */
};
805 
/* Maximum number of destinations in a destination array action. */
#define MLX5_MAX_DEST_NUM	2

/* Destination array action resource structure. */
struct mlx5_flow_dv_dest_array_resource {
	struct mlx5_list_entry entry; /**< Cache entry. */
	uint32_t idx; /**< Destination array action object index. */
	uint8_t ft_type; /**< Flow Table Type. */
	uint8_t num_of_dest; /**< Number of destination actions. */
	struct rte_eth_dev *dev; /**< Device registers the action. */
	void *action; /**< Pointer to the rdma core action. */
	struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
	/**< Action index resources. */
	struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
	/**< Action resources. */
};
821 
/*
 * PMD flow priority for tunnel: RSS level >= 2 means hashing on the
 * inner headers, so the L2 sub-priority is used; otherwise L4.
 */
#define MLX5_TUNNEL_PRIO_GET(rss_desc) \
	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
825 
826 
/** Device flow handle structure for DV mode only. */
struct mlx5_flow_handle_dv {
	/* Flow DV api: */
	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/**< Pointer to modify header resource in cache. */
	uint32_t rix_encap_decap;
	/**< Index to encap/decap resource in cache. */
	uint32_t rix_push_vlan;
	/**< Index to push VLAN action resource in cache. */
	uint32_t rix_tag;
	/**< Index to the tag action. */
	uint32_t rix_sample;
	/**< Index to sample action resource in cache. */
	uint32_t rix_dest_array;
	/**< Index to destination array resource in cache. */
} __rte_packed;
844 
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
	SILIST_ENTRY(uint32_t)next;
	/**< Index to next device flow handle. */
	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	void *drv_flow; /**< pointer to driver flow object. */
	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
	uint32_t fate_action:4; /**< Fate action type, see mlx5_flow_fate_type. */
	/* Which member is valid is selected by fate_action. */
	union {
		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
		uint32_t rix_jump; /**< Index to the jump action resource. */
		uint32_t rix_port_id_action;
		/**< Index to port ID action resource. */
		uint32_t rix_fate;
		/**< Generic value indicates the fate action. */
		uint32_t rix_default_fate;
		/**< Indicates default miss fate action. */
		uint32_t rix_srss;
		/**< Indicates shared RSS fate action. */
	};
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	struct mlx5_flow_handle_dv dvh; /**< DV-only part, last for truncation. */
#endif
	uint8_t flex_item; /**< referenced Flex Item bitmask. */
} __rte_packed;
873 
874 /*
875  * Size for Verbs device flow handle structure only. Do not use the DV only
876  * structure in Verbs. No DV flows attributes will be accessed.
877  * Macro offsetof() could also be used here.
878  */
879 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
880 #define MLX5_FLOW_HANDLE_VERBS_SIZE \
881 	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
882 #else
883 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
884 #endif
885 
/** Device flow structure only for DV flow creation. */
struct mlx5_flow_dv_workspace {
	uint32_t group; /**< The group index. */
	uint32_t table_id; /**< Flow table identifier. */
	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
	int actions_n; /**< Number of valid entries in actions[]. */
	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
	/**< Pointer to push VLAN action resource in cache. */
	struct mlx5_flow_dv_tag_resource *tag_resource;
	/**< pointer to the tag action. */
	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
	/**< Pointer to port ID action resource. */
	struct mlx5_flow_dv_jump_tbl_resource *jump;
	/**< Pointer to the jump action resource. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_sample_resource *sample_res;
	/**< Pointer to the sample action resource. */
	struct mlx5_flow_dv_dest_array_resource *dest_array_res;
	/**< Pointer to the destination array resource. */
};
910 
#ifdef HAVE_INFINIBAND_VERBS_H
/*
 * Maximal Verbs flow specifications & actions size.
 * Some elements are mutually exclusive, but enough space should be allocated.
 * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
 *               2. One tunnel header (exception: GRE + MPLS),
 *                  SPEC length: GRE == tunnel.
 * Actions: 1. 1 Mark OR Flag.
 *          2. 1 Drop (if any).
 *          3. No limitation for counters, but it makes no sense to support too
 *             many counters in a single device flow.
 */
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_gre) + \
			sizeof(struct ibv_flow_spec_mpls)) \
		)
#else
#define MLX5_VERBS_MAX_SPEC_SIZE \
		( \
			(2 * (sizeof(struct ibv_flow_spec_eth) + \
			      sizeof(struct ibv_flow_spec_ipv6) + \
			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
			sizeof(struct ibv_flow_spec_tunnel)) \
		)
#endif

/* Room for up to 4 counter specs when counter sets are supported. */
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) + \
			sizeof(struct ibv_flow_spec_counter_action) * 4 \
		)
#else
#define MLX5_VERBS_MAX_ACT_SIZE \
		( \
			sizeof(struct ibv_flow_spec_action_tag) + \
			sizeof(struct ibv_flow_spec_action_drop) \
		)
#endif

/* Total buffer size for all Verbs specifications plus actions. */
#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)

/** Device flow structure only for Verbs flow creation. */
struct mlx5_flow_verbs_workspace {
	unsigned int size; /**< Size of the attribute. */
	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
	/**< Specifications & actions buffer of verbs flow. */
};
#endif /* HAVE_INFINIBAND_VERBS_H */
969 
/* Bit positions used in mlx5_flow::skip_scale (see that field for details). */
#define MLX5_SCALE_FLOW_GROUP_BIT 0
#define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1

/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32

/**
 * Tunnel offload rules type.
 */
enum mlx5_tof_rule_type {
	MLX5_TUNNEL_OFFLOAD_NONE = 0,
	MLX5_TUNNEL_OFFLOAD_SET_RULE,
	MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
	MLX5_TUNNEL_OFFLOAD_MISS_RULE,
};
985 
/** Device flow structure. */
__extension__
struct mlx5_flow {
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint32_t flow_idx; /**< The memory pool index to the main flow. */
	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
	uint64_t act_flags;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	bool external; /**< true if the flow is created external to PMD. */
	uint8_t ingress:1; /**< 1 if the flow is ingress. */
	/**
	 * Each bit, when set to 1, skips scaling the flow group with the
	 * factor. Bit 0 controls the original flow group; bit 1 controls
	 * the jump flow group when a jump action is present.
	 * 00: Enable scale in a flow, default value.
	 * 01: Skip scaling the flow group, enable scaling the group of
	 * the jump action.
	 * 10: Enable scaling the group, skip scaling the group of the
	 * jump action.
	 * 11: Skip scaling both the flow group and the jump group.
	 */
	uint8_t skip_scale:2;
	uint8_t symmetric_hash_function:1;
	/**< 1 when a symmetric RSS hash function is requested. */
	/* Driver-specific workspace; exactly one member is in use per build. */
	union {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
		struct mlx5_flow_dv_workspace dv;
#endif
#ifdef HAVE_INFINIBAND_VERBS_H
		struct mlx5_flow_verbs_workspace verbs;
#endif
	};
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
	const struct mlx5_flow_tunnel *tunnel;
	enum mlx5_tof_rule_type tof_type;
};
1024 
/* Flow meter state. */
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1

/* ASO WQE/CQE polling parameters. */
#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u

/* Connection tracking reuses the meter polling budget. */
#define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES

#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
	struct mlx5_flow_meter_info fm;
	/* Must be the first in struct. */
	TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t idx;
	/* Index to meter object. */
};
1044 
#define MLX5_MAX_TUNNELS 256
#define MLX5_TNL_MISS_RULE_PRIORITY 3
#define MLX5_TNL_MISS_FDB_JUMP_GRP  0x1234faac

/*
 * When tunnel offload is active, all JUMP group ids are converted
 * using the same method. That conversion is applied both to tunnel and
 * regular rule types.
 * Group ids used in tunnel rules are relative to its tunnel (!).
 * Application can create a number of steer rules, using the same
 * tunnel, with a different group id in each rule.
 * Each tunnel stores its groups internally in the PMD tunnel object.
 * Groups used in regular rules do not belong to any tunnel and are stored
 * in the tunnel hub.
 */

struct mlx5_flow_tunnel {
	LIST_ENTRY(mlx5_flow_tunnel) chain;
	struct rte_flow_tunnel app_tunnel;	/** app tunnel copy */
	uint32_t tunnel_id;			/** unique tunnel ID */
	RTE_ATOMIC(uint32_t) refctn;		/** reference counter */
	struct rte_flow_action action;
	struct rte_flow_item item;
	struct mlx5_hlist *groups;		/** tunnel groups */
};
1070 
/** PMD tunnel related context. */
struct mlx5_flow_tunnel_hub {
	/* Tunnels list.
	 * Access to the list MUST be MT protected.
	 */
	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
	 /* Protects access to the tunnels list. */
	rte_spinlock_t sl;
	struct mlx5_hlist *groups;		/** non tunnel groups */
};
1081 
/* Converts a jump group to a flow table ID in tunnel rules. */
struct tunnel_tbl_entry {
	struct mlx5_list_entry hash; /* Hash list entry. */
	uint32_t flow_table; /* Flow table ID the group maps to. */
	uint32_t tunnel_id; /* ID of the owning tunnel. */
	uint32_t group; /* Application-visible group ID. */
};
1089 
/* Mark a table ID as a tunnel one by setting bit 16. */
static inline uint32_t
tunnel_id_to_flow_tbl(uint32_t id)
{
	return id | 0x10000u;
}
1095 
/* Strip the tunnel marker (bit 16) from a flow table ID. */
static inline uint32_t
tunnel_flow_tbl_to_id(uint32_t flow_tbl)
{
	return flow_tbl & ~0x10000u;
}
1101 
/* 64-bit hash key packing a tunnel ID together with a group ID. */
union tunnel_tbl_key {
	uint64_t val; /* Combined key value. */
	struct {
		uint32_t tunnel_id;
		uint32_t group;
	};
};
1109 
1110 static inline struct mlx5_flow_tunnel_hub *
1111 mlx5_tunnel_hub(struct rte_eth_dev *dev)
1112 {
1113 	struct mlx5_priv *priv = dev->data->dev_private;
1114 	return priv->sh->tunnel_hub;
1115 }
1116 
/* True when tunnel offload (dv_miss_info) is enabled for this device. */
static inline bool
is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	const struct mlx5_priv *priv = dev->data->dev_private;
	return !!priv->sh->config.dv_miss_info;
#else
	/* Tunnel offload requires DV support. */
	RTE_SET_USED(dev);
	return false;
#endif
}
1128 
1129 static inline bool
1130 is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
1131 {
1132 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
1133 }
1134 
1135 static inline bool
1136 is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
1137 {
1138 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
1139 }
1140 
1141 static inline const struct mlx5_flow_tunnel *
1142 flow_actions_to_tunnel(const struct rte_flow_action actions[])
1143 {
1144 	return actions[0].conf;
1145 }
1146 
1147 static inline const struct mlx5_flow_tunnel *
1148 flow_items_to_tunnel(const struct rte_flow_item items[])
1149 {
1150 	return items[0].spec;
1151 }
1152 
1153 /**
1154  * Gets the tag array given for RTE_FLOW_FIELD_TAG type.
1155  *
1156  * In old API the value was provided in "level" field, but in new API
1157  * it is provided in "tag_array" field. Since encapsulation level is not
1158  * relevant for metadata, the tag array can be still provided in "level"
1159  * for backwards compatibility.
1160  *
1161  * @param[in] data
1162  *   Pointer to tag modify data structure.
1163  *
1164  * @return
1165  *   Tag array index.
1166  */
1167 static inline uint8_t
1168 flow_tag_index_get(const struct rte_flow_field_data *data)
1169 {
1170 	return data->tag_index ? data->tag_index : data->level;
1171 }
1172 
1173 /**
1174  * Fetch 1, 2, 3 or 4 byte field from the byte array
1175  * and return as unsigned integer in host-endian format.
1176  *
1177  * @param[in] data
1178  *   Pointer to data array.
1179  * @param[in] size
1180  *   Size of field to extract.
1181  *
1182  * @return
1183  *   converted field in host endian format.
1184  */
1185 static inline uint32_t
1186 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
1187 {
1188 	uint32_t ret;
1189 
1190 	switch (size) {
1191 	case 1:
1192 		ret = *data;
1193 		break;
1194 	case 2:
1195 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1196 		break;
1197 	case 3:
1198 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
1199 		ret = (ret << 8) | *(data + sizeof(uint16_t));
1200 		break;
1201 	case 4:
1202 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
1203 		break;
1204 	default:
1205 		MLX5_ASSERT(false);
1206 		ret = 0;
1207 		break;
1208 	}
1209 	return ret;
1210 }
1211 
1212 static inline bool
1213 flow_modify_field_support_tag_array(enum rte_flow_field_id field)
1214 {
1215 	switch ((int)field) {
1216 	case RTE_FLOW_FIELD_TAG:
1217 	case RTE_FLOW_FIELD_MPLS:
1218 	case MLX5_RTE_FLOW_FIELD_META_REG:
1219 		return true;
1220 	default:
1221 		break;
1222 	}
1223 	return false;
1224 }
1225 
/* Describes one protocol header field for modify-field translation. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field id. */
	uint32_t shift;
	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
};
1233 
/* HW steering flow attributes. */
struct mlx5_flow_attr {
	uint32_t port_id; /* Port index. */
	uint32_t group; /* Flow group. */
	uint32_t priority; /* Original priority. */
	/* RSS level, used by priority adjustment. */
	uint32_t rss_level;
	/* Action flags, used by priority adjustment. */
	uint32_t act_flags;
	uint32_t tbl_type; /* Flow table type. */
};
1245 
/* Flow structure. */
struct rte_flow {
	uint32_t dev_handles;
	/**< Device flow handles that are part of the flow. */
	uint32_t type:2;
	uint32_t drv_type:2; /**< Driver type. */
	uint32_t tunnel:1;
	uint32_t meter:24; /**< Holds flow meter id. */
	uint32_t indirect_type:2; /**< Indirect action type. */
	uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
	uint32_t rix_mreg_copy;
	/**< Index to metadata register copy table resource. */
	uint32_t counter; /**< Holds flow counter. */
	uint32_t tunnel_id;  /**< Tunnel id. */
	union {
		uint32_t age; /**< Holds ASO age bit index. */
		uint32_t ct; /**< Holds ASO CT index. */
	};
	uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;
1266 
1267 /*
1268  * HWS COUNTER ID's layout
1269  *       3                   2                   1                   0
1270  *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
1271  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1272  *    |  T  |     | D |                                               |
1273  *    ~  Y  |     | C |                    IDX                        ~
1274  *    |  P  |     | S |                                               |
1275  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1276  *
1277  *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
1278  *    Bit 25:24 = DCS index
1279  *    Bit 23:00 = IDX in this counter belonged DCS bulk.
1280  */
1281 typedef uint32_t cnt_id_t;
1282 
1283 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1284 
/* HWS flow operation types (creation, destruction, update, resize moves). */
enum {
	MLX5_FLOW_HW_FLOW_OP_TYPE_NONE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY,
	MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE,
};
1294 
/* Flags marking which optional HWS flow fields are in use. */
enum {
	MLX5_FLOW_HW_FLOW_FLAG_CNT_ID = RTE_BIT32(0),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP = RTE_BIT32(1),
	MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ = RTE_BIT32(2),
	MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX = RTE_BIT32(3),
	MLX5_FLOW_HW_FLOW_FLAG_MTR_ID = RTE_BIT32(4),
	MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR = RTE_BIT32(5),
	MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW = RTE_BIT32(6),
};

/* Mask of all valid HWS flow flags above. */
#define MLX5_FLOW_HW_FLOW_FLAGS_ALL ( \
		MLX5_FLOW_HW_FLOW_FLAG_CNT_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP | \
		MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ | \
		MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX | \
		MLX5_FLOW_HW_FLOW_FLAG_MTR_ID | \
		MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR | \
		MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW \
	)
1314 
/* Relax pedantic diagnostics for the flexible array members used below. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif

/* Size of the mlx5dr rule data embedded at the end of rte_flow_hw. */
#define MLX5_DR_RULE_SIZE 72

/* List head for chaining non-template (NTA) RSS flows. */
SLIST_HEAD(mlx5_nta_rss_flow_head, rte_flow_hw);
1322 
/** HWS non template flow data. */
struct rte_flow_nt2hws {
	/** BWC rule pointer. */
	struct mlx5dr_bwc_rule *nt_rule;
	/** The matcher for non template api. */
	struct mlx5_flow_dv_matcher *matcher;
	/** Auxiliary data stored per flow. */
	struct rte_flow_hw_aux *flow_aux;
	/** Modify header pointer. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/** Chain NTA flows. */
	SLIST_ENTRY(rte_flow_hw) next;
	/** Encap/decap index. */
	uint32_t rix_encap_decap;
	/** Chained-flow flag (field name has a historical typo: "chained"). */
	uint8_t chaned_flow;
};
1339 
/** HWS flow struct. */
struct rte_flow_hw {
	union {
		/** The table this flow is allocated from. */
		struct rte_flow_template_table *table;
		/** Data needed for non template flows. */
		struct rte_flow_nt2hws *nt2hws;
	};
	/** Application's private data passed to enqueued flow operation. */
	void *user_data;
	/** Fate action resource; valid member selected by flags. */
	union {
		/** Jump action. */
		struct mlx5_hw_jump_action *jump;
		/** TIR action. */
		struct mlx5_hrxq *hrxq;
	};
	/** Flow index from indexed pool. */
	uint32_t idx;
	/** Resource index from indexed pool. */
	uint32_t res_idx;
	/** HWS flow rule index passed to mlx5dr. */
	uint32_t rule_idx;
	/** Which flow fields (inline or in auxiliary struct) are used. */
	uint32_t flags;
	/** COUNT action index. */
	cnt_id_t cnt_id;
	/** Ongoing flow operation type. */
	uint8_t operation_type;
	/** Index of pattern template this flow is based on. */
	uint8_t mt_idx;
	/** Equals true if it is non template rule. */
	bool nt_rule;
	/**
	 * Padding for alignment to 56 bytes.
	 * Since mlx5dr rule is 72 bytes, whole flow is contained within 128 B (2 cache lines).
	 * This space is reserved for future additions to flow struct.
	 */
	uint8_t padding[9];
	/** HWS layer data struct. */
	uint8_t rule[];
};
1381 
/** Auxiliary data fields that are updatable. */
struct rte_flow_hw_aux_fields {
	/** AGE action index. */
	uint32_t age_idx;
	/** Direct meter (METER or METER_MARK) action index. */
	uint32_t mtr_id;
};
1389 
/** Auxiliary data stored per flow which is not required to be stored in main flow structure. */
struct rte_flow_hw_aux {
	/** Auxiliary fields associated with the original flow. */
	struct rte_flow_hw_aux_fields orig;
	/** Auxiliary fields associated with the updated flow. */
	struct rte_flow_hw_aux_fields upd;
	/** Index of resizable matcher associated with this flow. */
	uint8_t matcher_selector;
	/** Placeholder flow struct used during flow rule update operation. */
	struct rte_flow_hw upd_flow;
};
1401 
/* Restore pedantic diagnostics. */
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

struct mlx5_action_construct_data;
/* Callback translating an indirect list action into DR rule actions. */
typedef int
(*indirect_list_callback_t)(struct rte_eth_dev *,
			    const struct mlx5_action_construct_data *,
			    const struct rte_flow_action *,
			    struct mlx5dr_rule_action *);
1412 
/* Maximum modify-header commands: two per modify field plus one spare. */
#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)

/** Container for flow action data constructed during flow rule creation. */
struct mlx5_flow_hw_action_params {
	/** Array of constructed modify header commands. */
	struct mlx5_modification_cmd mhdr_cmd[MLX5_MHDR_MAX_CMD];
	/** Constructed encap/decap data buffer. */
	uint8_t encap_data[MLX5_ENCAP_MAX_LEN];
	/** Constructed IPv6 routing data buffer. */
	uint8_t ipv6_push_data[MLX5_PUSH_MAX_LEN];
};
1424 
/** Container for dynamically generated flow items used during flow rule creation. */
struct mlx5_flow_hw_pattern_params {
	/** Array of dynamically generated flow items. */
	struct rte_flow_item items[MLX5_HW_MAX_ITEMS];
	/** Temporary REPRESENTED_PORT item generated by PMD. */
	struct rte_flow_item_ethdev port_spec;
	/** Temporary TAG item generated by PMD. */
	struct rte_flow_item_tag tag_spec;
};
1434 
/* Data needed to translate one rte_flow action into a DR action. */
struct mlx5_action_construct_data {
	LIST_ENTRY(mlx5_action_construct_data) next;
	/* Ensure the action types are matched. */
	int type;
	uint32_t idx;  /* Data index. */
	uint16_t action_src; /* rte_flow_action src offset. */
	uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
	indirect_list_callback_t indirect_list_cb;
	/* Per-action-type payload; the valid member is selected by type. */
	union {
		struct {
			/* Expected type of indirection action. */
			enum rte_flow_action_type expected_type;
		} indirect;
		struct {
			/* encap data len. */
			uint16_t len;
		} encap;
		struct {
			/* Modify header action offset in pattern. */
			uint16_t mhdr_cmds_off;
			/* Offset in pattern after modify header actions. */
			uint16_t mhdr_cmds_end;
			/*
			 * True if this action is masked and does not need to
			 * be generated.
			 */
			bool shared;
			/*
			 * Modified field definitions in dst field (SET, ADD)
			 * or src field (COPY).
			 */
			struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
			/* Modified field definitions in dst field (COPY). */
			struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
			/*
			 * Masks applied to field values to generate
			 * PRM actions.
			 */
			uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
			/* Copy of action passed to the action template. */
			struct rte_flow_action_modify_field action;
		} modify_header;
		struct {
			bool symmetric_hash_function; /* Symmetric RSS hash */
			uint64_t types; /* RSS hash types. */
			uint32_t level; /* RSS level. */
			uint32_t idx; /* Shared action index. */
		} shared_rss;
		struct {
			cnt_id_t id;
		} shared_counter;
		struct {
			/* IPv6 extension push data len. */
			uint16_t len;
		} ipv6_ext;
		struct {
			uint32_t id;
			uint32_t conf_masked:1;
		} shared_meter;
	};
};
1497 
/* Maximum number of GENEVE TLV option resources per template. */
#define MAX_GENEVE_OPTIONS_RESOURCES 7

/* GENEVE TLV options manager structure. */
struct mlx5_geneve_tlv_options_mng {
	uint8_t nb_options; /* Number of options inside the template. */
	struct {
		uint8_t opt_type; /* GENEVE option type. */
		uint16_t opt_class; /* GENEVE option class. */
	} options[MAX_GENEVE_OPTIONS_RESOURCES];
};
1508 
/* Flow item template struct. */
struct rte_flow_pattern_template {
	LIST_ENTRY(rte_flow_pattern_template) next;
	/* Template attributes. */
	struct rte_flow_pattern_template_attr attr;
	struct mlx5dr_match_template *mt; /* mlx5 match template. */
	uint64_t item_flags; /* Item layer flags. */
	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
	/*
	 * If true, then rule pattern should be prepended with
	 * represented_port pattern item.
	 */
	bool implicit_port;
	/*
	 * If true, then rule pattern should be prepended with
	 * tag pattern item for representor matching.
	 */
	bool implicit_tag;
	/* Manages all GENEVE TLV options used by this pattern template. */
	struct mlx5_geneve_tlv_options_mng geneve_opt_mng;
	uint8_t flex_item; /* flex item index. */
	/* Items this pattern template is based on. */
	struct rte_flow_item *items;
};
1534 
/* Flow action template struct. */
struct rte_flow_actions_template {
	LIST_ENTRY(rte_flow_actions_template) next;
	/* Template attributes. */
	struct rte_flow_actions_template_attr attr;
	struct rte_flow_action *actions; /* Cached flow actions. */
	struct rte_flow_action *orig_actions; /* Original flow actions. */
	struct rte_flow_action *masks; /* Cached action masks. */
	struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
	uint64_t action_flags; /* Bit-map of all valid action in template. */
	uint16_t dr_actions_num; /* Amount of DR rules actions. */
	uint16_t actions_num; /* Amount of flow actions. */
	uint16_t *dr_off; /* DR action offset for given rte action offset. */
	uint16_t *src_off; /* RTE action displacement from app. template */
	uint16_t reformat_off; /* Offset of DR reformat action. */
	uint16_t mhdr_off; /* Offset of DR modify header action. */
	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
	uint8_t flex_item; /* flex item index. */
};
1555 
/* Jump action struct: holds both root and HWS variants of the action. */
struct mlx5_hw_jump_action {
	/* Action jump from root. */
	struct mlx5dr_action *root_action;
	/* HW steering jump action. */
	struct mlx5dr_action *hws_action;
};
1563 
/* Encap decap action struct. */
struct mlx5_hw_encap_decap_action {
	struct mlx5_indirect_list indirect;
	enum mlx5dr_action_type action_type;
	struct mlx5dr_action *action; /* Action object. */
	/* Is header_reformat action shared across flows in table. */
	uint32_t shared:1;
	uint32_t multi_pattern:1;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data (flexible array member). */
};
1575 
/* Push remove action struct. */
struct mlx5_hw_push_remove_action {
	struct mlx5dr_action *action; /* Action object. */
	/* Is push_remove action shared across flows in table. */
	uint8_t shared;
	size_t data_size; /* Action metadata size. */
	uint8_t data[]; /* Action data (flexible array member). */
};
1584 
/* Modify field action struct. */
struct mlx5_hw_modify_header_action {
	/* Reference to the DR action. */
	struct mlx5dr_action *action;
	/* Modify header action position in action rule table. */
	uint16_t pos;
	/* Is MODIFY_HEADER action shared across flows in table. */
	uint32_t shared:1;
	uint32_t multi_pattern:1;
	/* Amount of modification commands stored in the precompiled buffer. */
	uint32_t mhdr_cmds_num;
	/* Precompiled modification commands. */
	struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
};
1599 
/* The maximum number of actions supported in a flow. */
#define MLX5_HW_MAX_ACTS 16

/* DR action set struct. */
struct mlx5_hw_actions {
	/* Dynamic action list. */
	LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
	struct mlx5_hw_jump_action *jump; /* Jump action. */
	struct mlx5_hrxq *tir; /* TIR action. */
	struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
	/* Encap/Decap action. */
	struct mlx5_hw_encap_decap_action *encap_decap;
	uint16_t encap_decap_pos; /* Encap/Decap action position. */
	/* Push/remove action. */
	struct mlx5_hw_push_remove_action *push_remove;
	uint16_t push_remove_pos; /* Push/remove action position. */
	uint32_t mark:1; /* Indicate the mark action. */
	cnt_id_t cnt_id; /* Counter id. */
	uint32_t mtr_id; /* Meter id. */
	/* Translated DR action array from action template. */
	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
};
1622 
/* mlx5 action template struct: pairs a template with its translated actions. */
struct mlx5_hw_action_template {
	/* Action template pointer. */
	struct rte_flow_actions_template *action_template;
	struct mlx5_hw_actions acts; /* Template actions. */
};
1629 
/* mlx5 flow group struct. */
struct mlx5_flow_group {
	struct mlx5_list_entry entry;
	LIST_ENTRY(mlx5_flow_group) next;
	struct rte_eth_dev *dev; /* Reference to corresponding device. */
	struct mlx5dr_table *tbl; /* HWS table object. */
	struct mlx5_hw_jump_action jump; /* Jump action. */
	struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */
	enum mlx5dr_table_type type; /* Table type. */
	uint32_t group_id; /* Group id. */
	uint32_t idx; /* Group memory index. */
	/* List of all matchers created for this group in non template api. */
	struct mlx5_list *matchers;
};
1644 
1645 
/* Maximum numbers of item/action templates bound to one template table. */
#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 32
#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32

/* Number of multi-pattern encapsulation buckets. */
#define MLX5_MULTIPATTERN_ENCAP_NUM 5
/* Maximum number of resize segments per template table. */
#define MLX5_MAX_TABLE_RESIZE_NUM 64

struct mlx5_multi_pattern_segment {
	/*
	 * Modify Header Argument Objects number allocated for action in that
	 * segment.
	 * Capacity is always power of 2.
	 */
	uint32_t capacity;
	uint32_t head_index;
	struct mlx5dr_action *mhdr_action;
	struct mlx5dr_action *reformat_action[MLX5_MULTIPATTERN_ENCAP_NUM];
};
1663 
/* Per-table multi-pattern context: reformat/modify-header resources. */
struct mlx5_tbl_multi_pattern_ctx {
	struct {
		uint32_t elements_num;
		struct mlx5dr_action_reformat_header reformat_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
		/**
		 * insert_header structure is larger than reformat_header.
		 * Enclosing these structures with union will cause a gap
		 * between reformat_hdr array elements.
		 * mlx5dr_action_create_reformat() expects adjacent array elements.
		 */
		struct mlx5dr_action_insert_header insert_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];

	struct {
		uint32_t elements_num;
		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	} mh;
	struct mlx5_multi_pattern_segment segments[MLX5_MAX_TABLE_RESIZE_NUM];
};
1683 
1684 static __rte_always_inline void
1685 mlx5_multi_pattern_activate(struct mlx5_tbl_multi_pattern_ctx *mpctx)
1686 {
1687 	mpctx->segments[0].head_index = 1;
1688 }
1689 
1690 static __rte_always_inline bool
1691 mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
1692 {
1693 	return mpctx->segments[0].head_index == 1;
1694 }
1695 
/* Template table configuration captured at creation time. */
struct mlx5_flow_template_table_cfg {
	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
	bool external; /* True if created by flow API, false if table is internal to PMD. */
};
1700 
/* Matcher with its reference counter, one slot of a resizable table pair. */
struct mlx5_matcher_info {
	struct mlx5dr_matcher *matcher; /* Template matcher. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
1705 
/* Cache-aligned container of precalculated DR rule actions for one queue. */
struct __rte_cache_aligned mlx5_dr_rule_action_container {
	struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];
};
1709 
/* HWS template table: groups, matchers, bound templates and flow pools. */
struct rte_flow_template_table {
	LIST_ENTRY(rte_flow_template_table) next;
	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
	struct mlx5_matcher_info matcher_info[2];
	uint32_t matcher_selector;
	rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables. */
	/* Item templates bind to the table. */
	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
	/* Action templates bind to the table. */
	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
	struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */
	struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
	struct mlx5_flow_template_table_cfg cfg;
	uint32_t type; /* Flow table type RX/TX/FDB. */
	uint8_t nb_item_templates; /* Item template number. */
	uint8_t nb_action_templates; /* Action template number. */
	uint32_t refcnt; /* Table reference counter. */
	struct mlx5_tbl_multi_pattern_ctx mpctx;
	struct mlx5dr_matcher_attr matcher_attr;
	/**
	 * Variable length array of containers containing precalculated templates of DR actions
	 * arrays. This array is allocated at template table creation time and contains
	 * one container per each queue, per each actions template.
	 * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair.
	 * Each container will provide a local "queue buffer" to work on for flow creation
	 * operations when using a given actions template.
	 */
	struct mlx5_dr_rule_action_container rule_acts[];
};
1740 
1741 static __rte_always_inline struct mlx5dr_matcher *
1742 mlx5_table_matcher(const struct rte_flow_template_table *table)
1743 {
1744 	return table->matcher_info[table->matcher_selector].matcher;
1745 }
1746 
1747 static __rte_always_inline struct mlx5_multi_pattern_segment *
1748 mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
1749 				uint32_t flow_resource_ix)
1750 {
1751 	int i;
1752 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
1753 
1754 	if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr)))
1755 		return &mpctx->segments[0];
1756 	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
1757 		uint32_t limit = mpctx->segments[i].head_index +
1758 				 mpctx->segments[i].capacity;
1759 
1760 		if (flow_resource_ix < limit)
1761 			return &mpctx->segments[i];
1762 	}
1763 	return NULL;
1764 }
1765 
1766 /*
1767  * Convert metadata or tag to the actual register.
1768  * META: Fixed C_1 for FDB mode, REG_A for NIC TX and REG_B for NIC RX.
1769  * TAG: C_x expect meter color reg and the reserved ones.
1770  */
1771 static __rte_always_inline int
1772 flow_hw_get_reg_id_by_domain(struct rte_eth_dev *dev,
1773 			     enum rte_flow_item_type type,
1774 			     enum mlx5dr_table_type domain_type, uint32_t id)
1775 {
1776 	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
1777 	struct mlx5_dev_registers *reg = &sh->registers;
1778 
1779 	switch (type) {
1780 	case RTE_FLOW_ITEM_TYPE_META:
1781 		if (sh->config.dv_esw_en &&
1782 		    sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
1783 			return REG_C_1;
1784 		}
1785 		/*
1786 		 * On root table - PMD allows only egress META matching, thus
1787 		 * REG_A matching is sufficient.
1788 		 *
1789 		 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
1790 		 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
1791 		 * However, current FW does not implement REG_B case right now, so
1792 		 * REG_B case is return explicitly by this function for NIC RX.
1793 		 */
1794 		if (domain_type == MLX5DR_TABLE_TYPE_NIC_RX)
1795 			return REG_B;
1796 		return REG_A;
1797 	case RTE_FLOW_ITEM_TYPE_CONNTRACK:
1798 	case RTE_FLOW_ITEM_TYPE_METER_COLOR:
1799 		return reg->aso_reg;
1800 	case RTE_FLOW_ITEM_TYPE_TAG:
1801 		if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1802 			return REG_C_3;
1803 		MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
1804 		return reg->hw_avl_tags[id];
1805 	default:
1806 		return REG_NON;
1807 	}
1808 }
1809 
1810 static __rte_always_inline int
1811 flow_hw_get_reg_id_from_ctx(void *dr_ctx, enum rte_flow_item_type type,
1812 			    enum mlx5dr_table_type domain_type, uint32_t id)
1813 {
1814 	uint16_t port;
1815 
1816 	MLX5_ETH_FOREACH_DEV(port, NULL) {
1817 		struct mlx5_priv *priv;
1818 
1819 		priv = rte_eth_devices[port].data->dev_private;
1820 		if (priv->dr_ctx == dr_ctx)
1821 			return flow_hw_get_reg_id_by_domain(&rte_eth_devices[port],
1822 							    type, domain_type, id);
1823 	}
1824 	return REG_NON;
1825 }
1826 
1827 #endif
1828 
1829 /*
1830  * Define list of valid combinations of RX Hash fields
1831  * (see enum ibv_rx_hash_fields).
1832  */
1833 #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
1834 #define MLX5_RSS_HASH_IPV4_TCP \
1835 	(MLX5_RSS_HASH_IPV4 | \
1836 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1837 #define MLX5_RSS_HASH_IPV4_UDP \
1838 	(MLX5_RSS_HASH_IPV4 | \
1839 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1840 #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
1841 #define MLX5_RSS_HASH_IPV6_TCP \
1842 	(MLX5_RSS_HASH_IPV6 | \
1843 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1844 #define MLX5_RSS_HASH_IPV6_UDP \
1845 	(MLX5_RSS_HASH_IPV6 | \
1846 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1847 #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
1848 #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
1849 #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
1850 #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
1851 #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
1852 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
1853 #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
1854 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
1855 #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
1856 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
1857 #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
1858 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
1859 #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
1860 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
1861 #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
1862 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
1863 #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
1864 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
1865 #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
1866 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
1867 
1868 #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
1869 #define IBV_RX_HASH_IPSEC_SPI (1U << 8)
1870 #endif
1871 
1872 #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
1873 #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
1874 				MLX5_RSS_HASH_ESP_SPI)
1875 #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
1876 				MLX5_RSS_HASH_ESP_SPI)
1877 #define MLX5_RSS_HASH_NONE 0ULL
1878 
/* True if the hash function is a symmetric Toeplitz variant. */
#define MLX5_RSS_IS_SYMM(func) \
		(((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \
		 ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT))

/* extract next protocol type from Ethernet & VLAN headers */
/*
 * ANDs field _m of item _itm's mask and spec, then stores the result,
 * converted from big endian to CPU order, into _prt.
 */
#define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
	(_prt) = ((const struct _s *)(_itm)->mask)->_m;       \
	(_prt) &= ((const struct _s *)(_itm)->spec)->_m;      \
	(_prt) = rte_be_to_cpu_16((_prt));                    \
} while (0)
1889 
/* array of valid combinations of RX Hash fields for RSS */
/*
 * Order matters: mlx5_shared_action_rss::hrxq[] entries are mapped to
 * these fields by index (array length is MLX5_RSS_HASH_FIELDS_LEN).
 */
static const uint64_t mlx5_rss_hash_fields[] = {
	MLX5_RSS_HASH_IPV4,
	MLX5_RSS_HASH_IPV4_TCP,
	MLX5_RSS_HASH_IPV4_UDP,
	MLX5_RSS_HASH_IPV4_ESP,
	MLX5_RSS_HASH_IPV6,
	MLX5_RSS_HASH_IPV6_TCP,
	MLX5_RSS_HASH_IPV6_UDP,
	MLX5_RSS_HASH_IPV6_ESP,
	MLX5_RSS_HASH_ESP_SPI,
	MLX5_RSS_HASH_NONE,
};
1903 
/* Shared RSS action structure */
/* Reference-counted state behind an indirect (shared) RSS action. */
struct mlx5_shared_action_rss {
	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	struct mlx5_ind_table_obj *ind_tbl;
	/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
	uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
	/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
	rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};
1916 
/* Opaque indirect action handle - carries only a 32-bit action id. */
struct rte_flow_action_handle {
	uint32_t id;
};
1920 
/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
	/* If creating another flow in same thread, push new as stack. */
	struct mlx5_flow_workspace *prev;
	struct mlx5_flow_workspace *next;
	struct mlx5_flow_workspace *gc; /* NOTE(review): role not evident in this file - confirm at the users. */
	uint32_t inuse; /* can't create new flow with current. */
	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; /* Intermediate device flows. */
	struct mlx5_flow_rss_desc rss_desc; /* RSS descriptor of the flow being built. */
	uint32_t flow_idx; /* Intermediate device flow index. */
	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
	struct mlx5_flow_meter_policy *policy;
	/* The meter policy used by meter in flow. */
	struct mlx5_flow_meter_policy *final_policy;
	/* The final policy when meter policy is hierarchy. */
	uint32_t skip_matcher_reg:1;
	/* Indicates if need to skip matcher register in translate. */
	uint32_t mark:1; /* Indicates if flow contains mark action. */
	uint32_t vport_meta_tag; /* Used for vport index match. */
};
1941 
/* Matcher translate type. */
/* Bit flags: SW_*/HS_* variants, each with value (_V) and mask (_M) parts. */
enum MLX5_SET_MATCHER {
	MLX5_SET_MATCHER_SW_V = 1 << 0,
	MLX5_SET_MATCHER_SW_M = 1 << 1,
	MLX5_SET_MATCHER_HS_V = 1 << 2,
	MLX5_SET_MATCHER_HS_M = 1 << 3,
};

/* Convenience masks combining the flags above. */
#define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
#define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
#define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
#define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)
1954 
/* Flow matcher workspace intermediate data. */
/* Scratch state accumulated while translating a single flow's matcher. */
struct mlx5_dv_matcher_workspace {
	uint8_t priority; /* Flow priority. */
	uint64_t last_item; /* Last item in pattern. */
	uint64_t item_flags; /* Flow item pattern flags. */
	uint64_t action_flags; /* Flow action flags. */
	bool external; /* External flow or not. */
	uint32_t vlan_tag:12; /* Flow item VLAN tag. */
	uint8_t next_protocol; /* Tunnel next protocol */
	uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
	uint32_t group; /* Flow group. */
	uint16_t udp_dport; /* Flow item UDP port. */
	const struct rte_flow_attr *attr; /* Flow attribute. */
	struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
	const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
	const struct rte_flow_item *gre_item; /* Flow GRE item. */
	const struct rte_flow_item *integrity_items[2]; /* Integrity items - confirm outer/inner ordering at users. */
};
1973 
/* Parameters carried across subflows when a flow is split internally. */
struct mlx5_flow_split_info {
	uint32_t external:1;
	/**< True if flow is created by request external to PMD. */
	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
	uint32_t flow_idx; /**< This memory pool index to the flow. */
	uint32_t table_id; /**< Flow table identifier. */
	uint64_t prefix_layers; /**< Prefix subflow layers. */
};
1983 
/* Location of a field inside a header layout: DW offset plus bit mask. */
struct mlx5_hl_data {
	uint8_t dw_offset;
	uint32_t dw_mask;
};
1988 
1989 extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
1990 
1991 /*
1992  * Get sqn for given tx_queue.
1993  * Used in HWS rule creation.
1994  */
1995 static __rte_always_inline int
1996 flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
1997 {
1998 	struct mlx5_txq_ctrl *txq;
1999 	struct mlx5_external_q *ext_txq;
2000 
2001 	/* Means Tx queue is PF0. */
2002 	if (tx_queue == UINT16_MAX) {
2003 		*sqn = 0;
2004 		return 0;
2005 	}
2006 	if (mlx5_is_external_txq(dev, tx_queue)) {
2007 		ext_txq = mlx5_ext_txq_get(dev, tx_queue);
2008 		*sqn = ext_txq->hw_id;
2009 		return 0;
2010 	}
2011 	txq = mlx5_txq_get(dev, tx_queue);
2012 	if (unlikely(!txq))
2013 		return -ENOENT;
2014 	*sqn = mlx5_txq_get_sqn(txq);
2015 	mlx5_txq_release(dev, tx_queue);
2016 	return 0;
2017 }
2018 
2019 /*
2020  * Convert sqn for given rte_eth_dev port.
2021  * Used in HWS rule creation.
2022  */
2023 static __rte_always_inline int
2024 flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
2025 {
2026 	if (port_id >= RTE_MAX_ETHPORTS)
2027 		return -EINVAL;
2028 	return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
2029 }
2030 
2031 /*
2032  * Get given rte_eth_dev port_id.
2033  * Used in HWS rule creation.
2034  */
2035 static __rte_always_inline uint16_t
2036 flow_hw_get_port_id(void *dr_ctx)
2037 {
2038 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2039 	uint16_t port_id;
2040 
2041 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2042 		struct mlx5_priv *priv;
2043 
2044 		priv = rte_eth_devices[port_id].data->dev_private;
2045 		if (priv->dr_ctx == dr_ctx)
2046 			return port_id;
2047 	}
2048 #else
2049 	RTE_SET_USED(dr_ctx);
2050 #endif
2051 	return UINT16_MAX;
2052 }
2053 
2054 /*
2055  * Get given eswitch manager id.
2056  * Used in HWS match with port creation.
2057  */
2058 static __rte_always_inline const struct flow_hw_port_info *
2059 flow_hw_get_esw_mgr_id(void *dr_ctx)
2060 {
2061 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2062 	uint16_t port_id;
2063 
2064 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2065 		struct mlx5_priv *priv;
2066 
2067 		priv = rte_eth_devices[port_id].data->dev_private;
2068 		if (priv->dr_ctx == dr_ctx)
2069 			return &priv->sh->dev_cap.esw_info;
2070 	}
2071 #else
2072 	RTE_SET_USED(dr_ctx);
2073 #endif
2074 	return NULL;
2075 }
2076 
2077 /*
2078  * Get metadata match tag and mask for given rte_eth_dev port.
2079  * Used in HWS rule creation.
2080  */
2081 static __rte_always_inline const struct flow_hw_port_info *
2082 flow_hw_conv_port_id(void *ctx, const uint16_t port_id)
2083 {
2084 	struct flow_hw_port_info *port_info;
2085 
2086 	if (port_id == UINT16_MAX && ctx)
2087 		return flow_hw_get_esw_mgr_id(ctx);
2088 
2089 	if (port_id >= RTE_MAX_ETHPORTS)
2090 		return NULL;
2091 	port_info = &mlx5_flow_hw_port_infos[port_id];
2092 	return !!port_info->regc_mask ? port_info : NULL;
2093 }
2094 
2095 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2096 /*
2097  * Get metadata match tag and mask for the uplink port represented
2098  * by given IB context. Used in HWS context creation.
2099  */
2100 static __rte_always_inline const struct flow_hw_port_info *
2101 flow_hw_get_wire_port(struct ibv_context *ibctx)
2102 {
2103 	struct ibv_device *ibdev = ibctx->device;
2104 	uint16_t port_id;
2105 
2106 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
2107 		const struct mlx5_priv *priv =
2108 				rte_eth_devices[port_id].data->dev_private;
2109 
2110 		if (priv && priv->master) {
2111 			struct ibv_context *port_ibctx = priv->sh->cdev->ctx;
2112 
2113 			if (port_ibctx->device == ibdev)
2114 				return flow_hw_conv_port_id(priv->dr_ctx, port_id);
2115 		}
2116 	}
2117 	return NULL;
2118 }
2119 #endif
2120 
/*
 * Resolve the register for an item without a specific steering domain.
 * Thin wrapper around flow_hw_get_reg_id_by_domain() passing
 * MLX5DR_TABLE_TYPE_MAX as the domain; returns REG_NON when built
 * without DV/HWS support.
 */
static __rte_always_inline int
flow_hw_get_reg_id(struct rte_eth_dev *dev,
		   enum rte_flow_item_type type, uint32_t id)
{
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	return flow_hw_get_reg_id_by_domain(dev, type,
					    MLX5DR_TABLE_TYPE_MAX, id);
#else
	RTE_SET_USED(dev);
	RTE_SET_USED(type);
	RTE_SET_USED(id);
	return REG_NON;
#endif
}
2135 
2136 static __rte_always_inline int
2137 flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val)
2138 {
2139 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2140 	uint32_t port;
2141 
2142 	MLX5_ETH_FOREACH_DEV(port, NULL) {
2143 		struct mlx5_priv *priv;
2144 		priv = rte_eth_devices[port].data->dev_private;
2145 
2146 		if (priv->dr_ctx == dr_ctx) {
2147 			*port_val = port;
2148 			return 0;
2149 		}
2150 	}
2151 #else
2152 	RTE_SET_USED(dr_ctx);
2153 	RTE_SET_USED(port_val);
2154 #endif
2155 	return -EINVAL;
2156 }
2157 
2158 /**
2159  * Get GENEVE TLV option FW information according type and class.
2160  *
2161  * @param[in] dr_ctx
2162  *   Pointer to HW steering DR context.
2163  * @param[in] type
2164  *   GENEVE TLV option type.
2165  * @param[in] class
2166  *   GENEVE TLV option class.
2167  * @param[out] hl_ok_bit
2168  *   Pointer to header layout structure describing OK bit FW information.
2169  * @param[out] num_of_dws
2170  *   Pointer to fill inside the size of 'hl_dws' array.
2171  * @param[out] hl_dws
2172  *   Pointer to header layout array describing data DWs FW information.
2173  * @param[out] ok_bit_on_class
2174  *   Pointer to an indicator whether OK bit includes class along with type.
2175  *
2176  * @return
2177  *   0 on success, negative errno otherwise and rte_errno is set.
2178  */
2179 int
2180 mlx5_get_geneve_hl_data(const void *dr_ctx, uint8_t type, uint16_t class,
2181 			struct mlx5_hl_data ** const hl_ok_bit,
2182 			uint8_t *num_of_dws,
2183 			struct mlx5_hl_data ** const hl_dws,
2184 			bool *ok_bit_on_class);
2185 
2186 /**
2187  * Get modify field ID for single DW inside configured GENEVE TLV option.
2188  *
2189  * @param[in] dr_ctx
2190  *   Pointer to HW steering DR context.
2191  * @param[in] type
2192  *   GENEVE TLV option type.
2193  * @param[in] class
2194  *   GENEVE TLV option class.
2195  * @param[in] dw_offset
2196  *   Offset of DW inside the option.
2197  *
2198  * @return
2199  *   Modify field ID on success, negative errno otherwise and rte_errno is set.
2200  */
2201 int
2202 mlx5_get_geneve_option_modify_field_id(const void *dr_ctx, uint8_t type,
2203 				       uint16_t class, uint8_t dw_offset);
2204 
2205 void *
2206 mlx5_geneve_tlv_parser_create(uint16_t port_id,
2207 			      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
2208 			      uint8_t nb_options);
2209 int mlx5_geneve_tlv_parser_destroy(void *handle);
2210 int mlx5_flow_geneve_tlv_option_validate(struct mlx5_priv *priv,
2211 					 const struct rte_flow_item *geneve_opt,
2212 					 struct rte_flow_error *error);
2213 int mlx5_geneve_opt_modi_field_get(struct mlx5_priv *priv,
2214 				   const struct rte_flow_field_data *data);
2215 
2216 struct mlx5_geneve_tlv_options_mng;
2217 int mlx5_geneve_tlv_option_register(struct mlx5_priv *priv,
2218 				    const struct rte_flow_item_geneve_opt *spec,
2219 				    struct mlx5_geneve_tlv_options_mng *mng);
2220 void mlx5_geneve_tlv_options_unregister(struct mlx5_priv *priv,
2221 					struct mlx5_geneve_tlv_options_mng *mng);
2222 
2223 void flow_hw_set_port_info(struct rte_eth_dev *dev);
2224 void flow_hw_clear_port_info(struct rte_eth_dev *dev);
2225 int flow_hw_create_vport_action(struct rte_eth_dev *dev);
2226 void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
2227 int
2228 flow_hw_init(struct rte_eth_dev *dev,
2229 	     struct rte_flow_error *error);
2230 
/* Driver ops vtable types: flow lifecycle, meter tables, counters, aging. */
typedef uintptr_t (*mlx5_flow_list_create_t)(struct rte_eth_dev *dev,
					enum mlx5_flow_type type,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item items[],
					const struct rte_flow_action actions[],
					bool external,
					struct rte_flow_error *error);
typedef void (*mlx5_flow_list_destroy_t)(struct rte_eth_dev *dev,
					enum mlx5_flow_type type,
					uintptr_t flow_idx);
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item items[],
				    const struct rte_flow_action actions[],
				    bool external,
				    int hairpin,
				    struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
	 const struct rte_flow_item items[],
	 const struct rte_flow_action actions[], struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
				     struct mlx5_flow *dev_flow,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item items[],
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error);
typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
				 struct rte_flow_error *error);
typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
				   struct rte_flow *flow);
typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
				    struct rte_flow *flow);
typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 const struct rte_flow_action *actions,
				 void *data,
				 struct rte_flow_error *error);
typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
					struct mlx5_flow_meter_info *fm,
					uint32_t mtr_idx,
					uint8_t domain_bitmap);
typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm);
typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
typedef struct mlx5_flow_meter_sub_policy *
	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
		(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
		(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_info *fm,
		int32_t src_port,
		const struct rte_flow_item *item,
		struct rte_flow_error *error);
typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
	(struct rte_eth_dev *dev,
	struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
					    (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
						uint32_t mtr_idx);
typedef uint32_t (*mlx5_flow_counter_alloc_t)
				   (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
					 uint32_t cnt);
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
					 uint32_t cnt,
					 bool clear, uint64_t *pkts,
					 uint64_t *bytes, void **action);
typedef int (*mlx5_flow_get_aged_flows_t)
					(struct rte_eth_dev *dev,
					 void **context,
					 uint32_t nb_contexts,
					 struct rte_flow_error *error);
typedef int (*mlx5_flow_get_q_aged_flows_t)
					(struct rte_eth_dev *dev,
					 uint32_t queue_id,
					 void **context,
					 uint32_t nb_contexts,
					 struct rte_flow_error *error);
/* Indirect (shared) action ops, domain sync and meter actions validation. */
typedef int (*mlx5_flow_action_validate_t)
				(struct rte_eth_dev *dev,
				 const struct rte_flow_indir_action_conf *conf,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error);
typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
				(struct rte_eth_dev *dev,
				 const struct rte_flow_indir_action_conf *conf,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_destroy_t)
				(struct rte_eth_dev *dev,
				 struct rte_flow_action_handle *action,
				 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_update_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_handle *action,
			 const void *update,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_query_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_action_handle *action,
			 void *data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_query_update_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_handle *handle,
			 const void *update, void *data,
			 enum rte_flow_query_update_mode qu_mode,
			 struct rte_flow_error *error);
typedef struct rte_flow_action_list_handle *
(*mlx5_flow_action_list_handle_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *actions,
			 struct rte_flow_error *error);
typedef int
(*mlx5_flow_action_list_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_action_list_handle *handle,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
			(struct rte_eth_dev *dev,
			 uint32_t domains,
			 uint32_t flags);
typedef int (*mlx5_flow_validate_mtr_acts_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_action *actions[RTE_COLORS],
			 struct rte_flow_attr *attr,
			 bool *is_rss,
			 uint8_t *domain_bitmap,
			 uint8_t *policy_mode,
			 struct rte_mtr_error *error);
/* Meter policy ops, priority discovery and flex item ops. */
typedef int (*mlx5_flow_create_mtr_acts_t)
			(struct rte_eth_dev *dev,
		      struct mlx5_flow_meter_policy *mtr_policy,
		      const struct rte_flow_action *actions[RTE_COLORS],
		      struct rte_flow_attr *attr,
		      struct rte_mtr_error *error);
typedef void (*mlx5_flow_destroy_mtr_acts_t)
			(struct rte_eth_dev *dev,
		      struct mlx5_flow_meter_policy *mtr_policy);
typedef int (*mlx5_flow_create_policy_rules_t)
			(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy);
typedef void (*mlx5_flow_destroy_policy_rules_t)
			(struct rte_eth_dev *dev,
			  struct mlx5_flow_meter_policy *mtr_policy);
typedef int (*mlx5_flow_create_def_policy_t)
			(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_destroy_def_policy_t)
			(struct rte_eth_dev *dev);
typedef int (*mlx5_flow_discover_priorities_t)
			(struct rte_eth_dev *dev,
			 const uint16_t *vprio, int vprio_n);
typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_conf *conf,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_item_release_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_handle *handle,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_item_update_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_handle *handle,
			 const struct rte_flow_item_flex_conf *conf,
			 struct rte_flow_error *error);
/* HWS configuration and template/table management ops. */
typedef int (*mlx5_flow_info_get_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_port_info *port_info,
			 struct rte_flow_queue_info *queue_info,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_port_configure_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_port_attr *port_attr,
			 uint16_t nb_queue,
			 const struct rte_flow_queue_attr *queue_attr[],
			 struct rte_flow_error *err);
typedef int (*mlx5_flow_pattern_validate_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_pattern_template_attr *attr,
			 const struct rte_flow_item items[],
			 uint64_t *item_flags,
			 struct rte_flow_error *error);
typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_pattern_template_attr *attr,
			 const struct rte_flow_item items[],
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_pattern_template_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_pattern_template *template,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_actions_validate_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_actions_template_attr *attr,
			 const struct rte_flow_action actions[],
			 const struct rte_flow_action masks[],
			 struct rte_flow_error *error);
typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
			(struct rte_eth_dev *dev,
			 const struct rte_flow_actions_template_attr *attr,
			 const struct rte_flow_action actions[],
			 const struct rte_flow_action masks[],
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_actions_template_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_actions_template *template,
			 struct rte_flow_error *error);
typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
		(struct rte_eth_dev *dev,
		 const struct rte_flow_template_table_attr *attr,
		 struct rte_flow_pattern_template *item_templates[],
		 uint8_t nb_item_templates,
		 struct rte_flow_actions_template *action_templates[],
		 uint8_t nb_action_templates,
		 struct rte_flow_error *error);
typedef int (*mlx5_flow_table_destroy_t)
			(struct rte_eth_dev *dev,
			 struct rte_flow_template_table *table,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_group_set_miss_actions_t)
			(struct rte_eth_dev *dev,
			 uint32_t group_id,
			 const struct rte_flow_group_attr *attr,
			 const struct rte_flow_action actions[],
			 struct rte_flow_error *error);
/* Asynchronous (queue-based) flow operation ops. */
typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_template_table *table,
			 const struct rte_flow_item items[],
			 uint8_t pattern_template_index,
			 const struct rte_flow_action actions[],
			 uint8_t action_template_index,
			 void *user_data,
			 struct rte_flow_error *error);
typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_template_table *table,
			 uint32_t rule_index,
			 const struct rte_flow_action actions[],
			 uint8_t action_template_index,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_flow_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow *flow,
			 const struct rte_flow_action actions[],
			 uint8_t action_template_index,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_flow_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow *flow,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_pull_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 struct rte_flow_op_result res[],
			 uint16_t n_res,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_push_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 struct rte_flow_error *error);
2508 
/* Asynchronous indirect action and action list ops. */
typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *action,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 const void *update,
			 void *user_data,
			 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_action_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_handle *action_handle,
			 const void *update, void *data,
			 enum rte_flow_query_update_mode qu_mode,
			 void *user_data, struct rte_flow_error *error);
typedef int (*mlx5_flow_async_action_handle_query_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 void *user_data,
			 struct rte_flow_error *error);
typedef struct rte_flow_action_list_handle *
(*mlx5_flow_async_action_list_handle_create_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *actions,
			 void *user_data, struct rte_flow_error *error);
typedef int
(*mlx5_flow_async_action_list_handle_destroy_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			 const struct rte_flow_op_attr *op_attr,
			 struct rte_flow_action_list_handle *action_handle,
			 void *user_data, struct rte_flow_error *error);
typedef int
(*mlx5_flow_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			struct rte_flow_error *error);
typedef int
(*mlx5_flow_async_action_list_handle_query_update_t)
			(struct rte_eth_dev *dev, uint32_t queue_id,
			const struct rte_flow_op_attr *attr,
			const struct rte_flow_action_list_handle *handle,
			const void **update, void **query,
			enum rte_flow_query_update_mode mode,
			void *user_data, struct rte_flow_error *error);
2577 typedef int
2578 (*mlx5_flow_calc_table_hash_t)
2579 			(struct rte_eth_dev *dev,
2580 			 const struct rte_flow_template_table *table,
2581 			 const struct rte_flow_item pattern[],
2582 			 uint8_t pattern_template_index,
2583 			 uint32_t *hash, struct rte_flow_error *error);
2584 typedef int
2585 (*mlx5_flow_calc_encap_hash_t)
2586 			(struct rte_eth_dev *dev,
2587 			 const struct rte_flow_item pattern[],
2588 			 enum rte_flow_encap_hash_field dest_field,
2589 			 uint8_t *hash,
2590 			 struct rte_flow_error *error);
2591 typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
2592 				   struct rte_flow_template_table *table,
2593 				   uint32_t nb_rules, struct rte_flow_error *error);
2594 typedef int (*mlx5_flow_update_resized_t)
2595 			(struct rte_eth_dev *dev, uint32_t queue,
2596 			 const struct rte_flow_op_attr *attr,
2597 			 struct rte_flow *rule, void *user_data,
2598 			 struct rte_flow_error *error);
2599 typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
2600 				       struct rte_flow_template_table *table,
2601 				       struct rte_flow_error *error);
2602 
/*
 * Virtual table of flow engine callbacks for one driver type.
 * Flow API entry points dispatch through these pointers; every
 * backend fills in the subset of operations it supports.
 */
struct mlx5_flow_driver_ops {
	mlx5_flow_list_create_t list_create;
	mlx5_flow_list_destroy_t list_destroy;
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	/* Meter tables, policies and hierarchy management. */
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
	mlx5_flow_mtr_alloc_t create_meter;
	mlx5_flow_mtr_free_t free_meter;
	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
	mlx5_flow_create_mtr_acts_t create_mtr_acts;
	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
	mlx5_flow_create_policy_rules_t create_policy_rules;
	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
	mlx5_flow_create_def_policy_t create_def_policy;
	mlx5_flow_destroy_def_policy_t destroy_def_policy;
	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
	/* Counters and aging. */
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
	mlx5_flow_get_aged_flows_t get_aged_flows;
	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
	/* Indirect (shared) actions. */
	mlx5_flow_action_validate_t action_validate;
	mlx5_flow_action_create_t action_create;
	mlx5_flow_action_destroy_t action_destroy;
	mlx5_flow_action_update_t action_update;
	mlx5_flow_action_query_t action_query;
	mlx5_flow_action_query_update_t action_query_update;
	mlx5_flow_action_list_handle_create_t action_list_handle_create;
	mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
	mlx5_flow_sync_domain_t sync_domain;
	mlx5_flow_discover_priorities_t discover_priorities;
	mlx5_flow_item_create_t item_create;
	mlx5_flow_item_release_t item_release;
	mlx5_flow_item_update_t item_update;
	/* Template API (HW steering) operations. */
	mlx5_flow_info_get_t info_get;
	mlx5_flow_port_configure_t configure;
	mlx5_flow_pattern_validate_t pattern_validate;
	mlx5_flow_pattern_template_create_t pattern_template_create;
	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
	mlx5_flow_actions_validate_t actions_validate;
	mlx5_flow_actions_template_create_t actions_template_create;
	mlx5_flow_actions_template_destroy_t actions_template_destroy;
	mlx5_flow_table_create_t template_table_create;
	mlx5_flow_table_destroy_t template_table_destroy;
	mlx5_flow_group_set_miss_actions_t group_set_miss_actions;
	/* Asynchronous (queue-based) operations. */
	mlx5_flow_async_flow_create_t async_flow_create;
	mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
	mlx5_flow_async_flow_update_t async_flow_update;
	mlx5_flow_async_flow_destroy_t async_flow_destroy;
	mlx5_flow_pull_t pull;
	mlx5_flow_push_t push;
	mlx5_flow_async_action_handle_create_t async_action_create;
	mlx5_flow_async_action_handle_update_t async_action_update;
	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
	mlx5_flow_async_action_handle_query_t async_action_query;
	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
	mlx5_flow_async_action_list_handle_create_t
		async_action_list_handle_create;
	mlx5_flow_async_action_list_handle_destroy_t
		async_action_list_handle_destroy;
	mlx5_flow_action_list_handle_query_update_t
		action_list_handle_query_update;
	mlx5_flow_async_action_list_handle_query_update_t
		async_action_list_handle_query_update;
	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
	mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
	/* Template table resize support. */
	mlx5_table_resize_t table_resize;
	mlx5_flow_update_resized_t flow_update_resized;
	table_resize_complete_t table_resize_complete;
};
2682 
2683 /* mlx5_flow.c */
2684 
2685 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
2686 void mlx5_flow_pop_thread_workspace(void);
2687 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
2688 
/* Flags controlling flow group to HW table translation. */
__extension__
struct flow_grp_info {
	/* Rule comes from the external (application) flow API. */
	uint64_t external:1;
	/* Rule belongs to the transfer (E-Switch) domain. */
	uint64_t transfer:1;
	/* FDB default rule is enabled — TODO confirm exact semantics. */
	uint64_t fdb_def_rule:1;
	/* force standard group translation */
	uint64_t std_tbl_fix:1;
	/* Number of group scaling jumps to skip — presumably; verify at callers. */
	uint64_t skip_scale:2;
};
2698 
2699 static inline bool
2700 tunnel_use_standard_attr_group_translate
2701 		    (const struct rte_eth_dev *dev,
2702 		     const struct rte_flow_attr *attr,
2703 		     const struct mlx5_flow_tunnel *tunnel,
2704 		     enum mlx5_tof_rule_type tof_rule_type)
2705 {
2706 	bool verdict;
2707 
2708 	if (!is_tunnel_offload_active(dev))
2709 		/* no tunnel offload API */
2710 		verdict = true;
2711 	else if (tunnel) {
2712 		/*
2713 		 * OvS will use jump to group 0 in tunnel steer rule.
2714 		 * If tunnel steer rule starts from group 0 (attr.group == 0)
2715 		 * that 0 group must be translated with standard method.
2716 		 * attr.group == 0 in tunnel match rule translated with tunnel
2717 		 * method
2718 		 */
2719 		verdict = !attr->group &&
2720 			  is_flow_tunnel_steer_rule(tof_rule_type);
2721 	} else {
2722 		/*
2723 		 * non-tunnel group translation uses standard method for
2724 		 * root group only: attr.group == 0
2725 		 */
2726 		verdict = !attr->group;
2727 	}
2728 
2729 	return verdict;
2730 }
2731 
/**
 * Get DV flow ASO meter by index.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] idx
 *   mlx5 flow ASO meter index in the container.
 *
 * @return
 *   Pointer to the ASO meter, NULL otherwise.
 */
2745 static inline struct mlx5_aso_mtr *
2746 mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
2747 {
2748 	struct mlx5_aso_mtr_pool *pool;
2749 	struct mlx5_aso_mtr_pools_mng *pools_mng =
2750 				&priv->sh->mtrmng->pools_mng;
2751 
2752 	if (priv->mtr_bulk.aso)
2753 		return priv->mtr_bulk.aso + idx;
2754 	/* Decrease to original index. */
2755 	idx--;
2756 	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
2757 	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
2758 	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
2759 	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
2760 	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
2761 }
2762 
2763 static __rte_always_inline const struct rte_flow_item *
2764 mlx5_find_end_item(const struct rte_flow_item *item)
2765 {
2766 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
2767 	return item;
2768 }
2769 
2770 static __rte_always_inline bool
2771 mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
2772 {
2773 	struct rte_flow_item_integrity test = *item;
2774 	test.l3_ok = 0;
2775 	test.l4_ok = 0;
2776 	test.ipv4_csum_ok = 0;
2777 	test.l4_csum_ok = 0;
2778 	return (test.value == 0);
2779 }
2780 
2781 /*
2782  * Get ASO CT action by device and index.
2783  *
2784  * @param[in] dev
2785  *   Pointer to the Ethernet device structure.
2786  * @param[in] idx
2787  *   Index to the ASO CT action.
2788  *
2789  * @return
2790  *   The specified ASO CT action pointer.
2791  */
2792 static inline struct mlx5_aso_ct_action *
2793 flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
2794 {
2795 	struct mlx5_priv *priv = dev->data->dev_private;
2796 	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
2797 	struct mlx5_aso_ct_pool *pool;
2798 
2799 	idx--;
2800 	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
2801 	/* Bit operation AND could be used. */
2802 	rte_rwlock_read_lock(&mng->resize_rwl);
2803 	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
2804 	rte_rwlock_read_unlock(&mng->resize_rwl);
2805 	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
2806 }
2807 
/*
 * Get ASO CT action by owner & index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] own_idx
 *   Combined handle encoding the ASO CT action index and its owner port.
 *
 * @return
 *   The specified ASO CT action pointer, NULL on failure.
 */
static inline struct mlx5_aso_ct_action *
flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	/* The handle encodes both the owner port id and the action index. */
	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);

	if (owner == PORT_ID(priv)) {
		/* Owned by this port: resolve locally. */
		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
	} else {
		/* Owned by a peer port: resolve through the owner's device. */
		struct rte_eth_dev *owndev = &rte_eth_devices[owner];

		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
		/* NOTE(review): this checks the caller port's started state,
		 * not the owner's (owndev) — confirm that is intentional.
		 */
		if (dev->data->dev_started != 1)
			return NULL;
		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
		/* The action must have been shared with this port as peer. */
		if (ct->peer != PORT_ID(priv))
			return NULL;
	}
	return ct;
}
2841 
2842 static inline uint16_t
2843 mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
2844 {
2845 	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
2846 		return RTE_ETHER_TYPE_TEB;
2847 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2848 		return RTE_ETHER_TYPE_IPV4;
2849 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2850 		return RTE_ETHER_TYPE_IPV6;
2851 	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
2852 		return RTE_ETHER_TYPE_MPLS;
2853 	return 0;
2854 }
2855 
2856 int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2857 			 struct rte_flow_error *error);
2858 
/*
 * Convert rte_color to mlx5 color.
 *
 * @param[in] rcol
 *   RTE meter color (enum rte_color).
 *
 * @return
 *   mlx5 color, or MLX5_FLOW_COLOR_UNDEFINED for an unknown color.
 */
2868 static inline int
2869 rte_col_2_mlx5_col(enum rte_color rcol)
2870 {
2871 	switch (rcol) {
2872 	case RTE_COLOR_GREEN:
2873 		return MLX5_FLOW_COLOR_GREEN;
2874 	case RTE_COLOR_YELLOW:
2875 		return MLX5_FLOW_COLOR_YELLOW;
2876 	case RTE_COLOR_RED:
2877 		return MLX5_FLOW_COLOR_RED;
2878 	default:
2879 		break;
2880 	}
2881 	return MLX5_FLOW_COLOR_UNDEFINED;
2882 }
2883 
2884 /**
2885  * Indicates whether flow source vport is representor port.
2886  *
2887  * @param[in] priv
2888  *   Pointer to device private context structure.
2889  * @param[in] act_priv
2890  *   Pointer to actual device private context structure if have.
2891  *
2892  * @return
2893  *   True when the flow source vport is representor port, false otherwise.
2894  */
2895 static inline bool
2896 flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
2897 {
2898 	MLX5_ASSERT(priv);
2899 	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
2900 		 (act_priv->representor_id != UINT16_MAX));
2901 }
2902 
/* All types of Ethernet patterns used in control flow rules. */
/* Used as the first index of mlx5_flow_hw_ctrl_rx::tables. */
enum mlx5_flow_ctrl_rx_eth_pattern_type {
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
	/* Sentinel — number of pattern types, not a valid pattern. */
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
};
2917 
/* All types of RSS actions used in control flow rules. */
/* Used as the second index of mlx5_flow_hw_ctrl_rx::tables. */
enum mlx5_flow_ctrl_rx_expanded_rss_type {
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
	/* Sentinel — number of expanded RSS types, not a valid type. */
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
};
2929 
/**
 * Contains pattern template, template table and its attributes for a single
 * combination of Ethernet pattern and RSS action. Used to create control flow rules
 * with HWS.
 */
struct mlx5_flow_hw_ctrl_rx_table {
	/* Attributes used to create the template table. */
	struct rte_flow_template_table_attr attr;
	/* Pattern template for this Ethernet pattern type. */
	struct rte_flow_pattern_template *pt;
	/* Template table built from @p attr and @p pt. */
	struct rte_flow_template_table *tbl;
};
2940 
/* Contains all templates required to create control flow rules with HWS. */
struct mlx5_flow_hw_ctrl_rx {
	/* Actions templates, one per expanded RSS type. */
	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
	/* Tables indexed by [Ethernet pattern type][expanded RSS type]. */
	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
};
2947 
/* Contains all templates required for control flow rules in FDB with HWS. */
struct mlx5_flow_hw_ctrl_fdb {
	/* E-Switch manager SQ miss rules (root table). */
	struct rte_flow_pattern_template *esw_mgr_items_tmpl;
	struct rte_flow_actions_template *regc_jump_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
	/* SQ miss rules (non-root table). */
	struct rte_flow_pattern_template *regc_sq_items_tmpl;
	struct rte_flow_actions_template *port_actions_tmpl;
	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
	/* Jump-to-group-1 rules. */
	struct rte_flow_pattern_template *port_items_tmpl;
	struct rte_flow_actions_template *jump_one_actions_tmpl;
	struct rte_flow_template_table *hw_esw_zero_tbl;
	/* Tx metadata copy rules. */
	struct rte_flow_pattern_template *tx_meta_items_tmpl;
	struct rte_flow_actions_template *tx_meta_actions_tmpl;
	struct rte_flow_template_table *hw_tx_meta_cpy_tbl;
	/* LACP Rx rules. */
	struct rte_flow_pattern_template *lacp_rx_items_tmpl;
	struct rte_flow_actions_template *lacp_rx_actions_tmpl;
	struct rte_flow_template_table *hw_lacp_rx_tbl;
};
2966 
/* Categories of control flow rules; combined into the flags argument
 * of mlx5_flow_hw_ctrl_flows().
 */
#define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
#define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
#define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
#define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
#define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
#define MLX5_CTRL_DMAC           (RTE_BIT32(5))
#define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))
2974 
2975 int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
2976 void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
2977 
2978 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
2979 			     const struct mlx5_flow_tunnel *tunnel,
2980 			     uint32_t group, uint32_t *table,
2981 			     const struct flow_grp_info *flags,
2982 			     struct rte_flow_error *error);
2983 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
2984 				     int tunnel, uint64_t layer_types,
2985 				     uint64_t hash_fields);
2986 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
2987 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
2988 				   uint32_t subpriority);
2989 uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
2990 					const struct rte_flow_attr *attr);
2991 uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
2992 				   const struct rte_flow_attr *attr,
2993 				   uint32_t subpriority, bool external);
2994 uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
2995 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
2996 				     enum mlx5_feature_name feature,
2997 				     uint32_t id,
2998 				     struct rte_flow_error *error);
2999 const struct rte_flow_action *mlx5_flow_find_action
3000 					(const struct rte_flow_action *actions,
3001 					 enum rte_flow_action_type action);
3002 int mlx5_validate_action_rss(struct rte_eth_dev *dev,
3003 			     const struct rte_flow_action *action,
3004 			     struct rte_flow_error *error);
3005 
3006 struct mlx5_hw_encap_decap_action*
3007 mlx5_reformat_action_create(struct rte_eth_dev *dev,
3008 			    const struct rte_flow_indir_action_conf *conf,
3009 			    const struct rte_flow_action *encap_action,
3010 			    const struct rte_flow_action *decap_action,
3011 			    struct rte_flow_error *error);
3012 int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
3013 				 struct rte_flow_action_list_handle *handle,
3014 				 struct rte_flow_error *error);
3015 int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
3016 				    const struct rte_flow_attr *attr,
3017 				    struct rte_flow_error *error);
3018 int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
3019 				   bool is_root,
3020 				   const struct rte_flow_attr *attr,
3021 				   struct rte_flow_error *error);
3022 int mlx5_flow_validate_action_flag(uint64_t action_flags,
3023 				   const struct rte_flow_attr *attr,
3024 				   struct rte_flow_error *error);
3025 int mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
3026 				   const struct rte_flow_action *action,
3027 				   uint64_t action_flags,
3028 				   const struct rte_flow_attr *attr,
3029 				   struct rte_flow_error *error);
3030 int mlx5_flow_validate_target_queue(struct rte_eth_dev *dev,
3031 				    const struct rte_flow_action *action,
3032 				    struct rte_flow_error *error);
3033 int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
3034 				    uint64_t action_flags,
3035 				    struct rte_eth_dev *dev,
3036 				    const struct rte_flow_attr *attr,
3037 				    struct rte_flow_error *error);
3038 int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
3039 				  uint64_t action_flags,
3040 				  struct rte_eth_dev *dev,
3041 				  const struct rte_flow_attr *attr,
3042 				  uint64_t item_flags,
3043 				  struct rte_flow_error *error);
3044 int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
3045 				const struct rte_flow_attr *attr,
3046 				struct rte_flow_error *error);
3047 int flow_validate_modify_field_level
3048 			(const struct rte_flow_field_data *data,
3049 			 struct rte_flow_error *error);
3050 int
3051 mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3052 				      uint64_t action_flags,
3053 				      const struct rte_flow_action *action,
3054 				      const struct rte_flow_attr *attr,
3055 				      struct rte_flow_error *error);
3056 int
3057 mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3058 				   uint64_t action_flags,
3059 				   const struct rte_flow_action *action,
3060 				   const uint64_t item_flags,
3061 				   const struct rte_flow_attr *attr,
3062 				   struct rte_flow_error *error);
3063 int
3064 mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3065 				    uint64_t action_flags,
3066 				    uint64_t item_flags,
3067 				    bool root,
3068 				    struct rte_flow_error *error);
3069 int
3070 mlx5_flow_dv_validate_action_raw_encap_decap
3071 	(struct rte_eth_dev *dev,
3072 	 const struct rte_flow_action_raw_decap *decap,
3073 	 const struct rte_flow_action_raw_encap *encap,
3074 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
3075 	 int *actions_n, const struct rte_flow_action *action,
3076 	 uint64_t item_flags, struct rte_flow_error *error);
3077 int mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
3078 			      const struct rte_flow_item *item,
3079 			      const uint8_t *mask,
3080 			      const uint8_t *nic_mask,
3081 			      unsigned int size,
3082 			      bool range_accepted,
3083 			      struct rte_flow_error *error);
3084 int mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev,
3085 				const struct rte_flow_item *item,
3086 				uint64_t item_flags, bool ext_vlan_sup,
3087 				struct rte_flow_error *error);
3088 int
3089 mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item,
3090 				uint64_t item_flags,
3091 				struct rte_eth_dev *dev,
3092 				struct rte_flow_error *error);
3093 int
3094 mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
3095 				const struct rte_flow_item *item,
3096 				uint64_t item_flags,
3097 				uint64_t last_item,
3098 				uint16_t ether_type,
3099 				const struct rte_flow_item_ipv4 *acc_mask,
3100 				struct rte_flow_error *error);
3101 int
3102 mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
3103 			       const struct rte_flow_item *item,
3104 			       uint64_t item_flags,
3105 			       struct rte_flow_error *error);
3106 int
3107 mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev,
3108 				   const struct rte_flow_item *item,
3109 				   uint64_t last_item,
3110 				   const struct rte_flow_item *gtp_item,
3111 				   bool root, struct rte_flow_error *error);
3112 int
3113 mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
3114 				  const struct rte_flow_item *item,
3115 				  uint64_t *item_flags,
3116 				  struct rte_flow_error *error);
3117 int mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
3118 				const struct rte_flow_item *item,
3119 				uint64_t item_flags,
3120 				uint8_t target_protocol,
3121 				struct rte_flow_error *error);
3122 int mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
3123 				    const struct rte_flow_item *item,
3124 				    uint64_t item_flags,
3125 				    const struct rte_flow_item *gre_item,
3126 				    struct rte_flow_error *error);
3127 int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
3128 				       const struct rte_flow_item *item,
3129 				       uint64_t item_flags,
3130 				       const struct rte_flow_attr *attr,
3131 				       const struct rte_flow_item *gre_item,
3132 				       struct rte_flow_error *error);
3133 int mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev,
3134 				 const struct rte_flow_item *item,
3135 				 uint64_t item_flags,
3136 				 uint64_t last_item,
3137 				 uint16_t ether_type,
3138 				 const struct rte_flow_item_ipv4 *acc_mask,
3139 				 bool range_accepted,
3140 				 struct rte_flow_error *error);
3141 int mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
3142 				 const struct rte_flow_item *item,
3143 				 uint64_t item_flags,
3144 				 uint64_t last_item,
3145 				 uint16_t ether_type,
3146 				 const struct rte_flow_item_ipv6 *acc_mask,
3147 				 struct rte_flow_error *error);
3148 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
3149 				 const struct rte_flow_item *item,
3150 				 uint64_t item_flags,
3151 				 uint64_t prev_layer,
3152 				 struct rte_flow_error *error);
3153 int mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev,
3154 				const struct rte_flow_item *item,
3155 				uint64_t item_flags,
3156 				uint8_t target_protocol,
3157 				const struct rte_flow_item_tcp *flow_mask,
3158 				struct rte_flow_error *error);
3159 int mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
3160 				const struct rte_flow_item *item,
3161 				uint64_t item_flags,
3162 				uint8_t target_protocol,
3163 				struct rte_flow_error *error);
3164 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
3165 				 uint64_t item_flags,
3166 				 struct rte_eth_dev *dev,
3167 				 struct rte_flow_error *error);
3168 int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
3169 				  uint16_t udp_dport,
3170 				  const struct rte_flow_item *item,
3171 				  uint64_t item_flags,
3172 				  bool root,
3173 				  struct rte_flow_error *error);
3174 int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
3175 				      uint64_t item_flags,
3176 				      struct rte_eth_dev *dev,
3177 				      struct rte_flow_error *error);
3178 int mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev,
3179 				 const struct rte_flow_item *item,
3180 				 uint64_t item_flags,
3181 				 uint8_t target_protocol,
3182 				 struct rte_flow_error *error);
3183 int mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
3184 				  const struct rte_flow_item *item,
3185 				  uint64_t item_flags,
3186 				  uint8_t target_protocol,
3187 				  struct rte_flow_error *error);
3188 int mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
3189 				       const struct rte_flow_item *item,
3190 				       uint64_t item_flags,
3191 				       uint8_t target_protocol,
3192 				       struct rte_flow_error *error);
3193 int mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev,
3194 				  const struct rte_flow_item *item,
3195 				  uint64_t item_flags,
3196 				  uint8_t target_protocol,
3197 				  struct rte_flow_error *error);
3198 int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
3199 				   uint64_t item_flags,
3200 				   struct rte_eth_dev *dev,
3201 				   struct rte_flow_error *error);
3202 int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
3203 				   uint64_t last_item,
3204 				   const struct rte_flow_item *geneve_item,
3205 				   struct rte_eth_dev *dev,
3206 				   struct rte_flow_error *error);
3207 int mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
3208 				  const struct rte_flow_item *item,
3209 				  uint64_t item_flags,
3210 				  uint64_t last_item,
3211 				  uint16_t ether_type,
3212 				  const struct rte_flow_item_ecpri *acc_mask,
3213 				  struct rte_flow_error *error);
3214 int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
3215 				const struct rte_flow_item *item,
3216 				struct rte_flow_error *error);
3217 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
3218 			      struct mlx5_flow_meter_info *fm,
3219 			      uint32_t mtr_idx,
3220 			      uint8_t domain_bitmap);
3221 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
3222 			       struct mlx5_flow_meter_info *fm);
3223 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
3224 struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
3225 		(struct rte_eth_dev *dev,
3226 		struct mlx5_flow_meter_policy *mtr_policy,
3227 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
3228 void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
3229 		struct mlx5_flow_meter_policy *mtr_policy);
3230 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
3231 int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
3232 int mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev);
3233 int mlx5_action_handle_attach(struct rte_eth_dev *dev);
3234 int mlx5_action_handle_detach(struct rte_eth_dev *dev);
3235 int mlx5_action_handle_flush(struct rte_eth_dev *dev);
3236 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
3237 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
3238 
3239 struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
3240 int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3241 			 void *cb_ctx);
3242 void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3243 struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
3244 					     struct mlx5_list_entry *oentry,
3245 					     void *entry_ctx);
3246 void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3247 struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3248 		uint32_t table_level, uint8_t egress, uint8_t transfer,
3249 		bool external, const struct mlx5_flow_tunnel *tunnel,
3250 		uint32_t group_id, uint8_t dummy,
3251 		uint32_t table_id, struct rte_flow_error *error);
3252 int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
3253 				 struct mlx5_flow_tbl_resource *tbl);
3254 
3255 struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
3256 int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3257 			 void *cb_ctx);
3258 void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3259 struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
3260 					     struct mlx5_list_entry *oentry,
3261 					     void *cb_ctx);
3262 void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3263 
3264 int flow_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3265 			    void *cb_ctx);
3266 struct mlx5_list_entry *flow_modify_create_cb(void *tool_ctx, void *ctx);
3267 void flow_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3268 struct mlx5_list_entry *flow_modify_clone_cb(void *tool_ctx,
3269 						struct mlx5_list_entry *oentry,
3270 						void *ctx);
3271 void flow_modify_clone_free_cb(void *tool_ctx,
3272 				  struct mlx5_list_entry *entry);
3273 
3274 struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
3275 int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3276 			  void *cb_ctx);
3277 void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3278 struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
3279 					      struct mlx5_list_entry *entry,
3280 					      void *ctx);
3281 void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3282 
3283 int flow_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3284 				 void *cb_ctx);
3285 struct mlx5_list_entry *flow_encap_decap_create_cb(void *tool_ctx,
3286 						      void *cb_ctx);
3287 void flow_encap_decap_remove_cb(void *tool_ctx,
3288 				   struct mlx5_list_entry *entry);
3289 struct mlx5_list_entry *flow_encap_decap_clone_cb(void *tool_ctx,
3290 						  struct mlx5_list_entry *entry,
3291 						  void *cb_ctx);
3292 void flow_encap_decap_clone_free_cb(void *tool_ctx,
3293 				       struct mlx5_list_entry *entry);
3294 int __flow_encap_decap_resource_register
3295 			(struct rte_eth_dev *dev,
3296 			 struct mlx5_flow_dv_encap_decap_resource *resource,
3297 			 bool is_root,
3298 			 struct mlx5_flow_dv_encap_decap_resource **encap_decap,
3299 			 struct rte_flow_error *error);
3300 int __flow_modify_hdr_resource_register
3301 			(struct rte_eth_dev *dev,
3302 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
3303 			 struct mlx5_flow_dv_modify_hdr_resource **modify,
3304 			 struct rte_flow_error *error);
3305 int flow_encap_decap_resource_release(struct rte_eth_dev *dev,
3306 				     uint32_t encap_decap_idx);
3307 int flow_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3308 			     void *ctx);
3309 struct mlx5_list_entry *flow_matcher_create_cb(void *tool_ctx, void *ctx);
3310 void flow_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3311 struct mlx5_list_entry *flow_matcher_clone_cb(void *tool_ctx __rte_unused,
3312 			 struct mlx5_list_entry *entry, void *cb_ctx);
3313 void flow_matcher_clone_free_cb(void *tool_ctx __rte_unused,
3314 			     struct mlx5_list_entry *entry);
3315 int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3316 			     void *cb_ctx);
3317 struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
3318 void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3319 struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
3320 				struct mlx5_list_entry *entry, void *cb_ctx);
3321 void flow_dv_port_id_clone_free_cb(void *tool_ctx,
3322 				   struct mlx5_list_entry *entry);
3323 
3324 int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3325 			       void *cb_ctx);
3326 struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
3327 						    void *cb_ctx);
3328 void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3329 struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
3330 				 struct mlx5_list_entry *entry, void *cb_ctx);
3331 void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
3332 				     struct mlx5_list_entry *entry);
3333 
3334 int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3335 			    void *cb_ctx);
3336 struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
3337 void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3338 struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
3339 				 struct mlx5_list_entry *entry, void *cb_ctx);
3340 void flow_dv_sample_clone_free_cb(void *tool_ctx,
3341 				  struct mlx5_list_entry *entry);
3342 
3343 int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3344 				void *cb_ctx);
3345 struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
3346 						     void *cb_ctx);
3347 void flow_dv_dest_array_remove_cb(void *tool_ctx,
3348 				  struct mlx5_list_entry *entry);
3349 struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
3350 				   struct mlx5_list_entry *entry, void *cb_ctx);
3351 void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
3352 				      struct mlx5_list_entry *entry);
3353 void flow_dv_hashfields_set(uint64_t item_flags,
3354 			    struct mlx5_flow_rss_desc *rss_desc,
3355 			    uint64_t *hash_fields);
3356 void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
3357 					uint64_t *hash_field);
3358 uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
3359 					const uint64_t hash_fields);
3360 int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3361 		     const struct rte_flow_item items[],
3362 		     const struct rte_flow_action actions[],
3363 		     bool external, int hairpin, struct rte_flow_error *error);
3364 
3365 struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
3366 void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3367 int flow_hw_grp_match_cb(void *tool_ctx,
3368 			 struct mlx5_list_entry *entry,
3369 			 void *cb_ctx);
3370 struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
3371 					     struct mlx5_list_entry *oentry,
3372 					     void *cb_ctx);
3373 void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
3374 
3375 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
3376 						    uint32_t age_idx);
3377 
3378 void flow_release_workspace(void *data);
3379 int mlx5_flow_os_init_workspace_once(void);
3380 void *mlx5_flow_os_get_specific_workspace(void);
3381 int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
3382 void mlx5_flow_os_release_workspace(void);
3383 uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
3384 void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
3385 int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
3386 			const struct rte_flow_action *actions[RTE_COLORS],
3387 			struct rte_flow_attr *attr,
3388 			bool *is_rss,
3389 			uint8_t *domain_bitmap,
3390 			uint8_t *policy_mode,
3391 			struct rte_mtr_error *error);
3392 void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
3393 		      struct mlx5_flow_meter_policy *mtr_policy);
3394 int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
3395 		      struct mlx5_flow_meter_policy *mtr_policy,
3396 		      const struct rte_flow_action *actions[RTE_COLORS],
3397 		      struct rte_flow_attr *attr,
3398 		      struct rte_mtr_error *error);
3399 int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
3400 			     struct mlx5_flow_meter_policy *mtr_policy);
3401 void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
3402 			     struct mlx5_flow_meter_policy *mtr_policy);
3403 int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
3404 void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
3405 void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
3406 		       struct mlx5_flow_handle *dev_handle);
3407 const struct mlx5_flow_tunnel *
3408 mlx5_get_tof(const struct rte_flow_item *items,
3409 	     const struct rte_flow_action *actions,
3410 	     enum mlx5_tof_rule_type *rule_type);
3411 void
3412 flow_hw_resource_release(struct rte_eth_dev *dev);
3413 int
3414 mlx5_geneve_tlv_options_destroy(struct mlx5_geneve_tlv_options *options,
3415 				struct mlx5_physical_device *phdev);
3416 int
3417 mlx5_geneve_tlv_options_check_busy(struct mlx5_priv *priv);
3418 void
3419 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
3420 int flow_dv_action_validate(struct rte_eth_dev *dev,
3421 			    const struct rte_flow_indir_action_conf *conf,
3422 			    const struct rte_flow_action *action,
3423 			    struct rte_flow_error *err);
3424 struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
3425 		      const struct rte_flow_indir_action_conf *conf,
3426 		      const struct rte_flow_action *action,
3427 		      struct rte_flow_error *err);
3428 int flow_dv_action_destroy(struct rte_eth_dev *dev,
3429 			   struct rte_flow_action_handle *handle,
3430 			   struct rte_flow_error *error);
3431 int flow_dv_action_update(struct rte_eth_dev *dev,
3432 			  struct rte_flow_action_handle *handle,
3433 			  const void *update,
3434 			  struct rte_flow_error *err);
3435 int flow_dv_action_query(struct rte_eth_dev *dev,
3436 			 const struct rte_flow_action_handle *handle,
3437 			 void *data,
3438 			 struct rte_flow_error *error);
3439 size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
3440 int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3441 			   size_t *size, struct rte_flow_error *error);
3442 void mlx5_flow_field_id_to_modify_info
3443 		(const struct rte_flow_field_data *data,
3444 		 struct field_modify_info *info, uint32_t *mask,
3445 		 uint32_t width, struct rte_eth_dev *dev,
3446 		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
3447 int flow_dv_convert_modify_action(struct rte_flow_item *item,
3448 			      struct field_modify_info *field,
3449 			      struct field_modify_info *dest,
3450 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
3451 			      uint32_t type, struct rte_flow_error *error);
3452 
/* E-Switch vport identifiers: the PF is vport 0; 0xFFFE presumably denotes
 * the embedded CPU PF (ECPF) on SmartNIC devices — confirm against PRM.
 */
#define MLX5_PF_VPORT_ID 0
#define MLX5_ECPF_VPORT_ID 0xFFFE

3456 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
3457 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
3458 				const struct rte_flow_item *item,
3459 				uint16_t *vport_id,
3460 				bool *all_ports,
3461 				struct rte_flow_error *error);
3462 
3463 int flow_dv_translate_items_hws(const struct rte_flow_item *items,
3464 				struct mlx5_flow_attr *attr, void *key,
3465 				uint32_t key_type, uint64_t *item_flags,
3466 				uint8_t *match_criteria,
3467 				struct rte_flow_error *error);
3468 
3469 int __flow_dv_translate_items_hws(const struct rte_flow_item *items,
3470 				struct mlx5_flow_attr *attr, void *key,
3471 				uint32_t key_type, uint64_t *item_flags,
3472 				uint8_t *match_criteria,
3473 				bool nt_flow,
3474 				struct rte_flow_error *error);
3475 
3476 int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
3477 				  uint16_t *proxy_port_id,
3478 				  struct rte_flow_error *error);
3479 int flow_null_get_aged_flows(struct rte_eth_dev *dev,
3480 		    void **context,
3481 		    uint32_t nb_contexts,
3482 		    struct rte_flow_error *error);
3483 uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
3484 void flow_null_counter_free(struct rte_eth_dev *dev,
3485 			uint32_t counter);
3486 int flow_null_counter_query(struct rte_eth_dev *dev,
3487 			uint32_t counter,
3488 			bool clear,
3489 		    uint64_t *pkts,
3490 			uint64_t *bytes,
3491 			void **action);
3492 
3493 int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
3494 
3495 int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
3496 					 uint32_t sqn, bool external);
3497 int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
3498 					  uint32_t sqn);
3499 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
3500 int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
3501 int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
3502 int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
3503 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
3504 		const struct rte_flow_actions_template_attr *attr,
3505 		const struct rte_flow_action actions[],
3506 		const struct rte_flow_action masks[],
3507 		struct rte_flow_error *error);
3508 int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
3509 		const struct rte_flow_pattern_template_attr *attr,
3510 		const struct rte_flow_item items[],
3511 		struct rte_flow_error *error);
3512 int flow_hw_table_update(struct rte_eth_dev *dev,
3513 			 struct rte_flow_error *error);
3514 int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
3515 			   enum rte_flow_field_id field, int inherit,
3516 			   const struct rte_flow_attr *attr,
3517 			   struct rte_flow_error *error);
3518 uintptr_t flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3519 				const struct rte_flow_attr *attr,
3520 				const struct rte_flow_item items[],
3521 				const struct rte_flow_action actions[],
3522 				bool external, struct rte_flow_error *error);
3523 void flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3524 				uintptr_t flow_idx);
3525 
3526 static __rte_always_inline int
3527 flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
3528 {
3529 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3530 	uint16_t port;
3531 
3532 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3533 		struct mlx5_priv *priv;
3534 		struct mlx5_hca_flex_attr *attr;
3535 		struct mlx5_devx_match_sample_info_query_attr *info;
3536 
3537 		priv = rte_eth_devices[port].data->dev_private;
3538 		attr = &priv->sh->cdev->config.hca_attr.flex;
3539 		if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) {
3540 			info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0];
3541 			if (priv->sh->srh_flex_parser.flex.mapnum)
3542 				return info->sample_dw_data * sizeof(uint32_t);
3543 			else
3544 				return UINT32_MAX;
3545 		}
3546 	}
3547 #endif
3548 	return UINT32_MAX;
3549 }
3550 
3551 static __rte_always_inline uint8_t
3552 flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
3553 {
3554 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3555 	uint16_t port;
3556 	struct mlx5_priv *priv;
3557 
3558 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3559 		priv = rte_eth_devices[port].data->dev_private;
3560 		if (priv->dr_ctx == dr_ctx)
3561 			return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
3562 	}
3563 #else
3564 	RTE_SET_USED(dr_ctx);
3565 #endif
3566 	return 0;
3567 }
3568 
3569 static __rte_always_inline uint16_t
3570 flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
3571 {
3572 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3573 	uint16_t port;
3574 	struct mlx5_priv *priv;
3575 	struct mlx5_flex_parser_devx *fp;
3576 
3577 	if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
3578 		return 0;
3579 	MLX5_ETH_FOREACH_DEV(port, NULL) {
3580 		priv = rte_eth_devices[port].data->dev_private;
3581 		if (priv->dr_ctx == dr_ctx) {
3582 			fp = priv->sh->srh_flex_parser.flex.devx_fp;
3583 			return fp->sample_info[idx].modify_field_id;
3584 		}
3585 	}
3586 #else
3587 	RTE_SET_USED(dr_ctx);
3588 	RTE_SET_USED(idx);
3589 #endif
3590 	return 0;
3591 }
3592 void
3593 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
3594 #ifdef HAVE_MLX5_HWS_SUPPORT
3595 struct mlx5_mirror;
3596 void
3597 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
3598 void
3599 mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
3600 			     struct mlx5_indirect_list *ptr);
3601 void
3602 mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
3603 			    struct mlx5_indirect_list *reformat);
3604 int
3605 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3606 		    const struct rte_flow_attr *attr,
3607 		    const struct rte_flow_item items[],
3608 		    const struct rte_flow_action actions[],
3609 		    uint64_t item_flags, uint64_t action_flags, bool external,
3610 		    struct rte_flow_hw **flow, struct rte_flow_error *error);
3611 void
3612 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow);
3613 void
3614 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
3615 		     uintptr_t flow_idx);
3616 const struct rte_flow_action_rss *
3617 flow_nta_locate_rss(struct rte_eth_dev *dev,
3618 		    const struct rte_flow_action actions[],
3619 		    struct rte_flow_error *error);
3620 struct rte_flow_hw *
3621 flow_nta_handle_rss(struct rte_eth_dev *dev,
3622 		    const struct rte_flow_attr *attr,
3623 		    const struct rte_flow_item items[],
3624 		    const struct rte_flow_action actions[],
3625 		    const struct rte_flow_action_rss *rss_conf,
3626 		    uint64_t item_flags, uint64_t action_flags,
3627 		    bool external, enum mlx5_flow_type flow_type,
3628 		    struct rte_flow_error *error);
3629 
3630 extern const struct rte_flow_action_raw_decap empty_decap;
3631 extern const struct rte_flow_item_ipv6 nic_ipv6_mask;
3632 extern const struct rte_flow_item_tcp nic_tcp_mask;
3633 
3634 #endif
3635 #endif /* RTE_PMD_MLX5_FLOW_H_ */
3636