1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <stdbool.h>
10 #include <sys/queue.h>
11 
12 #include <rte_common.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_eal_paging.h>
16 #include <rte_flow.h>
17 #include <rte_cycles.h>
18 #include <rte_flow_driver.h>
19 #include <rte_malloc.h>
20 #include <rte_ip.h>
21 
22 #include <mlx5_glue.h>
23 #include <mlx5_devx_cmds.h>
24 #include <mlx5_prm.h>
25 #include <mlx5_malloc.h>
26 
27 #include "mlx5_defs.h"
28 #include "mlx5.h"
29 #include "mlx5_flow.h"
30 #include "mlx5_flow_os.h"
31 #include "mlx5_rx.h"
32 #include "mlx5_tx.h"
33 #include "mlx5_common_os.h"
34 #include "rte_pmd_mlx5.h"
35 
36 struct tunnel_default_miss_ctx {
37 	uint16_t *queue;
38 	__extension__
39 	union {
40 		struct rte_flow_action_rss action_rss;
41 		struct rte_flow_action_queue miss_queue;
42 		struct rte_flow_action_jump miss_jump;
43 		uint8_t raw[0];
44 	};
45 };
46 
47 static int
48 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
49 			     struct rte_flow *flow,
50 			     const struct rte_flow_attr *attr,
51 			     const struct rte_flow_action *app_actions,
52 			     uint32_t flow_idx,
53 			     const struct mlx5_flow_tunnel *tunnel,
54 			     struct tunnel_default_miss_ctx *ctx,
55 			     struct rte_flow_error *error);
56 static struct mlx5_flow_tunnel *
57 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
58 static void
59 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
60 static uint32_t
61 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
62 				const struct mlx5_flow_tunnel *tunnel,
63 				uint32_t group, uint32_t *table,
64 				struct rte_flow_error *error);
65 
66 static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
67 static void mlx5_flow_pop_thread_workspace(void);
68 
69 
70 /** Device flow drivers. */
71 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
72 
73 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
74 
75 const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
76 	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
77 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
78 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
79 	[MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
80 #endif
81 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
82 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
83 };
84 
85 /** Helper macro to build input graph for mlx5_flow_expand_rss(). */
86 #define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
87 	(const int []){ \
88 		__VA_ARGS__, 0, \
89 	}
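
/*
 * For illustration (a sketch, not used by the driver code): the helper
 * above expands to a compound literal terminated by 0, e.g.
 *
 *	MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6)
 *
 * becomes
 *
 *	(const int []){ MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, 0, }
 *
 * which is why index 0 can act as the list terminator in the next-node
 * lists below (node 0 is the root and is never referenced as a next node).
 */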
90 
91 /** Node object of input graph for mlx5_flow_expand_rss(). */
92 struct mlx5_flow_expand_node {
93 	const int *const next;
94 	/**<
95 	 * List of next node indexes. Index 0 is interpreted as a terminator.
96 	 */
97 	const enum rte_flow_item_type type;
98 	/**< Pattern item type of current node. */
99 	uint64_t rss_types;
100 	/**<
101 	 * RSS types bit-field associated with this node
102 	 * (see RTE_ETH_RSS_* definitions).
103 	 */
104 	uint64_t node_flags;
105 	/**<
106 	 * Bit-fields that define how the node is used in the expansion
107 	 * (see MLX5_EXPANSION_NODE_* definitions).
108 	 */
109 };
110 
111 /* Optional expansion node. The expansion algorithm will not go deeper. */
112 #define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
113 
114 /* The node is not added implicitly as an expansion of the flow pattern.
115  * If the node type does not match the flow pattern item type, the
116  * expansion algorithm goes deeper into its next items.
117  * In the current implementation, the list of next node indexes can
118  * contain at most one node with this flag set, and it has to be the
119  * last node index (before the list terminator).
120  */
121 #define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
122 
123 /** Object returned by mlx5_flow_expand_rss(). */
124 struct mlx5_flow_expand_rss {
125 	uint32_t entries;
126 	/**< Number of valid entries in the @p entry array below. */
127 	struct {
128 		struct rte_flow_item *pattern; /**< Expanded pattern array. */
129 		uint32_t priority; /**< Priority offset for each expansion. */
130 	} entry[];
131 };
132 
133 static void
134 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
135 
136 static const struct mlx5_flow_expand_node *
137 mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
138 		unsigned int item_idx,
139 		const struct mlx5_flow_expand_node graph[],
140 		const struct mlx5_flow_expand_node *node);
141 
142 static bool
143 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
144 {
145 	switch (item->type) {
146 	case RTE_FLOW_ITEM_TYPE_ETH:
147 	case RTE_FLOW_ITEM_TYPE_VLAN:
148 	case RTE_FLOW_ITEM_TYPE_IPV4:
149 	case RTE_FLOW_ITEM_TYPE_IPV6:
150 	case RTE_FLOW_ITEM_TYPE_UDP:
151 	case RTE_FLOW_ITEM_TYPE_TCP:
152 	case RTE_FLOW_ITEM_TYPE_ESP:
153 	case RTE_FLOW_ITEM_TYPE_ICMP:
154 	case RTE_FLOW_ITEM_TYPE_ICMP6:
155 	case RTE_FLOW_ITEM_TYPE_VXLAN:
156 	case RTE_FLOW_ITEM_TYPE_NVGRE:
157 	case RTE_FLOW_ITEM_TYPE_GRE:
158 	case RTE_FLOW_ITEM_TYPE_GENEVE:
159 	case RTE_FLOW_ITEM_TYPE_MPLS:
160 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
161 	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
162 	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
163 	case RTE_FLOW_ITEM_TYPE_GTP:
164 		return true;
165 	default:
166 		break;
167 	}
168 	return false;
169 }
170 
171 /**
172  * Network Service Header (NSH) and its next protocol values
173  * are described in RFC-8393.
174  */
175 static enum rte_flow_item_type
176 mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
177 {
178 	enum rte_flow_item_type type;
179 
180 	switch (proto_mask & proto_spec) {
181 	case 0:
182 		type = RTE_FLOW_ITEM_TYPE_VOID;
183 		break;
184 	case RTE_VXLAN_GPE_TYPE_IPV4:
185 		type = RTE_FLOW_ITEM_TYPE_IPV4;
186 		break;
187 	case RTE_VXLAN_GPE_TYPE_IPV6:
188 		type = RTE_FLOW_ITEM_TYPE_IPV6;
189 		break;
190 	case RTE_VXLAN_GPE_TYPE_ETH:
191 		type = RTE_FLOW_ITEM_TYPE_ETH;
192 		break;
193 	default:
194 		type = RTE_FLOW_ITEM_TYPE_END;
195 	}
196 	return type;
197 }
198 
199 static enum rte_flow_item_type
200 mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
201 {
202 	enum rte_flow_item_type type;
203 
204 	switch (proto_mask & proto_spec) {
205 	case 0:
206 		type = RTE_FLOW_ITEM_TYPE_VOID;
207 		break;
208 	case IPPROTO_UDP:
209 		type = RTE_FLOW_ITEM_TYPE_UDP;
210 		break;
211 	case IPPROTO_TCP:
212 		type = RTE_FLOW_ITEM_TYPE_TCP;
213 		break;
214 	case IPPROTO_IPIP:
215 		type = RTE_FLOW_ITEM_TYPE_IPV4;
216 		break;
217 	case IPPROTO_IPV6:
218 		type = RTE_FLOW_ITEM_TYPE_IPV6;
219 		break;
220 	case IPPROTO_ESP:
221 		type = RTE_FLOW_ITEM_TYPE_ESP;
222 		break;
223 	default:
224 		type = RTE_FLOW_ITEM_TYPE_END;
225 	}
226 	return type;
227 }
228 
229 static enum rte_flow_item_type
230 mlx5_ethertype_to_item_type(rte_be16_t type_spec,
231 			    rte_be16_t type_mask, bool is_tunnel)
232 {
233 	enum rte_flow_item_type type;
234 
235 	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
236 	case 0:
237 		type = RTE_FLOW_ITEM_TYPE_VOID;
238 		break;
239 	case RTE_ETHER_TYPE_TEB:
240 		type = is_tunnel ?
241 		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
242 		break;
243 	case RTE_ETHER_TYPE_VLAN:
244 		type = !is_tunnel ?
245 		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
246 		break;
247 	case RTE_ETHER_TYPE_IPV4:
248 		type = RTE_FLOW_ITEM_TYPE_IPV4;
249 		break;
250 	case RTE_ETHER_TYPE_IPV6:
251 		type = RTE_FLOW_ITEM_TYPE_IPV6;
252 		break;
253 	default:
254 		type = RTE_FLOW_ITEM_TYPE_END;
255 	}
256 	return type;
257 }
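
/*
 * Worked example (illustrative only): for an Ethernet item with
 * spec->type = RTE_BE16(RTE_ETHER_TYPE_IPV4) and mask->type = RTE_BE16(0xffff),
 * the masked value is 0x0800 and RTE_FLOW_ITEM_TYPE_IPV4 is returned. With a
 * zero mask the masked value is 0 and RTE_FLOW_ITEM_TYPE_VOID is returned:
 * the item does not constrain the next protocol, so nothing can be derived
 * from it. The same spec & mask logic applies to the two helpers above.
 */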
258 
259 static enum rte_flow_item_type
260 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
261 {
262 #define MLX5_XSET_ITEM_MASK_SPEC(type, fld)                              \
263 	do {                                                             \
264 		const void *m = item->mask;                              \
265 		const void *s = item->spec;                              \
266 		mask = m ?                                               \
267 			((const struct rte_flow_item_##type *)m)->fld :  \
268 			rte_flow_item_##type##_mask.fld;                 \
269 		spec = ((const struct rte_flow_item_##type *)s)->fld;    \
270 	} while (0)
271 
272 	enum rte_flow_item_type ret;
273 	uint16_t spec, mask;
274 
275 	if (item == NULL || item->spec == NULL)
276 		return RTE_FLOW_ITEM_TYPE_VOID;
277 	switch (item->type) {
278 	case RTE_FLOW_ITEM_TYPE_ETH:
279 		MLX5_XSET_ITEM_MASK_SPEC(eth, type);
280 		if (!mask)
281 			return RTE_FLOW_ITEM_TYPE_VOID;
282 		ret = mlx5_ethertype_to_item_type(spec, mask, false);
283 		break;
284 	case RTE_FLOW_ITEM_TYPE_VLAN:
285 		MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
286 		if (!mask)
287 			return RTE_FLOW_ITEM_TYPE_VOID;
288 		ret = mlx5_ethertype_to_item_type(spec, mask, false);
289 		break;
290 	case RTE_FLOW_ITEM_TYPE_IPV4:
291 		MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
292 		if (!mask)
293 			return RTE_FLOW_ITEM_TYPE_VOID;
294 		ret = mlx5_inet_proto_to_item_type(spec, mask);
295 		break;
296 	case RTE_FLOW_ITEM_TYPE_IPV6:
297 		MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
298 		if (!mask)
299 			return RTE_FLOW_ITEM_TYPE_VOID;
300 		ret = mlx5_inet_proto_to_item_type(spec, mask);
301 		break;
302 	case RTE_FLOW_ITEM_TYPE_GENEVE:
303 		MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
304 		ret = mlx5_ethertype_to_item_type(spec, mask, true);
305 		break;
306 	case RTE_FLOW_ITEM_TYPE_GRE:
307 		MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
308 		ret = mlx5_ethertype_to_item_type(spec, mask, true);
309 		break;
310 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
311 		MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
312 		ret = mlx5_nsh_proto_to_item_type(spec, mask);
313 		break;
314 	default:
315 		ret = RTE_FLOW_ITEM_TYPE_VOID;
316 		break;
317 	}
318 	return ret;
319 #undef MLX5_XSET_ITEM_MASK_SPEC
320 }
321 
322 static const int *
323 mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
324 		const int *next_node)
325 {
326 	const struct mlx5_flow_expand_node *node = NULL;
327 	const int *next = next_node;
328 
329 	while (next && *next) {
330 		/*
331 		 * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
332 		 * flag set, because they were not found in the flow pattern.
333 		 */
334 		node = &graph[*next];
335 		if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
336 			break;
337 		next = node->next;
338 	}
339 	return next;
340 }
341 
342 #define MLX5_RSS_EXP_ELT_N 16
343 
344 /**
345  * Expand RSS flows into several possible flows according to the RSS hash
346  * fields requested and the driver capabilities.
347  *
348  * @param[out] buf
349  *   Buffer to store the result expansion.
350  * @param[in] size
351  *   Buffer size in bytes. If 0, @p buf can be NULL.
352  * @param[in] pattern
353  *   User flow pattern.
354  * @param[in] types
355  *   RSS types to expand (see RTE_ETH_RSS_* definitions).
356  * @param[in] graph
357  *   Input graph to expand @p pattern according to @p types.
358  * @param[in] graph_root_index
359  *   Index of root node in @p graph, typically 0.
360  *
361  * @return
362  *   A positive value representing the size of @p buf in bytes regardless of
363  *   @p size on success, a negative errno value otherwise and rte_errno is
364  *   set; the following errors are defined:
365  *
366  *   -E2BIG: the depth of @p graph is too big.
367  *   -EINVAL: @p size is not large enough for the expanded pattern.
368  */
369 static int
370 mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
371 		     const struct rte_flow_item *pattern, uint64_t types,
372 		     const struct mlx5_flow_expand_node graph[],
373 		     int graph_root_index)
374 {
375 	const struct rte_flow_item *item;
376 	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
377 	const int *next_node;
378 	const int *stack[MLX5_RSS_EXP_ELT_N];
379 	int stack_pos = 0;
380 	struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
381 	unsigned int i, item_idx, last_expand_item_idx = 0;
382 	size_t lsize;
383 	size_t user_pattern_size = 0;
384 	void *addr = NULL;
385 	const struct mlx5_flow_expand_node *next = NULL;
386 	struct rte_flow_item missed_item;
387 	int missed = 0;
388 	int elt = 0;
389 	const struct rte_flow_item *last_expand_item = NULL;
390 
391 	memset(&missed_item, 0, sizeof(missed_item));
392 	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
393 		MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
394 	if (lsize > size)
395 		return -EINVAL;
396 	buf->entry[0].priority = 0;
397 	buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
398 	buf->entries = 0;
399 	addr = buf->entry[0].pattern;
400 	for (item = pattern, item_idx = 0;
401 			item->type != RTE_FLOW_ITEM_TYPE_END;
402 			item++, item_idx++) {
403 		if (!mlx5_flow_is_rss_expandable_item(item)) {
404 			user_pattern_size += sizeof(*item);
405 			continue;
406 		}
407 		last_expand_item = item;
408 		last_expand_item_idx = item_idx;
409 		i = 0;
410 		while (node->next && node->next[i]) {
411 			next = &graph[node->next[i]];
412 			if (next->type == item->type)
413 				break;
414 			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
415 				node = next;
416 				i = 0;
417 			} else {
418 				++i;
419 			}
420 		}
421 		if (next)
422 			node = next;
423 		user_pattern_size += sizeof(*item);
424 	}
425 	user_pattern_size += sizeof(*item); /* Handle END item. */
426 	lsize += user_pattern_size;
427 	if (lsize > size)
428 		return -EINVAL;
429 	/* Copy the user pattern in the first entry of the buffer. */
430 	rte_memcpy(addr, pattern, user_pattern_size);
431 	addr = (void *)(((uintptr_t)addr) + user_pattern_size);
432 	buf->entries = 1;
433 	/* Start expanding. */
434 	memset(flow_items, 0, sizeof(flow_items));
435 	user_pattern_size -= sizeof(*item);
436 	/*
437 	 * Check if the last valid item has its spec set and the pattern needs
438 	 * to be completed, so that the pattern can be used for expansion.
439 	 */
440 	missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
441 	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
442 		/* Item type END indicates expansion is not required. */
443 		return lsize;
444 	}
445 	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
446 		next = NULL;
447 		missed = 1;
448 		i = 0;
449 		while (node->next && node->next[i]) {
450 			next = &graph[node->next[i]];
451 			if (next->type == missed_item.type) {
452 				flow_items[0].type = missed_item.type;
453 				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
454 				break;
455 			}
456 			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
457 				node = next;
458 				i = 0;
459 			} else {
460 				++i;
461 			}
462 			next = NULL;
463 		}
464 	}
465 	if (next && missed) {
466 		elt = 2; /* missed item + item end. */
467 		node = next;
468 		lsize += elt * sizeof(*item) + user_pattern_size;
469 		if (lsize > size)
470 			return -EINVAL;
471 		if (node->rss_types & types) {
472 			buf->entry[buf->entries].priority = 1;
473 			buf->entry[buf->entries].pattern = addr;
474 			buf->entries++;
475 			rte_memcpy(addr, buf->entry[0].pattern,
476 				   user_pattern_size);
477 			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
478 			rte_memcpy(addr, flow_items, elt * sizeof(*item));
479 			addr = (void *)(((uintptr_t)addr) +
480 					elt * sizeof(*item));
481 		}
482 	} else if (last_expand_item != NULL) {
483 		node = mlx5_flow_expand_rss_adjust_node(pattern,
484 				last_expand_item_idx, graph, node);
485 	}
486 	memset(flow_items, 0, sizeof(flow_items));
487 	next_node = mlx5_flow_expand_rss_skip_explicit(graph,
488 			node->next);
489 	stack[stack_pos] = next_node;
490 	node = next_node ? &graph[*next_node] : NULL;
491 	while (node) {
492 		flow_items[stack_pos].type = node->type;
493 		if (node->rss_types & types) {
494 			size_t n;
495 			/*
496 			 * Compute the number of items to copy from the
497 			 * expansion and copy them.
498 			 * When stack_pos is 0, there is 1 element in the
499 			 * stack, plus the additional END item.
500 			 */
501 			elt = stack_pos + 2;
502 			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
503 			lsize += elt * sizeof(*item) + user_pattern_size;
504 			if (lsize > size)
505 				return -EINVAL;
506 			n = elt * sizeof(*item);
507 			buf->entry[buf->entries].priority =
508 				stack_pos + 1 + missed;
509 			buf->entry[buf->entries].pattern = addr;
510 			buf->entries++;
511 			rte_memcpy(addr, buf->entry[0].pattern,
512 				   user_pattern_size);
513 			addr = (void *)(((uintptr_t)addr) +
514 					user_pattern_size);
515 			rte_memcpy(addr, &missed_item,
516 				   missed * sizeof(*item));
517 			addr = (void *)(((uintptr_t)addr) +
518 				missed * sizeof(*item));
519 			rte_memcpy(addr, flow_items, n);
520 			addr = (void *)(((uintptr_t)addr) + n);
521 		}
522 		/* Go deeper. */
523 		if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
524 				node->next) {
525 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
526 					node->next);
527 			if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
528 				rte_errno = E2BIG;
529 				return -rte_errno;
530 			}
531 			stack[stack_pos] = next_node;
532 		} else if (*(next_node + 1)) {
533 			/* Follow up with the next possibility. */
534 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
535 					++next_node);
536 		} else if (!stack_pos) {
537 			/*
538 			 * The traversal over the different paths is complete;
539 			 * next_node is advanced to the terminator.
540 			 */
541 			++next_node;
542 		} else {
543 			/* Move to the next path. */
544 			while (stack_pos) {
545 				next_node = stack[--stack_pos];
546 				next_node++;
547 				if (*next_node)
548 					break;
549 			}
550 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
551 					next_node);
552 			stack[stack_pos] = next_node;
553 		}
554 		node = next_node && *next_node ? &graph[*next_node] : NULL;
555 	}
556 	return lsize;
557 }
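
/*
 * Caller sketch (hedged: the buffer size and the create_one_subflow()
 * helper are examples local to this comment, not driver code). A caller
 * provides an on-stack buffer, passes the driver's expansion graph defined
 * below and creates one sub-flow per expanded entry:
 *
 *	union {
 *		struct mlx5_flow_expand_rss buf;
 *		uint8_t buffer[4096];
 *	} expand_buffer;
 *	struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
 *	int ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
 *				       items, rss->types,
 *				       mlx5_support_expansion,
 *				       MLX5_EXPANSION_ROOT);
 *	if (ret < 0)
 *		return ret;
 *	for (i = 0; i < buf->entries; ++i)
 *		create_one_subflow(buf->entry[i].pattern,
 *				   buf->entry[i].priority);
 */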
558 
559 enum mlx5_expansion {
560 	MLX5_EXPANSION_ROOT,
561 	MLX5_EXPANSION_ROOT_OUTER,
562 	MLX5_EXPANSION_OUTER_ETH,
563 	MLX5_EXPANSION_OUTER_VLAN,
564 	MLX5_EXPANSION_OUTER_IPV4,
565 	MLX5_EXPANSION_OUTER_IPV4_UDP,
566 	MLX5_EXPANSION_OUTER_IPV4_TCP,
567 	MLX5_EXPANSION_OUTER_IPV4_ESP,
568 	MLX5_EXPANSION_OUTER_IPV4_ICMP,
569 	MLX5_EXPANSION_OUTER_IPV6,
570 	MLX5_EXPANSION_OUTER_IPV6_UDP,
571 	MLX5_EXPANSION_OUTER_IPV6_TCP,
572 	MLX5_EXPANSION_OUTER_IPV6_ESP,
573 	MLX5_EXPANSION_OUTER_IPV6_ICMP6,
574 	MLX5_EXPANSION_VXLAN,
575 	MLX5_EXPANSION_STD_VXLAN,
576 	MLX5_EXPANSION_L3_VXLAN,
577 	MLX5_EXPANSION_VXLAN_GPE,
578 	MLX5_EXPANSION_GRE,
579 	MLX5_EXPANSION_NVGRE,
580 	MLX5_EXPANSION_GRE_KEY,
581 	MLX5_EXPANSION_MPLS,
582 	MLX5_EXPANSION_ETH,
583 	MLX5_EXPANSION_VLAN,
584 	MLX5_EXPANSION_IPV4,
585 	MLX5_EXPANSION_IPV4_UDP,
586 	MLX5_EXPANSION_IPV4_TCP,
587 	MLX5_EXPANSION_IPV4_ESP,
588 	MLX5_EXPANSION_IPV4_ICMP,
589 	MLX5_EXPANSION_IPV6,
590 	MLX5_EXPANSION_IPV6_UDP,
591 	MLX5_EXPANSION_IPV6_TCP,
592 	MLX5_EXPANSION_IPV6_ESP,
593 	MLX5_EXPANSION_IPV6_ICMP6,
594 	MLX5_EXPANSION_IPV6_FRAG_EXT,
595 	MLX5_EXPANSION_GTP,
596 	MLX5_EXPANSION_GENEVE,
597 };
598 
599 /** Supported expansion of items. */
600 static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
601 	[MLX5_EXPANSION_ROOT] = {
602 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
603 						  MLX5_EXPANSION_IPV4,
604 						  MLX5_EXPANSION_IPV6),
605 		.type = RTE_FLOW_ITEM_TYPE_END,
606 	},
607 	[MLX5_EXPANSION_ROOT_OUTER] = {
608 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
609 						  MLX5_EXPANSION_OUTER_IPV4,
610 						  MLX5_EXPANSION_OUTER_IPV6),
611 		.type = RTE_FLOW_ITEM_TYPE_END,
612 	},
613 	[MLX5_EXPANSION_OUTER_ETH] = {
614 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
615 		.type = RTE_FLOW_ITEM_TYPE_ETH,
616 		.rss_types = 0,
617 	},
618 	[MLX5_EXPANSION_OUTER_VLAN] = {
619 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
620 						  MLX5_EXPANSION_OUTER_IPV6),
621 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
622 		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
623 	},
624 	[MLX5_EXPANSION_OUTER_IPV4] = {
625 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
626 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
627 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
628 			 MLX5_EXPANSION_OUTER_IPV4_ESP,
629 			 MLX5_EXPANSION_OUTER_IPV4_ICMP,
630 			 MLX5_EXPANSION_GRE,
631 			 MLX5_EXPANSION_NVGRE,
632 			 MLX5_EXPANSION_IPV4,
633 			 MLX5_EXPANSION_IPV6),
634 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
635 		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
636 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
637 	},
638 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
639 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
640 						  MLX5_EXPANSION_VXLAN_GPE,
641 						  MLX5_EXPANSION_MPLS,
642 						  MLX5_EXPANSION_GENEVE,
643 						  MLX5_EXPANSION_GTP),
644 		.type = RTE_FLOW_ITEM_TYPE_UDP,
645 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
646 	},
647 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
648 		.type = RTE_FLOW_ITEM_TYPE_TCP,
649 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
650 	},
651 	[MLX5_EXPANSION_OUTER_IPV4_ESP] = {
652 		.type = RTE_FLOW_ITEM_TYPE_ESP,
653 		.rss_types = RTE_ETH_RSS_ESP,
654 	},
655 	[MLX5_EXPANSION_OUTER_IPV4_ICMP] = {
656 		.type = RTE_FLOW_ITEM_TYPE_ICMP,
657 	},
658 	[MLX5_EXPANSION_OUTER_IPV6] = {
659 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
660 			(MLX5_EXPANSION_OUTER_IPV6_UDP,
661 			 MLX5_EXPANSION_OUTER_IPV6_TCP,
662 			 MLX5_EXPANSION_OUTER_IPV6_ESP,
663 			 MLX5_EXPANSION_OUTER_IPV6_ICMP6,
664 			 MLX5_EXPANSION_IPV4,
665 			 MLX5_EXPANSION_IPV6,
666 			 MLX5_EXPANSION_GRE,
667 			 MLX5_EXPANSION_NVGRE),
668 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
669 		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
670 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
671 	},
672 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
673 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
674 						  MLX5_EXPANSION_VXLAN_GPE,
675 						  MLX5_EXPANSION_MPLS,
676 						  MLX5_EXPANSION_GENEVE,
677 						  MLX5_EXPANSION_GTP),
678 		.type = RTE_FLOW_ITEM_TYPE_UDP,
679 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
680 	},
681 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
682 		.type = RTE_FLOW_ITEM_TYPE_TCP,
683 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
684 	},
685 	[MLX5_EXPANSION_OUTER_IPV6_ESP] = {
686 		.type = RTE_FLOW_ITEM_TYPE_ESP,
687 		.rss_types = RTE_ETH_RSS_ESP,
688 	},
689 	[MLX5_EXPANSION_OUTER_IPV6_ICMP6] = {
690 		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
691 	},
692 	[MLX5_EXPANSION_VXLAN] = {
693 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
694 						  MLX5_EXPANSION_IPV4,
695 						  MLX5_EXPANSION_IPV6),
696 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
697 	},
698 	[MLX5_EXPANSION_STD_VXLAN] = {
699 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
700 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
701 	},
702 	[MLX5_EXPANSION_L3_VXLAN] = {
703 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
704 						  MLX5_EXPANSION_IPV6),
705 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
706 	},
707 	[MLX5_EXPANSION_VXLAN_GPE] = {
708 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
709 						  MLX5_EXPANSION_IPV4,
710 						  MLX5_EXPANSION_IPV6),
711 		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
712 	},
713 	[MLX5_EXPANSION_GRE] = {
714 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
715 						  MLX5_EXPANSION_IPV4,
716 						  MLX5_EXPANSION_IPV6,
717 						  MLX5_EXPANSION_GRE_KEY,
718 						  MLX5_EXPANSION_MPLS),
719 		.type = RTE_FLOW_ITEM_TYPE_GRE,
720 	},
721 	[MLX5_EXPANSION_GRE_KEY] = {
722 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
723 						  MLX5_EXPANSION_IPV6,
724 						  MLX5_EXPANSION_MPLS),
725 		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
726 		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
727 	},
728 	[MLX5_EXPANSION_NVGRE] = {
729 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
730 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
731 	},
732 	[MLX5_EXPANSION_MPLS] = {
733 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
734 						  MLX5_EXPANSION_IPV6,
735 						  MLX5_EXPANSION_ETH),
736 		.type = RTE_FLOW_ITEM_TYPE_MPLS,
737 		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
738 	},
739 	[MLX5_EXPANSION_ETH] = {
740 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
741 		.type = RTE_FLOW_ITEM_TYPE_ETH,
742 	},
743 	[MLX5_EXPANSION_VLAN] = {
744 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
745 						  MLX5_EXPANSION_IPV6),
746 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
747 		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
748 	},
749 	[MLX5_EXPANSION_IPV4] = {
750 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
751 						  MLX5_EXPANSION_IPV4_TCP,
752 						  MLX5_EXPANSION_IPV4_ESP,
753 						  MLX5_EXPANSION_IPV4_ICMP),
754 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
755 		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
756 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
757 	},
758 	[MLX5_EXPANSION_IPV4_UDP] = {
759 		.type = RTE_FLOW_ITEM_TYPE_UDP,
760 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
761 	},
762 	[MLX5_EXPANSION_IPV4_TCP] = {
763 		.type = RTE_FLOW_ITEM_TYPE_TCP,
764 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
765 	},
766 	[MLX5_EXPANSION_IPV4_ESP] = {
767 		.type = RTE_FLOW_ITEM_TYPE_ESP,
768 		.rss_types = RTE_ETH_RSS_ESP,
769 	},
770 	[MLX5_EXPANSION_IPV4_ICMP] = {
771 		.type = RTE_FLOW_ITEM_TYPE_ICMP,
772 	},
773 	[MLX5_EXPANSION_IPV6] = {
774 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
775 						  MLX5_EXPANSION_IPV6_TCP,
776 						  MLX5_EXPANSION_IPV6_ESP,
777 						  MLX5_EXPANSION_IPV6_ICMP6,
778 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
779 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
780 		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
781 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
782 	},
783 	[MLX5_EXPANSION_IPV6_UDP] = {
784 		.type = RTE_FLOW_ITEM_TYPE_UDP,
785 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
786 	},
787 	[MLX5_EXPANSION_IPV6_TCP] = {
788 		.type = RTE_FLOW_ITEM_TYPE_TCP,
789 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
790 	},
791 	[MLX5_EXPANSION_IPV6_ESP] = {
792 		.type = RTE_FLOW_ITEM_TYPE_ESP,
793 		.rss_types = RTE_ETH_RSS_ESP,
794 	},
795 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
796 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
797 	},
798 	[MLX5_EXPANSION_IPV6_ICMP6] = {
799 		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
800 	},
801 	[MLX5_EXPANSION_GTP] = {
802 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
803 						  MLX5_EXPANSION_IPV6),
804 		.type = RTE_FLOW_ITEM_TYPE_GTP,
805 	},
806 	[MLX5_EXPANSION_GENEVE] = {
807 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
808 						  MLX5_EXPANSION_IPV4,
809 						  MLX5_EXPANSION_IPV6),
810 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
811 	},
812 };
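
/*
 * Expansion walk-through (illustrative, assuming neither item carries a
 * spec that narrows the next protocol): with the graph above, the user
 * pattern ETH / IPV4 / END and RSS types
 * RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 * mlx5_flow_expand_rss() produces three entries:
 *
 *	ETH / IPV4 / END		(priority offset 0, user pattern)
 *	ETH / IPV4 / UDP / END		(priority offset 1)
 *	ETH / IPV4 / TCP / END		(priority offset 1)
 *
 * The priority offset grows with the match depth and is used by the
 * driver to order the resulting sub-flows relative to each other.
 */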
813 
814 static struct rte_flow_action_handle *
815 mlx5_action_handle_create(struct rte_eth_dev *dev,
816 			  const struct rte_flow_indir_action_conf *conf,
817 			  const struct rte_flow_action *action,
818 			  struct rte_flow_error *error);
819 static int mlx5_action_handle_destroy
820 				(struct rte_eth_dev *dev,
821 				 struct rte_flow_action_handle *handle,
822 				 struct rte_flow_error *error);
823 static int mlx5_action_handle_update
824 				(struct rte_eth_dev *dev,
825 				 struct rte_flow_action_handle *handle,
826 				 const void *update,
827 				 struct rte_flow_error *error);
828 static int mlx5_action_handle_query
829 				(struct rte_eth_dev *dev,
830 				 const struct rte_flow_action_handle *handle,
831 				 void *data,
832 				 struct rte_flow_error *error);
833 static int
834 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
835 		    struct rte_flow_tunnel *app_tunnel,
836 		    struct rte_flow_action **actions,
837 		    uint32_t *num_of_actions,
838 		    struct rte_flow_error *error);
839 static int
840 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
841 		       struct rte_flow_tunnel *app_tunnel,
842 		       struct rte_flow_item **items,
843 		       uint32_t *num_of_items,
844 		       struct rte_flow_error *error);
845 static int
846 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
847 			      struct rte_flow_item *pmd_items,
848 			      uint32_t num_items, struct rte_flow_error *err);
849 static int
850 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
851 				struct rte_flow_action *pmd_actions,
852 				uint32_t num_actions,
853 				struct rte_flow_error *err);
854 static int
855 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
856 				  struct rte_mbuf *m,
857 				  struct rte_flow_restore_info *info,
858 				  struct rte_flow_error *err);
859 static struct rte_flow_item_flex_handle *
860 mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
861 			   const struct rte_flow_item_flex_conf *conf,
862 			   struct rte_flow_error *error);
863 static int
864 mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
865 			    const struct rte_flow_item_flex_handle *handle,
866 			    struct rte_flow_error *error);
867 static int
868 mlx5_flow_info_get(struct rte_eth_dev *dev,
869 		   struct rte_flow_port_info *port_info,
870 		   struct rte_flow_queue_info *queue_info,
871 		   struct rte_flow_error *error);
872 static int
873 mlx5_flow_port_configure(struct rte_eth_dev *dev,
874 			 const struct rte_flow_port_attr *port_attr,
875 			 uint16_t nb_queue,
876 			 const struct rte_flow_queue_attr *queue_attr[],
877 			 struct rte_flow_error *err);
878 
879 static struct rte_flow_pattern_template *
880 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
881 		const struct rte_flow_pattern_template_attr *attr,
882 		const struct rte_flow_item items[],
883 		struct rte_flow_error *error);
884 
885 static int
886 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
887 				   struct rte_flow_pattern_template *template,
888 				   struct rte_flow_error *error);
889 static struct rte_flow_actions_template *
890 mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
891 			const struct rte_flow_actions_template_attr *attr,
892 			const struct rte_flow_action actions[],
893 			const struct rte_flow_action masks[],
894 			struct rte_flow_error *error);
895 static int
896 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
897 				   struct rte_flow_actions_template *template,
898 				   struct rte_flow_error *error);
899 
900 static struct rte_flow_template_table *
901 mlx5_flow_table_create(struct rte_eth_dev *dev,
902 		       const struct rte_flow_template_table_attr *attr,
903 		       struct rte_flow_pattern_template *item_templates[],
904 		       uint8_t nb_item_templates,
905 		       struct rte_flow_actions_template *action_templates[],
906 		       uint8_t nb_action_templates,
907 		       struct rte_flow_error *error);
908 static int
909 mlx5_flow_table_destroy(struct rte_eth_dev *dev,
910 			struct rte_flow_template_table *table,
911 			struct rte_flow_error *error);
912 static struct rte_flow *
913 mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
914 			    uint32_t queue,
915 			    const struct rte_flow_op_attr *attr,
916 			    struct rte_flow_template_table *table,
917 			    const struct rte_flow_item items[],
918 			    uint8_t pattern_template_index,
919 			    const struct rte_flow_action actions[],
920 			    uint8_t action_template_index,
921 			    void *user_data,
922 			    struct rte_flow_error *error);
923 static int
924 mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
925 			     uint32_t queue,
926 			     const struct rte_flow_op_attr *attr,
927 			     struct rte_flow *flow,
928 			     void *user_data,
929 			     struct rte_flow_error *error);
930 static int
931 mlx5_flow_pull(struct rte_eth_dev *dev,
932 	       uint32_t queue,
933 	       struct rte_flow_op_result res[],
934 	       uint16_t n_res,
935 	       struct rte_flow_error *error);
936 static int
937 mlx5_flow_push(struct rte_eth_dev *dev,
938 	       uint32_t queue,
939 	       struct rte_flow_error *error);
940 
941 static struct rte_flow_action_handle *
942 mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
943 				 const struct rte_flow_op_attr *attr,
944 				 const struct rte_flow_indir_action_conf *conf,
945 				 const struct rte_flow_action *action,
946 				 void *user_data,
947 				 struct rte_flow_error *error);
948 
949 static int
950 mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
951 				 const struct rte_flow_op_attr *attr,
952 				 struct rte_flow_action_handle *handle,
953 				 const void *update,
954 				 void *user_data,
955 				 struct rte_flow_error *error);
956 
957 static int
958 mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
959 				  const struct rte_flow_op_attr *attr,
960 				  struct rte_flow_action_handle *handle,
961 				  void *user_data,
962 				  struct rte_flow_error *error);
963 
964 static const struct rte_flow_ops mlx5_flow_ops = {
965 	.validate = mlx5_flow_validate,
966 	.create = mlx5_flow_create,
967 	.destroy = mlx5_flow_destroy,
968 	.flush = mlx5_flow_flush,
969 	.isolate = mlx5_flow_isolate,
970 	.query = mlx5_flow_query,
971 	.dev_dump = mlx5_flow_dev_dump,
972 	.get_aged_flows = mlx5_flow_get_aged_flows,
973 	.action_handle_create = mlx5_action_handle_create,
974 	.action_handle_destroy = mlx5_action_handle_destroy,
975 	.action_handle_update = mlx5_action_handle_update,
976 	.action_handle_query = mlx5_action_handle_query,
977 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
978 	.tunnel_match = mlx5_flow_tunnel_match,
979 	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
980 	.tunnel_item_release = mlx5_flow_tunnel_item_release,
981 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
982 	.flex_item_create = mlx5_flow_flex_item_create,
983 	.flex_item_release = mlx5_flow_flex_item_release,
984 	.info_get = mlx5_flow_info_get,
985 	.configure = mlx5_flow_port_configure,
986 	.pattern_template_create = mlx5_flow_pattern_template_create,
987 	.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
988 	.actions_template_create = mlx5_flow_actions_template_create,
989 	.actions_template_destroy = mlx5_flow_actions_template_destroy,
990 	.template_table_create = mlx5_flow_table_create,
991 	.template_table_destroy = mlx5_flow_table_destroy,
992 	.async_create = mlx5_flow_async_flow_create,
993 	.async_destroy = mlx5_flow_async_flow_destroy,
994 	.pull = mlx5_flow_pull,
995 	.push = mlx5_flow_push,
996 	.async_action_handle_create = mlx5_flow_async_action_handle_create,
997 	.async_action_handle_update = mlx5_flow_async_action_handle_update,
998 	.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
999 };
1000 
1001 /* Tunnel information. */
1002 struct mlx5_flow_tunnel_info {
1003 	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
1004 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
1005 };
1006 
1007 static struct mlx5_flow_tunnel_info tunnels_info[] = {
1008 	{
1009 		.tunnel = MLX5_FLOW_LAYER_VXLAN,
1010 		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
1011 	},
1012 	{
1013 		.tunnel = MLX5_FLOW_LAYER_GENEVE,
1014 		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
1015 	},
1016 	{
1017 		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
1018 		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
1019 	},
1020 	{
1021 		.tunnel = MLX5_FLOW_LAYER_GRE,
1022 		.ptype = RTE_PTYPE_TUNNEL_GRE,
1023 	},
1024 	{
1025 		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
1026 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
1027 	},
1028 	{
1029 		.tunnel = MLX5_FLOW_LAYER_MPLS,
1030 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
1031 	},
1032 	{
1033 		.tunnel = MLX5_FLOW_LAYER_NVGRE,
1034 		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
1035 	},
1036 	{
1037 		.tunnel = MLX5_FLOW_LAYER_IPIP,
1038 		.ptype = RTE_PTYPE_TUNNEL_IP,
1039 	},
1040 	{
1041 		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
1042 		.ptype = RTE_PTYPE_TUNNEL_IP,
1043 	},
1044 	{
1045 		.tunnel = MLX5_FLOW_LAYER_GTP,
1046 		.ptype = RTE_PTYPE_TUNNEL_GTPU,
1047 	},
1048 };
1049 
1050 
1051 
1052 /**
1053  * Translate tag ID to register.
1054  *
1055  * @param[in] dev
1056  *   Pointer to the Ethernet device structure.
1057  * @param[in] feature
1058  *   The feature that requests the register.
1059  * @param[in] id
1060  *   The requested register ID.
1061  * @param[out] error
1062  *   Error description, if any.
1063  *
1064  * @return
1065  *   The requested register on success, a negative errno
1066  *   value otherwise and rte_errno is set.
1067  */
1068 int
1069 mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
1070 		     enum mlx5_feature_name feature,
1071 		     uint32_t id,
1072 		     struct rte_flow_error *error)
1073 {
1074 	struct mlx5_priv *priv = dev->data->dev_private;
1075 	struct mlx5_sh_config *config = &priv->sh->config;
1076 	enum modify_reg start_reg;
1077 	bool skip_mtr_reg = false;
1078 
1079 	switch (feature) {
1080 	case MLX5_HAIRPIN_RX:
1081 		return REG_B;
1082 	case MLX5_HAIRPIN_TX:
1083 		return REG_A;
1084 	case MLX5_METADATA_RX:
1085 		switch (config->dv_xmeta_en) {
1086 		case MLX5_XMETA_MODE_LEGACY:
1087 			return REG_B;
1088 		case MLX5_XMETA_MODE_META16:
1089 			return REG_C_0;
1090 		case MLX5_XMETA_MODE_META32:
1091 			return REG_C_1;
1092 		}
1093 		break;
1094 	case MLX5_METADATA_TX:
1095 		return REG_A;
1096 	case MLX5_METADATA_FDB:
1097 		switch (config->dv_xmeta_en) {
1098 		case MLX5_XMETA_MODE_LEGACY:
1099 			return REG_NON;
1100 		case MLX5_XMETA_MODE_META16:
1101 			return REG_C_0;
1102 		case MLX5_XMETA_MODE_META32:
1103 			return REG_C_1;
1104 		}
1105 		break;
1106 	case MLX5_FLOW_MARK:
1107 		switch (config->dv_xmeta_en) {
1108 		case MLX5_XMETA_MODE_LEGACY:
1109 			return REG_NON;
1110 		case MLX5_XMETA_MODE_META16:
1111 			return REG_C_1;
1112 		case MLX5_XMETA_MODE_META32:
1113 			return REG_C_0;
1114 		}
1115 		break;
1116 	case MLX5_MTR_ID:
1117 		/*
1118 		 * If meter color and meter id share one register, flow match
1119 		 * should use the meter color register for match.
1120 		 */
1121 		if (priv->mtr_reg_share)
1122 			return priv->mtr_color_reg;
1123 		else
1124 			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
1125 			       REG_C_3;
1126 	case MLX5_MTR_COLOR:
1127 	case MLX5_ASO_FLOW_HIT:
1128 	case MLX5_ASO_CONNTRACK:
1129 	case MLX5_SAMPLE_ID:
1130 		/* All features use the same REG_C. */
1131 		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
1132 		return priv->mtr_color_reg;
1133 	case MLX5_COPY_MARK:
1134 		/*
1135 		 * The metadata COPY_MARK register is used only in the meter
1136 		 * suffix sub-flow, so it is safe to share the same register.
1137 		 */
1138 		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
1139 	case MLX5_APP_TAG:
1140 		/*
1141 		 * If a meter is enabled, it engages registers for color
1142 		 * match and flow match. If meter color match does not use
1143 		 * REG_C_2, the REG_C_x used by meter color match has to be
1144 		 * skipped.
1145 		 * If no meter is enabled, all available registers can be used.
1146 		 */
1147 		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
1148 			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
1149 		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
1150 		if (id > (uint32_t)(REG_C_7 - start_reg))
1151 			return rte_flow_error_set(error, EINVAL,
1152 						  RTE_FLOW_ERROR_TYPE_ITEM,
1153 						  NULL, "invalid tag id");
1154 		if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
1155 			return rte_flow_error_set(error, ENOTSUP,
1156 						  RTE_FLOW_ERROR_TYPE_ITEM,
1157 						  NULL, "unsupported tag id");
1158 		/*
1159 		 * This case means the meter is using a REG_C_x greater than
1160 		 * REG_C_2. Take care not to conflict with the meter color
1161 		 * REG_C_x: if the available register REG_C_y >= REG_C_x,
1162 		 * skip the color register.
1163 		 */
1164 		if (skip_mtr_reg && priv->sh->flow_mreg_c
1165 		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
1166 			if (id >= (uint32_t)(REG_C_7 - start_reg))
1167 				return rte_flow_error_set(error, EINVAL,
1168 						       RTE_FLOW_ERROR_TYPE_ITEM,
1169 							NULL, "invalid tag id");
1170 			if (priv->sh->flow_mreg_c
1171 			    [id + 1 + start_reg - REG_C_0] != REG_NON)
1172 				return priv->sh->flow_mreg_c
1173 					       [id + 1 + start_reg - REG_C_0];
1174 			return rte_flow_error_set(error, ENOTSUP,
1175 						  RTE_FLOW_ERROR_TYPE_ITEM,
1176 						  NULL, "unsupported tag id");
1177 		}
1178 		return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
1179 	}
1180 	MLX5_ASSERT(false);
1181 	return rte_flow_error_set(error, EINVAL,
1182 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1183 				  NULL, "invalid feature name");
1184 }
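
/*
 * Mapping example (illustrative): with dv_xmeta_en == MLX5_XMETA_MODE_META16,
 * mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, &error) returns REG_C_1 while
 * the metadata itself (MLX5_METADATA_RX) lives in REG_C_0; in
 * MLX5_XMETA_MODE_META32 the two swap places, and in legacy mode the MARK
 * feature has no register at all and REG_NON is returned.
 */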
1185 
1186 /**
1187  * Check extensive flow metadata register support.
1188  *
1189  * @param dev
1190  *   Pointer to rte_eth_dev structure.
1191  *
1192  * @return
1193  *   True if device supports extensive flow metadata register, otherwise false.
1194  */
1195 bool
1196 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
1197 {
1198 	struct mlx5_priv *priv = dev->data->dev_private;
1199 
1200 	/*
1201 	 * Having reg_c available can be regarded as supporting extensive
1202 	 * flow metadata registers, which means:
1203 	 * - metadata register copy action by modify header is available.
1204 	 * - 16 modify header actions are supported.
1205 	 * - reg_c's are preserved across different domains (FDB and NIC) on
1206 	 *   packet loopback by flow lookup miss.
1207 	 */
1208 	return priv->sh->flow_mreg_c[2] != REG_NON;
1209 }
1210 
1211 /**
1212  * Get the lowest priority.
1213  *
1214  * @param[in] dev
1215  *   Pointer to the Ethernet device structure.
1216  * @param[in] attributes
1217  *   Pointer to device flow rule attributes.
1218  *
1219  * @return
1220  *   The value of lowest priority of flow.
1221  */
1222 uint32_t
1223 mlx5_get_lowest_priority(struct rte_eth_dev *dev,
1224 			  const struct rte_flow_attr *attr)
1225 {
1226 	struct mlx5_priv *priv = dev->data->dev_private;
1227 
1228 	if (!attr->group && !attr->transfer)
1229 		return priv->sh->flow_max_priority - 2;
1230 	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
1231 }
1232 
1233 /**
1234  * Calculate matcher priority of the flow.
1235  *
1236  * @param[in] dev
1237  *   Pointer to the Ethernet device structure.
1238  * @param[in] attr
1239  *   Pointer to device flow rule attributes.
1240  * @param[in] subpriority
1241  *   The priority based on the items.
1242  * @param[in] external
1243  *   Flow is user flow.
1244  * @return
1245  *   The matcher priority of the flow.
1246  */
1247 uint16_t
1248 mlx5_get_matcher_priority(struct rte_eth_dev *dev,
1249 			  const struct rte_flow_attr *attr,
1250 			  uint32_t subpriority, bool external)
1251 {
1252 	uint16_t priority = (uint16_t)attr->priority;
1253 	struct mlx5_priv *priv = dev->data->dev_private;
1254 
1255 	if (!attr->group && !attr->transfer) {
1256 		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1257 			priority = priv->sh->flow_max_priority - 1;
1258 		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
1259 	} else if (!external && attr->transfer && attr->group == 0 &&
1260 		   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
1261 		return (priv->sh->flow_max_priority - 1) * 3;
1262 	}
1263 	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1264 		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
1265 	return priority * 3 + subpriority;
1266 }
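
/*
 * Arithmetic sketch (illustrative): for a non-root table with
 * attr->priority == 2 and subpriority == 1 the function returns
 * 2 * 3 + 1 = 7, i.e. every flow rule priority level reserves three
 * matcher priorities so that the item-based subpriority can be encoded
 * in between.
 */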
1267 
1268 /**
1269  * Verify the @p item specifications (spec, last, mask) are compatible with the
1270  * NIC capabilities.
1271  *
1272  * @param[in] item
1273  *   Item specification.
1274  * @param[in] mask
1275  *   @p item->mask or flow default bit-masks.
1276  * @param[in] nic_mask
1277  *   Bit-masks covering supported fields by the NIC to compare with user mask.
1278  * @param[in] size
1279  *   Bit-masks size in bytes.
1280  * @param[in] range_accepted
1281  *   True if range of values is accepted for specific fields, false otherwise.
1282  * @param[out] error
1283  *   Pointer to error structure.
1284  *
1285  * @return
1286  *   0 on success, a negative errno value otherwise and rte_errno is set.
1287  */
1288 int
1289 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
1290 			  const uint8_t *mask,
1291 			  const uint8_t *nic_mask,
1292 			  unsigned int size,
1293 			  bool range_accepted,
1294 			  struct rte_flow_error *error)
1295 {
1296 	unsigned int i;
1297 
1298 	MLX5_ASSERT(nic_mask);
1299 	for (i = 0; i < size; ++i)
1300 		if ((nic_mask[i] | mask[i]) != nic_mask[i])
1301 			return rte_flow_error_set(error, ENOTSUP,
1302 						  RTE_FLOW_ERROR_TYPE_ITEM,
1303 						  item,
1304 						  "mask enables unsupported"
1305 						  " bits");
1306 	if (!item->spec && (item->mask || item->last))
1307 		return rte_flow_error_set(error, EINVAL,
1308 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1309 					  "mask/last without a spec is not"
1310 					  " supported");
1311 	if (item->spec && item->last && !range_accepted) {
1312 		uint8_t spec[size];
1313 		uint8_t last[size];
1314 		unsigned int i;
1315 		int ret;
1316 
1317 		for (i = 0; i < size; ++i) {
1318 			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
1319 			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
1320 		}
1321 		ret = memcmp(spec, last, size);
1322 		if (ret != 0)
1323 			return rte_flow_error_set(error, EINVAL,
1324 						  RTE_FLOW_ERROR_TYPE_ITEM,
1325 						  item,
1326 						  "range is not valid");
1327 	}
1328 	return 0;
1329 }
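
/*
 * Caller sketch (hedged: the nic_mask below is only an example, each item
 * validator defines its own mask of the fields supported by the NIC;
 * MLX5_ITEM_RANGE_NOT_ACCEPTED means range_accepted == false):
 *
 *	static const struct rte_flow_item_udp nic_mask = {
 *		.hdr = {
 *			.src_port = RTE_BE16(UINT16_MAX),
 *			.dst_port = RTE_BE16(UINT16_MAX),
 *		},
 *	};
 *
 *	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
 *					(const uint8_t *)&nic_mask,
 *					sizeof(struct rte_flow_item_udp),
 *					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 */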
1330 
1331 /**
1332  * Adjust the hash fields according to the @p flow information.
1333  *
1334  * @param[in] rss_desc
1335  *   Pointer to the RSS descriptor.
1336  * @param[in] tunnel
1337  *   1 when the hash field is for a tunnel item.
1338  * @param[in] layer_types
1339  *   RTE_ETH_RSS_* types.
1340  * @param[in] hash_fields
1341  *   Item hash fields.
1342  *
1343  * @return
1344  *   The hash fields that should be used.
1345  */
1346 uint64_t
1347 mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
1348 			    int tunnel __rte_unused, uint64_t layer_types,
1349 			    uint64_t hash_fields)
1350 {
1351 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1352 	int rss_request_inner = rss_desc->level >= 2;
1353 
1354 	/* Check RSS hash level for tunnel. */
1355 	if (tunnel && rss_request_inner)
1356 		hash_fields |= IBV_RX_HASH_INNER;
1357 	else if (tunnel || rss_request_inner)
1358 		return 0;
1359 #endif
1360 	/* Check if requested layer matches RSS hash fields. */
1361 	if (!(rss_desc->types & layer_types))
1362 		return 0;
1363 	return hash_fields;
1364 }
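
/*
 * Worked example (illustrative): for an inner UDP layer with tunnel == 1
 * and rss_desc->level >= 2, IBV_RX_HASH_INNER is OR-ed into @p hash_fields;
 * if rss_desc->types requested only RTE_ETH_RSS_NONFRAG_IPV4_TCP while
 * @p layer_types is RTE_ETH_RSS_NONFRAG_IPV4_UDP, the function returns 0
 * and this layer contributes nothing to the hash.
 */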
1365 
1366 /**
1367  * Look up and set the tunnel ptype in the Rx queue data. A single ptype
1368  * can be used; if several tunnel rules are used on this queue, the tunnel
1369  * ptype is cleared.
1370  *
1371  * @param rxq_ctrl
1372  *   Rx queue to update.
1373  */
1374 static void
1375 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
1376 {
1377 	unsigned int i;
1378 	uint32_t tunnel_ptype = 0;
1379 
1380 	/* Look up for the ptype to use. */
1381 	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
1382 		if (!rxq_ctrl->flow_tunnels_n[i])
1383 			continue;
1384 		if (!tunnel_ptype) {
1385 			tunnel_ptype = tunnels_info[i].ptype;
1386 		} else {
1387 			tunnel_ptype = 0;
1388 			break;
1389 		}
1390 	}
1391 	rxq_ctrl->rxq.tunnel = tunnel_ptype;
1392 }
1393 
1394 /**
1395  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
1396  * flow.
1397  *
1398  * @param[in] dev
1399  *   Pointer to the Ethernet device structure.
1400  * @param[in] dev_handle
1401  *   Pointer to device flow handle structure.
1402  */
1403 void
1404 flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
1405 		       struct mlx5_flow_handle *dev_handle)
1406 {
1407 	struct mlx5_priv *priv = dev->data->dev_private;
1408 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
1409 	struct mlx5_ind_table_obj *ind_tbl = NULL;
1410 	unsigned int i;
1411 
1412 	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1413 		struct mlx5_hrxq *hrxq;
1414 
1415 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1416 			      dev_handle->rix_hrxq);
1417 		if (hrxq)
1418 			ind_tbl = hrxq->ind_table;
1419 	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
1420 		struct mlx5_shared_action_rss *shared_rss;
1421 
1422 		shared_rss = mlx5_ipool_get
1423 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
1424 			 dev_handle->rix_srss);
1425 		if (shared_rss)
1426 			ind_tbl = shared_rss->ind_tbl;
1427 	}
1428 	if (!ind_tbl)
1429 		return;
1430 	for (i = 0; i != ind_tbl->queues_n; ++i) {
1431 		int idx = ind_tbl->queues[i];
1432 		struct mlx5_rxq_ctrl *rxq_ctrl;
1433 
1434 		if (mlx5_is_external_rxq(dev, idx))
1435 			continue;
1436 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
1437 		MLX5_ASSERT(rxq_ctrl != NULL);
1438 		if (rxq_ctrl == NULL)
1439 			continue;
1440 		/*
1441 		 * To support metadata register copy on Tx loopback,
1442 		 * this must always be enabled (metadata may arrive
1443 		 * from another port - not from local flows only).
1444 		 */
1445 		if (tunnel) {
1446 			unsigned int j;
1447 
1448 			/* Increase the counter matching the flow. */
1449 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
1450 				if ((tunnels_info[j].tunnel &
1451 				     dev_handle->layers) ==
1452 				    tunnels_info[j].tunnel) {
1453 					rxq_ctrl->flow_tunnels_n[j]++;
1454 					break;
1455 				}
1456 			}
1457 			flow_rxq_tunnel_ptype_update(rxq_ctrl);
1458 		}
1459 	}
1460 }
1461 
1462 static void
1463 flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
1464 {
1465 	struct mlx5_priv *priv = dev->data->dev_private;
1466 	struct mlx5_rxq_ctrl *rxq_ctrl;
1467 
1468 	if (priv->mark_enabled)
1469 		return;
1470 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1471 		rxq_ctrl->rxq.mark = 1;
1472 	}
1473 	priv->mark_enabled = 1;
1474 }
1475 
1476 /**
1477  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
1478  *
1479  * @param[in] dev
1480  *   Pointer to the Ethernet device structure.
1481  * @param[in] flow
1482  *   Pointer to flow structure.
1483  */
1484 static void
1485 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
1486 {
1487 	struct mlx5_priv *priv = dev->data->dev_private;
1488 	uint32_t handle_idx;
1489 	struct mlx5_flow_handle *dev_handle;
1490 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1491 
1492 	MLX5_ASSERT(wks);
1493 	if (wks->mark)
1494 		flow_rxq_mark_flag_set(dev);
1495 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1496 		       handle_idx, dev_handle, next)
1497 		flow_drv_rxq_flags_set(dev, dev_handle);
1498 }
1499 
1500 /**
1501  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
1502  * device flow if no other flow uses it with the same kind of request.
1503  *
1504  * @param dev
1505  *   Pointer to Ethernet device.
1506  * @param[in] dev_handle
1507  *   Pointer to the device flow handle structure.
1508  */
1509 static void
1510 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
1511 			struct mlx5_flow_handle *dev_handle)
1512 {
1513 	struct mlx5_priv *priv = dev->data->dev_private;
1514 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
1515 	struct mlx5_ind_table_obj *ind_tbl = NULL;
1516 	unsigned int i;
1517 
1518 	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1519 		struct mlx5_hrxq *hrxq;
1520 
1521 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1522 			      dev_handle->rix_hrxq);
1523 		if (hrxq)
1524 			ind_tbl = hrxq->ind_table;
1525 	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
1526 		struct mlx5_shared_action_rss *shared_rss;
1527 
1528 		shared_rss = mlx5_ipool_get
1529 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
1530 			 dev_handle->rix_srss);
1531 		if (shared_rss)
1532 			ind_tbl = shared_rss->ind_tbl;
1533 	}
1534 	if (!ind_tbl)
1535 		return;
1536 	MLX5_ASSERT(dev->data->dev_started);
1537 	for (i = 0; i != ind_tbl->queues_n; ++i) {
1538 		int idx = ind_tbl->queues[i];
1539 		struct mlx5_rxq_ctrl *rxq_ctrl;
1540 
1541 		if (mlx5_is_external_rxq(dev, idx))
1542 			continue;
1543 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
1544 		MLX5_ASSERT(rxq_ctrl != NULL);
1545 		if (rxq_ctrl == NULL)
1546 			continue;
1547 		if (tunnel) {
1548 			unsigned int j;
1549 
1550 			/* Decrease the counter matching the flow. */
1551 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
1552 				if ((tunnels_info[j].tunnel &
1553 				     dev_handle->layers) ==
1554 				    tunnels_info[j].tunnel) {
1555 					rxq_ctrl->flow_tunnels_n[j]--;
1556 					break;
1557 				}
1558 			}
1559 			flow_rxq_tunnel_ptype_update(rxq_ctrl);
1560 		}
1561 	}
1562 }
1563 
1564 /**
1565  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
1566  * @p flow if no other flow uses it with the same kind of request.
1567  *
1568  * @param dev
1569  *   Pointer to Ethernet device.
1570  * @param[in] flow
1571  *   Pointer to the flow.
1572  */
1573 static void
1574 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
1575 {
1576 	struct mlx5_priv *priv = dev->data->dev_private;
1577 	uint32_t handle_idx;
1578 	struct mlx5_flow_handle *dev_handle;
1579 
1580 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1581 		       handle_idx, dev_handle, next)
1582 		flow_drv_rxq_flags_trim(dev, dev_handle);
1583 }
1584 
1585 /**
1586  * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
1587  *
1588  * @param dev
1589  *   Pointer to Ethernet device.
1590  */
1591 static void
1592 flow_rxq_flags_clear(struct rte_eth_dev *dev)
1593 {
1594 	struct mlx5_priv *priv = dev->data->dev_private;
1595 	unsigned int i;
1596 
1597 	for (i = 0; i != priv->rxqs_n; ++i) {
1598 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
1599 		unsigned int j;
1600 
1601 		if (rxq == NULL || rxq->ctrl == NULL)
1602 			continue;
1603 		rxq->ctrl->rxq.mark = 0;
1604 		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
1605 			rxq->ctrl->flow_tunnels_n[j] = 0;
1606 		rxq->ctrl->rxq.tunnel = 0;
1607 	}
1608 	priv->mark_enabled = 0;
1609 }
1610 
1611 /**
1612  * Set the Rx queue dynamic metadata (mask and offset) for a flow
1613  *
1614  * @param[in] dev
1615  *   Pointer to the Ethernet device structure.
1616  */
1617 void
1618 mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
1619 {
1620 	struct mlx5_priv *priv = dev->data->dev_private;
1621 	unsigned int i;
1622 
1623 	for (i = 0; i != priv->rxqs_n; ++i) {
1624 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
1625 		struct mlx5_rxq_data *data;
1626 
1627 		if (rxq == NULL || rxq->ctrl == NULL)
1628 			continue;
1629 		data = &rxq->ctrl->rxq;
1630 		if (!rte_flow_dynf_metadata_avail()) {
1631 			data->dynf_meta = 0;
1632 			data->flow_meta_mask = 0;
1633 			data->flow_meta_offset = -1;
1634 			data->flow_meta_port_mask = 0;
1635 		} else {
1636 			data->dynf_meta = 1;
1637 			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
1638 			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
1639 			data->flow_meta_port_mask = priv->sh->dv_meta_mask;
1640 		}
1641 	}
1642 }
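
/*
 * Application-side sketch (hedged, not driver code): the metadata dynamic
 * field is available only if the application registered it before the
 * port was started, e.g.:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata dynfield");
 *
 * Otherwise rte_flow_dynf_metadata_avail() is false here and the Rx queues
 * are configured with metadata delivery disabled.
 */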
1643 
1644 /*
1645  * Return a pointer to the desired action in the list of actions.
1646  *
1647  * @param[in] actions
1648  *   The list of actions to search the action in.
1649  * @param[in] action
1650  *   The action to find.
1651  *
1652  * @return
1653  *   Pointer to the action in the list, if found. NULL otherwise.
1654  */
1655 const struct rte_flow_action *
1656 mlx5_flow_find_action(const struct rte_flow_action *actions,
1657 		      enum rte_flow_action_type action)
1658 {
1659 	if (actions == NULL)
1660 		return NULL;
1661 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
1662 		if (actions->type == action)
1663 			return actions;
1664 	return NULL;
1665 }
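
/*
 * Usage sketch (illustrative):
 *
 *	const struct rte_flow_action *rss =
 *		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 *	if (rss != NULL)
 *		rss_conf = rss->conf;
 */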
1666 
1667 /*
1668  * Validate the flag action.
1669  *
1670  * @param[in] action_flags
1671  *   Bit-fields that holds the actions detected until now.
1672  * @param[in] attr
1673  *   Attributes of flow that includes this action.
1674  * @param[out] error
1675  *   Pointer to error structure.
1676  *
1677  * @return
1678  *   0 on success, a negative errno value otherwise and rte_errno is set.
1679  */
1680 int
1681 mlx5_flow_validate_action_flag(uint64_t action_flags,
1682 			       const struct rte_flow_attr *attr,
1683 			       struct rte_flow_error *error)
1684 {
1685 	if (action_flags & MLX5_FLOW_ACTION_MARK)
1686 		return rte_flow_error_set(error, EINVAL,
1687 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1688 					  "can't mark and flag in same flow");
1689 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
1690 		return rte_flow_error_set(error, EINVAL,
1691 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1692 					  "can't have 2 flag"
1693 					  " actions in same flow");
1694 	if (attr->egress)
1695 		return rte_flow_error_set(error, ENOTSUP,
1696 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1697 					  "flag action not supported for "
1698 					  "egress");
1699 	return 0;
1700 }
1701 
1702 /*
1703  * Validate the mark action.
1704  *
1705  * @param[in] action
1706  *   Pointer to the mark action.
1707  * @param[in] action_flags
1708  *   Bit-fields that hold the actions detected until now.
1709  * @param[in] attr
1710  *   Attributes of flow that includes this action.
1711  * @param[out] error
1712  *   Pointer to error structure.
1713  *
1714  * @return
1715  *   0 on success, a negative errno value otherwise and rte_errno is set.
1716  */
1717 int
1718 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
1719 			       uint64_t action_flags,
1720 			       const struct rte_flow_attr *attr,
1721 			       struct rte_flow_error *error)
1722 {
1723 	const struct rte_flow_action_mark *mark = action->conf;
1724 
1725 	if (!mark)
1726 		return rte_flow_error_set(error, EINVAL,
1727 					  RTE_FLOW_ERROR_TYPE_ACTION,
1728 					  action,
1729 					  "configuration cannot be null");
1730 	if (mark->id >= MLX5_FLOW_MARK_MAX)
1731 		return rte_flow_error_set(error, EINVAL,
1732 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1733 					  &mark->id,
1734 					  "mark id must be in 0 <= id < "
1735 					  RTE_STR(MLX5_FLOW_MARK_MAX));
1736 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
1737 		return rte_flow_error_set(error, EINVAL,
1738 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1739 					  "can't flag and mark in same flow");
1740 	if (action_flags & MLX5_FLOW_ACTION_MARK)
1741 		return rte_flow_error_set(error, EINVAL,
1742 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1743 					  "can't have 2 mark actions in same"
1744 					  " flow");
1745 	if (attr->egress)
1746 		return rte_flow_error_set(error, ENOTSUP,
1747 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1748 					  "mark action not supported for "
1749 					  "egress");
1750 	return 0;
1751 }
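
/*
 * Usage sketch (illustrative only): a MARK action configuration accepted
 * by mlx5_flow_validate_action_mark() on an ingress flow. The id value is
 * hypothetical and must stay below MLX5_FLOW_MARK_MAX; combining MARK with
 * FLAG, or using two MARK actions, is rejected above.
 *
 *	static const struct rte_flow_action_mark mark_conf = { .id = 0x1234 };
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * On receive, the id is reported in mbuf->hash.fdir.hi together with the
 * RTE_MBUF_F_RX_FDIR_ID flag.
 */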
1752 
1753 /*
1754  * Validate the drop action.
1755  *
1756  * @param[in] action_flags
1757  *   Bit-fields that hold the actions detected until now.
1758  * @param[in] attr
1759  *   Attributes of flow that includes this action.
1760  * @param[out] error
1761  *   Pointer to error structure.
1762  *
1763  * @return
1764  *   0 on success, a negative errno value otherwise and rte_errno is set.
1765  */
1766 int
1767 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
1768 			       const struct rte_flow_attr *attr,
1769 			       struct rte_flow_error *error)
1770 {
1771 	if (attr->egress)
1772 		return rte_flow_error_set(error, ENOTSUP,
1773 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1774 					  "drop action not supported for "
1775 					  "egress");
1776 	return 0;
1777 }
1778 
1779 /*
1780  * Validate the queue action.
1781  *
1782  * @param[in] action
1783  *   Pointer to the queue action.
1784  * @param[in] action_flags
1785  *   Bit-fields that hold the actions detected until now.
1786  * @param[in] dev
1787  *   Pointer to the Ethernet device structure.
1788  * @param[in] attr
1789  *   Attributes of flow that includes this action.
1790  * @param[out] error
1791  *   Pointer to error structure.
1792  *
1793  * @return
1794  *   0 on success, a negative errno value otherwise and rte_errno is set.
1795  */
1796 int
1797 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
1798 				uint64_t action_flags,
1799 				struct rte_eth_dev *dev,
1800 				const struct rte_flow_attr *attr,
1801 				struct rte_flow_error *error)
1802 {
1803 	struct mlx5_priv *priv = dev->data->dev_private;
1804 	const struct rte_flow_action_queue *queue = action->conf;
1805 
1806 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1807 		return rte_flow_error_set(error, EINVAL,
1808 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1809 					  "can't have 2 fate actions in"
1810 					  " same flow");
1811 	if (attr->egress)
1812 		return rte_flow_error_set(error, ENOTSUP,
1813 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1814 					  "queue action not supported for egress.");
1815 	if (mlx5_is_external_rxq(dev, queue->index))
1816 		return 0;
1817 	if (!priv->rxqs_n)
1818 		return rte_flow_error_set(error, EINVAL,
1819 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1820 					  NULL, "No Rx queues configured");
1821 	if (queue->index >= priv->rxqs_n)
1822 		return rte_flow_error_set(error, EINVAL,
1823 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1824 					  &queue->index,
1825 					  "queue index out of range");
1826 	if (mlx5_rxq_get(dev, queue->index) == NULL)
1827 		return rte_flow_error_set(error, EINVAL,
1828 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1829 					  &queue->index,
1830 					  "queue is not configured");
1831 	return 0;
1832 }
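
/*
 * Usage sketch (illustrative only): a QUEUE fate action that satisfies
 * mlx5_flow_validate_action_queue(). The index is a hypothetical Rx queue
 * previously set up with rte_eth_rx_queue_setup(); only one fate action
 * (QUEUE/RSS/DROP/...) may appear in a single flow.
 *
 *	static const struct rte_flow_action_queue queue_conf = { .index = 3 };
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */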
1833 
1834 /**
1835  * Validate queue numbers for device RSS.
1836  *
1837  * @param[in] dev
1838  *   Configured device.
1839  * @param[in] queues
1840  *   Array of queue numbers.
1841  * @param[in] queues_n
1842  *   Size of the @p queues array.
1843  * @param[out] error
1844  *   On error, filled with a textual error description.
1845  * @param[out] queue_idx
1846  *   On error, filled with an offending queue index in @p queues array.
1847  *
1848  * @return
1849  *   0 on success, a negative errno code on error.
1850  */
1851 static int
1852 mlx5_validate_rss_queues(struct rte_eth_dev *dev,
1853 			 const uint16_t *queues, uint32_t queues_n,
1854 			 const char **error, uint32_t *queue_idx)
1855 {
1856 	const struct mlx5_priv *priv = dev->data->dev_private;
1857 	bool is_hairpin = false;
1858 	bool is_ext_rss = false;
1859 	uint32_t i;
1860 
1861 	for (i = 0; i != queues_n; ++i) {
1862 		struct mlx5_rxq_ctrl *rxq_ctrl;
1863 
1864 		if (mlx5_is_external_rxq(dev, queues[i])) {
1865 			is_ext_rss = true;
1866 			continue;
1867 		}
1868 		if (is_ext_rss) {
1869 			*error = "Combining external and regular RSS queues is not supported";
1870 			*queue_idx = i;
1871 			return -ENOTSUP;
1872 		}
1873 		if (queues[i] >= priv->rxqs_n) {
1874 			*error = "queue index out of range";
1875 			*queue_idx = i;
1876 			return -EINVAL;
1877 		}
1878 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
1879 		if (rxq_ctrl == NULL) {
1880 			*error = "queue is not configured";
1881 			*queue_idx = i;
1882 			return -EINVAL;
1883 		}
1884 		if (i == 0 && rxq_ctrl->is_hairpin)
1885 			is_hairpin = true;
1886 		if (is_hairpin != rxq_ctrl->is_hairpin) {
1887 			*error = "combining hairpin and regular RSS queues is not supported";
1888 			*queue_idx = i;
1889 			return -ENOTSUP;
1890 		}
1891 	}
1892 	return 0;
1893 }
1894 
1895 /*
1896  * Validate the rss action.
1897  *
1898  * @param[in] dev
1899  *   Pointer to the Ethernet device structure.
1900  * @param[in] action
1901  *   Pointer to the RSS action.
1902  * @param[out] error
1903  *   Pointer to error structure.
1904  *
1905  * @return
1906  *   0 on success, a negative errno value otherwise and rte_errno is set.
1907  */
1908 int
1909 mlx5_validate_action_rss(struct rte_eth_dev *dev,
1910 			 const struct rte_flow_action *action,
1911 			 struct rte_flow_error *error)
1912 {
1913 	struct mlx5_priv *priv = dev->data->dev_private;
1914 	const struct rte_flow_action_rss *rss = action->conf;
1915 	int ret;
1916 	const char *message;
1917 	uint32_t queue_idx;
1918 
1919 	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
1920 	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
1921 		return rte_flow_error_set(error, ENOTSUP,
1922 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1923 					  &rss->func,
1924 					  "RSS hash function not supported");
1925 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1926 	if (rss->level > 2)
1927 #else
1928 	if (rss->level > 1)
1929 #endif
1930 		return rte_flow_error_set(error, ENOTSUP,
1931 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1932 					  &rss->level,
1933 					  "tunnel RSS is not supported");
1934 	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
1935 	if (rss->key_len == 0 && rss->key != NULL)
1936 		return rte_flow_error_set(error, ENOTSUP,
1937 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1938 					  &rss->key_len,
1939 					  "RSS hash key length 0");
1940 	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
1941 		return rte_flow_error_set(error, ENOTSUP,
1942 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1943 					  &rss->key_len,
1944 					  "RSS hash key too small");
1945 	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
1946 		return rte_flow_error_set(error, ENOTSUP,
1947 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1948 					  &rss->key_len,
1949 					  "RSS hash key too large");
1950 	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
1951 		return rte_flow_error_set(error, ENOTSUP,
1952 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1953 					  &rss->queue_num,
1954 					  "number of queues too large");
1955 	if (rss->types & MLX5_RSS_HF_MASK)
1956 		return rte_flow_error_set(error, ENOTSUP,
1957 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1958 					  &rss->types,
1959 					  "some RSS protocols are not"
1960 					  " supported");
1961 	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
1962 	    !(rss->types & RTE_ETH_RSS_IP))
1963 		return rte_flow_error_set(error, EINVAL,
1964 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1965 					  "L3 partial RSS requested but L3 RSS"
1966 					  " type not specified");
1967 	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
1968 	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
1969 		return rte_flow_error_set(error, EINVAL,
1970 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1971 					  "L4 partial RSS requested but L4 RSS"
1972 					  " type not specified");
1973 	if (!priv->rxqs_n && priv->ext_rxqs == NULL)
1974 		return rte_flow_error_set(error, EINVAL,
1975 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1976 					  NULL, "No Rx queues configured");
1977 	if (!rss->queue_num)
1978 		return rte_flow_error_set(error, EINVAL,
1979 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1980 					  NULL, "No queues configured");
1981 	ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
1982 				       &message, &queue_idx);
1983 	if (ret != 0) {
1984 		return rte_flow_error_set(error, -ret,
1985 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1986 					  &rss->queue[queue_idx], message);
1987 	}
1988 	return 0;
1989 }
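
/*
 * Usage sketch (illustrative only): an RSS configuration that passes
 * mlx5_validate_action_rss(): default (Toeplitz) hash function, outer
 * level, NULL key with key_len 0 (the default key is used) and hash types
 * within the supported mask. Queue numbers are hypothetical.
 *
 *	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *	static const struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 1,
 *		.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
 *		.key_len = 0,
 *		.key = NULL,
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 *
 * Level 1 selects the outermost headers; level 2 (inner RSS) is only
 * accepted when tunnel offloading support is compiled in.
 */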
1990 
1991 /*
1992  * Validate the rss action.
1993  *
1994  * @param[in] action
1995  *   Pointer to the RSS action.
1996  * @param[in] action_flags
1997  *   Bit-fields that hold the actions detected until now.
1998  * @param[in] dev
1999  *   Pointer to the Ethernet device structure.
2000  * @param[in] attr
2001  *   Attributes of flow that includes this action.
2002  * @param[in] item_flags
2003  *   Items that were detected.
2004  * @param[out] error
2005  *   Pointer to error structure.
2006  *
2007  * @return
2008  *   0 on success, a negative errno value otherwise and rte_errno is set.
2009  */
2010 int
2011 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
2012 			      uint64_t action_flags,
2013 			      struct rte_eth_dev *dev,
2014 			      const struct rte_flow_attr *attr,
2015 			      uint64_t item_flags,
2016 			      struct rte_flow_error *error)
2017 {
2018 	const struct rte_flow_action_rss *rss = action->conf;
2019 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2020 	int ret;
2021 
2022 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2023 		return rte_flow_error_set(error, EINVAL,
2024 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2025 					  "can't have 2 fate actions"
2026 					  " in same flow");
2027 	ret = mlx5_validate_action_rss(dev, action, error);
2028 	if (ret)
2029 		return ret;
2030 	if (attr->egress)
2031 		return rte_flow_error_set(error, ENOTSUP,
2032 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2033 					  "rss action not supported for "
2034 					  "egress");
2035 	if (rss->level > 1 && !tunnel)
2036 		return rte_flow_error_set(error, EINVAL,
2037 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2038 					  "inner RSS is not supported for "
2039 					  "non-tunnel flows");
2040 	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
2041 	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
2042 		return rte_flow_error_set(error, EINVAL,
2043 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2044 					  "RSS on eCPRI is not supported now");
2045 	}
2046 	if ((item_flags & MLX5_FLOW_LAYER_MPLS) &&
2047 	    !(item_flags &
2048 	      (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) &&
2049 	    rss->level > 1)
2050 		return rte_flow_error_set(error, EINVAL,
2051 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2052 					  "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern");
2053 	return 0;
2054 }
2055 
2056 /*
2057  * Validate the default miss action.
2058  *
2059  * @param[in] action_flags
2060  *   Bit-fields that holds the actions detected until now.
2061  * @param[out] error
2062  *   Pointer to error structure.
2063  *
2064  * @return
2065  *   0 on success, a negative errno value otherwise and rte_errno is set.
2066  */
2067 int
2068 mlx5_flow_validate_action_default_miss(uint64_t action_flags,
2069 				const struct rte_flow_attr *attr,
2070 				struct rte_flow_error *error)
2071 {
2072 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2073 		return rte_flow_error_set(error, EINVAL,
2074 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2075 					  "can't have 2 fate actions in"
2076 					  " same flow");
2077 	if (attr->egress)
2078 		return rte_flow_error_set(error, ENOTSUP,
2079 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2080 					  "default miss action not supported "
2081 					  "for egress");
2082 	if (attr->group)
2083 		return rte_flow_error_set(error, ENOTSUP,
2084 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2085 					  "only group 0 is supported");
2086 	if (attr->transfer)
2087 		return rte_flow_error_set(error, ENOTSUP,
2088 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2089 					  NULL, "transfer is not supported");
2090 	return 0;
2091 }
2092 
2093 /*
2094  * Validate the count action.
2095  *
2096  * @param[in] dev
2097  *   Pointer to the Ethernet device structure.
2098  * @param[in] attr
2099  *   Attributes of flow that includes this action.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 int
2107 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
2108 				const struct rte_flow_attr *attr,
2109 				struct rte_flow_error *error)
2110 {
2111 	if (attr->egress)
2112 		return rte_flow_error_set(error, ENOTSUP,
2113 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2114 					  "count action not supported for "
2115 					  "egress");
2116 	return 0;
2117 }
2118 
2119 /*
2120  * Validate the ASO CT action.
2121  *
2122  * @param[in] dev
2123  *   Pointer to the Ethernet device structure.
2124  * @param[in] conntrack
2125  *   Pointer to the CT action profile.
2126  * @param[out] error
2127  *   Pointer to error structure.
2128  *
2129  * @return
2130  *   0 on success, a negative errno value otherwise and rte_errno is set.
2131  */
2132 int
2133 mlx5_validate_action_ct(struct rte_eth_dev *dev,
2134 			const struct rte_flow_action_conntrack *conntrack,
2135 			struct rte_flow_error *error)
2136 {
2137 	RTE_SET_USED(dev);
2138 
2139 	if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT)
2140 		return rte_flow_error_set(error, EINVAL,
2141 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2142 					  "Invalid CT state");
2143 	if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST)
2144 		return rte_flow_error_set(error, EINVAL,
2145 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2146 					  "Invalid last TCP packet flag");
2147 	return 0;
2148 }
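
/*
 * Usage sketch (illustrative only): a minimal conntrack profile passing
 * mlx5_validate_action_ct(). Only the two fields checked above are shown;
 * a complete profile also fills the per-direction TCP parameters.
 *
 *	static const struct rte_flow_action_conntrack ct_conf = {
 *		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *		.last_index = RTE_FLOW_CONNTRACK_FLAG_ACK,
 *	};
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_CONNTRACK, .conf = &ct_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */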
2149 
2150 /**
2151  * Verify the @p attributes will be correctly understood by the NIC.
2153  *
2154  * @param[in] dev
2155  *   Pointer to the Ethernet device structure.
2156  * @param[in] attributes
2157  *   Pointer to flow attributes
2158  * @param[out] error
2159  *   Pointer to error structure.
2160  *
2161  * @return
2162  *   0 on success, a negative errno value otherwise and rte_errno is set.
2163  */
2164 int
2165 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
2166 			      const struct rte_flow_attr *attributes,
2167 			      struct rte_flow_error *error)
2168 {
2169 	struct mlx5_priv *priv = dev->data->dev_private;
2170 	uint32_t priority_max = priv->sh->flow_max_priority - 1;
2171 
2172 	if (attributes->group)
2173 		return rte_flow_error_set(error, ENOTSUP,
2174 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2175 					  NULL, "groups are not supported");
2176 	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
2177 	    attributes->priority >= priority_max)
2178 		return rte_flow_error_set(error, ENOTSUP,
2179 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2180 					  NULL, "priority out of range");
2181 	if (attributes->egress)
2182 		return rte_flow_error_set(error, ENOTSUP,
2183 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2184 					  "egress is not supported");
2185 	if (attributes->transfer && !priv->sh->config.dv_esw_en)
2186 		return rte_flow_error_set(error, ENOTSUP,
2187 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2188 					  NULL, "transfer is not supported");
2189 	if (!attributes->ingress)
2190 		return rte_flow_error_set(error, EINVAL,
2191 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2192 					  NULL,
2193 					  "ingress attribute is mandatory");
2194 	return 0;
2195 }
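
/*
 * Usage sketch (illustrative only): attributes accepted by this validator:
 * group 0, ingress only, and a priority below the maximum reported in
 * priv->sh->flow_max_priority.
 *
 *	static const struct rte_flow_attr attr = {
 *		.group = 0,
 *		.priority = 0,
 *		.ingress = 1,
 *	};
 */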
2196 
2197 /**
2198  * Validate ICMP6 item.
2199  *
2200  * @param[in] item
2201  *   Item specification.
2202  * @param[in] item_flags
2203  *   Bit-fields that hold the items detected until now.
2204  * @param[in] target_protocol
2205  *   The next protocol in the previous item.
2206  * @param[out] error
2207  *   Pointer to error structure.
2208  *
2209  * @return
2210  *   0 on success, a negative errno value otherwise and rte_errno is set.
2211  */
2212 int
2213 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
2214 			       uint64_t item_flags,
2215 			       uint8_t target_protocol,
2216 			       struct rte_flow_error *error)
2217 {
2218 	const struct rte_flow_item_icmp6 *mask = item->mask;
2219 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2220 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2221 				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2222 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2223 				      MLX5_FLOW_LAYER_OUTER_L4;
2224 	int ret;
2225 
2226 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
2227 		return rte_flow_error_set(error, EINVAL,
2228 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2229 					  "protocol filtering not compatible"
2230 					  " with ICMP6 layer");
2231 	if (!(item_flags & l3m))
2232 		return rte_flow_error_set(error, EINVAL,
2233 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2234 					  "IPv6 is mandatory to filter on"
2235 					  " ICMP6");
2236 	if (item_flags & l4m)
2237 		return rte_flow_error_set(error, EINVAL,
2238 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2239 					  "multiple L4 layers not supported");
2240 	if (!mask)
2241 		mask = &rte_flow_item_icmp6_mask;
2242 	ret = mlx5_flow_item_acceptable
2243 		(item, (const uint8_t *)mask,
2244 		 (const uint8_t *)&rte_flow_item_icmp6_mask,
2245 		 sizeof(struct rte_flow_item_icmp6),
2246 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2247 	if (ret < 0)
2248 		return ret;
2249 	return 0;
2250 }
2251 
2252 /**
2253  * Validate ICMP item.
2254  *
2255  * @param[in] item
2256  *   Item specification.
2257  * @param[in] item_flags
2258  *   Bit-fields that holds the items detected until now.
2259  * @param[out] error
2260  *   Pointer to error structure.
2261  *
2262  * @return
2263  *   0 on success, a negative errno value otherwise and rte_errno is set.
2264  */
2265 int
2266 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
2267 			     uint64_t item_flags,
2268 			     uint8_t target_protocol,
2269 			     struct rte_flow_error *error)
2270 {
2271 	const struct rte_flow_item_icmp *mask = item->mask;
2272 	const struct rte_flow_item_icmp nic_mask = {
2273 		.hdr.icmp_type = 0xff,
2274 		.hdr.icmp_code = 0xff,
2275 		.hdr.icmp_ident = RTE_BE16(0xffff),
2276 		.hdr.icmp_seq_nb = RTE_BE16(0xffff),
2277 	};
2278 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2279 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2280 				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2281 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2282 				      MLX5_FLOW_LAYER_OUTER_L4;
2283 	int ret;
2284 
2285 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
2286 		return rte_flow_error_set(error, EINVAL,
2287 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2288 					  "protocol filtering not compatible"
2289 					  " with ICMP layer");
2290 	if (!(item_flags & l3m))
2291 		return rte_flow_error_set(error, EINVAL,
2292 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2293 					  "IPv4 is mandatory to filter"
2294 					  " on ICMP");
2295 	if (item_flags & l4m)
2296 		return rte_flow_error_set(error, EINVAL,
2297 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2298 					  "multiple L4 layers not supported");
2299 	if (!mask)
2300 		mask = &nic_mask;
2301 	ret = mlx5_flow_item_acceptable
2302 		(item, (const uint8_t *)mask,
2303 		 (const uint8_t *)&nic_mask,
2304 		 sizeof(struct rte_flow_item_icmp),
2305 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2306 	if (ret < 0)
2307 		return ret;
2308 	return 0;
2309 }
2310 
2311 /**
2312  * Validate Ethernet item.
2313  *
2314  * @param[in] item
2315  *   Item specification.
2316  * @param[in] item_flags
2317  *   Bit-fields that hold the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
2318  * @param[out] error
2319  *   Pointer to error structure.
2320  *
2321  * @return
2322  *   0 on success, a negative errno value otherwise and rte_errno is set.
2323  */
2324 int
2325 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
2326 			    uint64_t item_flags, bool ext_vlan_sup,
2327 			    struct rte_flow_error *error)
2328 {
2329 	const struct rte_flow_item_eth *mask = item->mask;
2330 	const struct rte_flow_item_eth nic_mask = {
2331 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2332 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2333 		.type = RTE_BE16(0xffff),
2334 		.has_vlan = ext_vlan_sup ? 1 : 0,
2335 	};
2336 	int ret;
2337 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2338 	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
2339 				       MLX5_FLOW_LAYER_OUTER_L2;
2340 
2341 	if (item_flags & ethm)
2342 		return rte_flow_error_set(error, ENOTSUP,
2343 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2344 					  "multiple L2 layers not supported");
2345 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
2346 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
2347 		return rte_flow_error_set(error, EINVAL,
2348 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2349 					  "L2 layer should not follow "
2350 					  "L3 layers");
2351 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
2352 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
2353 		return rte_flow_error_set(error, EINVAL,
2354 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2355 					  "L2 layer should not follow VLAN");
2356 	if (item_flags & MLX5_FLOW_LAYER_GTP)
2357 		return rte_flow_error_set(error, EINVAL,
2358 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2359 					  "L2 layer should not follow GTP");
2360 	if (!mask)
2361 		mask = &rte_flow_item_eth_mask;
2362 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2363 					(const uint8_t *)&nic_mask,
2364 					sizeof(struct rte_flow_item_eth),
2365 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2366 	return ret;
2367 }
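
/*
 * Usage sketch (illustrative only): an ETH item matching one destination
 * MAC address, as accepted by mlx5_flow_validate_item_eth(). The address
 * is hypothetical; a NULL mask would fall back to rte_flow_item_eth_mask.
 *
 *	static const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *	};
 *	static const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 */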
2368 
2369 /**
2370  * Validate VLAN item.
2371  *
2372  * @param[in] item
2373  *   Item specification.
2374  * @param[in] item_flags
2375  *   Bit-fields that hold the items detected until now.
2376  * @param[in] dev
2377  *   Ethernet device flow is being created on.
2378  * @param[out] error
2379  *   Pointer to error structure.
2380  *
2381  * @return
2382  *   0 on success, a negative errno value otherwise and rte_errno is set.
2383  */
2384 int
2385 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
2386 			     uint64_t item_flags,
2387 			     struct rte_eth_dev *dev,
2388 			     struct rte_flow_error *error)
2389 {
2390 	const struct rte_flow_item_vlan *spec = item->spec;
2391 	const struct rte_flow_item_vlan *mask = item->mask;
2392 	const struct rte_flow_item_vlan nic_mask = {
2393 		.tci = RTE_BE16(UINT16_MAX),
2394 		.inner_type = RTE_BE16(UINT16_MAX),
2395 	};
2396 	uint16_t vlan_tag = 0;
2397 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2398 	int ret;
2399 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2400 					MLX5_FLOW_LAYER_INNER_L4) :
2401 				       (MLX5_FLOW_LAYER_OUTER_L3 |
2402 					MLX5_FLOW_LAYER_OUTER_L4);
2403 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2404 					MLX5_FLOW_LAYER_OUTER_VLAN;
2405 
2406 	if (item_flags & vlanm)
2407 		return rte_flow_error_set(error, EINVAL,
2408 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2409 					  "multiple VLAN layers not supported");
2410 	else if ((item_flags & l34m) != 0)
2411 		return rte_flow_error_set(error, EINVAL,
2412 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2413 					  "VLAN cannot follow L3/L4 layer");
2414 	if (!mask)
2415 		mask = &rte_flow_item_vlan_mask;
2416 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2417 					(const uint8_t *)&nic_mask,
2418 					sizeof(struct rte_flow_item_vlan),
2419 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2420 	if (ret)
2421 		return ret;
2422 	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2423 		struct mlx5_priv *priv = dev->data->dev_private;
2424 
2425 		if (priv->vmwa_context) {
2426 			/*
2427 			 * Non-NULL context means we have a virtual machine
2428 			 * and SR-IOV enabled, we have to create VLAN interface
2429 			 * to make hypervisor to setup E-Switch vport
2430 			 * context correctly. We avoid creating the multiple
2431 			 * VLAN interfaces, so we cannot support VLAN tag mask.
2432 			 */
2433 			return rte_flow_error_set(error, EINVAL,
2434 						  RTE_FLOW_ERROR_TYPE_ITEM,
2435 						  item,
2436 						  "VLAN tag mask is not"
2437 						  " supported in virtual"
2438 						  " environment");
2439 		}
2440 	}
2441 	if (spec) {
2442 		vlan_tag = spec->tci;
2443 		vlan_tag &= mask->tci;
2444 	}
2445 	/*
2446 	 * From verbs perspective an empty VLAN is equivalent
2447 	 * to a packet without VLAN layer.
2448 	 */
2449 	if (!vlan_tag)
2450 		return rte_flow_error_set(error, EINVAL,
2451 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2452 					  item->spec,
2453 					  "VLAN cannot be empty");
2454 	return 0;
2455 }
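
/*
 * Usage sketch (illustrative only): a VLAN item with a non-zero TCI, which
 * the validation above requires (from the Verbs point of view an empty
 * VLAN is the same as no VLAN layer). VLAN ID 100 is hypothetical; the
 * item must follow ETH and precede any L3/L4 item.
 *
 *	static const struct rte_flow_item_vlan vlan_spec = {
 *		.tci = RTE_BE16(100),
 *	};
 *	static const struct rte_flow_item_vlan vlan_mask = {
 *		.tci = RTE_BE16(0x0fff),
 *	};
 */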
2456 
2457 /**
2458  * Validate IPV4 item.
2459  *
2460  * @param[in] item
2461  *   Item specification.
2462  * @param[in] item_flags
2463  *   Bit-fields that hold the items detected until now.
2464  * @param[in] last_item
2465  *   Previous validated item in the pattern items.
2466  * @param[in] ether_type
2467  *   Type in the ethernet layer header (including dot1q).
2468  * @param[in] acc_mask
2469  *   Acceptable mask, if NULL default internal default mask
2470  *   will be used to check whether item fields are supported.
2471  * @param[in] range_accepted
2472  *   True if range of values is accepted for specific fields, false otherwise.
2473  * @param[out] error
2474  *   Pointer to error structure.
2475  *
2476  * @return
2477  *   0 on success, a negative errno value otherwise and rte_errno is set.
2478  */
2479 int
2480 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2481 			     uint64_t item_flags,
2482 			     uint64_t last_item,
2483 			     uint16_t ether_type,
2484 			     const struct rte_flow_item_ipv4 *acc_mask,
2485 			     bool range_accepted,
2486 			     struct rte_flow_error *error)
2487 {
2488 	const struct rte_flow_item_ipv4 *mask = item->mask;
2489 	const struct rte_flow_item_ipv4 *spec = item->spec;
2490 	const struct rte_flow_item_ipv4 nic_mask = {
2491 		.hdr = {
2492 			.src_addr = RTE_BE32(0xffffffff),
2493 			.dst_addr = RTE_BE32(0xffffffff),
2494 			.type_of_service = 0xff,
2495 			.next_proto_id = 0xff,
2496 		},
2497 	};
2498 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2499 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2500 				      MLX5_FLOW_LAYER_OUTER_L3;
2501 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2502 				      MLX5_FLOW_LAYER_OUTER_L4;
2503 	int ret;
2504 	uint8_t next_proto = 0xFF;
2505 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2506 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2507 				  MLX5_FLOW_LAYER_INNER_VLAN);
2508 
2509 	if ((last_item & l2_vlan) && ether_type &&
2510 	    ether_type != RTE_ETHER_TYPE_IPV4)
2511 		return rte_flow_error_set(error, EINVAL,
2512 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2513 					  "IPv4 cannot follow L2/VLAN layer "
2514 					  "which ether type is not IPv4");
2515 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
2516 		if (mask && spec)
2517 			next_proto = mask->hdr.next_proto_id &
2518 				     spec->hdr.next_proto_id;
2519 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2520 			return rte_flow_error_set(error, EINVAL,
2521 						  RTE_FLOW_ERROR_TYPE_ITEM,
2522 						  item,
2523 						  "multiple tunnel "
2524 						  "not supported");
2525 	}
2526 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2527 		return rte_flow_error_set(error, EINVAL,
2528 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2529 					  "wrong tunnel type - IPv6 specified "
2530 					  "but IPv4 item provided");
2531 	if (item_flags & l3m)
2532 		return rte_flow_error_set(error, ENOTSUP,
2533 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2534 					  "multiple L3 layers not supported");
2535 	else if (item_flags & l4m)
2536 		return rte_flow_error_set(error, EINVAL,
2537 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2538 					  "L3 cannot follow an L4 layer.");
2539 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2540 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2541 		return rte_flow_error_set(error, EINVAL,
2542 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2543 					  "L3 cannot follow an NVGRE layer.");
2544 	if (!mask)
2545 		mask = &rte_flow_item_ipv4_mask;
2546 	else if (mask->hdr.next_proto_id != 0 &&
2547 		 mask->hdr.next_proto_id != 0xff)
2548 		return rte_flow_error_set(error, EINVAL,
2549 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2550 					  "partial mask is not supported"
2551 					  " for protocol");
2552 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2553 					acc_mask ? (const uint8_t *)acc_mask
2554 						 : (const uint8_t *)&nic_mask,
2555 					sizeof(struct rte_flow_item_ipv4),
2556 					range_accepted, error);
2557 	if (ret < 0)
2558 		return ret;
2559 	return 0;
2560 }
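
/*
 * Usage sketch (illustrative only): an IPV4 item matching a destination
 * address and the UDP protocol. The next_proto_id mask must be all-zeros
 * or all-ones; partial protocol masks are rejected above. The address is
 * hypothetical.
 *
 *	static const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = {
 *			.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	static const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = {
 *			.dst_addr = RTE_BE32(0xffffffff),
 *			.next_proto_id = 0xff,
 *		},
 *	};
 */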
2561 
2562 /**
2563  * Validate IPV6 item.
2564  *
2565  * @param[in] item
2566  *   Item specification.
2567  * @param[in] item_flags
2568  *   Bit-fields that hold the items detected until now.
2569  * @param[in] last_item
2570  *   Previous validated item in the pattern items.
2571  * @param[in] ether_type
2572  *   Type in the ethernet layer header (including dot1q).
2573  * @param[in] acc_mask
2574  *   Acceptable mask, if NULL default internal default mask
2575  *   will be used to check whether item fields are supported.
2576  * @param[out] error
2577  *   Pointer to error structure.
2578  *
2579  * @return
2580  *   0 on success, a negative errno value otherwise and rte_errno is set.
2581  */
2582 int
2583 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2584 			     uint64_t item_flags,
2585 			     uint64_t last_item,
2586 			     uint16_t ether_type,
2587 			     const struct rte_flow_item_ipv6 *acc_mask,
2588 			     struct rte_flow_error *error)
2589 {
2590 	const struct rte_flow_item_ipv6 *mask = item->mask;
2591 	const struct rte_flow_item_ipv6 *spec = item->spec;
2592 	const struct rte_flow_item_ipv6 nic_mask = {
2593 		.hdr = {
2594 			.src_addr =
2595 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2596 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2597 			.dst_addr =
2598 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2599 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2600 			.vtc_flow = RTE_BE32(0xffffffff),
2601 			.proto = 0xff,
2602 		},
2603 	};
2604 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2605 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2606 				      MLX5_FLOW_LAYER_OUTER_L3;
2607 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2608 				      MLX5_FLOW_LAYER_OUTER_L4;
2609 	int ret;
2610 	uint8_t next_proto = 0xFF;
2611 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2612 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2613 				  MLX5_FLOW_LAYER_INNER_VLAN);
2614 
2615 	if ((last_item & l2_vlan) && ether_type &&
2616 	    ether_type != RTE_ETHER_TYPE_IPV6)
2617 		return rte_flow_error_set(error, EINVAL,
2618 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2619 					  "IPv6 cannot follow L2/VLAN layer "
2620 					  "which ether type is not IPv6");
2621 	if (mask && mask->hdr.proto == UINT8_MAX && spec)
2622 		next_proto = spec->hdr.proto;
2623 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
2624 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2625 			return rte_flow_error_set(error, EINVAL,
2626 						  RTE_FLOW_ERROR_TYPE_ITEM,
2627 						  item,
2628 						  "multiple tunnel "
2629 						  "not supported");
2630 	}
2631 	if (next_proto == IPPROTO_HOPOPTS  ||
2632 	    next_proto == IPPROTO_ROUTING  ||
2633 	    next_proto == IPPROTO_FRAGMENT ||
2634 	    next_proto == IPPROTO_ESP	   ||
2635 	    next_proto == IPPROTO_AH	   ||
2636 	    next_proto == IPPROTO_DSTOPTS)
2637 		return rte_flow_error_set(error, EINVAL,
2638 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2639 					  "IPv6 proto (next header) should "
2640 					  "not be set as extension header");
2641 	if (item_flags & MLX5_FLOW_LAYER_IPIP)
2642 		return rte_flow_error_set(error, EINVAL,
2643 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2644 					  "wrong tunnel type - IPv4 specified "
2645 					  "but IPv6 item provided");
2646 	if (item_flags & l3m)
2647 		return rte_flow_error_set(error, ENOTSUP,
2648 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2649 					  "multiple L3 layers not supported");
2650 	else if (item_flags & l4m)
2651 		return rte_flow_error_set(error, EINVAL,
2652 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2653 					  "L3 cannot follow an L4 layer.");
2654 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2655 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2656 		return rte_flow_error_set(error, EINVAL,
2657 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2658 					  "L3 cannot follow an NVGRE layer.");
2659 	if (!mask)
2660 		mask = &rte_flow_item_ipv6_mask;
2661 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2662 					acc_mask ? (const uint8_t *)acc_mask
2663 						 : (const uint8_t *)&nic_mask,
2664 					sizeof(struct rte_flow_item_ipv6),
2665 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2666 	if (ret < 0)
2667 		return ret;
2668 	return 0;
2669 }
2670 
2671 /**
2672  * Validate UDP item.
2673  *
2674  * @param[in] item
2675  *   Item specification.
2676  * @param[in] item_flags
2677  *   Bit-fields that hold the items detected until now.
2678  * @param[in] target_protocol
2679  *   The next protocol in the previous item.
2682  * @param[out] error
2683  *   Pointer to error structure.
2684  *
2685  * @return
2686  *   0 on success, a negative errno value otherwise and rte_errno is set.
2687  */
2688 int
2689 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2690 			    uint64_t item_flags,
2691 			    uint8_t target_protocol,
2692 			    struct rte_flow_error *error)
2693 {
2694 	const struct rte_flow_item_udp *mask = item->mask;
2695 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2696 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2697 				      MLX5_FLOW_LAYER_OUTER_L3;
2698 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2699 				      MLX5_FLOW_LAYER_OUTER_L4;
2700 	int ret;
2701 
2702 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2703 		return rte_flow_error_set(error, EINVAL,
2704 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2705 					  "protocol filtering not compatible"
2706 					  " with UDP layer");
2707 	if (!(item_flags & l3m))
2708 		return rte_flow_error_set(error, EINVAL,
2709 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2710 					  "L3 is mandatory to filter on L4");
2711 	if (item_flags & l4m)
2712 		return rte_flow_error_set(error, EINVAL,
2713 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2714 					  "multiple L4 layers not supported");
2715 	if (!mask)
2716 		mask = &rte_flow_item_udp_mask;
2717 	ret = mlx5_flow_item_acceptable
2718 		(item, (const uint8_t *)mask,
2719 		 (const uint8_t *)&rte_flow_item_udp_mask,
2720 		 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2721 		 error);
2722 	if (ret < 0)
2723 		return ret;
2724 	return 0;
2725 }
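
/*
 * Usage sketch (illustrative only): a UDP item matching destination port
 * 4789 (the IANA VXLAN port). As validated above, an L3 item must already
 * be present in the pattern and no other L4 item may precede this one.
 *
 *	static const struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	static const struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 */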
2726 
2727 /**
2728  * Validate TCP item.
2729  *
2730  * @param[in] item
2731  *   Item specification.
2732  * @param[in] item_flags
2733  *   Bit-fields that hold the items detected until now.
2734  * @param[in] target_protocol
2735  *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2736  * @param[out] error
2737  *   Pointer to error structure.
2738  *
2739  * @return
2740  *   0 on success, a negative errno value otherwise and rte_errno is set.
2741  */
2742 int
2743 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2744 			    uint64_t item_flags,
2745 			    uint8_t target_protocol,
2746 			    const struct rte_flow_item_tcp *flow_mask,
2747 			    struct rte_flow_error *error)
2748 {
2749 	const struct rte_flow_item_tcp *mask = item->mask;
2750 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2751 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2752 				      MLX5_FLOW_LAYER_OUTER_L3;
2753 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2754 				      MLX5_FLOW_LAYER_OUTER_L4;
2755 	int ret;
2756 
2757 	MLX5_ASSERT(flow_mask);
2758 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2759 		return rte_flow_error_set(error, EINVAL,
2760 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2761 					  "protocol filtering not compatible"
2762 					  " with TCP layer");
2763 	if (!(item_flags & l3m))
2764 		return rte_flow_error_set(error, EINVAL,
2765 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2766 					  "L3 is mandatory to filter on L4");
2767 	if (item_flags & l4m)
2768 		return rte_flow_error_set(error, EINVAL,
2769 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2770 					  "multiple L4 layers not supported");
2771 	if (!mask)
2772 		mask = &rte_flow_item_tcp_mask;
2773 	ret = mlx5_flow_item_acceptable
2774 		(item, (const uint8_t *)mask,
2775 		 (const uint8_t *)flow_mask,
2776 		 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2777 		 error);
2778 	if (ret < 0)
2779 		return ret;
2780 	return 0;
2781 }
2782 
2783 /**
2784  * Validate VXLAN item.
2785  *
2786  * @param[in] dev
2787  *   Pointer to the Ethernet device structure.
2788  * @param[in] udp_dport
2789  *   UDP destination port.
2790  * @param[in] item
2791  *   Item specification.
2792  * @param[in] item_flags
2793  *   Bit-fields that hold the items detected until now.
2794  * @param[in] attr
2795  *   Flow rule attributes.
2796  * @param[out] error
2797  *   Pointer to error structure.
2798  *
2799  * @return
2800  *   0 on success, a negative errno value otherwise and rte_errno is set.
2801  */
2802 int
2803 mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
2804 			      uint16_t udp_dport,
2805 			      const struct rte_flow_item *item,
2806 			      uint64_t item_flags,
2807 			      const struct rte_flow_attr *attr,
2808 			      struct rte_flow_error *error)
2809 {
2810 	const struct rte_flow_item_vxlan *spec = item->spec;
2811 	const struct rte_flow_item_vxlan *mask = item->mask;
2812 	int ret;
2813 	struct mlx5_priv *priv = dev->data->dev_private;
2814 	union vni {
2815 		uint32_t vlan_id;
2816 		uint8_t vni[4];
2817 	} id = { .vlan_id = 0, };
2818 	const struct rte_flow_item_vxlan nic_mask = {
2819 		.vni = "\xff\xff\xff",
2820 		.rsvd1 = 0xff,
2821 	};
2822 	const struct rte_flow_item_vxlan *valid_mask;
2823 
2824 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2825 		return rte_flow_error_set(error, ENOTSUP,
2826 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2827 					  "multiple tunnel layers not"
2828 					  " supported");
2829 	valid_mask = &rte_flow_item_vxlan_mask;
2830 	/*
2831 	 * Verify only UDPv4 is present as defined in
2832 	 * https://tools.ietf.org/html/rfc7348
2833 	 */
2834 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2835 		return rte_flow_error_set(error, EINVAL,
2836 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2837 					  "no outer UDP layer found");
2838 	if (!mask)
2839 		mask = &rte_flow_item_vxlan_mask;
2840 
2841 	if (priv->sh->steering_format_version !=
2842 	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
2843 	    !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
2844 		/* FDB domain & NIC domain non-zero group */
2845 		if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
2846 			valid_mask = &nic_mask;
2847 		/* Group zero in NIC domain */
2848 		if (!attr->group && !attr->transfer &&
2849 		    priv->sh->tunnel_header_0_1)
2850 			valid_mask = &nic_mask;
2851 	}
2852 	ret = mlx5_flow_item_acceptable
2853 		(item, (const uint8_t *)mask,
2854 		 (const uint8_t *)valid_mask,
2855 		 sizeof(struct rte_flow_item_vxlan),
2856 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2857 	if (ret < 0)
2858 		return ret;
2859 	if (spec) {
2860 		memcpy(&id.vni[1], spec->vni, 3);
2861 		memcpy(&id.vni[1], mask->vni, 3);
2862 	}
2863 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2864 		return rte_flow_error_set(error, ENOTSUP,
2865 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2866 					  "VXLAN tunnel must be fully defined");
2867 	return 0;
2868 }
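
/*
 * Usage sketch (illustrative only): a fully defined VXLAN pattern as the
 * check above demands (outer L2, L3 and UDP all present). The VNI is
 * hypothetical; udp_spec/udp_mask are assumed to be defined as in the
 * earlier UDP sketch.
 *
 *	static const struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = "\x12\x34\x56",
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &rte_flow_item_vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */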
2869 
2870 /**
2871  * Validate VXLAN_GPE item.
2872  *
2873  * @param[in] item
2874  *   Item specification.
2875  * @param[in] item_flags
2876  *   Bit-fields that hold the items detected until now.
2877  * @param[in] dev
2878  *   Pointer to the Ethernet device structure.
2881  * @param[out] error
2882  *   Pointer to error structure.
2883  *
2884  * @return
2885  *   0 on success, a negative errno value otherwise and rte_errno is set.
2886  */
2887 int
2888 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2889 				  uint64_t item_flags,
2890 				  struct rte_eth_dev *dev,
2891 				  struct rte_flow_error *error)
2892 {
2893 	struct mlx5_priv *priv = dev->data->dev_private;
2894 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2895 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2896 	int ret;
2897 	union vni {
2898 		uint32_t vlan_id;
2899 		uint8_t vni[4];
2900 	} id = { .vlan_id = 0, };
2901 
2902 	if (!priv->sh->config.l3_vxlan_en)
2903 		return rte_flow_error_set(error, ENOTSUP,
2904 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2905 					  "L3 VXLAN is not enabled by device"
2906 					  " parameter and/or not configured in"
2907 					  " firmware");
2908 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2909 		return rte_flow_error_set(error, ENOTSUP,
2910 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2911 					  "multiple tunnel layers not"
2912 					  " supported");
2913 	/*
2914 	 * Verify only UDPv4 is present as defined in the VXLAN-GPE
2915 	 * specification (draft-ietf-nvo3-vxlan-gpe).
2916 	 */
2917 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2918 		return rte_flow_error_set(error, EINVAL,
2919 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2920 					  "no outer UDP layer found");
2921 	if (!mask)
2922 		mask = &rte_flow_item_vxlan_gpe_mask;
2923 	ret = mlx5_flow_item_acceptable
2924 		(item, (const uint8_t *)mask,
2925 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2926 		 sizeof(struct rte_flow_item_vxlan_gpe),
2927 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2928 	if (ret < 0)
2929 		return ret;
2930 	if (spec) {
2931 		if (spec->protocol)
2932 			return rte_flow_error_set(error, ENOTSUP,
2933 						  RTE_FLOW_ERROR_TYPE_ITEM,
2934 						  item,
2935 						  "VxLAN-GPE protocol"
2936 						  " not supported");
2937 		memcpy(&id.vni[1], spec->vni, 3);
2938 		memcpy(&id.vni[1], mask->vni, 3);
2939 	}
2940 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2941 		return rte_flow_error_set(error, ENOTSUP,
2942 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2943 					  "VXLAN-GPE tunnel must be fully"
2944 					  " defined");
2945 	return 0;
2946 }

2947 /**
2948  * Validate GRE Key item.
2949  *
2950  * @param[in] item
2951  *   Item specification.
2952  * @param[in] item_flags
2953  *   Bit flags to mark detected items.
2954  * @param[in] gre_item
2955  *   Pointer to gre_item
2956  * @param[out] error
2957  *   Pointer to error structure.
2958  *
2959  * @return
2960  *   0 on success, a negative errno value otherwise and rte_errno is set.
2961  */
2962 int
2963 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2964 				uint64_t item_flags,
2965 				const struct rte_flow_item *gre_item,
2966 				struct rte_flow_error *error)
2967 {
2968 	const rte_be32_t *mask = item->mask;
2969 	int ret = 0;
2970 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2971 	const struct rte_flow_item_gre *gre_spec;
2972 	const struct rte_flow_item_gre *gre_mask;
2973 
2974 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2975 		return rte_flow_error_set(error, ENOTSUP,
2976 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2977 					  "Multiple GRE keys not supported");
2978 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2979 		return rte_flow_error_set(error, ENOTSUP,
2980 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2981 					  "No preceding GRE header");
2982 	if (item_flags & MLX5_FLOW_LAYER_INNER)
2983 		return rte_flow_error_set(error, ENOTSUP,
2984 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2985 					  "GRE key following a wrong item");
2986 	gre_mask = gre_item->mask;
2987 	if (!gre_mask)
2988 		gre_mask = &rte_flow_item_gre_mask;
2989 	gre_spec = gre_item->spec;
2990 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2991 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2992 		return rte_flow_error_set(error, EINVAL,
2993 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2994 					  "Key bit must be on");
2995 
2996 	if (!mask)
2997 		mask = &gre_key_default_mask;
2998 	ret = mlx5_flow_item_acceptable
2999 		(item, (const uint8_t *)mask,
3000 		 (const uint8_t *)&gre_key_default_mask,
3001 		 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3002 	return ret;
3003 }
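
/*
 * Usage sketch (illustrative only): matching on a GRE key. When the GRE
 * item carries a spec, its K bit (0x2000 in c_rsvd0_ver) must be set, as
 * enforced above; the GRE_KEY item spec/mask are plain rte_be32_t values
 * (a NULL mask falls back to the full 32-bit mask). The key value is
 * hypothetical.
 *
 *	static const struct rte_flow_item_gre gre_spec = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),
 *	};
 *	static const struct rte_flow_item_gre gre_mask = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),
 *	};
 *	static const rte_be32_t gre_key = RTE_BE32(0xdeadbeef);
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_GRE,
 *		  .spec = &gre_spec, .mask = &gre_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &gre_key },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */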
3004 
3005 /**
3006  * Validate GRE optional item.
3007  *
3008  * @param[in] dev
3009  *   Pointer to the Ethernet device structure.
3010  * @param[in] item
3011  *   Item specification.
3012  * @param[in] item_flags
3013  *   Bit flags to mark detected items.
3014  * @param[in] attr
3015  *   Flow rule attributes.
3016  * @param[in] gre_item
3017  *   Pointer to gre_item
3018  * @param[out] error
3019  *   Pointer to error structure.
3020  *
3021  * @return
3022  *   0 on success, a negative errno value otherwise and rte_errno is set.
3023  */
3024 int
3025 mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
3026 				   const struct rte_flow_item *item,
3027 				   uint64_t item_flags,
3028 				   const struct rte_flow_attr *attr,
3029 				   const struct rte_flow_item *gre_item,
3030 				   struct rte_flow_error *error)
3031 {
3032 	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
3033 	const struct rte_flow_item_gre *gre_mask = gre_item->mask;
3034 	const struct rte_flow_item_gre_opt *spec = item->spec;
3035 	const struct rte_flow_item_gre_opt *mask = item->mask;
3036 	struct mlx5_priv *priv = dev->data->dev_private;
3037 	int ret = 0;
3038 	struct rte_flow_item_gre_opt nic_mask = {
3039 		.checksum_rsvd = {
3040 			.checksum = RTE_BE16(UINT16_MAX),
3041 			.reserved1 = 0x0,
3042 		},
3043 		.key = {
3044 			.key = RTE_BE32(UINT32_MAX),
3045 		},
3046 		.sequence = {
3047 			.sequence = RTE_BE32(UINT32_MAX),
3048 		},
3049 	};
3050 
3051 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
3052 		return rte_flow_error_set(error, ENOTSUP,
3053 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3054 					  "No preceding GRE header");
3055 	if (item_flags & MLX5_FLOW_LAYER_INNER)
3056 		return rte_flow_error_set(error, ENOTSUP,
3057 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3058 					  "GRE option following a wrong item");
3059 	if (!spec || !mask)
3060 		return rte_flow_error_set(error, EINVAL,
3061 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3062 					  "At least one gre_option field (checksum/key/sequence) must be specified");
3063 	if (!gre_mask)
3064 		gre_mask = &rte_flow_item_gre_mask;
3065 	if (mask->checksum_rsvd.checksum)
3066 		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) &&
3067 				 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000)))
3068 			return rte_flow_error_set(error, EINVAL,
3069 						  RTE_FLOW_ERROR_TYPE_ITEM,
3070 						  item,
3071 						  "Checksum bit must be on");
3072 	if (mask->key.key)
3073 		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
3074 				 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
3075 			return rte_flow_error_set(error, EINVAL,
3076 						  RTE_FLOW_ERROR_TYPE_ITEM,
3077 						  item, "Key bit must be on");
3078 	if (mask->sequence.sequence)
3079 		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) &&
3080 				 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000)))
3081 			return rte_flow_error_set(error, EINVAL,
3082 						  RTE_FLOW_ERROR_TYPE_ITEM,
3083 						  item,
3084 						  "Sequence bit must be on");
3085 	if (mask->checksum_rsvd.checksum || mask->sequence.sequence) {
3086 		if (priv->sh->steering_format_version ==
3087 		    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
3088 		    ((attr->group || attr->transfer) &&
3089 		     !priv->sh->misc5_cap) ||
3090 		    (!(priv->sh->tunnel_header_0_1 &&
3091 		       priv->sh->tunnel_header_2_3) &&
3092 		    !attr->group && !attr->transfer))
3093 			return rte_flow_error_set(error, EINVAL,
3094 						  RTE_FLOW_ERROR_TYPE_ITEM,
3095 						  item,
3096 						  "Checksum/Sequence not supported");
3097 	}
3098 	ret = mlx5_flow_item_acceptable
3099 		(item, (const uint8_t *)mask,
3100 		 (const uint8_t *)&nic_mask,
3101 		 sizeof(struct rte_flow_item_gre_opt),
3102 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3103 	return ret;
3104 }
3105 
3106 /**
3107  * Validate GRE item.
3108  *
3109  * @param[in] item
3110  *   Item specification.
3111  * @param[in] item_flags
3112  *   Bit flags to mark detected items.
3113  * @param[in] target_protocol
3114  *   The next protocol in the previous item.
3115  * @param[out] error
3116  *   Pointer to error structure.
3117  *
3118  * @return
3119  *   0 on success, a negative errno value otherwise and rte_errno is set.
3120  */
3121 int
3122 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
3123 			    uint64_t item_flags,
3124 			    uint8_t target_protocol,
3125 			    struct rte_flow_error *error)
3126 {
3127 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
3128 	const struct rte_flow_item_gre *mask = item->mask;
3129 	int ret;
3130 	const struct rte_flow_item_gre nic_mask = {
3131 		.c_rsvd0_ver = RTE_BE16(0xB000),
3132 		.protocol = RTE_BE16(UINT16_MAX),
3133 	};
3134 
3135 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
3136 		return rte_flow_error_set(error, EINVAL,
3137 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3138 					  "protocol filtering not compatible"
3139 					  " with this GRE layer");
3140 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3141 		return rte_flow_error_set(error, ENOTSUP,
3142 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3143 					  "multiple tunnel layers not"
3144 					  " supported");
3145 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3146 		return rte_flow_error_set(error, ENOTSUP,
3147 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3148 					  "L3 Layer is missing");
3149 	if (!mask)
3150 		mask = &rte_flow_item_gre_mask;
3151 	ret = mlx5_flow_item_acceptable
3152 		(item, (const uint8_t *)mask,
3153 		 (const uint8_t *)&nic_mask,
3154 		 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
3155 		 error);
3156 	if (ret < 0)
3157 		return ret;
3158 #ifndef HAVE_MLX5DV_DR
3159 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
3160 	if (spec && (spec->protocol & mask->protocol))
3161 		return rte_flow_error_set(error, ENOTSUP,
3162 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3163 					  "without MPLS support the"
3164 					  " specification cannot be used for"
3165 					  " filtering");
3166 #endif
3167 #endif
3168 	return 0;
3169 }
3170 
3171 /**
3172  * Validate Geneve item.
3173  *
3174  * @param[in] item
3175  *   Item specification.
3176  * @param[in] item_flags
3177  *   Bit-fields that hold the items detected until now.
3178  * @param[in] dev
3179  *   Pointer to the Ethernet device structure.
3180  * @param[out] error
3181  *   Pointer to error structure.
3182  *
3183  * @return
3184  *   0 on success, a negative errno value otherwise and rte_errno is set.
3185  */
3186 
3187 int
3188 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
3189 			       uint64_t item_flags,
3190 			       struct rte_eth_dev *dev,
3191 			       struct rte_flow_error *error)
3192 {
3193 	struct mlx5_priv *priv = dev->data->dev_private;
3194 	const struct rte_flow_item_geneve *spec = item->spec;
3195 	const struct rte_flow_item_geneve *mask = item->mask;
3196 	int ret;
3197 	uint16_t gbhdr;
3198 	uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
3199 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
3200 	const struct rte_flow_item_geneve nic_mask = {
3201 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
3202 		.vni = "\xff\xff\xff",
3203 		.protocol = RTE_BE16(UINT16_MAX),
3204 	};
3205 
3206 	if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
3207 		return rte_flow_error_set(error, ENOTSUP,
3208 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3209 					  "L3 Geneve is not enabled by device"
3210 					  " parameter and/or not configured in"
3211 					  " firmware");
3212 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3213 		return rte_flow_error_set(error, ENOTSUP,
3214 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3215 					  "multiple tunnel layers not"
3216 					  " supported");
3217 	/*
3218 	 * Verify only UDPv4 is present as defined in
3219 	 * the Geneve specification (RFC 8926).
3220 	 */
3221 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
3222 		return rte_flow_error_set(error, EINVAL,
3223 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3224 					  "no outer UDP layer found");
3225 	if (!mask)
3226 		mask = &rte_flow_item_geneve_mask;
3227 	ret = mlx5_flow_item_acceptable
3228 				  (item, (const uint8_t *)mask,
3229 				   (const uint8_t *)&nic_mask,
3230 				   sizeof(struct rte_flow_item_geneve),
3231 				   MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3232 	if (ret)
3233 		return ret;
3234 	if (spec) {
3235 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
3236 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
3237 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
3238 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
3239 			return rte_flow_error_set(error, ENOTSUP,
3240 						  RTE_FLOW_ERROR_TYPE_ITEM,
3241 						  item,
3242 						  "Geneve protocol unsupported"
3243 						  " fields are being used");
3244 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
3245 			return rte_flow_error_set
3246 					(error, ENOTSUP,
3247 					 RTE_FLOW_ERROR_TYPE_ITEM,
3248 					 item,
3249 					 "Unsupported Geneve options length");
3250 	}
3251 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
3252 		return rte_flow_error_set
3253 				    (error, ENOTSUP,
3254 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
3255 				     "Geneve tunnel must be fully defined");
3256 	return 0;
3257 }
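
/*
 * Usage sketch (illustrative only): a Geneve item accepted by the checks
 * above, with version, OAM/critical bits and reserved fields left at zero
 * and no options. The VNI is hypothetical; the item must be preceded by
 * outer ETH/IPV4/UDP items (Geneve uses UDP destination port 6081).
 *
 *	static const struct rte_flow_item_geneve geneve_spec = {
 *		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0),
 *		.vni = "\xab\xcd\xef",
 *		.protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 */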
3258 
3259 /**
3260  * Validate Geneve TLV option item.
3261  *
3262  * @param[in] item
3263  *   Item specification.
3264  * @param[in] last_item
3265  *   Previous validated item in the pattern items.
3266  * @param[in] geneve_item
3267  *   Previous GENEVE item specification.
3268  * @param[in] dev
3269  *   Pointer to the rte_eth_dev structure.
3270  * @param[out] error
3271  *   Pointer to error structure.
3272  *
3273  * @return
3274  *   0 on success, a negative errno value otherwise and rte_errno is set.
3275  */
3276 int
3277 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
3278 				   uint64_t last_item,
3279 				   const struct rte_flow_item *geneve_item,
3280 				   struct rte_eth_dev *dev,
3281 				   struct rte_flow_error *error)
3282 {
3283 	struct mlx5_priv *priv = dev->data->dev_private;
3284 	struct mlx5_dev_ctx_shared *sh = priv->sh;
3285 	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
3286 	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
3287 	uint8_t data_max_supported =
3288 			hca_attr->max_geneve_tlv_option_data_len * 4;
3289 	const struct rte_flow_item_geneve *geneve_spec;
3290 	const struct rte_flow_item_geneve *geneve_mask;
3291 	const struct rte_flow_item_geneve_opt *spec = item->spec;
3292 	const struct rte_flow_item_geneve_opt *mask = item->mask;
3293 	unsigned int i;
3294 	unsigned int data_len;
3295 	uint8_t tlv_option_len;
3296 	uint16_t optlen_m, optlen_v;
3297 	const struct rte_flow_item_geneve_opt full_mask = {
3298 		.option_class = RTE_BE16(0xffff),
3299 		.option_type = 0xff,
3300 		.option_len = 0x1f,
3301 	};
3302 
3303 	if (!mask)
3304 		mask = &rte_flow_item_geneve_opt_mask;
3305 	if (!spec)
3306 		return rte_flow_error_set
3307 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3308 			"Geneve TLV opt class/type/length must be specified");
3309 	if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
3310 		return rte_flow_error_set
3311 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3312 			"Geneve TLV opt length exceeds the limit (31)");
3313 	/* Check if class type and length masks are full. */
3314 	if (full_mask.option_class != mask->option_class ||
3315 	    full_mask.option_type != mask->option_type ||
3316 	    full_mask.option_len != (mask->option_len & full_mask.option_len))
3317 		return rte_flow_error_set
3318 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3319 			"Geneve TLV opt class/type/length masks must be full");
3320 	/* Check if length is supported */
3321 	if ((uint32_t)spec->option_len >
3322 			hca_attr->max_geneve_tlv_option_data_len)
3323 		return rte_flow_error_set
3324 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3325 			"Geneve TLV opt length not supported");
3326 	if (hca_attr->max_geneve_tlv_options > 1)
3327 		DRV_LOG(DEBUG,
3328 			"max_geneve_tlv_options supports more than 1 option");
3329 	/* Check GENEVE item preceding. */
3330 	if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
3331 		return rte_flow_error_set
3332 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3333 			"Geneve opt item must be preceded with Geneve item");
3334 	geneve_spec = geneve_item->spec;
3335 	geneve_mask = geneve_item->mask ? geneve_item->mask :
3336 					  &rte_flow_item_geneve_mask;
3337 	/* Check if GENEVE TLV option size doesn't exceed option length */
3338 	if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
3339 			    geneve_spec->ver_opt_len_o_c_rsvd0)) {
3340 		tlv_option_len = spec->option_len & mask->option_len;
3341 		optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
3342 		optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
3343 		optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
3344 		optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
3345 		if ((optlen_v & optlen_m) <= tlv_option_len)
3346 			return rte_flow_error_set
3347 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3348 				 "GENEVE TLV option length exceeds optlen");
3349 	}
3350 	/* Check if length is 0 or data is 0. */
3351 	if (spec->data == NULL || spec->option_len == 0)
3352 		return rte_flow_error_set
3353 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3354 			"Geneve TLV opt with zero data/length not supported");
3355 	/* Check not all data & mask are 0. */
3356 	data_len = spec->option_len * 4;
3357 	if (mask->data == NULL) {
3358 		for (i = 0; i < data_len; i++)
3359 			if (spec->data[i])
3360 				break;
3361 		if (i == data_len)
3362 			return rte_flow_error_set(error, ENOTSUP,
3363 				RTE_FLOW_ERROR_TYPE_ITEM, item,
3364 				"Can't match on Geneve option data 0");
3365 	} else {
3366 		for (i = 0; i < data_len; i++)
3367 			if (spec->data[i] & mask->data[i])
3368 				break;
3369 		if (i == data_len)
3370 			return rte_flow_error_set(error, ENOTSUP,
3371 				RTE_FLOW_ERROR_TYPE_ITEM, item,
3372 				"Can't match on Geneve option data and mask 0");
3373 		/* Check data mask supported. */
3374 		for (i = data_max_supported; i < data_len ; i++)
3375 			if (mask->data[i])
3376 				return rte_flow_error_set(error, ENOTSUP,
3377 					RTE_FLOW_ERROR_TYPE_ITEM, item,
3378 					"Data mask is of unsupported size");
3379 	}
3380 	/* Check GENEVE option is supported in NIC. */
3381 	if (!hca_attr->geneve_tlv_opt)
3382 		return rte_flow_error_set
3383 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3384 			"Geneve TLV opt not supported");
3385 	/* Check if we already have geneve option with different type/class. */
3386 	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
3387 	geneve_opt_resource = sh->geneve_tlv_option_resource;
3388 	if (geneve_opt_resource != NULL)
3389 		if (geneve_opt_resource->option_class != spec->option_class ||
3390 		    geneve_opt_resource->option_type != spec->option_type ||
3391 		    geneve_opt_resource->length != spec->option_len) {
3392 			rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
3393 			return rte_flow_error_set(error, ENOTSUP,
3394 				RTE_FLOW_ERROR_TYPE_ITEM, item,
3395 				"Only one Geneve TLV option supported");
3396 		}
3397 	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
3398 	return 0;
3399 }
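
/*
 * Usage sketch (illustrative, not part of the driver): a pattern that the
 * validation above accepts -- a GENEVE item followed by a single TLV option
 * with full class/type/length masks and a non-zero data match. All values
 * are made up for demonstration.
 */
static uint32_t geneve_opt_data_sketch[1] = { RTE_BE32(0xdeadbeef) };
static uint32_t geneve_opt_data_mask_sketch[1] = { RTE_BE32(0xffffffff) };
static const struct rte_flow_item_geneve_opt geneve_opt_spec_sketch = {
	.option_class = RTE_BE16(0x0102),
	.option_type = 0x07,
	.option_len = 1, /* one 4-byte data word */
	.data = geneve_opt_data_sketch,
};
static const struct rte_flow_item_geneve_opt geneve_opt_mask_sketch = {
	.option_class = RTE_BE16(0xffff),
	.option_type = 0xff,
	.option_len = 0x1f,
	.data = geneve_opt_data_mask_sketch,
};
static const struct rte_flow_item geneve_opt_pattern_sketch[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_GENEVE },
	{ .type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
	  .spec = &geneve_opt_spec_sketch,
	  .mask = &geneve_opt_mask_sketch },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};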
3400 
3401 /**
3402  * Validate MPLS item.
3403  *
3404  * @param[in] dev
3405  *   Pointer to the rte_eth_dev structure.
3406  * @param[in] item
3407  *   Item specification.
3408  * @param[in] item_flags
3409  *   Bit-fields that hold the items detected until now.
3410  * @param[in] prev_layer
3411  *   The protocol layer indicated in previous item.
3412  * @param[out] error
3413  *   Pointer to error structure.
3414  *
3415  * @return
3416  *   0 on success, a negative errno value otherwise and rte_errno is set.
3417  */
3418 int
3419 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
3420 			     const struct rte_flow_item *item __rte_unused,
3421 			     uint64_t item_flags __rte_unused,
3422 			     uint64_t prev_layer __rte_unused,
3423 			     struct rte_flow_error *error)
3424 {
3425 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
3426 	const struct rte_flow_item_mpls *mask = item->mask;
3427 	struct mlx5_priv *priv = dev->data->dev_private;
3428 	int ret;
3429 
3430 	if (!priv->sh->dev_cap.mpls_en)
3431 		return rte_flow_error_set(error, ENOTSUP,
3432 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3433 					  "MPLS not supported or"
3434 					  " disabled in firmware"
3435 					  " configuration.");
3436 	/* MPLS over UDP or GRE (with optional GRE key) is allowed. */
3437 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
3438 			    MLX5_FLOW_LAYER_GRE |
3439 			    MLX5_FLOW_LAYER_GRE_KEY)))
3440 		return rte_flow_error_set(error, EINVAL,
3441 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3442 					  "protocol filtering not compatible"
3443 					  " with MPLS layer");
3444 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
3445 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3446 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
3447 		return rte_flow_error_set(error, ENOTSUP,
3448 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3449 					  "multiple tunnel layers not"
3450 					  " supported");
3451 	if (!mask)
3452 		mask = &rte_flow_item_mpls_mask;
3453 	ret = mlx5_flow_item_acceptable
3454 		(item, (const uint8_t *)mask,
3455 		 (const uint8_t *)&rte_flow_item_mpls_mask,
3456 		 sizeof(struct rte_flow_item_mpls),
3457 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3458 	if (ret < 0)
3459 		return ret;
3460 	return 0;
3461 #else
3462 	return rte_flow_error_set(error, ENOTSUP,
3463 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
3464 				  "MPLS is not supported by Verbs, please"
3465 				  " update.");
3466 #endif
3467 }
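
/*
 * Usage sketch (illustrative, not part of the driver): an MPLS-over-GRE
 * pattern that satisfies the prev_layer check above. The label value is
 * made up; label_tc_s packs Label:20 | TC:3 | S:1.
 */
static const struct rte_flow_item_mpls mpls_spec_sketch = {
	.label_tc_s = "\x00\x01\x01", /* label 16, TC 0, S (bottom of stack) 1 */
};
static const struct rte_flow_item mpls_over_gre_pattern_sketch[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_GRE },
	{ .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec_sketch },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};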
3468 
3469 /**
3470  * Validate NVGRE item.
3471  *
3472  * @param[in] item
3473  *   Item specification.
3474  * @param[in] item_flags
3475  *   Bit flags to mark detected items.
3476  * @param[in] target_protocol
3477  *   The next protocol in the previous item.
3478  * @param[out] error
3479  *   Pointer to error structure.
3480  *
3481  * @return
3482  *   0 on success, a negative errno value otherwise and rte_errno is set.
3483  */
3484 int
3485 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
3486 			      uint64_t item_flags,
3487 			      uint8_t target_protocol,
3488 			      struct rte_flow_error *error)
3489 {
3490 	const struct rte_flow_item_nvgre *mask = item->mask;
3491 	int ret;
3492 
3493 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
3494 		return rte_flow_error_set(error, EINVAL,
3495 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3496 					  "protocol filtering not compatible"
3497 					  " with this GRE layer");
3498 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3499 		return rte_flow_error_set(error, ENOTSUP,
3500 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3501 					  "multiple tunnel layers not"
3502 					  " supported");
3503 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3504 		return rte_flow_error_set(error, ENOTSUP,
3505 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3506 					  "L3 Layer is missing");
3507 	if (!mask)
3508 		mask = &rte_flow_item_nvgre_mask;
3509 	ret = mlx5_flow_item_acceptable
3510 		(item, (const uint8_t *)mask,
3511 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
3512 		 sizeof(struct rte_flow_item_nvgre),
3513 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3514 	if (ret < 0)
3515 		return ret;
3516 	return 0;
3517 }
3518 
3519 /**
3520  * Validate eCPRI item.
3521  *
3522  * @param[in] item
3523  *   Item specification.
3524  * @param[in] item_flags
3525  *   Bit-fields that hold the items detected until now.
3526  * @param[in] last_item
3527  *   Previous validated item in the pattern items.
3528  * @param[in] ether_type
3529  *   Type in the ethernet layer header (including dot1q).
3530  * @param[in] acc_mask
3531  *   Acceptable mask. If NULL, the default internal mask
3532  *   will be used to check whether item fields are supported.
3533  * @param[out] error
3534  *   Pointer to error structure.
3535  *
3536  * @return
3537  *   0 on success, a negative errno value otherwise and rte_errno is set.
3538  */
3539 int
3540 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
3541 			      uint64_t item_flags,
3542 			      uint64_t last_item,
3543 			      uint16_t ether_type,
3544 			      const struct rte_flow_item_ecpri *acc_mask,
3545 			      struct rte_flow_error *error)
3546 {
3547 	const struct rte_flow_item_ecpri *mask = item->mask;
3548 	const struct rte_flow_item_ecpri nic_mask = {
3549 		.hdr = {
3550 			.common = {
3551 				.u32 =
3552 				RTE_BE32(((const struct rte_ecpri_common_hdr) {
3553 					.type = 0xFF,
3554 					}).u32),
3555 			},
3556 			.dummy[0] = 0xFFFFFFFF,
3557 		},
3558 	};
3559 	const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
3560 					MLX5_FLOW_LAYER_OUTER_VLAN);
3561 	struct rte_flow_item_ecpri mask_lo;
3562 
3563 	if (!(last_item & outer_l2_vlan) &&
3564 	    last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
3565 		return rte_flow_error_set(error, EINVAL,
3566 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3567 					  "eCPRI can only follow L2/VLAN layer or UDP layer");
3568 	if ((last_item & outer_l2_vlan) && ether_type &&
3569 	    ether_type != RTE_ETHER_TYPE_ECPRI)
3570 		return rte_flow_error_set(error, EINVAL,
3571 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3572 					  "eCPRI cannot follow L2/VLAN layer whose ether type is not 0xAEFE");
3573 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3574 		return rte_flow_error_set(error, EINVAL,
3575 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3576 					  "eCPRI with tunnel is not supported right now");
3577 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
3578 		return rte_flow_error_set(error, ENOTSUP,
3579 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3580 					  "multiple L3 layers not supported");
3581 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
3582 		return rte_flow_error_set(error, EINVAL,
3583 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3584 					  "eCPRI cannot coexist with a TCP layer");
3585 	/* Per the specification, eCPRI can also be over a UDP layer. */
3586 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
3587 		return rte_flow_error_set(error, EINVAL,
3588 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3589 					  "eCPRI over UDP layer is not supported right now");
3590 	/* Mask for type field in common header could be zero. */
3591 	if (!mask)
3592 		mask = &rte_flow_item_ecpri_mask;
3593 	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
3594 	/* Input mask is in big-endian format. */
3595 	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
3596 		return rte_flow_error_set(error, EINVAL,
3597 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3598 					  "partial mask is not supported for protocol");
3599 	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
3600 		return rte_flow_error_set(error, EINVAL,
3601 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3602 					  "message header mask must be after a type mask");
3603 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
3604 					 acc_mask ? (const uint8_t *)acc_mask
3605 						  : (const uint8_t *)&nic_mask,
3606 					 sizeof(struct rte_flow_item_ecpri),
3607 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3608 }
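
/*
 * Usage sketch (illustrative, not part of the driver): an eCPRI item over
 * Ethernet that passes the checks above -- the message-type mask is full
 * (0xFF), as required, and the preceding L2 item's ether type would have
 * to be 0xAEFE. Values are made up for demonstration.
 */
static const struct rte_flow_item_ecpri ecpri_spec_sketch = {
	.hdr = {
		.common = {
			.u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
				.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
			}).u32),
		},
	},
};
static const struct rte_flow_item_ecpri ecpri_mask_sketch = {
	.hdr = {
		.common = {
			.u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
				.type = 0xFF,
			}).u32),
		},
	},
};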
3609 
3610 static int
3611 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
3612 		   const struct rte_flow_attr *attr __rte_unused,
3613 		   const struct rte_flow_item items[] __rte_unused,
3614 		   const struct rte_flow_action actions[] __rte_unused,
3615 		   bool external __rte_unused,
3616 		   int hairpin __rte_unused,
3617 		   struct rte_flow_error *error)
3618 {
3619 	return rte_flow_error_set(error, ENOTSUP,
3620 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3621 }
3622 
3623 static struct mlx5_flow *
3624 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
3625 		  const struct rte_flow_attr *attr __rte_unused,
3626 		  const struct rte_flow_item items[] __rte_unused,
3627 		  const struct rte_flow_action actions[] __rte_unused,
3628 		  struct rte_flow_error *error)
3629 {
3630 	rte_flow_error_set(error, ENOTSUP,
3631 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3632 	return NULL;
3633 }
3634 
3635 static int
3636 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
3637 		    struct mlx5_flow *dev_flow __rte_unused,
3638 		    const struct rte_flow_attr *attr __rte_unused,
3639 		    const struct rte_flow_item items[] __rte_unused,
3640 		    const struct rte_flow_action actions[] __rte_unused,
3641 		    struct rte_flow_error *error)
3642 {
3643 	return rte_flow_error_set(error, ENOTSUP,
3644 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3645 }
3646 
3647 static int
3648 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3649 		struct rte_flow *flow __rte_unused,
3650 		struct rte_flow_error *error)
3651 {
3652 	return rte_flow_error_set(error, ENOTSUP,
3653 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3654 }
3655 
3656 static void
3657 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3658 		 struct rte_flow *flow __rte_unused)
3659 {
3660 }
3661 
3662 static void
3663 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3664 		  struct rte_flow *flow __rte_unused)
3665 {
3666 }
3667 
3668 static int
3669 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3670 		struct rte_flow *flow __rte_unused,
3671 		const struct rte_flow_action *actions __rte_unused,
3672 		void *data __rte_unused,
3673 		struct rte_flow_error *error)
3674 {
3675 	return rte_flow_error_set(error, ENOTSUP,
3676 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3677 }
3678 
3679 static int
3680 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3681 		      uint32_t domains __rte_unused,
3682 		      uint32_t flags __rte_unused)
3683 {
3684 	return 0;
3685 }
3686 
3687 /* Void driver to protect from null pointer reference. */
3688 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3689 	.validate = flow_null_validate,
3690 	.prepare = flow_null_prepare,
3691 	.translate = flow_null_translate,
3692 	.apply = flow_null_apply,
3693 	.remove = flow_null_remove,
3694 	.destroy = flow_null_destroy,
3695 	.query = flow_null_query,
3696 	.sync_domain = flow_null_sync_domain,
3697 };
3698 
3699 /**
3700  * Select flow driver type according to flow attributes and device
3701  * configuration.
3702  *
3703  * @param[in] dev
3704  *   Pointer to the dev structure.
3705  * @param[in] attr
3706  *   Pointer to the flow attributes.
3707  *
3708  * @return
3709  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
3710  */
3711 static enum mlx5_flow_drv_type
3712 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3713 {
3714 	struct mlx5_priv *priv = dev->data->dev_private;
3715 	/* The OS layer may determine a specific flow type (DV, VERBS) first. */
3716 	enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3717 
3718 	if (type != MLX5_FLOW_TYPE_MAX)
3719 		return type;
3720 	/*
3721 	 * Currently when dv_flow_en == 2, only HW steering engine is
3722 	 * supported. New engines can also be chosen here if ready.
3723 	 */
3724 	if (priv->sh->config.dv_flow_en == 2)
3725 		return MLX5_FLOW_TYPE_HW;
3726 	/* If no OS specific type - continue with DV/VERBS selection */
3727 	if (attr->transfer && priv->sh->config.dv_esw_en)
3728 		type = MLX5_FLOW_TYPE_DV;
3729 	if (!attr->transfer)
3730 		type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3731 						     MLX5_FLOW_TYPE_VERBS;
3732 	return type;
3733 }
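
/*
 * Illustrative summary (not part of the driver) of the selection above,
 * assuming mlx5_flow_os_get_type() did not already force a type:
 *
 *   dv_flow_en == 2                 -> MLX5_FLOW_TYPE_HW
 *   attr->transfer && dv_esw_en     -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && dv_flow_en   -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && !dv_flow_en  -> MLX5_FLOW_TYPE_VERBS
 */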
3734 
3735 #define flow_get_drv_ops(type) flow_drv_ops[type]
3736 
3737 /**
3738  * Flow driver validation API. This abstracts calling driver specific functions.
3739  * The type of flow driver is determined according to flow attributes.
3740  *
3741  * @param[in] dev
3742  *   Pointer to the dev structure.
3743  * @param[in] attr
3744  *   Pointer to the flow attributes.
3745  * @param[in] items
3746  *   Pointer to the list of items.
3747  * @param[in] actions
3748  *   Pointer to the list of actions.
3749  * @param[in] external
3750  *   Indicates the flow rule was created by a request external to the PMD.
3751  * @param[in] hairpin
3752  *   Number of hairpin TX actions, 0 means classic flow.
3753  * @param[out] error
3754  *   Pointer to the error structure.
3755  *
3756  * @return
3757  *   0 on success, a negative errno value otherwise and rte_errno is set.
3758  */
3759 static inline int
3760 flow_drv_validate(struct rte_eth_dev *dev,
3761 		  const struct rte_flow_attr *attr,
3762 		  const struct rte_flow_item items[],
3763 		  const struct rte_flow_action actions[],
3764 		  bool external, int hairpin, struct rte_flow_error *error)
3765 {
3766 	const struct mlx5_flow_driver_ops *fops;
3767 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3768 
3769 	fops = flow_get_drv_ops(type);
3770 	return fops->validate(dev, attr, items, actions, external,
3771 			      hairpin, error);
3772 }
3773 
3774 /**
3775  * Flow driver preparation API. This abstracts calling driver specific
3776  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3777  * calculates the size of memory required for device flow, allocates the memory,
3778  * initializes the device flow and returns the pointer.
3779  *
3780  * @note
3781  *   This function initializes the device flow structure, such as dv or verbs
3782  *   in struct mlx5_flow. However, it is the caller's responsibility to
3783  *   initialize the rest. For example, adding the returned device flow to the
3784  *   flow->dev_flow list and setting the backward reference to the flow should
3785  *   be done outside of this function. The layers field is not filled either.
3786  *
3787  * @param[in] dev
3788  *   Pointer to the dev structure.
3789  * @param[in] attr
3790  *   Pointer to the flow attributes.
3791  * @param[in] items
3792  *   Pointer to the list of items.
3793  * @param[in] actions
3794  *   Pointer to the list of actions.
3795  * @param[in] flow_idx
3796  *   Memory pool index of this flow.
3797  * @param[out] error
3798  *   Pointer to the error structure.
3799  *
3800  * @return
3801  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3802  */
3803 static inline struct mlx5_flow *
3804 flow_drv_prepare(struct rte_eth_dev *dev,
3805 		 const struct rte_flow *flow,
3806 		 const struct rte_flow_attr *attr,
3807 		 const struct rte_flow_item items[],
3808 		 const struct rte_flow_action actions[],
3809 		 uint32_t flow_idx,
3810 		 struct rte_flow_error *error)
3811 {
3812 	const struct mlx5_flow_driver_ops *fops;
3813 	enum mlx5_flow_drv_type type = flow->drv_type;
3814 	struct mlx5_flow *mlx5_flow = NULL;
3815 
3816 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3817 	fops = flow_get_drv_ops(type);
3818 	mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3819 	if (mlx5_flow)
3820 		mlx5_flow->flow_idx = flow_idx;
3821 	return mlx5_flow;
3822 }
3823 
3824 /**
3825  * Flow driver translation API. This abstracts calling driver specific
3826  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3827  * translates a generic flow into a driver flow. flow_drv_prepare() must
3828  * precede.
3829  *
3830  * @note
3831  *   dev_flow->layers could be filled as a result of parsing during translation
3832  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3833  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3834  *   flow->actions could be overwritten even though all the expanded dev_flows
3835  *   have the same actions.
3836  *
3837  * @param[in] dev
3838  *   Pointer to the rte dev structure.
3839  * @param[in, out] dev_flow
3840  *   Pointer to the mlx5 flow.
3841  * @param[in] attr
3842  *   Pointer to the flow attributes.
3843  * @param[in] items
3844  *   Pointer to the list of items.
3845  * @param[in] actions
3846  *   Pointer to the list of actions.
3847  * @param[out] error
3848  *   Pointer to the error structure.
3849  *
3850  * @return
3851  *   0 on success, a negative errno value otherwise and rte_errno is set.
3852  */
3853 static inline int
3854 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3855 		   const struct rte_flow_attr *attr,
3856 		   const struct rte_flow_item items[],
3857 		   const struct rte_flow_action actions[],
3858 		   struct rte_flow_error *error)
3859 {
3860 	const struct mlx5_flow_driver_ops *fops;
3861 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3862 
3863 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3864 	fops = flow_get_drv_ops(type);
3865 	return fops->translate(dev, dev_flow, attr, items, actions, error);
3866 }
3867 
3868 /**
3869  * Flow driver apply API. This abstracts calling driver specific functions.
3870  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3871  * translated driver flows on to device. flow_drv_translate() must precede.
3872  *
3873  * @param[in] dev
3874  *   Pointer to Ethernet device structure.
3875  * @param[in, out] flow
3876  *   Pointer to flow structure.
3877  * @param[out] error
3878  *   Pointer to error structure.
3879  *
3880  * @return
3881  *   0 on success, a negative errno value otherwise and rte_errno is set.
3882  */
3883 static inline int
3884 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3885 	       struct rte_flow_error *error)
3886 {
3887 	const struct mlx5_flow_driver_ops *fops;
3888 	enum mlx5_flow_drv_type type = flow->drv_type;
3889 
3890 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3891 	fops = flow_get_drv_ops(type);
3892 	return fops->apply(dev, flow, error);
3893 }
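
/*
 * Illustrative sketch (not part of the driver): the order in which the
 * wrappers above are meant to be chained when a flow is built. Workspace
 * handling, dev_flow list linkage and the rollback via flow_drv_destroy()
 * on failure, which the real flow creation path performs, are omitted.
 */
static int
flow_drv_pipeline_sketch(struct rte_eth_dev *dev, struct rte_flow *flow,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action actions[],
			 uint32_t flow_idx, struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow;
	int ret;

	ret = flow_drv_validate(dev, attr, items, actions, true, 0, error);
	if (ret < 0)
		return ret;
	flow->drv_type = flow_get_drv_type(dev, attr);
	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
				    flow_idx, error);
	if (!dev_flow)
		return -rte_errno;
	dev_flow->flow = flow; /* backward reference is the caller's duty */
	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
	if (ret < 0)
		return ret;
	return flow_drv_apply(dev, flow, error);
}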
3894 
3895 /**
3896  * Flow driver destroy API. This abstracts calling driver specific functions.
3897  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3898  * on device and releases resources of the flow.
3899  *
3900  * @param[in] dev
3901  *   Pointer to Ethernet device.
3902  * @param[in, out] flow
3903  *   Pointer to flow structure.
3904  */
3905 static inline void
3906 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3907 {
3908 	const struct mlx5_flow_driver_ops *fops;
3909 	enum mlx5_flow_drv_type type = flow->drv_type;
3910 
3911 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3912 	fops = flow_get_drv_ops(type);
3913 	fops->destroy(dev, flow);
3914 }
3915 
3916 /**
3917  * Flow driver find RSS policy tbl API. This abstracts calling driver
3918  * specific functions. Parent flow (rte_flow) should have driver
3919  * type (drv_type). It will find the RSS policy table that has the rss_desc.
3920  *
3921  * @param[in] dev
3922  *   Pointer to Ethernet device.
3923  * @param[in, out] flow
3924  *   Pointer to flow structure.
3925  * @param[in] policy
3926  *   Pointer to meter policy table.
3927  * @param[in] rss_desc
3928  *   Pointer to rss_desc
3929  */
3930 static struct mlx5_flow_meter_sub_policy *
3931 flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
3932 		struct rte_flow *flow,
3933 		struct mlx5_flow_meter_policy *policy,
3934 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
3935 {
3936 	const struct mlx5_flow_driver_ops *fops;
3937 	enum mlx5_flow_drv_type type = flow->drv_type;
3938 
3939 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3940 	fops = flow_get_drv_ops(type);
3941 	return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
3942 }
3943 
3944 /**
3945  * Flow driver color tag rule API. This abstracts calling driver
3946  * specific functions. Parent flow (rte_flow) should have driver
3947  * type (drv_type). It will create the color tag rules in hierarchy meter.
3948  *
3949  * @param[in] dev
3950  *   Pointer to Ethernet device.
3951  * @param[in, out] flow
3952  *   Pointer to flow structure.
3953  * @param[in] fm
3954  *   Pointer to flow meter structure.
3955  * @param[in] src_port
3956  *   The src port this extra rule should use.
3957  * @param[in] item
3958  *   The src port id match item.
3959  * @param[out] error
3960  *   Pointer to error structure.
3961  */
3962 static int
3963 flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev,
3964 		struct rte_flow *flow,
3965 		struct mlx5_flow_meter_info *fm,
3966 		int32_t src_port,
3967 		const struct rte_flow_item *item,
3968 		struct rte_flow_error *error)
3969 {
3970 	const struct mlx5_flow_driver_ops *fops;
3971 	enum mlx5_flow_drv_type type = flow->drv_type;
3972 
3973 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3974 	fops = flow_get_drv_ops(type);
3975 	return fops->meter_hierarchy_rule_create(dev, fm,
3976 						src_port, item, error);
3977 }
3978 
3979 /**
3980  * Get RSS action from the action list.
3981  *
3982  * @param[in] dev
3983  *   Pointer to Ethernet device.
3984  * @param[in] actions
3985  *   Pointer to the list of actions.
3986  * @param[in] flow
3987  *   Parent flow structure pointer.
3988  *
3989  * @return
3990  *   Pointer to the RSS action if exist, else return NULL.
3991  */
3992 static const struct rte_flow_action_rss*
3993 flow_get_rss_action(struct rte_eth_dev *dev,
3994 		    const struct rte_flow_action actions[])
3995 {
3996 	struct mlx5_priv *priv = dev->data->dev_private;
3997 	const struct rte_flow_action_rss *rss = NULL;
3998 	struct mlx5_meter_policy_action_container *acg;
3999 	struct mlx5_meter_policy_action_container *acy;
4000 
4001 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4002 		switch (actions->type) {
4003 		case RTE_FLOW_ACTION_TYPE_RSS:
4004 			rss = actions->conf;
4005 			break;
4006 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
4007 		{
4008 			const struct rte_flow_action_sample *sample =
4009 								actions->conf;
4010 			const struct rte_flow_action *act = sample->actions;
4011 			for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
4012 				if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
4013 					rss = act->conf;
4014 			break;
4015 		}
4016 		case RTE_FLOW_ACTION_TYPE_METER:
4017 		{
4018 			uint32_t mtr_idx;
4019 			struct mlx5_flow_meter_info *fm;
4020 			struct mlx5_flow_meter_policy *policy;
4021 			const struct rte_flow_action_meter *mtr = actions->conf;
4022 
4023 			fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx);
4024 			if (fm && !fm->def_policy) {
4025 				policy = mlx5_flow_meter_policy_find(dev,
4026 						fm->policy_id, NULL);
4027 				MLX5_ASSERT(policy);
4028 				if (policy->is_hierarchy) {
4029 					policy =
4030 				mlx5_flow_meter_hierarchy_get_final_policy(dev,
4031 									policy);
4032 					if (!policy)
4033 						return NULL;
4034 				}
4035 				if (policy->is_rss) {
4036 					acg =
4037 					&policy->act_cnt[RTE_COLOR_GREEN];
4038 					acy =
4039 					&policy->act_cnt[RTE_COLOR_YELLOW];
4040 					if (acg->fate_action ==
4041 					    MLX5_FLOW_FATE_SHARED_RSS)
4042 						rss = acg->rss->conf;
4043 					else if (acy->fate_action ==
4044 						 MLX5_FLOW_FATE_SHARED_RSS)
4045 						rss = acy->rss->conf;
4046 				}
4047 			}
4048 			break;
4049 		}
4050 		default:
4051 			break;
4052 		}
4053 	}
4054 	return rss;
4055 }
4056 
4057 /**
4058  * Get ASO age action by index.
4059  *
4060  * @param[in] dev
4061  *   Pointer to the Ethernet device structure.
4062  * @param[in] age_idx
4063  *   Index to the ASO age action.
4064  *
4065  * @return
4066  *   The specified ASO age action.
4067  */
4068 struct mlx5_aso_age_action*
4069 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
4070 {
4071 	uint16_t pool_idx = age_idx & UINT16_MAX;
4072 	uint16_t offset = (age_idx >> 16) & UINT16_MAX;
4073 	struct mlx5_priv *priv = dev->data->dev_private;
4074 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
4075 	struct mlx5_aso_age_pool *pool;
4076 
4077 	rte_rwlock_read_lock(&mng->resize_rwl);
4078 	pool = mng->pools[pool_idx];
4079 	rte_rwlock_read_unlock(&mng->resize_rwl);
4080 	return &pool->actions[offset - 1];
4081 }
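
/*
 * Illustrative sketch (not part of the driver): the 32-bit age_idx layout
 * implied by the lookup above -- the pool index lives in the low 16 bits
 * and (offset + 1) in the high 16 bits, so a zero age_idx can never refer
 * to a valid action. The encoder below is an assumption for demonstration;
 * the real index is produced elsewhere in the driver.
 */
static inline uint32_t
aso_age_idx_encode_sketch(uint16_t pool_idx, uint16_t offset)
{
	return (((uint32_t)offset + 1) << 16) | pool_idx;
}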
4082 
4083 /* Maps an indirect action to the translated direct action in some actions array. */
4084 struct mlx5_translated_action_handle {
4085 	struct rte_flow_action_handle *action; /**< Indirect action handle. */
4086 	int index; /**< Index in related array of rte_flow_action. */
4087 };
4088 
4089 /**
4090  * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to related
4091  * direct action if translation is possible.
4092  * This functionality is used to run the same execution path for both direct
4093  * and indirect actions on flow create. All necessary preparations for
4094  * indirect action handling should be performed on the *handle* actions list
4095  * returned from this call.
4096  *
4097  * @param[in] dev
4098  *   Pointer to Ethernet device.
4099  * @param[in] actions
4100  *   List of actions to translate.
4101  * @param[out] handle
4102  *   List to store translated indirect action object handles.
4103  * @param[in, out] indir_n
4104  *   Size of the *handle* array. On return it is updated with the number of
4105  *   indirect actions retrieved from the *actions* list.
4106  * @param[out] translated_actions
4107  *   List of actions where all indirect actions were translated to direct
4108  *   if possible. NULL if no translation took place.
4109  * @param[out] error
4110  *   Pointer to the error structure.
4111  *
4112  * @return
4113  *   0 on success, a negative errno value otherwise and rte_errno is set.
4114  */
4115 static int
4116 flow_action_handles_translate(struct rte_eth_dev *dev,
4117 			      const struct rte_flow_action actions[],
4118 			      struct mlx5_translated_action_handle *handle,
4119 			      int *indir_n,
4120 			      struct rte_flow_action **translated_actions,
4121 			      struct rte_flow_error *error)
4122 {
4123 	struct mlx5_priv *priv = dev->data->dev_private;
4124 	struct rte_flow_action *translated = NULL;
4125 	size_t actions_size;
4126 	int n;
4127 	int copied_n = 0;
4128 	struct mlx5_translated_action_handle *handle_end = NULL;
4129 
4130 	for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
4131 		if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT)
4132 			continue;
4133 		if (copied_n == *indir_n) {
4134 			return rte_flow_error_set
4135 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
4136 				 NULL, "too many shared actions");
4137 		}
4138 		rte_memcpy(&handle[copied_n].action, &actions[n].conf,
4139 			   sizeof(actions[n].conf));
4140 		handle[copied_n].index = n;
4141 		copied_n++;
4142 	}
4143 	n++;
4144 	*indir_n = copied_n;
4145 	if (!copied_n)
4146 		return 0;
4147 	actions_size = sizeof(struct rte_flow_action) * n;
4148 	translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
4149 	if (!translated) {
4150 		rte_errno = ENOMEM;
4151 		return -ENOMEM;
4152 	}
4153 	memcpy(translated, actions, actions_size);
4154 	for (handle_end = handle + copied_n; handle < handle_end; handle++) {
4155 		struct mlx5_shared_action_rss *shared_rss;
4156 		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
4157 		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
4158 		uint32_t idx = act_idx &
4159 			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
4160 
4161 		switch (type) {
4162 		case MLX5_INDIRECT_ACTION_TYPE_RSS:
4163 			shared_rss = mlx5_ipool_get
4164 			  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
4165 			translated[handle->index].type =
4166 				RTE_FLOW_ACTION_TYPE_RSS;
4167 			translated[handle->index].conf =
4168 				&shared_rss->origin;
4169 			break;
4170 		case MLX5_INDIRECT_ACTION_TYPE_COUNT:
4171 			translated[handle->index].type =
4172 						(enum rte_flow_action_type)
4173 						MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
4174 			translated[handle->index].conf = (void *)(uintptr_t)idx;
4175 			break;
4176 		case MLX5_INDIRECT_ACTION_TYPE_AGE:
4177 			if (priv->sh->flow_hit_aso_en) {
4178 				translated[handle->index].type =
4179 					(enum rte_flow_action_type)
4180 					MLX5_RTE_FLOW_ACTION_TYPE_AGE;
4181 				translated[handle->index].conf =
4182 							 (void *)(uintptr_t)idx;
4183 				break;
4184 			}
4185 			/* Fall-through */
4186 		case MLX5_INDIRECT_ACTION_TYPE_CT:
4187 			if (priv->sh->ct_aso_en) {
4188 				translated[handle->index].type =
4189 					RTE_FLOW_ACTION_TYPE_CONNTRACK;
4190 				translated[handle->index].conf =
4191 							 (void *)(uintptr_t)idx;
4192 				break;
4193 			}
4194 			/* Fall-through */
4195 		default:
4196 			mlx5_free(translated);
4197 			return rte_flow_error_set
4198 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
4199 				 NULL, "invalid indirect action type");
4200 		}
4201 	}
4202 	*translated_actions = translated;
4203 	return 0;
4204 }
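
/*
 * Illustrative sketch (not part of the driver): how the loop above unpacks
 * an indirect action handle. The handle pointer value itself is an encoded
 * 32-bit index -- the action type in the top bits and the pool index in
 * the remaining low bits.
 */
static inline void
indirect_handle_decode_sketch(const struct rte_flow_action_handle *handle,
			      uint32_t *type, uint32_t *idx)
{
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;

	*type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	*idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}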
4205 
4206 /**
4207  * Get Shared RSS action from the action list.
4208  *
4209  * @param[in] dev
4210  *   Pointer to Ethernet device.
4211  * @param[in] handle
4212  *   Pointer to the list of translated action handles.
4213  * @param[in] shared_n
4214  *   Handle list length.
4215  *
4216  * @return
4217  *   The MLX5 RSS action ID if it exists, 0 otherwise.
4218  */
4219 static uint32_t
4220 flow_get_shared_rss_action(struct rte_eth_dev *dev,
4221 			   struct mlx5_translated_action_handle *handle,
4222 			   int shared_n)
4223 {
4224 	struct mlx5_translated_action_handle *handle_end;
4225 	struct mlx5_priv *priv = dev->data->dev_private;
4226 	struct mlx5_shared_action_rss *shared_rss;
4227 
4228 
4229 	for (handle_end = handle + shared_n; handle < handle_end; handle++) {
4230 		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
4231 		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
4232 		uint32_t idx = act_idx &
4233 			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
4234 		switch (type) {
4235 		case MLX5_INDIRECT_ACTION_TYPE_RSS:
4236 			shared_rss = mlx5_ipool_get
4237 				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
4238 									   idx);
4239 			__atomic_add_fetch(&shared_rss->refcnt, 1,
4240 					   __ATOMIC_RELAXED);
4241 			return idx;
4242 		default:
4243 			break;
4244 		}
4245 	}
4246 	return 0;
4247 }
4248 
4249 static unsigned int
4250 find_graph_root(uint32_t rss_level)
4251 {
4252 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
4253 			       MLX5_EXPANSION_ROOT_OUTER;
4254 }
4255 
4256 /**
4257  *  Get layer flags from the prefix flow.
4258  *
4259  *  Some flows may be split into several subflows: the prefix subflow gets
4260  *  the match items and the suffix subflow gets the actions.
4261  *  Some actions need the user-defined match item flags to get the details
4262  *  for the action.
4263  *  This function helps the suffix flow to get the item layer flags from the
4264  *  prefix subflow.
4265  *
4266  * @param[in] dev_flow
4267  *   Pointer to the created prefix subflow.
4268  *
4269  * @return
4270  *   The layers obtained from the prefix subflow.
4271  */
4272 static inline uint64_t
4273 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
4274 {
4275 	uint64_t layers = 0;
4276 
4277 	/*
4278 	 * The layers bits could be cached in local variables, but usually
4279 	 * the compiler does that optimization for us.
4280 	 * If no decap actions, use the layers directly.
4281 	 */
4282 	if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
4283 		return dev_flow->handle->layers;
4284 	/* Convert L3 layers with decap action. */
4285 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
4286 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4287 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
4288 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4289 	/* Convert L4 layers with decap action.  */
4290 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
4291 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
4292 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
4293 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
4294 	return layers;
4295 }
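
/*
 * Worked example (illustrative): for a prefix subflow that matched
 * ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP and carried a VXLAN decap
 * action, the inner IPv4/TCP bits are converted above, so the suffix
 * subflow receives MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_TCP.
 */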
4296 
4297 /**
4298  * Get metadata split action information.
4299  *
4300  * @param[in] actions
4301  *   Pointer to the list of actions.
4302  * @param[out] qrss
4303  *   Pointer to the return pointer. Set to the QUEUE/RSS action if one is
4304  *   found, left untouched otherwise.
4305  * @param[out] encap_idx
4306  *   Pointer to the index of the encap action if it exists, otherwise the
4307  *   last action index.
4310  *
4311  * @return
4312  *   Total number of actions.
4313  */
4314 static int
4315 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
4316 				       const struct rte_flow_action **qrss,
4317 				       int *encap_idx)
4318 {
4319 	const struct rte_flow_action_raw_encap *raw_encap;
4320 	int actions_n = 0;
4321 	int raw_decap_idx = -1;
4322 
4323 	*encap_idx = -1;
4324 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4325 		switch (actions->type) {
4326 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4327 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4328 			*encap_idx = actions_n;
4329 			break;
4330 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4331 			raw_decap_idx = actions_n;
4332 			break;
4333 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4334 			raw_encap = actions->conf;
4335 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4336 				*encap_idx = raw_decap_idx != -1 ?
4337 						      raw_decap_idx : actions_n;
4338 			break;
4339 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4340 		case RTE_FLOW_ACTION_TYPE_RSS:
4341 			*qrss = actions;
4342 			break;
4343 		default:
4344 			break;
4345 		}
4346 		actions_n++;
4347 	}
4348 	if (*encap_idx == -1)
4349 		*encap_idx = actions_n;
4350 	/* Count RTE_FLOW_ACTION_TYPE_END. */
4351 	return actions_n + 1;
4352 }
4353 
4354 /**
4355  * Check if the action will change the packet.
4356  *
4357  * @param dev
4358  *   Pointer to Ethernet device.
4359  * @param[in] type
4360  *   action type.
4361  *
4362  * @return
4363  *   true if the action will change the packet, false otherwise.
4364  */
4365 static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
4366 					  enum rte_flow_action_type type)
4367 {
4368 	struct mlx5_priv *priv = dev->data->dev_private;
4369 
4370 	switch (type) {
4371 	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4372 	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4373 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4374 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4375 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4376 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4377 	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4378 	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4379 	case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4380 	case RTE_FLOW_ACTION_TYPE_SET_TTL:
4381 	case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4382 	case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4383 	case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4384 	case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4385 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
4386 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
4387 	case RTE_FLOW_ACTION_TYPE_SET_META:
4388 	case RTE_FLOW_ACTION_TYPE_SET_TAG:
4389 	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4390 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4391 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4392 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4393 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4394 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4395 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4396 	case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4397 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4398 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4399 	case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
4400 		return true;
4401 	case RTE_FLOW_ACTION_TYPE_FLAG:
4402 	case RTE_FLOW_ACTION_TYPE_MARK:
4403 		if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4404 			return true;
4405 		else
4406 			return false;
4407 	default:
4408 		return false;
4409 	}
4410 }
4411 
4412 /**
4413  * Check meter action from the action list.
4414  *
4415  * @param dev
4416  *   Pointer to Ethernet device.
4417  * @param[in] actions
4418  *   Pointer to the list of actions.
4419  * @param[out] has_mtr
4420  *   Pointer to the meter exist flag.
4421  * @param[out] has_modify
4422  *   Pointer to the flag indicating there is a packet-modifying action.
4423  * @param[out] meter_id
4424  *   Pointer to the meter id.
4425  *
4426  * @return
4427  *   Total number of actions.
4428  */
4429 static int
4430 flow_check_meter_action(struct rte_eth_dev *dev,
4431 			const struct rte_flow_action actions[],
4432 			bool *has_mtr, bool *has_modify, uint32_t *meter_id)
4433 {
4434 	const struct rte_flow_action_meter *mtr = NULL;
4435 	int actions_n = 0;
4436 
4437 	MLX5_ASSERT(has_mtr);
4438 	*has_mtr = false;
4439 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4440 		switch (actions->type) {
4441 		case RTE_FLOW_ACTION_TYPE_METER:
4442 			mtr = actions->conf;
4443 			*meter_id = mtr->mtr_id;
4444 			*has_mtr = true;
4445 			break;
4446 		default:
4447 			break;
4448 		}
4449 		if (!*has_mtr)
4450 			*has_modify |= flow_check_modify_action_type(dev,
4451 								actions->type);
4452 		actions_n++;
4453 	}
4454 	/* Count RTE_FLOW_ACTION_TYPE_END. */
4455 	return actions_n + 1;
4456 }
4457 
4458 /**
4459  * Check if the flow should be split due to hairpin.
4460  * The reason for the split is that in current HW we can't
4461  * support encap and push-vlan on Rx, so if a flow contains
4462  * these actions we move it to Tx.
4463  *
4464  * @param dev
4465  *   Pointer to Ethernet device.
4466  * @param[in] attr
4467  *   Flow rule attributes.
4468  * @param[in] actions
4469  *   Associated actions (list terminated by the END action).
4470  *
4471  * @return
4472  *   > 0 the number of actions and the flow should be split,
4473  *   0 when no split required.
4474  */
4475 static int
4476 flow_check_hairpin_split(struct rte_eth_dev *dev,
4477 			 const struct rte_flow_attr *attr,
4478 			 const struct rte_flow_action actions[])
4479 {
4480 	int queue_action = 0;
4481 	int action_n = 0;
4482 	int split = 0;
4483 	const struct rte_flow_action_queue *queue;
4484 	const struct rte_flow_action_rss *rss;
4485 	const struct rte_flow_action_raw_encap *raw_encap;
4486 	const struct rte_eth_hairpin_conf *conf;
4487 
4488 	if (!attr->ingress)
4489 		return 0;
4490 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4491 		switch (actions->type) {
4492 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4493 			queue = actions->conf;
4494 			if (queue == NULL)
4495 				return 0;
4496 			conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
4497 			if (conf == NULL || conf->tx_explicit != 0)
4498 				return 0;
4499 			queue_action = 1;
4500 			action_n++;
4501 			break;
4502 		case RTE_FLOW_ACTION_TYPE_RSS:
4503 			rss = actions->conf;
4504 			if (rss == NULL || rss->queue_num == 0)
4505 				return 0;
4506 			conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
4507 			if (conf == NULL || conf->tx_explicit != 0)
4508 				return 0;
4509 			queue_action = 1;
4510 			action_n++;
4511 			break;
4512 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4513 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4514 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4515 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4516 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4517 			split++;
4518 			action_n++;
4519 			break;
4520 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4521 			raw_encap = actions->conf;
4522 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4523 				split++;
4524 			action_n++;
4525 			break;
4526 		default:
4527 			action_n++;
4528 			break;
4529 		}
4530 	}
4531 	if (split && queue_action)
4532 		return action_n;
4533 	return 0;
4534 }
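
/*
 * Example (illustrative, values made up): an ingress action list that makes
 * the check above request a split -- an encap action combined with a QUEUE
 * action targeting a queue assumed to be a hairpin queue with implicit Tx
 * rules (tx_explicit == 0). The encap .conf is omitted for brevity; a real
 * rule must of course provide it.
 */
static const struct rte_flow_action_queue hairpin_queue_sketch = {
	.index = 5, /* assumed hairpin-bound Rx queue */
};
static const struct rte_flow_action hairpin_split_actions_sketch[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &hairpin_queue_sketch },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};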
4535 
4536 /* Declare flow create/destroy prototype in advance. */
4537 static uint32_t
4538 flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
4539 		 const struct rte_flow_attr *attr,
4540 		 const struct rte_flow_item items[],
4541 		 const struct rte_flow_action actions[],
4542 		 bool external, struct rte_flow_error *error);
4543 
4544 static void
4545 flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
4546 		  uint32_t flow_idx);
4547 
4548 int
4549 flow_dv_mreg_match_cb(void *tool_ctx __rte_unused,
4550 		      struct mlx5_list_entry *entry, void *cb_ctx)
4551 {
4552 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4553 	struct mlx5_flow_mreg_copy_resource *mcp_res =
4554 			       container_of(entry, typeof(*mcp_res), hlist_ent);
4555 
4556 	return mcp_res->mark_id != *(uint32_t *)(ctx->data);
4557 }
4558 
4559 struct mlx5_list_entry *
4560 flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
4561 {
4562 	struct rte_eth_dev *dev = tool_ctx;
4563 	struct mlx5_priv *priv = dev->data->dev_private;
4564 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4565 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4566 	struct rte_flow_error *error = ctx->error;
4567 	uint32_t idx = 0;
4568 	int ret;
4569 	uint32_t mark_id = *(uint32_t *)(ctx->data);
4570 	struct rte_flow_attr attr = {
4571 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4572 		.ingress = 1,
4573 	};
4574 	struct mlx5_rte_flow_item_tag tag_spec = {
4575 		.data = mark_id,
4576 	};
4577 	struct rte_flow_item items[] = {
4578 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
4579 	};
4580 	struct rte_flow_action_mark ftag = {
4581 		.id = mark_id,
4582 	};
4583 	struct mlx5_flow_action_copy_mreg cp_mreg = {
4584 		.dst = REG_B,
4585 		.src = REG_NON,
4586 	};
4587 	struct rte_flow_action_jump jump = {
4588 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
4589 	};
4590 	struct rte_flow_action actions[] = {
4591 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
4592 	};
4593 
4594 	/* Fill the register fields in the flow. */
4595 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
4596 	if (ret < 0)
4597 		return NULL;
4598 	tag_spec.id = ret;
4599 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4600 	if (ret < 0)
4601 		return NULL;
4602 	cp_mreg.src = ret;
4603 	/* Provide the full width of FLAG specific value. */
4604 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
4605 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
4606 	/* Build a new flow. */
4607 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
4608 		items[0] = (struct rte_flow_item){
4609 			.type = (enum rte_flow_item_type)
4610 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4611 			.spec = &tag_spec,
4612 		};
4613 		items[1] = (struct rte_flow_item){
4614 			.type = RTE_FLOW_ITEM_TYPE_END,
4615 		};
4616 		actions[0] = (struct rte_flow_action){
4617 			.type = (enum rte_flow_action_type)
4618 				MLX5_RTE_FLOW_ACTION_TYPE_MARK,
4619 			.conf = &ftag,
4620 		};
4621 		actions[1] = (struct rte_flow_action){
4622 			.type = (enum rte_flow_action_type)
4623 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4624 			.conf = &cp_mreg,
4625 		};
4626 		actions[2] = (struct rte_flow_action){
4627 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4628 			.conf = &jump,
4629 		};
4630 		actions[3] = (struct rte_flow_action){
4631 			.type = RTE_FLOW_ACTION_TYPE_END,
4632 		};
4633 	} else {
4634 		/* Default rule, wildcard match. */
4635 		attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
4636 		items[0] = (struct rte_flow_item){
4637 			.type = RTE_FLOW_ITEM_TYPE_END,
4638 		};
4639 		actions[0] = (struct rte_flow_action){
4640 			.type = (enum rte_flow_action_type)
4641 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4642 			.conf = &cp_mreg,
4643 		};
4644 		actions[1] = (struct rte_flow_action){
4645 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4646 			.conf = &jump,
4647 		};
4648 		actions[2] = (struct rte_flow_action){
4649 			.type = RTE_FLOW_ACTION_TYPE_END,
4650 		};
4651 	}
4652 	/* Build a new entry. */
4653 	mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
4654 	if (!mcp_res) {
4655 		rte_errno = ENOMEM;
4656 		return NULL;
4657 	}
4658 	mcp_res->idx = idx;
4659 	mcp_res->mark_id = mark_id;
4660 	/*
4661 	 * The copy flows are not included in any list. These
4662 	 * ones are referenced from other flows and cannot
4663 	 * be applied, removed or deleted in arbitrary order
4664 	 * by list traversal.
4665 	 */
4666 	mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
4667 					&attr, items, actions, false, error);
4668 	if (!mcp_res->rix_flow) {
4669 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
4670 		return NULL;
4671 	}
4672 	return &mcp_res->hlist_ent;
4673 }
4674 
4675 struct mlx5_list_entry *
4676 flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
4677 		      void *cb_ctx __rte_unused)
4678 {
4679 	struct rte_eth_dev *dev = tool_ctx;
4680 	struct mlx5_priv *priv = dev->data->dev_private;
4681 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4682 	uint32_t idx = 0;
4683 
4684 	mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
4685 	if (!mcp_res) {
4686 		rte_errno = ENOMEM;
4687 		return NULL;
4688 	}
4689 	memcpy(mcp_res, oentry, sizeof(*mcp_res));
4690 	mcp_res->idx = idx;
4691 	return &mcp_res->hlist_ent;
4692 }
4693 
4694 void
4695 flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
4696 {
4697 	struct mlx5_flow_mreg_copy_resource *mcp_res =
4698 			       container_of(entry, typeof(*mcp_res), hlist_ent);
4699 	struct rte_eth_dev *dev = tool_ctx;
4700 	struct mlx5_priv *priv = dev->data->dev_private;
4701 
4702 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
4703 }
4704 
4705 /**
4706  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4707  *
4708  * As mark_id is unique, if there's already a registered flow for the mark_id,
4709  * return by increasing the reference counter of the resource. Otherwise, create
4710  * the resource (mcp_res) and flow.
4711  *
4712  * Flow looks like,
4713  *   - If ingress port is ANY and reg_c[1] is mark_id,
4714  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4715  *
4716  * For default flow (zero mark_id), flow is like,
4717  *   - If ingress port is ANY,
4718  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
4719  *
4720  * @param dev
4721  *   Pointer to Ethernet device.
4722  * @param mark_id
4723  *   ID of MARK action, zero means default flow for META.
4724  * @param[out] error
4725  *   Perform verbose error reporting if not NULL.
4726  *
4727  * @return
4728  *   Associated resource on success, NULL otherwise and rte_errno is set.
4729  */
4730 static struct mlx5_flow_mreg_copy_resource *
4731 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
4732 			  struct rte_flow_error *error)
4733 {
4734 	struct mlx5_priv *priv = dev->data->dev_private;
4735 	struct mlx5_list_entry *entry;
4736 	struct mlx5_flow_cb_ctx ctx = {
4737 		.dev = dev,
4738 		.error = error,
4739 		.data = &mark_id,
4740 	};
4741 
4742 	/* Check if already registered. */
4743 	MLX5_ASSERT(priv->mreg_cp_tbl);
4744 	entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
4745 	if (!entry)
4746 		return NULL;
4747 	return container_of(entry, struct mlx5_flow_mreg_copy_resource,
4748 			    hlist_ent);
4749 }
4750 
4751 void
4752 flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
4753 {
4754 	struct mlx5_flow_mreg_copy_resource *mcp_res =
4755 			       container_of(entry, typeof(*mcp_res), hlist_ent);
4756 	struct rte_eth_dev *dev = tool_ctx;
4757 	struct mlx5_priv *priv = dev->data->dev_private;
4758 
4759 	MLX5_ASSERT(mcp_res->rix_flow);
4760 	flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
4761 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
4762 }
4763 
4764 /**
4765  * Release flow in RX_CP_TBL.
4766  *
4767  * @param dev
4768  *   Pointer to Ethernet device.
4769  * @param flow
4770  *   Parent flow for which copying is provided.
4771  */
4772 static void
4773 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
4774 			  struct rte_flow *flow)
4775 {
4776 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4777 	struct mlx5_priv *priv = dev->data->dev_private;
4778 
4779 	if (!flow->rix_mreg_copy)
4780 		return;
4781 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
4782 				 flow->rix_mreg_copy);
4783 	if (!mcp_res || !priv->mreg_cp_tbl)
4784 		return;
4785 	MLX5_ASSERT(mcp_res->rix_flow);
4786 	mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
4787 	flow->rix_mreg_copy = 0;
4788 }
4789 
4790 /**
4791  * Remove the default copy action from RX_CP_TBL.
4792  *
4793  * This function is called from mlx5_dev_start(). Thread safety
4794  * is not guaranteed.
4795  *
4796  * @param dev
4797  *   Pointer to Ethernet device.
4798  */
4799 static void
4800 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
4801 {
4802 	struct mlx5_list_entry *entry;
4803 	struct mlx5_priv *priv = dev->data->dev_private;
4804 	struct mlx5_flow_cb_ctx ctx;
4805 	uint32_t mark_id;
4806 
4807 	/* Check if default flow is registered. */
4808 	if (!priv->mreg_cp_tbl)
4809 		return;
4810 	mark_id = MLX5_DEFAULT_COPY_ID;
4811 	ctx.data = &mark_id;
4812 	entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
4813 	if (!entry)
4814 		return;
4815 	mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
4816 }
4817 
4818 /**
4819  * Add the default copy action in RX_CP_TBL.
4820  *
4821  * This function is called from mlx5_dev_start(). Thread safety
4822  * is not guaranteed.
4823  *
4824  * @param dev
4825  *   Pointer to Ethernet device.
4826  * @param[out] error
4827  *   Perform verbose error reporting if not NULL.
4828  *
4829  * @return
4830  *   0 for success, negative value otherwise and rte_errno is set.
4831  */
4832 static int
4833 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
4834 				  struct rte_flow_error *error)
4835 {
4836 	struct mlx5_priv *priv = dev->data->dev_private;
4837 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4838 	struct mlx5_flow_cb_ctx ctx;
4839 	uint32_t mark_id;
4840 
4841 	/* Check whether extensive metadata feature is engaged. */
4842 	if (!priv->sh->config.dv_flow_en ||
4843 	    priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4844 	    !mlx5_flow_ext_mreg_supported(dev) ||
4845 	    !priv->sh->dv_regc0_mask)
4846 		return 0;
4847 	/*
4848 	 * Adding the default mreg copy flow may be called multiple times,
4849 	 * but the removal is done only once on stop. Avoid registering it twice.
4850 	 */
4851 	mark_id = MLX5_DEFAULT_COPY_ID;
4852 	ctx.data = &mark_id;
4853 	if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
4854 		return 0;
4855 	mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
4856 	if (!mcp_res)
4857 		return -rte_errno;
4858 	return 0;
4859 }
4860 
4861 /**
4862  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4863  *
4864  * All the flows having a Q/RSS action should be split by
4865  * flow_mreg_split_qrss_prep() so that they pass through RX_CP_TBL. A flow
4866  * in the RX_CP_TBL performs the following,
4867  *   - CQE->flow_tag := reg_c[1] (MARK)
4868  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4869  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
4870  * but there should be a flow per each MARK ID set by MARK action.
4871  *
4872  * For the aforementioned reason, if there's a MARK action in flow's action
4873  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
4874  * the MARK ID to CQE's flow_tag like,
4875  *   - If reg_c[1] is mark_id,
4876  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4877  *
4878  * For SET_META action which stores value in reg_c[0], as the destination is
4879  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
4880  * MARK ID means the default flow. The default flow looks like,
4881  *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4882  *
4883  * @param dev
4884  *   Pointer to Ethernet device.
4885  * @param flow
4886  *   Pointer to flow structure.
4887  * @param[in] actions
4888  *   Pointer to the list of actions.
4889  * @param[out] error
4890  *   Perform verbose error reporting if not NULL.
4891  *
4892  * @return
4893  *   0 on success, negative value otherwise and rte_errno is set.
4894  */
4895 static int
4896 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
4897 			    struct rte_flow *flow,
4898 			    const struct rte_flow_action *actions,
4899 			    struct rte_flow_error *error)
4900 {
4901 	struct mlx5_priv *priv = dev->data->dev_private;
4902 	struct mlx5_sh_config *config = &priv->sh->config;
4903 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4904 	const struct rte_flow_action_mark *mark;
4905 
4906 	/* Check whether extensive metadata feature is engaged. */
4907 	if (!config->dv_flow_en ||
4908 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4909 	    !mlx5_flow_ext_mreg_supported(dev) ||
4910 	    !priv->sh->dv_regc0_mask)
4911 		return 0;
4912 	/* Find MARK action. */
4913 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4914 		switch (actions->type) {
4915 		case RTE_FLOW_ACTION_TYPE_FLAG:
4916 			mcp_res = flow_mreg_add_copy_action
4917 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
4918 			if (!mcp_res)
4919 				return -rte_errno;
4920 			flow->rix_mreg_copy = mcp_res->idx;
4921 			return 0;
4922 		case RTE_FLOW_ACTION_TYPE_MARK:
4923 			mark = (const struct rte_flow_action_mark *)
4924 				actions->conf;
4925 			mcp_res =
4926 				flow_mreg_add_copy_action(dev, mark->id, error);
4927 			if (!mcp_res)
4928 				return -rte_errno;
4929 			flow->rix_mreg_copy = mcp_res->idx;
4930 			return 0;
4931 		default:
4932 			break;
4933 		}
4934 	}
4935 	return 0;
4936 }
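
/*
 * Illustrative sketch (an assumption, not driver code): a user rule like the
 * one below makes flow_mreg_update_copy_table() register a copy flow for
 * mark ID 0x1234 in RX_CP_TBL, so that reg_c[1] is copied back to
 * CQE->flow_tag on receive. Only standard rte_flow types are used; the
 * queue index is a hypothetical value.
 *
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 *	};
 */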
4937 
4938 #define MLX5_MAX_SPLIT_ACTIONS 24
4939 #define MLX5_MAX_SPLIT_ITEMS 24
4940 
4941 /**
4942  * Split the hairpin flow.
4943  * Since HW can't support encap and push-vlan on Rx, we move these
4944  * actions to Tx.
4945  * If the count action comes after the encap, then we also
4946  * move the count action. In this case the count will also measure
4947  * the outer bytes.
4948  *
4949  * @param dev
4950  *   Pointer to Ethernet device.
4951  * @param[in] actions
4952  *   Associated actions (list terminated by the END action).
4953  * @param[out] actions_rx
4954  *   Rx flow actions.
4955  * @param[out] actions_tx
4956  *   Tx flow actions.
4957  * @param[out] pattern_tx
4958  *   The pattern items for the Tx flow.
4959  * @param[out] flow_id
4960  *   The flow ID connected to this flow.
4961  *
4962  * @return
4963  *   0 on success.
4964  */
4965 static int
4966 flow_hairpin_split(struct rte_eth_dev *dev,
4967 		   const struct rte_flow_action actions[],
4968 		   struct rte_flow_action actions_rx[],
4969 		   struct rte_flow_action actions_tx[],
4970 		   struct rte_flow_item pattern_tx[],
4971 		   uint32_t flow_id)
4972 {
4973 	const struct rte_flow_action_raw_encap *raw_encap;
4974 	const struct rte_flow_action_raw_decap *raw_decap;
4975 	struct mlx5_rte_flow_action_set_tag *set_tag;
4976 	struct rte_flow_action *tag_action;
4977 	struct mlx5_rte_flow_item_tag *tag_item;
4978 	struct rte_flow_item *item;
4979 	char *addr;
4980 	int encap = 0;
4981 
4982 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4983 		switch (actions->type) {
4984 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4985 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4986 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4987 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4988 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4989 			rte_memcpy(actions_tx, actions,
4990 			       sizeof(struct rte_flow_action));
4991 			actions_tx++;
4992 			break;
4993 		case RTE_FLOW_ACTION_TYPE_COUNT:
4994 			if (encap) {
4995 				rte_memcpy(actions_tx, actions,
4996 					   sizeof(struct rte_flow_action));
4997 				actions_tx++;
4998 			} else {
4999 				rte_memcpy(actions_rx, actions,
5000 					   sizeof(struct rte_flow_action));
5001 				actions_rx++;
5002 			}
5003 			break;
5004 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5005 			raw_encap = actions->conf;
5006 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
5007 				memcpy(actions_tx, actions,
5008 				       sizeof(struct rte_flow_action));
5009 				actions_tx++;
5010 				encap = 1;
5011 			} else {
5012 				rte_memcpy(actions_rx, actions,
5013 					   sizeof(struct rte_flow_action));
5014 				actions_rx++;
5015 			}
5016 			break;
5017 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5018 			raw_decap = actions->conf;
5019 			if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
5020 				memcpy(actions_tx, actions,
5021 				       sizeof(struct rte_flow_action));
5022 				actions_tx++;
5023 			} else {
5024 				rte_memcpy(actions_rx, actions,
5025 					   sizeof(struct rte_flow_action));
5026 				actions_rx++;
5027 			}
5028 			break;
5029 		default:
5030 			rte_memcpy(actions_rx, actions,
5031 				   sizeof(struct rte_flow_action));
5032 			actions_rx++;
5033 			break;
5034 		}
5035 	}
5036 	/* Add the set tag action and the end action for the Rx flow. */
5037 	tag_action = actions_rx;
5038 	tag_action->type = (enum rte_flow_action_type)
5039 			   MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5040 	actions_rx++;
5041 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
5042 	actions_rx++;
5043 	set_tag = (void *)actions_rx;
5044 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
5045 		.id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL),
5046 		.data = flow_id,
5047 	};
5048 	MLX5_ASSERT(set_tag->id > REG_NON);
5049 	tag_action->conf = set_tag;
5050 	/* Create Tx item list. */
5051 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
5052 	addr = (void *)&pattern_tx[2];
5053 	item = pattern_tx;
5054 	item->type = (enum rte_flow_item_type)
5055 		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
5056 	tag_item = (void *)addr;
5057 	tag_item->data = flow_id;
5058 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
5059 	MLX5_ASSERT(tag_item->id > REG_NON);
5060 	item->spec = tag_item;
5061 	addr += sizeof(struct mlx5_rte_flow_item_tag);
5062 	tag_item = (void *)addr;
5063 	tag_item->data = UINT32_MAX;
5064 	tag_item->id = UINT16_MAX;
5065 	item->mask = tag_item;
5066 	item->last = NULL;
5067 	item++;
5068 	item->type = RTE_FLOW_ITEM_TYPE_END;
5069 	return 0;
5070 }
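
/*
 * Illustrative sketch (an assumption, not an actual trace): given hairpin
 * actions [RAW_ENCAP(size > MLX5_ENCAPSULATION_DECISION_SIZE), COUNT,
 * QUEUE, END], flow_hairpin_split() produces
 *	Rx actions: [QUEUE, TAG(data = flow_id), END]
 *	Tx actions: [RAW_ENCAP, COUNT, END]
 * and the Tx pattern [TAG(spec.data = flow_id, mask.data = UINT32_MAX), END],
 * so the Tx subflow matches only packets tagged by the Rx subflow.
 */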
5071 
5072 /**
5073  * The last stage of the splitting chain; it just creates the subflow
5074  * without any modification.
5075  *
5076  * @param[in] dev
5077  *   Pointer to Ethernet device.
5078  * @param[in] flow
5079  *   Parent flow structure pointer.
5080  * @param[in, out] sub_flow
5081  *   Pointer to return the created subflow, may be NULL.
5082  * @param[in] attr
5083  *   Flow rule attributes.
5084  * @param[in] items
5085  *   Pattern specification (list terminated by the END pattern item).
5086  * @param[in] actions
5087  *   Associated actions (list terminated by the END action).
5088  * @param[in] flow_split_info
5089  *   Pointer to flow split info structure.
5090  * @param[out] error
5091  *   Perform verbose error reporting if not NULL.
5092  * @return
5093  *   0 on success, negative value otherwise
5094  */
5095 static int
5096 flow_create_split_inner(struct rte_eth_dev *dev,
5097 			struct rte_flow *flow,
5098 			struct mlx5_flow **sub_flow,
5099 			const struct rte_flow_attr *attr,
5100 			const struct rte_flow_item items[],
5101 			const struct rte_flow_action actions[],
5102 			struct mlx5_flow_split_info *flow_split_info,
5103 			struct rte_flow_error *error)
5104 {
5105 	struct mlx5_flow *dev_flow;
5106 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5107 
5108 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
5109 				    flow_split_info->flow_idx, error);
5110 	if (!dev_flow)
5111 		return -rte_errno;
5112 	dev_flow->flow = flow;
5113 	dev_flow->external = flow_split_info->external;
5114 	dev_flow->skip_scale = flow_split_info->skip_scale;
5115 	/* A subflow object was created; include it in the list. */
5116 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5117 		      dev_flow->handle, next);
5118 	/*
5119 	 * If dev_flow is one of the suffix flows, some actions in the suffix
5120 	 * flow may need the user-defined item layer flags; pass the
5121 	 * metadata rxq mark flag to the suffix flow as well.
5122 	 */
5123 	if (flow_split_info->prefix_layers)
5124 		dev_flow->handle->layers = flow_split_info->prefix_layers;
5125 	if (flow_split_info->prefix_mark) {
5126 		MLX5_ASSERT(wks);
5127 		wks->mark = 1;
5128 	}
5129 	if (sub_flow)
5130 		*sub_flow = dev_flow;
5131 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5132 	dev_flow->dv.table_id = flow_split_info->table_id;
5133 #endif
5134 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
5135 }
5136 
5137 /**
5138  * Get the sub policy of a meter.
5139  *
5140  * @param[in] dev
5141  *   Pointer to Ethernet device.
5142  * @param[in] flow
5143  *   Parent flow structure pointer.
5144  * @param wks
5145  *   Pointer to thread flow work space.
5146  * @param[in] attr
5147  *   Flow rule attributes.
5148  * @param[in] items
5149  *   Pattern specification (list terminated by the END pattern item).
5150  * @param[out] error
5151  *   Perform verbose error reporting if not NULL.
5152  *
5153  * @return
5154  *   Pointer to the meter sub policy, NULL otherwise and rte_errno is set.
5155  */
5156 static struct mlx5_flow_meter_sub_policy *
5157 get_meter_sub_policy(struct rte_eth_dev *dev,
5158 		     struct rte_flow *flow,
5159 		     struct mlx5_flow_workspace *wks,
5160 		     const struct rte_flow_attr *attr,
5161 		     const struct rte_flow_item items[],
5162 		     struct rte_flow_error *error)
5163 {
5164 	struct mlx5_flow_meter_policy *policy;
5165 	struct mlx5_flow_meter_policy *final_policy;
5166 	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
5167 
5168 	policy = wks->policy;
5169 	final_policy = policy->is_hierarchy ? wks->final_policy : policy;
5170 	if (final_policy->is_rss || final_policy->is_queue) {
5171 		struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
5172 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
5173 		uint32_t i;
5174 
5175 		/*
5176 		 * This is a temporary dev_flow,
5177 		 * no need to register any matcher for it in translate.
5178 		 */
5179 		wks->skip_matcher_reg = 1;
5180 		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
5181 			struct mlx5_flow dev_flow = {0};
5182 			struct mlx5_flow_handle dev_handle = { {0} };
5183 			uint8_t fate = final_policy->act_cnt[i].fate_action;
5184 
5185 			if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
5186 				const struct rte_flow_action_rss *rss_act =
5187 					final_policy->act_cnt[i].rss->conf;
5188 				struct rte_flow_action rss_actions[2] = {
5189 					[0] = {
5190 					.type = RTE_FLOW_ACTION_TYPE_RSS,
5191 					.conf = rss_act,
5192 					},
5193 					[1] = {
5194 					.type = RTE_FLOW_ACTION_TYPE_END,
5195 					.conf = NULL,
5196 					}
5197 				};
5198 
5199 				dev_flow.handle = &dev_handle;
5200 				dev_flow.ingress = attr->ingress;
5201 				dev_flow.flow = flow;
5202 				dev_flow.external = 0;
5203 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5204 				dev_flow.dv.transfer = attr->transfer;
5205 #endif
5206 				/*
5207 				 * Translate the RSS action to get the RSS hash fields.
5208 				 */
5209 				if (flow_drv_translate(dev, &dev_flow, attr,
5210 						items, rss_actions, error))
5211 					goto exit;
5212 				rss_desc_v[i] = wks->rss_desc;
5213 				rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
5214 				rss_desc_v[i].hash_fields =
5215 						dev_flow.hash_fields;
5216 				rss_desc_v[i].queue_num =
5217 						rss_desc_v[i].hash_fields ?
5218 						rss_desc_v[i].queue_num : 1;
5219 				rss_desc_v[i].tunnel =
5220 						!!(dev_flow.handle->layers &
5221 						   MLX5_FLOW_LAYER_TUNNEL);
5222 				/* Use the RSS queues in the containers. */
5223 				rss_desc_v[i].queue =
5224 					(uint16_t *)(uintptr_t)rss_act->queue;
5225 				rss_desc[i] = &rss_desc_v[i];
5226 			} else if (fate == MLX5_FLOW_FATE_QUEUE) {
5227 				/* This is a queue action. */
5228 				rss_desc_v[i] = wks->rss_desc;
5229 				rss_desc_v[i].key_len = 0;
5230 				rss_desc_v[i].hash_fields = 0;
5231 				rss_desc_v[i].queue =
5232 					&final_policy->act_cnt[i].queue;
5233 				rss_desc_v[i].queue_num = 1;
5234 				rss_desc[i] = &rss_desc_v[i];
5235 			} else {
5236 				rss_desc[i] = NULL;
5237 			}
5238 		}
5239 		sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
5240 						flow, policy, rss_desc);
5241 	} else {
5242 		enum mlx5_meter_domain mtr_domain =
5243 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
5244 				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
5245 						MLX5_MTR_DOMAIN_INGRESS);
5246 		sub_policy = policy->sub_policys[mtr_domain][0];
5247 	}
5248 	if (!sub_policy)
5249 		rte_flow_error_set(error, EINVAL,
5250 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5251 				   "Failed to get meter sub-policy.");
5252 exit:
5253 	return sub_policy;
5254 }
5255 
5256 /**
5257  * Split the meter flow.
5258  *
5259  * As the meter flow is split into three subflows, the actions other
5260  * than the meter action only make sense when the meter accepts
5261  * the packet. If the packet is to be dropped, no additional
5262  * actions should be taken.
5263  *
5264  * One special kind of action, which decapsulates the L3 tunnel
5265  * header, is placed in the prefix subflow, so as not to take the
5266  * L3 tunnel header into account.
5267  *
5268  * @param[in] dev
5269  *   Pointer to Ethernet device.
5270  * @param[in] flow
5271  *   Parent flow structure pointer.
5272  * @param wks
5273  *   Pointer to thread flow work space.
5274  * @param[in] attr
5275  *   Flow rule attributes.
5276  * @param[in] items
5277  *   Pattern specification (list terminated by the END pattern item).
5278  * @param[out] sfx_items
5279  *   Suffix flow match items (list terminated by the END pattern item).
5280  * @param[in] actions
5281  *   Associated actions (list terminated by the END action).
5282  * @param[out] actions_sfx
5283  *   Suffix flow actions.
5284  * @param[out] actions_pre
5285  *   Prefix flow actions.
5286  * @param[out] mtr_flow_id
5287  *   Pointer to meter flow id.
5288  * @param[out] error
5289  *   Perform verbose error reporting if not NULL.
5290  *
5291  * @return
5292  *   0 on success, a negative errno value otherwise and rte_errno is set.
5293  */
5294 static int
5295 flow_meter_split_prep(struct rte_eth_dev *dev,
5296 		      struct rte_flow *flow,
5297 		      struct mlx5_flow_workspace *wks,
5298 		      const struct rte_flow_attr *attr,
5299 		      const struct rte_flow_item items[],
5300 		      struct rte_flow_item sfx_items[],
5301 		      const struct rte_flow_action actions[],
5302 		      struct rte_flow_action actions_sfx[],
5303 		      struct rte_flow_action actions_pre[],
5304 		      uint32_t *mtr_flow_id,
5305 		      struct rte_flow_error *error)
5306 {
5307 	struct mlx5_priv *priv = dev->data->dev_private;
5308 	struct mlx5_flow_meter_info *fm = wks->fm;
5309 	struct rte_flow_action *tag_action = NULL;
5310 	struct rte_flow_item *tag_item;
5311 	struct mlx5_rte_flow_action_set_tag *set_tag;
5312 	const struct rte_flow_action_raw_encap *raw_encap;
5313 	const struct rte_flow_action_raw_decap *raw_decap;
5314 	struct mlx5_rte_flow_item_tag *tag_item_spec;
5315 	struct mlx5_rte_flow_item_tag *tag_item_mask;
5316 	uint32_t tag_id = 0;
5317 	struct rte_flow_item *vlan_item_dst = NULL;
5318 	const struct rte_flow_item *vlan_item_src = NULL;
5319 	const struct rte_flow_item *orig_items = items;
5320 	struct rte_flow_action *hw_mtr_action;
5321 	struct rte_flow_action *action_pre_head = NULL;
5322 	uint16_t flow_src_port = priv->representor_id;
5323 	bool mtr_first;
5324 	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
5325 	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
5326 				MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
5327 	uint32_t flow_id = 0;
5328 	uint32_t flow_id_reversed = 0;
5329 	uint8_t flow_id_bits = 0;
5330 	bool after_meter = false;
5331 	int shift;
5332 
5333 	/* Prepare the suffix subflow items. */
5334 	tag_item = sfx_items++;
5335 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5336 		int item_type = items->type;
5337 
5338 		switch (item_type) {
5339 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
5340 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
5341 			if (mlx5_flow_get_item_vport_id(dev, items, &flow_src_port, NULL, error))
5342 				return -rte_errno;
5343 			if (!fm->def_policy && wks->policy->is_hierarchy &&
5344 			    flow_src_port != priv->representor_id) {
5345 				if (flow_drv_mtr_hierarchy_rule_create(dev,
5346 								flow, fm,
5347 								flow_src_port,
5348 								items,
5349 								error))
5350 					return -rte_errno;
5351 			}
5352 			memcpy(sfx_items, items, sizeof(*sfx_items));
5353 			sfx_items++;
5354 			break;
5355 		case RTE_FLOW_ITEM_TYPE_VLAN:
5356 			/* Determine whether to copy the VLAN item below. */
5357 			vlan_item_src = items;
5358 			vlan_item_dst = sfx_items++;
5359 			vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID;
5360 			break;
5361 		default:
5362 			break;
5363 		}
5364 	}
5365 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
5366 	sfx_items++;
5367 	mtr_first = priv->sh->meter_aso_en &&
5368 		(attr->egress || (attr->transfer && flow_src_port != UINT16_MAX));
5369 	/* For ASO meter, the meter action must precede the tag action in the Tx direction. */
5370 	if (mtr_first) {
5371 		action_pre_head = actions_pre++;
5372 		/* Leave space for tag action. */
5373 		tag_action = actions_pre++;
5374 	}
5375 	/* Prepare the actions for prefix and suffix flow. */
5376 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5377 		struct rte_flow_action *action_cur = NULL;
5378 
5379 		switch (actions->type) {
5380 		case RTE_FLOW_ACTION_TYPE_METER:
5381 			if (mtr_first) {
5382 				action_cur = action_pre_head;
5383 			} else {
5384 				/* Leave space for tag action. */
5385 				tag_action = actions_pre++;
5386 				action_cur = actions_pre++;
5387 			}
5388 			after_meter = true;
5389 			break;
5390 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5391 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5392 			action_cur = actions_pre++;
5393 			break;
5394 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5395 			raw_encap = actions->conf;
5396 			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
5397 				action_cur = actions_pre++;
5398 			break;
5399 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5400 			raw_decap = actions->conf;
5401 			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
5402 				action_cur = actions_pre++;
5403 			break;
5404 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5405 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5406 			if (vlan_item_dst && vlan_item_src) {
5407 				memcpy(vlan_item_dst, vlan_item_src,
5408 					sizeof(*vlan_item_dst));
5409 				/*
5410 				 * Convert to an internal match item; it is used
5411 				 * for VLAN push and set VID.
5412 				 */
5413 				vlan_item_dst->type = (enum rte_flow_item_type)
5414 						MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
5415 			}
5416 			break;
5417 		case RTE_FLOW_ACTION_TYPE_COUNT:
5418 			if (fm->def_policy)
5419 				action_cur = after_meter ?
5420 						actions_sfx++ : actions_pre++;
5421 			break;
5422 		default:
5423 			break;
5424 		}
5425 		if (!action_cur)
5426 			action_cur = (fm->def_policy) ?
5427 					actions_sfx++ : actions_pre++;
5428 		memcpy(action_cur, actions, sizeof(struct rte_flow_action));
5429 	}
5430 	/* Add end action to the actions. */
5431 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
5432 	if (priv->sh->meter_aso_en) {
5433 		/*
5434 		 * For ASO meter, an extra jump action must be added explicitly
5435 		 * to jump from the meter table to the policer table.
5436 		 */
5437 		struct mlx5_flow_meter_sub_policy *sub_policy;
5438 		struct mlx5_flow_tbl_data_entry *tbl_data;
5439 
5440 		if (!fm->def_policy) {
5441 			sub_policy = get_meter_sub_policy(dev, flow, wks,
5442 							  attr, orig_items,
5443 							  error);
5444 			if (!sub_policy)
5445 				return -rte_errno;
5446 		} else {
5447 			enum mlx5_meter_domain mtr_domain =
5448 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
5449 				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
5450 						MLX5_MTR_DOMAIN_INGRESS);
5451 
5452 			sub_policy =
5453 			&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
5454 		}
5455 		tbl_data = container_of(sub_policy->tbl_rsc,
5456 					struct mlx5_flow_tbl_data_entry, tbl);
5457 		hw_mtr_action = actions_pre++;
5458 		hw_mtr_action->type = (enum rte_flow_action_type)
5459 				      MLX5_RTE_FLOW_ACTION_TYPE_JUMP;
5460 		hw_mtr_action->conf = tbl_data->jump.action;
5461 	}
5462 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
5463 	actions_pre++;
5464 	if (!tag_action)
5465 		return rte_flow_error_set(error, ENOMEM,
5466 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5467 					  NULL, "No tag action space.");
5468 	if (!mtr_flow_id) {
5469 		tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
5470 		goto exit;
5471 	}
5472 	/* Only default-policy Meter creates mtr flow id. */
5473 	if (fm->def_policy) {
5474 		mlx5_ipool_malloc(fm->flow_ipool, &tag_id);
5475 		if (!tag_id)
5476 			return rte_flow_error_set(error, ENOMEM,
5477 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5478 					"Failed to allocate meter flow id.");
5479 		flow_id = tag_id - 1;
5480 		flow_id_bits = (!flow_id) ? 1 :
5481 				(MLX5_REG_BITS - __builtin_clz(flow_id));
5482 		if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) >
5483 		    mtr_reg_bits) {
5484 			mlx5_ipool_free(fm->flow_ipool, tag_id);
5485 			return rte_flow_error_set(error, EINVAL,
5486 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5487 					"Meter flow id exceeds max limit.");
5488 		}
5489 		if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits)
5490 			priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits;
5491 	}
5492 	/* Build tag actions and items for meter_id/meter flow_id. */
5493 	set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
5494 	tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
5495 	tag_item_mask = tag_item_spec + 1;
5496 	/* Both flow_id and meter_id share the same register. */
5497 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
5498 		.id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
5499 							    0, error),
5500 		.offset = mtr_id_offset,
5501 		.length = mtr_reg_bits,
5502 		.data = flow->meter,
5503 	};
5504 	/*
5505 	 * The color register bits used by flow_id grow from
5506 	 * MSB to LSB, so the flow_id value must be bit-reversed in RegC.
5507 	 */
5508 	for (shift = 0; shift < flow_id_bits; shift++)
5509 		flow_id_reversed = (flow_id_reversed << 1) |
5510 				((flow_id >> shift) & 0x1);
5511 	set_tag->data |=
5512 		flow_id_reversed << (mtr_reg_bits - flow_id_bits);
5513 	tag_item_spec->id = set_tag->id;
5514 	tag_item_spec->data = set_tag->data << mtr_id_offset;
5515 	tag_item_mask->data = UINT32_MAX << mtr_id_offset;
5516 	tag_action->type = (enum rte_flow_action_type)
5517 				MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5518 	tag_action->conf = set_tag;
5519 	tag_item->type = (enum rte_flow_item_type)
5520 				MLX5_RTE_FLOW_ITEM_TYPE_TAG;
5521 	tag_item->spec = tag_item_spec;
5522 	tag_item->last = NULL;
5523 	tag_item->mask = tag_item_mask;
5524 exit:
5525 	if (mtr_flow_id)
5526 		*mtr_flow_id = tag_id;
5527 	return 0;
5528 }
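
/*
 * Worked example of the flow_id bit reversal above (a sketch with
 * illustrative values): with flow_id = 0x6 (0b110) and flow_id_bits = 3
 * the loop produces flow_id_reversed = 0b011:
 *
 *	uint32_t flow_id = 0x6, rev = 0;
 *	int bits = 3, shift;
 *
 *	for (shift = 0; shift < bits; shift++)
 *		rev = (rev << 1) | ((flow_id >> shift) & 0x1);
 *	// rev == 0x3; then data |= rev << (mtr_reg_bits - bits)
 */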
5529 
5530 /**
5531  * Split action list having QUEUE/RSS for metadata register copy.
5532  *
5533  * Once Q/RSS action is detected in user's action list, the flow action
5534  * should be split in order to copy metadata registers, which will happen in
5535  * RX_CP_TBL like,
5536  *   - CQE->flow_tag := reg_c[1] (MARK)
5537  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
5538  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
5539  * This is because the last action of each flow must be a terminal action
5540  * (QUEUE, RSS or DROP).
5541  *
5542  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
5543  * stored and kept in the mlx5_flow structure for each sub_flow.
5544  *
5545  * The Q/RSS action is replaced with,
5546  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
5547  * And the following JUMP action is added at the end,
5548  *   - JUMP, to RX_CP_TBL.
5549  *
5550  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
5551  * flow_create_split_metadata() routine. The flow will look like,
5552  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
5553  *
5554  * @param dev
5555  *   Pointer to Ethernet device.
5556  * @param[out] split_actions
5557  *   Pointer to store split actions to jump to CP_TBL.
5558  * @param[in] actions
5559  *   Pointer to the list of original flow actions.
5560  * @param[in] qrss
5561  *   Pointer to the Q/RSS action.
5562  * @param[in] actions_n
5563  *   Number of original actions.
5564  * @param[in] mtr_sfx
5565  *   Check if it is in meter suffix table.
5566  * @param[out] error
5567  *   Perform verbose error reporting if not NULL.
5568  *
5569  * @return
5570  *   non-zero unique flow_id on success, otherwise 0 and
5571  *   error/rte_error are set.
5572  */
5573 static uint32_t
5574 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
5575 			  struct rte_flow_action *split_actions,
5576 			  const struct rte_flow_action *actions,
5577 			  const struct rte_flow_action *qrss,
5578 			  int actions_n, int mtr_sfx,
5579 			  struct rte_flow_error *error)
5580 {
5581 	struct mlx5_priv *priv = dev->data->dev_private;
5582 	struct mlx5_rte_flow_action_set_tag *set_tag;
5583 	struct rte_flow_action_jump *jump;
5584 	const int qrss_idx = qrss - actions;
5585 	uint32_t flow_id = 0;
5586 	int ret = 0;
5587 
5588 	/*
5589 	 * Given actions will be split
5590 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
5591 	 * - Add jump to mreg CP_TBL.
5592 	 * As a result, there will be one more action.
5593 	 */
5594 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
5595 	/* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
5596 	++actions_n;
5597 	set_tag = (void *)(split_actions + actions_n);
5598 	/*
5599 	 * If we are not the meter suffix flow, add the tag action,
5600 	 * since the meter suffix flow already has the tag added.
5601 	 */
5602 	if (!mtr_sfx) {
5603 		/*
5604 		 * Allocate the new subflow ID. It is unique within the
5605 		 * device and not shared with representors. Otherwise,
5606 		 * we would have to resolve multi-thread access
5607 		 * synchronization issues. Each flow on the shared device is
5608 		 * appended with the source vport identifier, so the resulting
5609 		 * flows will be unique in the shared (by master and
5610 		 * representors) domain even if they have coinciding
5611 		 * IDs.
5612 		 */
5613 		mlx5_ipool_malloc(priv->sh->ipool
5614 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
5615 		if (!flow_id)
5616 			return rte_flow_error_set(error, ENOMEM,
5617 						  RTE_FLOW_ERROR_TYPE_ACTION,
5618 						  NULL, "can't allocate id "
5619 						  "for split Q/RSS subflow");
5620 		/* Internal SET_TAG action to set flow ID. */
5621 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
5622 			.data = flow_id,
5623 		};
5624 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
5625 		if (ret < 0)
5626 			return ret;
5627 		set_tag->id = ret;
5628 		/* Construct new actions array. */
5629 		/* Replace QUEUE/RSS action. */
5630 		split_actions[qrss_idx] = (struct rte_flow_action){
5631 			.type = (enum rte_flow_action_type)
5632 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
5633 			.conf = set_tag,
5634 		};
5635 	} else {
5636 		/*
5637 		 * If we are the meter suffix flow, the tag already exists.
5638 		 * Set the QUEUE/RSS action to void.
5639 		 */
5640 		split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
5641 	}
5642 	/* JUMP action to jump to mreg copy table (CP_TBL). */
5643 	jump = (void *)(set_tag + 1);
5644 	*jump = (struct rte_flow_action_jump){
5645 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5646 	};
5647 	split_actions[actions_n - 2] = (struct rte_flow_action){
5648 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
5649 		.conf = jump,
5650 	};
5651 	split_actions[actions_n - 1] = (struct rte_flow_action){
5652 		.type = RTE_FLOW_ACTION_TYPE_END,
5653 	};
5654 	return flow_id;
5655 }
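
/*
 * Illustrative layout (an assumption): for original actions [RSS, END]
 * (actions_n = 2 on entry), the buffer built above becomes
 *	split_actions[0] = TAG  (internal, conf = set_tag) - replaces RSS
 *	split_actions[1] = JUMP (conf = jump, group = CP_TBL)
 *	split_actions[2] = END
 * with the set_tag and jump configurations stored past the action array
 * in the same allocation.
 */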
5656 
5657 /**
5658  * Extend the given action list for Tx metadata copy.
5659  *
5660  * Copy the given action list to the ext_actions and add flow metadata register
5661  * copy action in order to copy reg_a set by WQE to reg_c[0].
5662  *
 * @param dev
 *   Pointer to Ethernet device.
5663  * @param[out] ext_actions
5664  *   Pointer to the extended action list.
5665  * @param[in] actions
5666  *   Pointer to the list of actions.
5667  * @param[in] actions_n
5668  *   Number of actions in the list.
5669  * @param[out] error
5670  *   Perform verbose error reporting if not NULL.
5671  * @param[in] encap_idx
5672  *   The encap action index.
5673  *
5674  * @return
5675  *   0 on success, negative value otherwise
5676  */
5677 static int
5678 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
5679 		       struct rte_flow_action *ext_actions,
5680 		       const struct rte_flow_action *actions,
5681 		       int actions_n, struct rte_flow_error *error,
5682 		       int encap_idx)
5683 {
5684 	struct mlx5_flow_action_copy_mreg *cp_mreg =
5685 		(struct mlx5_flow_action_copy_mreg *)
5686 			(ext_actions + actions_n + 1);
5687 	int ret;
5688 
5689 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
5690 	if (ret < 0)
5691 		return ret;
5692 	cp_mreg->dst = ret;
5693 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
5694 	if (ret < 0)
5695 		return ret;
5696 	cp_mreg->src = ret;
5697 	if (encap_idx != 0)
5698 		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
5699 	if (encap_idx == actions_n - 1) {
5700 		ext_actions[actions_n - 1] = (struct rte_flow_action){
5701 			.type = (enum rte_flow_action_type)
5702 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5703 			.conf = cp_mreg,
5704 		};
5705 		ext_actions[actions_n] = (struct rte_flow_action){
5706 			.type = RTE_FLOW_ACTION_TYPE_END,
5707 		};
5708 	} else {
5709 		ext_actions[encap_idx] = (struct rte_flow_action){
5710 			.type = (enum rte_flow_action_type)
5711 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5712 			.conf = cp_mreg,
5713 		};
5714 		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
5715 				sizeof(*ext_actions) * (actions_n - encap_idx));
5716 	}
5717 	return 0;
5718 }
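
/*
 * Illustrative sketch (an assumption): when encap_idx == actions_n - 1
 * (e.g. no encapsulation before the END action), actions [A, END]
 * (actions_n = 2) yield the extended list [A, COPY_MREG, END]. With an
 * encapsulation at index i < actions_n - 1, the COPY_MREG action is
 * inserted at index i and the actions from i onwards are shifted by one.
 */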
5719 
5720 /**
5721  * Check the match action from the action list.
5722  *
5723  * @param[in] actions
5724  *   Pointer to the list of actions.
5725  * @param[in] attr
5726  *   Flow rule attributes.
5727  * @param[in] action
5728  *   The action to check for existence.
5729  * @param[out] match_action_pos
5730  *   Pointer to the position of the matched action; -1 if it does not exist.
5731  * @param[out] qrss_action_pos
5732  *   Pointer to the position of the Queue/RSS action; -1 if it does not exist.
5733  * @param[out] modify_after_mirror
5734  *   Pointer to the flag indicating a modify action after FDB mirroring.
5735  *
5736  * @return
5737  *   > 0 the total number of actions.
5738  *   0 if the match action is not found in the action list.
5739  */
5740 static int
5741 flow_check_match_action(const struct rte_flow_action actions[],
5742 			const struct rte_flow_attr *attr,
5743 			enum rte_flow_action_type action,
5744 			int *match_action_pos, int *qrss_action_pos,
5745 			int *modify_after_mirror)
5746 {
5747 	const struct rte_flow_action_sample *sample;
5748 	const struct rte_flow_action_raw_decap *decap;
5749 	int actions_n = 0;
5750 	uint32_t ratio = 0;
5751 	int sub_type = 0;
5752 	int flag = 0;
5753 	int fdb_mirror = 0;
5754 
5755 	*match_action_pos = -1;
5756 	*qrss_action_pos = -1;
5757 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5758 		if (actions->type == action) {
5759 			flag = 1;
5760 			*match_action_pos = actions_n;
5761 		}
5762 		switch (actions->type) {
5763 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5764 		case RTE_FLOW_ACTION_TYPE_RSS:
5765 			*qrss_action_pos = actions_n;
5766 			break;
5767 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
5768 			sample = actions->conf;
5769 			ratio = sample->ratio;
5770 			sub_type = ((const struct rte_flow_action *)
5771 					(sample->actions))->type;
5772 			if (ratio == 1 && attr->transfer)
5773 				fdb_mirror = 1;
5774 			break;
5775 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5776 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5777 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5778 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5779 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5780 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5781 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5782 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5783 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5784 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
5785 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5786 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5787 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5788 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5789 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5790 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5791 		case RTE_FLOW_ACTION_TYPE_FLAG:
5792 		case RTE_FLOW_ACTION_TYPE_MARK:
5793 		case RTE_FLOW_ACTION_TYPE_SET_META:
5794 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
5795 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5796 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5797 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5798 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5799 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5800 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5801 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
5802 		case RTE_FLOW_ACTION_TYPE_METER:
5803 			if (fdb_mirror)
5804 				*modify_after_mirror = 1;
5805 			break;
5806 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5807 			decap = actions->conf;
5808 			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5809 				;
5810 			actions_n++;
5811 			if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5812 				const struct rte_flow_action_raw_encap *encap =
5813 								actions->conf;
5814 				if (decap->size <=
5815 					MLX5_ENCAPSULATION_DECISION_SIZE &&
5816 				    encap->size >
5817 					MLX5_ENCAPSULATION_DECISION_SIZE)
5818 					/* L3 encap. */
5819 					break;
5820 			}
5821 			if (fdb_mirror)
5822 				*modify_after_mirror = 1;
5823 			break;
5824 		default:
5825 			break;
5826 		}
5827 		actions_n++;
5828 	}
5829 	if (flag && fdb_mirror && !*modify_after_mirror) {
5830 		/* FDB mirroring is implemented with the destination array
5831 		 * instead of the FLOW_SAMPLER object.
5832 		 */
5833 		if (sub_type != RTE_FLOW_ACTION_TYPE_END)
5834 			flag = 0;
5835 	}
5836 	/* Count RTE_FLOW_ACTION_TYPE_END. */
5837 	return flag ? actions_n + 1 : 0;
5838 }
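
/*
 * Example (a sketch): for an ingress (non-transfer) rule with actions
 * [SAMPLE, QUEUE, END] and action == RTE_FLOW_ACTION_TYPE_SAMPLE, the
 * routine returns 3 (two actions plus the END one) and sets
 * *match_action_pos = 0 and *qrss_action_pos = 1.
 */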
5839 
5840 #define SAMPLE_SUFFIX_ITEM 3
5841 
5842 /**
5843  * Split the sample flow.
5844  *
5845  * As the sample flow is split into two subflows, one keeps the
5846  * sample action while the other actions move to a new suffix flow.
5847  *
5848  * Also add a unique tag id with a tag action in the sample flow;
5849  * the same tag id will be matched in the suffix flow.
5850  *
5851  * @param dev
5852  *   Pointer to Ethernet device.
5853  * @param[in] add_tag
5854  *   Add extra tag action flag.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
5855  * @param[out] sfx_items
5856  *   Suffix flow match items (list terminated by the END pattern item).
5857  * @param[in] actions
5858  *   Associated actions (list terminated by the END action).
5859  * @param[out] actions_sfx
5860  *   Suffix flow actions.
5861  * @param[out] actions_pre
5862  *   Prefix flow actions.
5863  * @param[in] actions_n
5864  *  The total number of actions.
5865  * @param[in] sample_action_pos
5866  *   The sample action position.
5867  * @param[in] qrss_action_pos
5868  *   The Queue/RSS action position.
5869  * @param[in] jump_table
5870  *   Target group for the extra jump action; 0 if no jump action is needed.
5871  * @param[out] error
5872  *   Perform verbose error reporting if not NULL.
5873  *
5874  * @return
5875  *   0 or a unique flow_id on success, a negative errno value
5876  *   otherwise and rte_errno is set.
5877  */
5878 static int
5879 flow_sample_split_prep(struct rte_eth_dev *dev,
5880 		       int add_tag,
5881 		       const struct rte_flow_item items[],
5882 		       struct rte_flow_item sfx_items[],
5883 		       const struct rte_flow_action actions[],
5884 		       struct rte_flow_action actions_sfx[],
5885 		       struct rte_flow_action actions_pre[],
5886 		       int actions_n,
5887 		       int sample_action_pos,
5888 		       int qrss_action_pos,
5889 		       int jump_table,
5890 		       struct rte_flow_error *error)
5891 {
5892 	struct mlx5_priv *priv = dev->data->dev_private;
5893 	struct mlx5_rte_flow_action_set_tag *set_tag;
5894 	struct mlx5_rte_flow_item_tag *tag_spec;
5895 	struct mlx5_rte_flow_item_tag *tag_mask;
5896 	struct rte_flow_action_jump *jump_action;
5897 	uint32_t tag_id = 0;
5898 	int append_index = 0;
5899 	int set_tag_idx = -1;
5900 	int index;
5901 	int ret;
5902 
5903 	if (sample_action_pos < 0)
5904 		return rte_flow_error_set(error, EINVAL,
5905 					  RTE_FLOW_ERROR_TYPE_ACTION,
5906 					  NULL, "invalid position of sample "
5907 					  "action in list");
5908 	/* Prepare the actions for prefix and suffix flow. */
5909 	if (add_tag) {
5910 		/* Update the newly added tag action index to precede
5911 		 * the PUSH_VLAN or ENCAP action.
5912 		 */
5913 		const struct rte_flow_action_raw_encap *raw_encap;
5914 		const struct rte_flow_action *action = actions;
5915 		int encap_idx;
5916 		int action_idx = 0;
5917 		int raw_decap_idx = -1;
5918 		int push_vlan_idx = -1;
5919 		for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
5920 			switch (action->type) {
5921 			case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5922 				raw_decap_idx = action_idx;
5923 				break;
5924 			case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5925 				raw_encap = action->conf;
5926 				if (raw_encap->size >
5927 					MLX5_ENCAPSULATION_DECISION_SIZE) {
5928 					encap_idx = raw_decap_idx != -1 ?
5929 						    raw_decap_idx : action_idx;
5930 					if (encap_idx < sample_action_pos &&
5931 					    push_vlan_idx == -1)
5932 						set_tag_idx = encap_idx;
5933 				}
5934 				break;
5935 			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5936 			case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5937 				encap_idx = action_idx;
5938 				if (encap_idx < sample_action_pos &&
5939 				    push_vlan_idx == -1)
5940 					set_tag_idx = encap_idx;
5941 				break;
5942 			case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5943 			case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5944 				push_vlan_idx = action_idx;
5945 				if (push_vlan_idx < sample_action_pos)
5946 					set_tag_idx = action_idx;
5947 				break;
5948 			default:
5949 				break;
5950 			}
5951 			action_idx++;
5952 		}
5953 	}
5954 	/* Prepare the actions for prefix and suffix flow. */
5955 	if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
5956 		index = qrss_action_pos;
5957 		/* Put the actions preceding the Queue/RSS action into the prefix flow. */
5958 		if (index != 0)
5959 			memcpy(actions_pre, actions,
5960 			       sizeof(struct rte_flow_action) * index);
5961 		/* Put the other actions preceding the sample action into the prefix flow. */
5962 		if (sample_action_pos > index + 1)
5963 			memcpy(actions_pre + index, actions + index + 1,
5964 			       sizeof(struct rte_flow_action) *
5965 			       (sample_action_pos - index - 1));
5966 		index = sample_action_pos - 1;
5967 		/* Put Queue/RSS action into Suffix flow. */
5968 		memcpy(actions_sfx, actions + qrss_action_pos,
5969 		       sizeof(struct rte_flow_action));
5970 		actions_sfx++;
5971 	} else if (add_tag && set_tag_idx >= 0) {
5972 		if (set_tag_idx > 0)
5973 			memcpy(actions_pre, actions,
5974 			       sizeof(struct rte_flow_action) * set_tag_idx);
5975 		memcpy(actions_pre + set_tag_idx + 1, actions + set_tag_idx,
5976 		       sizeof(struct rte_flow_action) *
5977 		       (sample_action_pos - set_tag_idx));
5978 		index = sample_action_pos;
5979 	} else {
5980 		index = sample_action_pos;
5981 		if (index != 0)
5982 			memcpy(actions_pre, actions,
5983 			       sizeof(struct rte_flow_action) * index);
5984 	}
5985 	/* For CX5, add an extra tag action for NIC-RX and E-Switch ingress.
5986 	 * For CX6DX and above, metadata registers Cx preserve their value;
5987 	 * add an extra tag action for NIC-RX and the E-Switch domain.
5988 	 */
5989 	if (add_tag) {
5990 		/* Prepare the prefix tag action. */
5991 		append_index++;
5992 		set_tag = (void *)(actions_pre + actions_n + append_index);
5993 		ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
5994 		/* Trusted VF/SF on CX5 does not support meter, so the reserved
5995 		 * metadata regC is REG_NON; fall back to using application tag
5996 		 * index 0.
5997 		 */
5998 		if (unlikely(ret == REG_NON))
5999 			ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
6000 		if (ret < 0)
6001 			return ret;
6002 		mlx5_ipool_malloc(priv->sh->ipool
6003 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
6004 		*set_tag = (struct mlx5_rte_flow_action_set_tag) {
6005 			.id = ret,
6006 			.data = tag_id,
6007 		};
6008 		/* Prepare the suffix subflow items. */
6009 		for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6010 			if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
6011 				memcpy(sfx_items, items, sizeof(*sfx_items));
6012 				sfx_items++;
6013 			}
6014 		}
6015 		tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
6016 		tag_spec->data = tag_id;
6017 		tag_spec->id = set_tag->id;
6018 		tag_mask = tag_spec + 1;
6019 		tag_mask->data = UINT32_MAX;
6020 		sfx_items[0] = (struct rte_flow_item){
6021 			.type = (enum rte_flow_item_type)
6022 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
6023 			.spec = tag_spec,
6024 			.last = NULL,
6025 			.mask = tag_mask,
6026 		};
6027 		sfx_items[1] = (struct rte_flow_item){
6028 			.type = (enum rte_flow_item_type)
6029 				RTE_FLOW_ITEM_TYPE_END,
6030 		};
6031 		/* Prepare the tag action in prefix subflow. */
6032 		set_tag_idx = (set_tag_idx == -1) ? index : set_tag_idx;
6033 		actions_pre[set_tag_idx] =
6034 			(struct rte_flow_action){
6035 			.type = (enum rte_flow_action_type)
6036 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
6037 			.conf = set_tag,
6038 		};
6039 		/* Update the next sample position since one tag action was added. */
6040 		index += 1;
6041 	}
6042 	/* Copy the sample action into prefix flow. */
6043 	memcpy(actions_pre + index, actions + sample_action_pos,
6044 	       sizeof(struct rte_flow_action));
6045 	index += 1;
6046 	/* For a modify action after the sample action in E-Switch mirroring,
6047 	 * add an extra jump action in the prefix subflow to jump into the next
6048 	 * table, then do the modify action in the new table.
6049 	 */
6050 	if (jump_table) {
6051 		/* Prepare the prefix jump action. */
6052 		append_index++;
6053 		jump_action = (void *)(actions_pre + actions_n + append_index);
6054 		jump_action->group = jump_table;
6055 		actions_pre[index++] =
6056 			(struct rte_flow_action){
6057 			.type = (enum rte_flow_action_type)
6058 				RTE_FLOW_ACTION_TYPE_JUMP,
6059 			.conf = jump_action,
6060 		};
6061 	}
6062 	actions_pre[index] = (struct rte_flow_action){
6063 		.type = (enum rte_flow_action_type)
6064 			RTE_FLOW_ACTION_TYPE_END,
6065 	};
6066 	/* Put the actions after sample into Suffix flow. */
6067 	memcpy(actions_sfx, actions + sample_action_pos + 1,
6068 	       sizeof(struct rte_flow_action) *
6069 	       (actions_n - sample_action_pos - 1));
6070 	return tag_id;
6071 }
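
/*
 * Illustrative sketch (an assumption): for actions [SAMPLE, QUEUE, END]
 * (actions_n = 3, sample_action_pos = 0, qrss_action_pos = 1) with
 * add_tag set, the split produces
 *	prefix: [TAG(unique tag_id), SAMPLE, END]
 *	suffix: [QUEUE, END]
 * and the suffix pattern matches the unique tag_id, so the suffix flow
 * handles only packets that passed the prefix flow.
 */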
6072 
6073 /**
6074  * The splitting for metadata feature.
6075  *
6076  * - Q/RSS action on NIC Rx should be split in order to pass by
6077  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
6078  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
6079  *
6080  * - All the actions on NIC Tx should have a mreg copy action to
6081  *   copy reg_a from WQE to reg_c[0].
6082  *
6083  * @param dev
6084  *   Pointer to Ethernet device.
6085  * @param[in] flow
6086  *   Parent flow structure pointer.
6087  * @param[in] attr
6088  *   Flow rule attributes.
6089  * @param[in] items
6090  *   Pattern specification (list terminated by the END pattern item).
6091  * @param[in] actions
6092  *   Associated actions (list terminated by the END action).
6093  * @param[in] flow_split_info
6094  *   Pointer to flow split info structure.
6095  * @param[out] error
6096  *   Perform verbose error reporting if not NULL.
6097  * @return
6098  *   0 on success, negative value otherwise
6099  */
6100 static int
6101 flow_create_split_metadata(struct rte_eth_dev *dev,
6102 			   struct rte_flow *flow,
6103 			   const struct rte_flow_attr *attr,
6104 			   const struct rte_flow_item items[],
6105 			   const struct rte_flow_action actions[],
6106 			   struct mlx5_flow_split_info *flow_split_info,
6107 			   struct rte_flow_error *error)
6108 {
6109 	struct mlx5_priv *priv = dev->data->dev_private;
6110 	struct mlx5_sh_config *config = &priv->sh->config;
6111 	const struct rte_flow_action *qrss = NULL;
6112 	struct rte_flow_action *ext_actions = NULL;
6113 	struct mlx5_flow *dev_flow = NULL;
6114 	uint32_t qrss_id = 0;
6115 	int mtr_sfx = 0;
6116 	size_t act_size;
6117 	int actions_n;
6118 	int encap_idx;
6119 	int ret;
6120 
6121 	/* Check whether extensive metadata feature is engaged. */
6122 	if (!config->dv_flow_en ||
6123 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
6124 	    !mlx5_flow_ext_mreg_supported(dev))
6125 		return flow_create_split_inner(dev, flow, NULL, attr, items,
6126 					       actions, flow_split_info, error);
6127 	actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
6128 							   &encap_idx);
6129 	if (qrss) {
6130 		/* Exclude hairpin flows from splitting. */
6131 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
6132 			const struct rte_flow_action_queue *queue;
6133 
6134 			queue = qrss->conf;
6135 			if (mlx5_rxq_is_hairpin(dev, queue->index))
6136 				qrss = NULL;
6137 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
6138 			const struct rte_flow_action_rss *rss;
6139 
6140 			rss = qrss->conf;
6141 			if (mlx5_rxq_is_hairpin(dev, rss->queue[0]))
6142 				qrss = NULL;
6143 		}
6144 	}
6145 	if (qrss) {
6146 		/* Check if it is in meter suffix table. */
6147 		mtr_sfx = attr->group == (attr->transfer ?
6148 			  (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
6149 			  MLX5_FLOW_TABLE_LEVEL_METER);
6150 		/*
6151 		 * Q/RSS action on NIC Rx should be split in order to pass by
6152 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
6153 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
6154 		 */
6155 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
6156 			   sizeof(struct rte_flow_action_set_tag) +
6157 			   sizeof(struct rte_flow_action_jump);
6158 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
6159 					  SOCKET_ID_ANY);
6160 		if (!ext_actions)
6161 			return rte_flow_error_set(error, ENOMEM,
6162 						  RTE_FLOW_ERROR_TYPE_ACTION,
6163 						  NULL, "no memory to split "
6164 						  "metadata flow");
6165 		/*
6166 		 * Create the new action list with the Q/RSS action removed,
6167 		 * and a set tag action plus a jump to the register copy table
6168 		 * (RX_CP_TBL) appended. The unique tag ID must be preallocated
6169 		 * here in advance, because it is needed for the set tag action.
6170 		 */
6171 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
6172 						    qrss, actions_n,
6173 						    mtr_sfx, error);
6174 		if (!mtr_sfx && !qrss_id) {
6175 			ret = -rte_errno;
6176 			goto exit;
6177 		}
6178 	} else if (attr->egress && !attr->transfer) {
6179 		/*
6180 		 * All the actions on NIC Tx should have a metadata register
6181 		 * copy action to copy reg_a from WQE to reg_c[meta]
6182 		 */
6183 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
6184 			   sizeof(struct mlx5_flow_action_copy_mreg);
6185 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
6186 					  SOCKET_ID_ANY);
6187 		if (!ext_actions)
6188 			return rte_flow_error_set(error, ENOMEM,
6189 						  RTE_FLOW_ERROR_TYPE_ACTION,
6190 						  NULL, "no memory to split "
6191 						  "metadata flow");
6192 		/* Create the action list appended with copy register. */
6193 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
6194 					     actions_n, error, encap_idx);
6195 		if (ret < 0)
6196 			goto exit;
6197 	}
6198 	/* Add the unmodified original or prefix subflow. */
6199 	ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
6200 				      items, ext_actions ? ext_actions :
6201 				      actions, flow_split_info, error);
6202 	if (ret < 0)
6203 		goto exit;
6204 	MLX5_ASSERT(dev_flow);
6205 	if (qrss) {
6206 		const struct rte_flow_attr q_attr = {
6207 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
6208 			.ingress = 1,
6209 		};
6210 		/* Internal PMD action to set register. */
6211 		struct mlx5_rte_flow_item_tag q_tag_spec = {
6212 			.data = qrss_id,
6213 			.id = REG_NON,
6214 		};
6215 		struct rte_flow_item q_items[] = {
6216 			{
6217 				.type = (enum rte_flow_item_type)
6218 					MLX5_RTE_FLOW_ITEM_TYPE_TAG,
6219 				.spec = &q_tag_spec,
6220 				.last = NULL,
6221 				.mask = NULL,
6222 			},
6223 			{
6224 				.type = RTE_FLOW_ITEM_TYPE_END,
6225 			},
6226 		};
6227 		struct rte_flow_action q_actions[] = {
6228 			{
6229 				.type = qrss->type,
6230 				.conf = qrss->conf,
6231 			},
6232 			{
6233 				.type = RTE_FLOW_ACTION_TYPE_END,
6234 			},
6235 		};
6236 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
6237 
6238 		/*
6239 		 * Configure the tag item only if there is no meter subflow.
6240 		 * Since tag is already marked in the meter suffix subflow
6241 		 * we can just use the meter suffix items as is.
6242 		 */
6243 		if (qrss_id) {
6244 			/* Not meter subflow. */
6245 			MLX5_ASSERT(!mtr_sfx);
6246 			/*
6247 			 * Put the unique id in the prefix flow because it is
6248 			 * destroyed after the suffix flow, and the id is freed
6249 			 * once there are no actual flows with this id; identifier
6250 			 * reallocation then becomes possible (for example, for
6251 			 * other flows in other threads).
6252 			 */
6253 			dev_flow->handle->split_flow_id = qrss_id;
6254 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
6255 						   error);
6256 			if (ret < 0)
6257 				goto exit;
6258 			q_tag_spec.id = ret;
6259 		}
6260 		dev_flow = NULL;
6261 		/* Add suffix subflow to execute Q/RSS. */
6262 		flow_split_info->prefix_layers = layers;
6263 		flow_split_info->prefix_mark = 0;
6264 		flow_split_info->table_id = 0;
6265 		ret = flow_create_split_inner(dev, flow, &dev_flow,
6266 					      &q_attr, mtr_sfx ? items :
6267 					      q_items, q_actions,
6268 					      flow_split_info, error);
6269 		if (ret < 0)
6270 			goto exit;
6271 		/* Clear qrss_id: it must be freed only on failure. */
6272 		qrss_id = 0;
6273 		MLX5_ASSERT(dev_flow);
6274 	}
6275 
6276 exit:
6277 	/*
6278 	 * We do not destroy the partially created sub_flows in case of error.
6279 	 * They are included in the parent flow list and will be destroyed
6280 	 * by flow_drv_destroy.
6281 	 */
6282 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
6283 			qrss_id);
6284 	mlx5_free(ext_actions);
6285 	return ret;
6286 }
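
/*
 * Summary sketch (an assumption): with extensive metadata enabled, an
 * ingress rule [pattern] -> [RSS] is realized above as two subflows:
 *	1. [pattern] -> [TAG(qrss_id), JUMP to RX_CP_TBL]
 *	2. [TAG(qrss_id)] in group MLX5_FLOW_MREG_ACT_TABLE_GROUP -> [RSS]
 * RX_CP_TBL copies the metadata registers in between and jumps to
 * RX_ACT_TBL, where the second subflow resides.
 */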
6287 
6288 /**
6289  * Create meter internal drop flow with the original pattern.
6290  *
6291  * @param dev
6292  *   Pointer to Ethernet device.
6293  * @param[in] flow
6294  *   Parent flow structure pointer.
6295  * @param[in] attr
6296  *   Flow rule attributes.
6297  * @param[in] items
6298  *   Pattern specification (list terminated by the END pattern item).
6299  * @param[in] flow_split_info
6300  *   Pointer to flow split info structure.
6301  * @param[in] fm
6302  *   Pointer to flow meter structure.
6303  * @param[out] error
6304  *   Perform verbose error reporting if not NULL.
6305  * @return
6306  *   0 on success, negative value otherwise
6307  */
6308 static int
6309 flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
6310 			struct rte_flow *flow,
6311 			const struct rte_flow_attr *attr,
6312 			const struct rte_flow_item items[],
6313 			struct mlx5_flow_split_info *flow_split_info,
6314 			struct mlx5_flow_meter_info *fm,
6315 			struct rte_flow_error *error)
6316 {
6317 	struct mlx5_flow *dev_flow = NULL;
6318 	struct rte_flow_attr drop_attr = *attr;
6319 	struct rte_flow_action drop_actions[3];
6320 	struct mlx5_flow_split_info drop_split_info = *flow_split_info;
6321 
6322 	MLX5_ASSERT(fm->drop_cnt);
6323 	drop_actions[0].type =
6324 		(enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
6325 	drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt;
6326 	drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP;
6327 	drop_actions[1].conf = NULL;
6328 	drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END;
6329 	drop_actions[2].conf = NULL;
6330 	drop_split_info.external = false;
6331 	drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT;
6332 	drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP;
6333 	drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER;
6334 	return flow_create_split_inner(dev, flow, &dev_flow,
6335 				&drop_attr, items, drop_actions,
6336 				&drop_split_info, error);
6337 }
6338 
6339 /**
6340  * The splitting for meter feature.
6341  *
6342  * - The meter flow will be split into two flows: the prefix and the
6343  *   suffix flow. The packets are relevant only if they pass the prefix
6344  *   meter action.
6345  *
6346  * - Reg_C_5 is used to match the packet between the prefix and
6347  *   suffix flow.
6348  *
6349  * @param dev
6350  *   Pointer to Ethernet device.
6351  * @param[in] flow
6352  *   Parent flow structure pointer.
6353  * @param[in] attr
6354  *   Flow rule attributes.
6355  * @param[in] items
6356  *   Pattern specification (list terminated by the END pattern item).
6357  * @param[in] actions
6358  *   Associated actions (list terminated by the END action).
6359  * @param[in] flow_split_info
6360  *   Pointer to flow split info structure.
6361  * @param[out] error
6362  *   Perform verbose error reporting if not NULL.
6363  * @return
6364  *   0 on success, negative value otherwise
6365  */
6366 static int
6367 flow_create_split_meter(struct rte_eth_dev *dev,
6368 			struct rte_flow *flow,
6369 			const struct rte_flow_attr *attr,
6370 			const struct rte_flow_item items[],
6371 			const struct rte_flow_action actions[],
6372 			struct mlx5_flow_split_info *flow_split_info,
6373 			struct rte_flow_error *error)
6374 {
6375 	struct mlx5_priv *priv = dev->data->dev_private;
6376 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6377 	struct rte_flow_action *sfx_actions = NULL;
6378 	struct rte_flow_action *pre_actions = NULL;
6379 	struct rte_flow_item *sfx_items = NULL;
6380 	struct mlx5_flow *dev_flow = NULL;
6381 	struct rte_flow_attr sfx_attr = *attr;
6382 	struct mlx5_flow_meter_info *fm = NULL;
6383 	uint8_t skip_scale_restore;
6384 	bool has_mtr = false;
6385 	bool has_modify = false;
6386 	bool set_mtr_reg = true;
6387 	bool is_mtr_hierarchy = false;
6388 	uint32_t meter_id = 0;
6389 	uint32_t mtr_idx = 0;
6390 	uint32_t mtr_flow_id = 0;
6391 	size_t act_size;
6392 	size_t item_size;
6393 	int actions_n = 0;
6394 	int ret = 0;
6395 
6396 	if (priv->mtr_en)
6397 		actions_n = flow_check_meter_action(dev, actions, &has_mtr,
6398 						    &has_modify, &meter_id);
6399 	if (has_mtr) {
6400 		if (flow->meter) {
6401 			fm = flow_dv_meter_find_by_idx(priv, flow->meter);
6402 			if (!fm)
6403 				return rte_flow_error_set(error, EINVAL,
6404 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6405 						NULL, "Meter not found.");
6406 		} else {
6407 			fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx);
6408 			if (!fm)
6409 				return rte_flow_error_set(error, EINVAL,
6410 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6411 						NULL, "Meter not found.");
6412 			ret = mlx5_flow_meter_attach(priv, fm,
6413 						     &sfx_attr, error);
6414 			if (ret)
6415 				return -rte_errno;
6416 			flow->meter = mtr_idx;
6417 		}
6418 		MLX5_ASSERT(wks);
6419 		wks->fm = fm;
6420 		if (!fm->def_policy) {
6421 			wks->policy = mlx5_flow_meter_policy_find(dev,
6422 								  fm->policy_id,
6423 								  NULL);
6424 			MLX5_ASSERT(wks->policy);
6425 			if (wks->policy->mark)
6426 				wks->mark = 1;
6427 			if (wks->policy->is_hierarchy) {
6428 				wks->final_policy =
6429 				mlx5_flow_meter_hierarchy_get_final_policy(dev,
6430 								wks->policy);
6431 				if (!wks->final_policy)
6432 					return rte_flow_error_set(error,
6433 					EINVAL,
6434 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6435 				"Failed to find terminal policy of hierarchy.");
6436 				is_mtr_hierarchy = true;
6437 			}
6438 		}
		/*
		 * If it is not a default-policy meter, and either
		 * 1. it is not a meter hierarchy and there is no action in the
		 *    flow that changes the packet (modify/encap/decap etc.), or
		 * 2. no drop counter is needed for this meter,
		 * then there is no need to use REG_C to save the meter ID.
		 */
6446 		if (!fm->def_policy && ((!has_modify && !is_mtr_hierarchy) || !fm->drop_cnt))
6447 			set_mtr_reg = false;
6448 		/* Prefix actions: meter, decap, encap, tag, jump, end, cnt. */
6449 #define METER_PREFIX_ACTION 7
6450 		act_size = (sizeof(struct rte_flow_action) *
6451 			    (actions_n + METER_PREFIX_ACTION)) +
6452 			   sizeof(struct mlx5_rte_flow_action_set_tag);
6453 		/* Suffix items: tag, vlan, port id, end. */
6454 #define METER_SUFFIX_ITEM 4
6455 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
6456 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
6457 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
6458 					  0, SOCKET_ID_ANY);
6459 		if (!sfx_actions)
6460 			return rte_flow_error_set(error, ENOMEM,
6461 						  RTE_FLOW_ERROR_TYPE_ACTION,
6462 						  NULL, "no memory to split "
6463 						  "meter flow");
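		/*
		 * Single allocation layout: the suffix action array comes
		 * first and the suffix item array starts right behind it.
		 */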
6464 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
6465 			     act_size);
		/* There is no suffix flow for a meter with a non-default policy. */
6467 		if (!fm->def_policy)
6468 			pre_actions = sfx_actions + 1;
6469 		else
6470 			pre_actions = sfx_actions + actions_n;
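		/*
		 * Note: with a non-default policy there is no suffix action
		 * list, so a single slot precedes the prefix actions; with
		 * the default policy the suffix list may hold up to all of
		 * the original actions.
		 */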
6471 		ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
6472 					    items, sfx_items, actions,
6473 					    sfx_actions, pre_actions,
6474 					    (set_mtr_reg ? &mtr_flow_id : NULL),
6475 					    error);
6476 		if (ret) {
6477 			ret = -rte_errno;
6478 			goto exit;
6479 		}
6480 		/* Add the prefix subflow. */
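		/*
		 * The prefix flow jumps to a PMD-internal meter table, so
		 * temporarily skip group scaling for the jump target and
		 * restore the flag afterwards.
		 */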
6481 		skip_scale_restore = flow_split_info->skip_scale;
6482 		flow_split_info->skip_scale |=
6483 			1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
6484 		ret = flow_create_split_inner(dev, flow, &dev_flow,
6485 					      attr, items, pre_actions,
6486 					      flow_split_info, error);
6487 		flow_split_info->skip_scale = skip_scale_restore;
6488 		if (ret) {
6489 			if (mtr_flow_id)
6490 				mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
6491 			ret = -rte_errno;
6492 			goto exit;
6493 		}
6494 		if (mtr_flow_id) {
6495 			dev_flow->handle->split_flow_id = mtr_flow_id;
6496 			dev_flow->handle->is_meter_flow_id = 1;
6497 		}
6498 		if (!fm->def_policy) {
6499 			if (!set_mtr_reg && fm->drop_cnt)
6500 				ret =
6501 			flow_meter_create_drop_flow_with_org_pattern(dev, flow,
6502 							&sfx_attr, items,
6503 							flow_split_info,
6504 							fm, error);
6505 			goto exit;
6506 		}
		/* Set the suffix group attribute. */
6508 		sfx_attr.group = sfx_attr.transfer ?
6509 				(MLX5_FLOW_TABLE_LEVEL_METER - 1) :
6510 				 MLX5_FLOW_TABLE_LEVEL_METER;
6511 		flow_split_info->prefix_layers =
6512 				flow_get_prefix_layer_flags(dev_flow);
6513 		flow_split_info->prefix_mark |= wks->mark;
6514 		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
6515 	}
	/* Add the suffix subflow (or the original flow if no meter split). */
6517 	ret = flow_create_split_metadata(dev, flow,
6518 					 &sfx_attr, sfx_items ?
6519 					 sfx_items : items,
6520 					 sfx_actions ? sfx_actions : actions,
6521 					 flow_split_info, error);
6522 exit:
6523 	if (sfx_actions)
6524 		mlx5_free(sfx_actions);
6525 	return ret;
6526 }
6527 
6528 /**
 * The splitting for the sample feature.
 *
 * Once a sample action is detected in the action list, the flow actions
 * are split into a prefix subflow and a suffix subflow.
 *
 * The original items remain in the prefix subflow. All actions preceding
 * the sample action, and the sample action itself, are copied to the
 * prefix subflow; the actions following the sample action are copied to
 * the suffix subflow. The queue action is always located in the suffix
 * subflow.
 *
 * To make packets from the prefix subflow match the suffix subflow, an
 * extra tag action is added to the prefix subflow, and the suffix subflow
 * uses a tag item with the unique flow ID.
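 *
 * For example (illustrative only), actions [mark, sample, queue] become
 * a prefix subflow [mark, sample, set_tag(id)] and a suffix subflow
 * matching tag(id) with actions [queue].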
6542  *
6543  * @param dev
6544  *   Pointer to Ethernet device.
6545  * @param[in] flow
6546  *   Parent flow structure pointer.
6547  * @param[in] attr
6548  *   Flow rule attributes.
6549  * @param[in] items
6550  *   Pattern specification (list terminated by the END pattern item).
6551  * @param[in] actions
6552  *   Associated actions (list terminated by the END action).
6553  * @param[in] flow_split_info
6554  *   Pointer to flow split info structure.
6555  * @param[out] error
6556  *   Perform verbose error reporting if not NULL.
6557  * @return
6558  *   0 on success, negative value otherwise
6559  */
6560 static int
6561 flow_create_split_sample(struct rte_eth_dev *dev,
6562 			 struct rte_flow *flow,
6563 			 const struct rte_flow_attr *attr,
6564 			 const struct rte_flow_item items[],
6565 			 const struct rte_flow_action actions[],
6566 			 struct mlx5_flow_split_info *flow_split_info,
6567 			 struct rte_flow_error *error)
6568 {
6569 	struct mlx5_priv *priv = dev->data->dev_private;
6570 	struct rte_flow_action *sfx_actions = NULL;
6571 	struct rte_flow_action *pre_actions = NULL;
6572 	struct rte_flow_item *sfx_items = NULL;
6573 	struct mlx5_flow *dev_flow = NULL;
6574 	struct rte_flow_attr sfx_attr = *attr;
6575 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
6576 	struct mlx5_flow_dv_sample_resource *sample_res;
6577 	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
6578 	struct mlx5_flow_tbl_resource *sfx_tbl;
6579 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6580 #endif
6581 	size_t act_size;
6582 	size_t item_size;
6583 	uint32_t fdb_tx = 0;
6584 	int32_t tag_id = 0;
6585 	int actions_n = 0;
6586 	int sample_action_pos;
6587 	int qrss_action_pos;
6588 	int add_tag = 0;
6589 	int modify_after_mirror = 0;
6590 	uint16_t jump_table = 0;
6591 	const uint32_t next_ft_step = 1;
6592 	int ret = 0;
6593 
6594 	if (priv->sampler_en)
6595 		actions_n = flow_check_match_action(actions, attr,
6596 					RTE_FLOW_ACTION_TYPE_SAMPLE,
6597 					&sample_action_pos, &qrss_action_pos,
6598 					&modify_after_mirror);
6599 	if (actions_n) {
		/* The prefix actions must include sample, tag and end. */
6601 		act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
6602 			   + sizeof(struct mlx5_rte_flow_action_set_tag);
6603 		item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
6604 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
6605 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
6606 					  item_size), 0, SOCKET_ID_ANY);
6607 		if (!sfx_actions)
6608 			return rte_flow_error_set(error, ENOMEM,
6609 						  RTE_FLOW_ERROR_TYPE_ACTION,
6610 						  NULL, "no memory to split "
6611 						  "sample flow");
6612 		/* The representor_id is UINT16_MAX for uplink. */
6613 		fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX);
6614 		/*
6615 		 * When reg_c_preserve is set, metadata registers Cx preserve
6616 		 * their value even through packet duplication.
6617 		 */
6618 		add_tag = (!fdb_tx ||
6619 			   priv->sh->cdev->config.hca_attr.reg_c_preserve);
6620 		if (add_tag)
6621 			sfx_items = (struct rte_flow_item *)((char *)sfx_actions
6622 					+ act_size);
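		/*
		 * If the packet is modified after mirroring, the suffix flow
		 * must reside in the next (scaled) flow table, which the
		 * prefix flow is expected to jump to.
		 */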
6623 		if (modify_after_mirror)
6624 			jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
6625 				     next_ft_step;
6626 		pre_actions = sfx_actions + actions_n;
6627 		tag_id = flow_sample_split_prep(dev, add_tag, items, sfx_items,
6628 						actions, sfx_actions,
6629 						pre_actions, actions_n,
6630 						sample_action_pos,
6631 						qrss_action_pos, jump_table,
6632 						error);
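		/*
		 * A negative tag_id means failure; a zero tag_id while
		 * add_tag is set means the unique flow ID allocation failed.
		 */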
6633 		if (tag_id < 0 || (add_tag && !tag_id)) {
6634 			ret = -rte_errno;
6635 			goto exit;
6636 		}
6637 		if (modify_after_mirror)
6638 			flow_split_info->skip_scale =
6639 					1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
6640 		/* Add the prefix subflow. */
6641 		ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
6642 					      items, pre_actions,
6643 					      flow_split_info, error);
6644 		if (ret) {
6645 			ret = -rte_errno;
6646 			goto exit;
6647 		}
6648 		dev_flow->handle->split_flow_id = tag_id;
6649 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
6650 		if (!modify_after_mirror) {
6651 			/* Set the sfx group attr. */
6652 			sample_res = (struct mlx5_flow_dv_sample_resource *)
6653 						dev_flow->dv.sample_res;
6654 			sfx_tbl = (struct mlx5_flow_tbl_resource *)
6655 						sample_res->normal_path_tbl;
6656 			sfx_tbl_data = container_of(sfx_tbl,
6657 						struct mlx5_flow_tbl_data_entry,
6658 						tbl);
6659 			sfx_attr.group = sfx_attr.transfer ?
6660 			(sfx_tbl_data->level - 1) : sfx_tbl_data->level;
6661 		} else {
6662 			MLX5_ASSERT(attr->transfer);
6663 			sfx_attr.group = jump_table;
6664 		}
6665 		flow_split_info->prefix_layers =
6666 				flow_get_prefix_layer_flags(dev_flow);
6667 		MLX5_ASSERT(wks);
6668 		flow_split_info->prefix_mark |= wks->mark;
		/*
		 * The suffix group level has already been scaled with the
		 * factor; set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to
		 * avoid scaling again in translation.
		 */
6673 		flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
6674 #endif
6675 	}
6676 	/* Add the suffix subflow. */
6677 	ret = flow_create_split_meter(dev, flow, &sfx_attr,
6678 				      sfx_items ? sfx_items : items,
6679 				      sfx_actions ? sfx_actions : actions,
6680 				      flow_split_info, error);
6681 exit:
6682 	if (sfx_actions)
6683 		mlx5_free(sfx_actions);
6684 	return ret;
6685 }
6686 
6687 /**
6688  * Split the flow to subflow set. The splitters might be linked
6689  * in the chain, like this:
6690  * flow_create_split_outer() calls:
6691  *   flow_create_split_meter() calls:
6692  *     flow_create_split_metadata(meter_subflow_0) calls:
6693  *       flow_create_split_inner(metadata_subflow_0)
6694  *       flow_create_split_inner(metadata_subflow_1)
6695  *       flow_create_split_inner(metadata_subflow_2)
6696  *     flow_create_split_metadata(meter_subflow_1) calls:
6697  *       flow_create_split_inner(metadata_subflow_0)
6698  *       flow_create_split_inner(metadata_subflow_1)
6699  *       flow_create_split_inner(metadata_subflow_2)
6700  *
 * This provides a flexible way to add new levels of flow splitting.
 * All successfully created subflows are included in the parent flow's
 * dev_flow list.
6704  *
6705  * @param dev
6706  *   Pointer to Ethernet device.
6707  * @param[in] flow
6708  *   Parent flow structure pointer.
6709  * @param[in] attr
6710  *   Flow rule attributes.
6711  * @param[in] items
6712  *   Pattern specification (list terminated by the END pattern item).
6713  * @param[in] actions
6714  *   Associated actions (list terminated by the END action).
6715  * @param[in] flow_split_info
6716  *   Pointer to flow split info structure.
6717  * @param[out] error
6718  *   Perform verbose error reporting if not NULL.
6719  * @return
6720  *   0 on success, negative value otherwise
6721  */
6722 static int
6723 flow_create_split_outer(struct rte_eth_dev *dev,
6724 			struct rte_flow *flow,
6725 			const struct rte_flow_attr *attr,
6726 			const struct rte_flow_item items[],
6727 			const struct rte_flow_action actions[],
6728 			struct mlx5_flow_split_info *flow_split_info,
6729 			struct rte_flow_error *error)
6730 {
6731 	int ret;
6732 
6733 	ret = flow_create_split_sample(dev, flow, attr, items,
6734 				       actions, flow_split_info, error);
6735 	MLX5_ASSERT(ret <= 0);
6736 	return ret;
6737 }
6738 
6739 static inline struct mlx5_flow_tunnel *
6740 flow_tunnel_from_rule(const struct mlx5_flow *flow)
6741 {
6742 	struct mlx5_flow_tunnel *tunnel;
6743 
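	/* Cast away the const qualifier the rule stores the tunnel with. */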
6744 #pragma GCC diagnostic push
6745 #pragma GCC diagnostic ignored "-Wcast-qual"
6746 	tunnel = (typeof(tunnel))flow->tunnel;
6747 #pragma GCC diagnostic pop
6748 
6749 	return tunnel;
6750 }
6751 
6752 /**
6753  * Adjust flow RSS workspace if needed.
6754  *
6755  * @param wks
6756  *   Pointer to thread flow work space.
6757  * @param rss_desc
6758  *   Pointer to RSS descriptor.
6759  * @param[in] nrssq_num
6760  *   New RSS queue number.
6761  *
6762  * @return
6763  *   0 on success, -1 otherwise and rte_errno is set.
6764  */
6765 static int
6766 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
6767 			  struct mlx5_flow_rss_desc *rss_desc,
6768 			  uint32_t nrssq_num)
6769 {
	uint16_t *queue;

	if (likely(nrssq_num <= wks->rssq_num))
		return 0;
	/* Use a temporary so the old array is not leaked if realloc fails. */
	queue = realloc(rss_desc->queue,
			sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
	if (!queue) {
		rte_errno = ENOMEM;
		return -1;
	}
	rss_desc->queue = queue;
	wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
	return 0;
6780 }
6781 
6782 /**
 * Create a flow and store it in the flow list of the given type.
6784  *
6785  * @param dev
6786  *   Pointer to Ethernet device.
 * @param type
 *   Flow type (MLX5_FLOW_TYPE_*) selecting the flow list that stores
 *   the created flow.
6792  * @param[in] attr
6793  *   Flow rule attributes.
6794  * @param[in] items
6795  *   Pattern specification (list terminated by the END pattern item).
 * @param[in] original_actions
6797  *   Associated actions (list terminated by the END action).
6798  * @param[in] external
 *   True if this flow rule is created by a request external to the PMD.
6800  * @param[out] error
6801  *   Perform verbose error reporting if not NULL.
6802  *
6803  * @return
6804  *   A flow index on success, 0 otherwise and rte_errno is set.
6805  */
6806 static uint32_t
6807 flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
6808 		 const struct rte_flow_attr *attr,
6809 		 const struct rte_flow_item items[],
6810 		 const struct rte_flow_action original_actions[],
6811 		 bool external, struct rte_flow_error *error)
6812 {
6813 	struct mlx5_priv *priv = dev->data->dev_private;
6814 	struct rte_flow *flow = NULL;
6815 	struct mlx5_flow *dev_flow;
6816 	const struct rte_flow_action_rss *rss = NULL;
6817 	struct mlx5_translated_action_handle
6818 		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
6819 	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
6820 	union {
6821 		struct mlx5_flow_expand_rss buf;
6822 		uint8_t buffer[4096];
6823 	} expand_buffer;
6824 	union {
6825 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
6826 		uint8_t buffer[2048];
6827 	} actions_rx;
6828 	union {
6829 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
6830 		uint8_t buffer[2048];
6831 	} actions_hairpin_tx;
6832 	union {
6833 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
6834 		uint8_t buffer[2048];
6835 	} items_tx;
6836 	struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
6837 	struct mlx5_flow_rss_desc *rss_desc;
6838 	const struct rte_flow_action *p_actions_rx;
6839 	uint32_t i;
6840 	uint32_t idx = 0;
6841 	int hairpin_flow;
6842 	struct rte_flow_attr attr_tx = { .priority = 0 };
6843 	const struct rte_flow_action *actions;
6844 	struct rte_flow_action *translated_actions = NULL;
6845 	struct mlx5_flow_tunnel *tunnel;
6846 	struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
6847 	struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
6848 	struct mlx5_flow_split_info flow_split_info = {
6849 		.external = !!external,
6850 		.skip_scale = 0,
6851 		.flow_idx = 0,
6852 		.prefix_mark = 0,
6853 		.prefix_layers = 0,
6854 		.table_id = 0
6855 	};
6856 	int ret;
6857 
6858 	MLX5_ASSERT(wks);
6859 	rss_desc = &wks->rss_desc;
6860 	ret = flow_action_handles_translate(dev, original_actions,
6861 					    indir_actions,
6862 					    &indir_actions_n,
6863 					    &translated_actions, error);
6864 	if (ret < 0) {
6865 		MLX5_ASSERT(translated_actions == NULL);
6866 		return 0;
6867 	}
6868 	actions = translated_actions ? translated_actions : original_actions;
6869 	p_actions_rx = actions;
6870 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
6871 	ret = flow_drv_validate(dev, attr, items, p_actions_rx,
6872 				external, hairpin_flow, error);
6873 	if (ret < 0)
6874 		goto error_before_hairpin_split;
6875 	flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
6876 	if (!flow) {
6877 		rte_errno = ENOMEM;
6878 		goto error_before_hairpin_split;
6879 	}
6880 	if (hairpin_flow > 0) {
6881 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
6882 			rte_errno = EINVAL;
6883 			goto error_before_hairpin_split;
6884 		}
6885 		flow_hairpin_split(dev, actions, actions_rx.actions,
6886 				   actions_hairpin_tx.actions, items_tx.items,
6887 				   idx);
6888 		p_actions_rx = actions_rx.actions;
6889 	}
6890 	flow_split_info.flow_idx = idx;
6891 	flow->drv_type = flow_get_drv_type(dev, attr);
6892 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
6893 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
6894 	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
6895 	/* RSS Action only works on NIC RX domain */
6896 	if (attr->ingress && !attr->transfer)
6897 		rss = flow_get_rss_action(dev, p_actions_rx);
6898 	if (rss) {
6899 		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
6900 			return 0;
6901 		/*
6902 		 * The following information is required by
6903 		 * mlx5_flow_hashfields_adjust() in advance.
6904 		 */
6905 		rss_desc->level = rss->level;
6906 		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
6907 		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
6908 	}
6909 	flow->dev_handles = 0;
6910 	if (rss && rss->types) {
6911 		unsigned int graph_root;
6912 
6913 		graph_root = find_graph_root(rss->level);
6914 		ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
6915 					   items, rss->types,
6916 					   mlx5_support_expansion, graph_root);
6917 		MLX5_ASSERT(ret > 0 &&
6918 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
6919 		if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
6920 			for (i = 0; i < buf->entries; ++i)
6921 				mlx5_dbg__print_pattern(buf->entry[i].pattern);
6922 		}
6923 	} else {
6924 		buf->entries = 1;
6925 		buf->entry[0].pattern = (void *)(uintptr_t)items;
6926 	}
6927 	rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
6928 						      indir_actions_n);
6929 	for (i = 0; i < buf->entries; ++i) {
6930 		/* Initialize flow split data. */
6931 		flow_split_info.prefix_layers = 0;
6932 		flow_split_info.prefix_mark = 0;
6933 		flow_split_info.skip_scale = 0;
6934 		/*
6935 		 * The splitter may create multiple dev_flows,
6936 		 * depending on configuration. In the simplest
6937 		 * case it just creates unmodified original flow.
6938 		 */
6939 		ret = flow_create_split_outer(dev, flow, attr,
6940 					      buf->entry[i].pattern,
6941 					      p_actions_rx, &flow_split_info,
6942 					      error);
6943 		if (ret < 0)
6944 			goto error;
6945 		if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
6946 			ret = flow_tunnel_add_default_miss(dev, flow, attr,
6947 							   p_actions_rx,
6948 							   idx,
6949 							   wks->flows[0].tunnel,
6950 							   &default_miss_ctx,
6951 							   error);
6952 			if (ret < 0) {
6953 				mlx5_free(default_miss_ctx.queue);
6954 				goto error;
6955 			}
6956 		}
6957 	}
6958 	/* Create the tx flow. */
6959 	if (hairpin_flow) {
6960 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
6961 		attr_tx.ingress = 0;
6962 		attr_tx.egress = 1;
6963 		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
6964 					 actions_hairpin_tx.actions,
6965 					 idx, error);
6966 		if (!dev_flow)
6967 			goto error;
6968 		dev_flow->flow = flow;
6969 		dev_flow->external = 0;
6970 		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
6971 			      dev_flow->handle, next);
6972 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
6973 					 items_tx.items,
6974 					 actions_hairpin_tx.actions, error);
6975 		if (ret < 0)
6976 			goto error;
6977 	}
6978 	/*
6979 	 * Update the metadata register copy table. If extensive
6980 	 * metadata feature is enabled and registers are supported
6981 	 * we might create the extra rte_flow for each unique
6982 	 * MARK/FLAG action ID.
6983 	 *
6984 	 * The table is updated for ingress Flows only, because
6985 	 * the egress Flows belong to the different device and
6986 	 * copy table should be updated in peer NIC Rx domain.
6987 	 */
6988 	if (attr->ingress &&
6989 	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
6990 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
6991 		if (ret)
6992 			goto error;
6993 	}
6994 	/*
6995 	 * If the flow is external (from application) OR device is started,
6996 	 * OR mreg discover, then apply immediately.
6997 	 */
6998 	if (external || dev->data->dev_started ||
6999 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
7000 	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
7001 		ret = flow_drv_apply(dev, flow, error);
7002 		if (ret < 0)
7003 			goto error;
7004 	}
7005 	flow->type = type;
7006 	flow_rxq_flags_set(dev, flow);
7007 	rte_free(translated_actions);
7008 	tunnel = flow_tunnel_from_rule(wks->flows);
7009 	if (tunnel) {
7010 		flow->tunnel = 1;
7011 		flow->tunnel_id = tunnel->tunnel_id;
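		/* Account this flow's reference on the tunnel object. */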
7012 		__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
7013 		mlx5_free(default_miss_ctx.queue);
7014 	}
7015 	mlx5_flow_pop_thread_workspace();
7016 	return idx;
7017 error:
7018 	MLX5_ASSERT(flow);
7019 	ret = rte_errno; /* Save rte_errno before cleanup. */
7020 	flow_mreg_del_copy_action(dev, flow);
7021 	flow_drv_destroy(dev, flow);
7022 	if (rss_desc->shared_rss)
7023 		__atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
7024 			mlx5_ipool_get
7025 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
7026 			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
7027 	mlx5_ipool_free(priv->flows[type], idx);
	rte_errno = ret; /* Restore rte_errno. */
7031 	mlx5_flow_pop_thread_workspace();
7032 error_before_hairpin_split:
7033 	rte_free(translated_actions);
7034 	return 0;
7035 }
7036 
7037 /**
7038  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
7039  * incoming packets to table 1.
7040  *
7041  * Other flow rules, requested for group n, will be created in
7042  * e-switch table n+1.
7043  * Jump action to e-switch group n will be created to group n+1.
7044  *
7045  * Used when working in switchdev mode, to utilise advantages of table 1
7046  * and above.
7047  *
7048  * @param dev
7049  *   Pointer to Ethernet device.
7050  *
7051  * @return
7052  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
7053  */
7054 struct rte_flow *
7055 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
7056 {
7057 	const struct rte_flow_attr attr = {
7058 		.group = 0,
7059 		.priority = 0,
7060 		.ingress = 1,
7061 		.egress = 0,
7062 		.transfer = 1,
7063 	};
7064 	const struct rte_flow_item pattern = {
7065 		.type = RTE_FLOW_ITEM_TYPE_END,
7066 	};
7067 	struct rte_flow_action_jump jump = {
7068 		.group = 1,
7069 	};
7070 	const struct rte_flow_action actions[] = {
7071 		{
7072 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
7073 			.conf = &jump,
7074 		},
7075 		{
7076 			.type = RTE_FLOW_ACTION_TYPE_END,
7077 		},
7078 	};
7079 	struct rte_flow_error error;
7080 
7081 	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
7082 						   &attr, &pattern,
7083 						   actions, false, &error);
7084 }
7085 
7086 /**
 * Create a dedicated flow rule on e-switch table 1 that matches the ESW
 * manager and SQ number and directs all packets to the peer vport.
7089  *
7090  * @param dev
7091  *   Pointer to Ethernet device.
7092  * @param txq
7093  *   Txq index.
7094  *
7095  * @return
7096  *   Flow ID on success, 0 otherwise and rte_errno is set.
7097  */
7098 uint32_t
7099 mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
7100 {
7101 	struct rte_flow_attr attr = {
7102 		.group = 0,
7103 		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
7104 		.ingress = 1,
7105 		.egress = 0,
7106 		.transfer = 1,
7107 	};
7108 	struct rte_flow_item_port_id port_spec = {
7109 		.id = MLX5_PORT_ESW_MGR,
7110 	};
7111 	struct mlx5_rte_flow_item_tx_queue txq_spec = {
7112 		.queue = txq,
7113 	};
7114 	struct rte_flow_item pattern[] = {
7115 		{
7116 			.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
7117 			.spec = &port_spec,
7118 		},
7119 		{
7120 			.type = (enum rte_flow_item_type)
7121 				MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
7122 			.spec = &txq_spec,
7123 		},
7124 		{
7125 			.type = RTE_FLOW_ITEM_TYPE_END,
7126 		},
7127 	};
7128 	struct rte_flow_action_jump jump = {
7129 		.group = 1,
7130 	};
7131 	struct rte_flow_action_port_id port = {
7132 		.id = dev->data->port_id,
7133 	};
7134 	struct rte_flow_action actions[] = {
7135 		{
7136 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
7137 			.conf = &jump,
7138 		},
7139 		{
7140 			.type = RTE_FLOW_ACTION_TYPE_END,
7141 		},
7142 	};
7143 	struct rte_flow_error error;
7144 
7145 	/*
7146 	 * Creates group 0, highest priority jump flow.
7147 	 * Matches txq to bypass kernel packets.
7148 	 */
7149 	if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
7150 			     false, &error) == 0)
7151 		return 0;
7152 	/* Create group 1, lowest priority redirect flow for txq. */
7153 	attr.group = 1;
7154 	actions[0].conf = &port;
7155 	actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
7156 	return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
7157 				actions, false, &error);
7158 }
7159 
7160 /**
7161  * Validate a flow supported by the NIC.
7162  *
7163  * @see rte_flow_validate()
7164  * @see rte_flow_ops
7165  */
7166 int
7167 mlx5_flow_validate(struct rte_eth_dev *dev,
7168 		   const struct rte_flow_attr *attr,
7169 		   const struct rte_flow_item items[],
7170 		   const struct rte_flow_action original_actions[],
7171 		   struct rte_flow_error *error)
7172 {
7173 	int hairpin_flow;
7174 	struct mlx5_translated_action_handle
7175 		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
7176 	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
7177 	const struct rte_flow_action *actions;
7178 	struct rte_flow_action *translated_actions = NULL;
7179 	int ret = flow_action_handles_translate(dev, original_actions,
7180 						indir_actions,
7181 						&indir_actions_n,
7182 						&translated_actions, error);
7183 
7184 	if (ret)
7185 		return ret;
7186 	actions = translated_actions ? translated_actions : original_actions;
7187 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
7188 	ret = flow_drv_validate(dev, attr, items, actions,
7189 				true, hairpin_flow, error);
7190 	rte_free(translated_actions);
7191 	return ret;
7192 }
7193 
7194 /**
7195  * Create a flow.
7196  *
7197  * @see rte_flow_create()
7198  * @see rte_flow_ops
7199  */
7200 struct rte_flow *
7201 mlx5_flow_create(struct rte_eth_dev *dev,
7202 		 const struct rte_flow_attr *attr,
7203 		 const struct rte_flow_item items[],
7204 		 const struct rte_flow_action actions[],
7205 		 struct rte_flow_error *error)
7206 {
7207 	struct mlx5_priv *priv = dev->data->dev_private;
7208 
7209 	if (priv->sh->config.dv_flow_en == 2) {
7210 		rte_flow_error_set(error, ENOTSUP,
7211 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7212 			  NULL,
7213 			  "Flow non-Q creation not supported");
7214 		return NULL;
7215 	}
	/*
	 * If the device is not started yet, it is not allowed to create a
	 * flow from the application. PMD default flows and traffic control
	 * flows are not affected.
	 */
7221 	if (unlikely(!dev->data->dev_started)) {
7222 		DRV_LOG(DEBUG, "port %u is not started when "
7223 			"inserting a flow", dev->data->port_id);
7224 		rte_flow_error_set(error, ENODEV,
7225 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7226 				   NULL,
7227 				   "port not started");
7228 		return NULL;
7229 	}
7230 
7231 	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
7232 						   attr, items, actions,
7233 						   true, error);
7234 }
7235 
7236 /**
7237  * Destroy a flow in a list.
7238  *
7239  * @param dev
7240  *   Pointer to Ethernet device.
7241  * @param[in] flow_idx
7242  *   Index of flow to destroy.
7243  */
7244 static void
7245 flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
7246 		  uint32_t flow_idx)
7247 {
7248 	struct mlx5_priv *priv = dev->data->dev_private;
7249 	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
7250 
7251 	if (!flow)
7252 		return;
7253 	MLX5_ASSERT(flow->type == type);
7254 	/*
7255 	 * Update RX queue flags only if port is started, otherwise it is
7256 	 * already clean.
7257 	 */
7258 	if (dev->data->dev_started)
7259 		flow_rxq_flags_trim(dev, flow);
7260 	flow_drv_destroy(dev, flow);
7261 	if (flow->tunnel) {
7262 		struct mlx5_flow_tunnel *tunnel;
7263 
7264 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
7265 		RTE_VERIFY(tunnel);
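		/* Drop the flow's tunnel reference, free it on last release. */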
7266 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
7267 			mlx5_flow_tunnel_free(dev, tunnel);
7268 	}
7269 	flow_mreg_del_copy_action(dev, flow);
7270 	mlx5_ipool_free(priv->flows[type], flow_idx);
7271 }
7272 
7273 /**
7274  * Destroy all flows.
7275  *
7276  * @param dev
7277  *   Pointer to Ethernet device.
7278  * @param type
7279  *   Flow type to be flushed.
7280  * @param active
7281  *   If flushing is called actively.
7282  */
7283 void
7284 mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
7285 		     bool active)
7286 {
7287 	struct mlx5_priv *priv = dev->data->dev_private;
7288 	uint32_t num_flushed = 0, fidx = 1;
7289 	struct rte_flow *flow;
7290 
7291 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7292 	if (priv->sh->config.dv_flow_en == 2 &&
7293 	    type == MLX5_FLOW_TYPE_GEN) {
7294 		flow_hw_q_flow_flush(dev, NULL);
7295 		return;
7296 	}
7297 #endif
7298 
7299 	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
7300 		flow_list_destroy(dev, type, fidx);
7301 		num_flushed++;
7302 	}
7303 	if (active) {
7304 		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
7305 			dev->data->port_id, num_flushed);
7306 	}
7307 }
7308 
7309 /**
7310  * Stop all default actions for flows.
7311  *
7312  * @param dev
7313  *   Pointer to Ethernet device.
7314  */
7315 void
7316 mlx5_flow_stop_default(struct rte_eth_dev *dev)
7317 {
7318 	flow_mreg_del_default_copy_action(dev);
7319 	flow_rxq_flags_clear(dev);
7320 }
7321 
7322 /**
7323  * Start all default actions for flows.
7324  *
7325  * @param dev
7326  *   Pointer to Ethernet device.
7327  * @return
7328  *   0 on success, a negative errno value otherwise and rte_errno is set.
7329  */
7330 int
7331 mlx5_flow_start_default(struct rte_eth_dev *dev)
7332 {
7333 	struct rte_flow_error error;
7334 
7335 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
7336 	return flow_mreg_add_default_copy_action(dev, &error);
7337 }
7338 
7339 /**
 * Release the flow workspace chain stored under the thread-specific key.
7341  */
7342 void
7343 flow_release_workspace(void *data)
7344 {
7345 	struct mlx5_flow_workspace *wks = data;
7346 	struct mlx5_flow_workspace *next;
7347 
7348 	while (wks) {
7349 		next = wks->next;
7350 		free(wks->rss_desc.queue);
7351 		free(wks);
7352 		wks = next;
7353 	}
7354 }
7355 
7356 /**
7357  * Get thread specific current flow workspace.
7358  *
7359  * @return pointer to thread specific flow workspace data, NULL on error.
7360  */
7361 struct mlx5_flow_workspace*
7362 mlx5_flow_get_thread_workspace(void)
7363 {
7364 	struct mlx5_flow_workspace *data;
7365 
7366 	data = mlx5_flow_os_get_specific_workspace();
7367 	MLX5_ASSERT(data && data->inuse);
7368 	if (!data || !data->inuse)
7369 		DRV_LOG(ERR, "flow workspace not initialized.");
7370 	return data;
7371 }
7372 
7373 /**
7374  * Allocate and init new flow workspace.
7375  *
7376  * @return pointer to flow workspace data, NULL on error.
7377  */
7378 static struct mlx5_flow_workspace*
7379 flow_alloc_thread_workspace(void)
7380 {
7381 	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
7382 
7383 	if (!data) {
7384 		DRV_LOG(ERR, "Failed to allocate flow workspace "
7385 			"memory.");
7386 		return NULL;
7387 	}
7388 	data->rss_desc.queue = calloc(1,
7389 			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
7390 	if (!data->rss_desc.queue)
7391 		goto err;
7392 	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
7393 	return data;
7394 err:
7395 	free(data->rss_desc.queue);
7396 	free(data);
7397 	return NULL;
7398 }
7399 
7400 /**
7401  * Get new thread specific flow workspace.
7402  *
 * If the current workspace is in use, create a new one and set it as
 * current.
7404  *
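 * Illustrative usage of the per-thread workspace stack (names are
 * examples only):
 *
 *   wks = mlx5_flow_push_thread_workspace();
 *   ... nested flow creation may push again ...
 *   mlx5_flow_pop_thread_workspace(); // the previous one becomes current
 *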
7405  * @return pointer to thread specific flow workspace data, NULL on error.
7406  */
7407 static struct mlx5_flow_workspace*
7408 mlx5_flow_push_thread_workspace(void)
7409 {
7410 	struct mlx5_flow_workspace *curr;
7411 	struct mlx5_flow_workspace *data;
7412 
7413 	curr = mlx5_flow_os_get_specific_workspace();
7414 	if (!curr) {
7415 		data = flow_alloc_thread_workspace();
7416 		if (!data)
7417 			return NULL;
7418 	} else if (!curr->inuse) {
7419 		data = curr;
7420 	} else if (curr->next) {
7421 		data = curr->next;
7422 	} else {
7423 		data = flow_alloc_thread_workspace();
7424 		if (!data)
7425 			return NULL;
7426 		curr->next = data;
7427 		data->prev = curr;
7428 	}
7429 	data->inuse = 1;
7430 	data->flow_idx = 0;
7431 	/* Set as current workspace */
7432 	if (mlx5_flow_os_set_specific_workspace(data))
7433 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
7434 	return data;
7435 }
7436 
7437 /**
7438  * Close current thread specific flow workspace.
7439  *
7440  * If previous workspace available, set it as current.
7441  *
7442  * @return pointer to thread specific flow workspace data, NULL on error.
7443  */
7444 static void
7445 mlx5_flow_pop_thread_workspace(void)
7446 {
7447 	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
7448 
7449 	if (!data)
7450 		return;
7451 	if (!data->inuse) {
7452 		DRV_LOG(ERR, "Failed to close unused flow workspace.");
7453 		return;
7454 	}
7455 	data->inuse = 0;
7456 	if (!data->prev)
7457 		return;
7458 	if (mlx5_flow_os_set_specific_workspace(data->prev))
7459 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
7460 }
7461 
7462 /**
 * Verify that the flow list is empty.
7464  *
7465  * @param dev
7466  *  Pointer to Ethernet device.
7467  *
7468  * @return the number of flows not released.
7469  */
7470 int
mlx5_flow_verify(struct rte_eth_dev *dev)
7472 {
7473 	struct mlx5_priv *priv = dev->data->dev_private;
7474 	struct rte_flow *flow;
7475 	uint32_t idx = 0;
7476 	int ret = 0, i;
7477 
7478 	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
7479 		MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
7480 			DRV_LOG(DEBUG, "port %u flow %p still referenced",
7481 				dev->data->port_id, (void *)flow);
7482 			ret++;
7483 		}
7484 	}
7485 	return ret;
7486 }
7487 
7488 /**
7489  * Enable default hairpin egress flow.
7490  *
7491  * @param dev
7492  *   Pointer to Ethernet device.
7493  * @param queue
7494  *   The queue index.
7495  *
7496  * @return
7497  *   0 on success, a negative errno value otherwise and rte_errno is set.
7498  */
7499 int
7500 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
7501 			    uint32_t queue)
7502 {
7503 	const struct rte_flow_attr attr = {
7504 		.egress = 1,
7505 		.priority = 0,
7506 	};
7507 	struct mlx5_rte_flow_item_tx_queue queue_spec = {
7508 		.queue = queue,
7509 	};
7510 	struct mlx5_rte_flow_item_tx_queue queue_mask = {
7511 		.queue = UINT32_MAX,
7512 	};
7513 	struct rte_flow_item items[] = {
7514 		{
7515 			.type = (enum rte_flow_item_type)
7516 				MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
7517 			.spec = &queue_spec,
7518 			.last = NULL,
7519 			.mask = &queue_mask,
7520 		},
7521 		{
7522 			.type = RTE_FLOW_ITEM_TYPE_END,
7523 		},
7524 	};
7525 	struct rte_flow_action_jump jump = {
7526 		.group = MLX5_HAIRPIN_TX_TABLE,
7527 	};
7528 	struct rte_flow_action actions[2];
7529 	uint32_t flow_idx;
7530 	struct rte_flow_error error;
7531 
7532 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
7533 	actions[0].conf = &jump;
7534 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
7535 	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
7536 				    &attr, items, actions, false, &error);
7537 	if (!flow_idx) {
7538 		DRV_LOG(DEBUG,
7539 			"Failed to create ctrl flow: rte_errno(%d),"
7540 			" type(%d), message(%s)",
7541 			rte_errno, error.type,
7542 			error.message ? error.message : " (no stated reason)");
7543 		return -rte_errno;
7544 	}
7545 	return 0;
7546 }
7547 
7548 /**
7549  * Enable a control flow configured from the control plane.
7550  *
7551  * @param dev
7552  *   Pointer to Ethernet device.
7553  * @param eth_spec
7554  *   An Ethernet flow spec to apply.
7555  * @param eth_mask
7556  *   An Ethernet flow mask to apply.
7557  * @param vlan_spec
7558  *   A VLAN flow spec to apply.
7559  * @param vlan_mask
7560  *   A VLAN flow mask to apply.
7561  *
7562  * @return
7563  *   0 on success, a negative errno value otherwise and rte_errno is set.
7564  */
7565 int
7566 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
7567 		    struct rte_flow_item_eth *eth_spec,
7568 		    struct rte_flow_item_eth *eth_mask,
7569 		    struct rte_flow_item_vlan *vlan_spec,
7570 		    struct rte_flow_item_vlan *vlan_mask)
7571 {
7572 	struct mlx5_priv *priv = dev->data->dev_private;
7573 	const struct rte_flow_attr attr = {
7574 		.ingress = 1,
7575 		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
7576 	};
7577 	struct rte_flow_item items[] = {
7578 		{
7579 			.type = RTE_FLOW_ITEM_TYPE_ETH,
7580 			.spec = eth_spec,
7581 			.last = NULL,
7582 			.mask = eth_mask,
7583 		},
7584 		{
7585 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
7586 					      RTE_FLOW_ITEM_TYPE_END,
7587 			.spec = vlan_spec,
7588 			.last = NULL,
7589 			.mask = vlan_mask,
7590 		},
7591 		{
7592 			.type = RTE_FLOW_ITEM_TYPE_END,
7593 		},
7594 	};
7595 	uint16_t queue[priv->reta_idx_n];
7596 	struct rte_flow_action_rss action_rss = {
7597 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
7598 		.level = 0,
7599 		.types = priv->rss_conf.rss_hf,
7600 		.key_len = priv->rss_conf.rss_key_len,
7601 		.queue_num = priv->reta_idx_n,
7602 		.key = priv->rss_conf.rss_key,
7603 		.queue = queue,
7604 	};
7605 	struct rte_flow_action actions[] = {
7606 		{
7607 			.type = RTE_FLOW_ACTION_TYPE_RSS,
7608 			.conf = &action_rss,
7609 		},
7610 		{
7611 			.type = RTE_FLOW_ACTION_TYPE_END,
7612 		},
7613 	};
7614 	uint32_t flow_idx;
7615 	struct rte_flow_error error;
7616 	unsigned int i;
7617 
	if (!priv->reta_idx_n || !priv->rxqs_n)
		return 0;
7621 	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
7622 		action_rss.types = 0;
7623 	for (i = 0; i != priv->reta_idx_n; ++i)
7624 		queue[i] = (*priv->reta_idx)[i];
7625 	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
7626 				    &attr, items, actions, false, &error);
7627 	if (!flow_idx)
7628 		return -rte_errno;
7629 	return 0;
7630 }
7631 
7632 /**
 * Enable a control flow configured from the control plane.
7634  *
7635  * @param dev
7636  *   Pointer to Ethernet device.
7637  * @param eth_spec
7638  *   An Ethernet flow spec to apply.
7639  * @param eth_mask
7640  *   An Ethernet flow mask to apply.
7641  *
7642  * @return
7643  *   0 on success, a negative errno value otherwise and rte_errno is set.
7644  */
7645 int
7646 mlx5_ctrl_flow(struct rte_eth_dev *dev,
7647 	       struct rte_flow_item_eth *eth_spec,
7648 	       struct rte_flow_item_eth *eth_mask)
7649 {
7650 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
7651 }
7652 
7653 /**
7654  * Create default miss flow rule matching lacp traffic
7655  *
7656  * @param dev
7657  *   Pointer to Ethernet device.
7660  *
7661  * @return
7662  *   0 on success, a negative errno value otherwise and rte_errno is set.
7663  */
7664 int
7665 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
7666 {
	/*
	 * The LACP matching is done by using only the ether type, since using
	 * a multicast destination MAC causes the kernel to give this flow a
	 * low priority.
	 */
7671 	static const struct rte_flow_item_eth lacp_spec = {
7672 		.type = RTE_BE16(0x8809),
7673 	};
7674 	static const struct rte_flow_item_eth lacp_mask = {
7675 		.type = 0xffff,
7676 	};
7677 	const struct rte_flow_attr attr = {
7678 		.ingress = 1,
7679 	};
7680 	struct rte_flow_item items[] = {
7681 		{
7682 			.type = RTE_FLOW_ITEM_TYPE_ETH,
7683 			.spec = &lacp_spec,
7684 			.mask = &lacp_mask,
7685 		},
7686 		{
7687 			.type = RTE_FLOW_ITEM_TYPE_END,
7688 		},
7689 	};
7690 	struct rte_flow_action actions[] = {
7691 		{
7692 			.type = (enum rte_flow_action_type)
7693 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
7694 		},
7695 		{
7696 			.type = RTE_FLOW_ACTION_TYPE_END,
7697 		},
7698 	};
7699 	struct rte_flow_error error;
7700 	uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
7701 					&attr, items, actions,
7702 					false, &error);
7703 
7704 	if (!flow_idx)
7705 		return -rte_errno;
7706 	return 0;
7707 }
7708 
7709 /**
7710  * Destroy a flow.
7711  *
7712  * @see rte_flow_destroy()
7713  * @see rte_flow_ops
7714  */
7715 int
7716 mlx5_flow_destroy(struct rte_eth_dev *dev,
7717 		  struct rte_flow *flow,
7718 		  struct rte_flow_error *error __rte_unused)
7719 {
7720 	struct mlx5_priv *priv = dev->data->dev_private;
7721 
7722 	if (priv->sh->config.dv_flow_en == 2)
7723 		return rte_flow_error_set(error, ENOTSUP,
7724 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7725 			  NULL,
7726 			  "Flow non-Q destruction not supported");
7727 	flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
7728 				(uintptr_t)(void *)flow);
7729 	return 0;
7730 }
7731 
7732 /**
7733  * Destroy all flows.
7734  *
7735  * @see rte_flow_flush()
7736  * @see rte_flow_ops
7737  */
7738 int
7739 mlx5_flow_flush(struct rte_eth_dev *dev,
7740 		struct rte_flow_error *error __rte_unused)
7741 {
7742 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
7743 	return 0;
7744 }
7745 
7746 /**
7747  * Isolated mode.
7748  *
7749  * @see rte_flow_isolate()
7750  * @see rte_flow_ops
7751  */
7752 int
7753 mlx5_flow_isolate(struct rte_eth_dev *dev,
7754 		  int enable,
7755 		  struct rte_flow_error *error)
7756 {
7757 	struct mlx5_priv *priv = dev->data->dev_private;
7758 
7759 	if (dev->data->dev_started) {
7760 		rte_flow_error_set(error, EBUSY,
7761 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7762 				   NULL,
7763 				   "port must be stopped first");
7764 		return -rte_errno;
7765 	}
7766 	priv->isolated = !!enable;
7767 	if (enable)
7768 		dev->dev_ops = &mlx5_dev_ops_isolate;
7769 	else
7770 		dev->dev_ops = &mlx5_dev_ops;
7771 
7772 	dev->rx_descriptor_status = mlx5_rx_descriptor_status;
7773 	dev->tx_descriptor_status = mlx5_tx_descriptor_status;
7774 
7775 	return 0;
7776 }
7777 
7778 /**
7779  * Query a flow.
7780  *
7781  * @see rte_flow_query()
7782  * @see rte_flow_ops
7783  */
7784 static int
7785 flow_drv_query(struct rte_eth_dev *dev,
7786 	       uint32_t flow_idx,
7787 	       const struct rte_flow_action *actions,
7788 	       void *data,
7789 	       struct rte_flow_error *error)
7790 {
7791 	struct mlx5_priv *priv = dev->data->dev_private;
7792 	const struct mlx5_flow_driver_ops *fops;
7793 	struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
7794 					       flow_idx);
7795 	enum mlx5_flow_drv_type ftype;
7796 
7797 	if (!flow) {
7798 		return rte_flow_error_set(error, ENOENT,
7799 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7800 			  NULL,
7801 			  "invalid flow handle");
7802 	}
7803 	ftype = flow->drv_type;
7804 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
7805 	fops = flow_get_drv_ops(ftype);
7806 
7807 	return fops->query(dev, flow, actions, data, error);
7808 }
7809 
7810 /**
7811  * Query a flow.
7812  *
7813  * @see rte_flow_query()
7814  * @see rte_flow_ops
7815  */
7816 int
7817 mlx5_flow_query(struct rte_eth_dev *dev,
7818 		struct rte_flow *flow,
7819 		const struct rte_flow_action *actions,
7820 		void *data,
7821 		struct rte_flow_error *error)
7822 {
7823 	int ret;
7824 	struct mlx5_priv *priv = dev->data->dev_private;
7825 
7826 	if (priv->sh->config.dv_flow_en == 2)
7827 		return rte_flow_error_set(error, ENOTSUP,
7828 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7829 			  NULL,
7830 			  "Flow non-Q query not supported");
7831 	ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
7832 			     error);
7833 	if (ret < 0)
7834 		return ret;
7835 	return 0;
7836 }
7837 
7838 /**
7839  * Get rte_flow callbacks.
7840  *
7841  * @param dev
7842  *   Pointer to Ethernet device structure.
7843  * @param ops
7844  *   Pointer to operation-specific structure.
7845  *
7846  * @return 0
7847  */
7848 int
7849 mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
7850 		  const struct rte_flow_ops **ops)
7851 {
7852 	*ops = &mlx5_flow_ops;
7853 	return 0;
7854 }
7855 
7856 /**
7857  * Validate meter policy actions.
7858  * Dispatcher for action type specific validation.
7859  *
7860  * @param[in] dev
7861  *   Pointer to the Ethernet device structure.
7862  * @param[in] action
7863  *   The meter policy action object to validate.
7864  * @param[in] attr
7865  *   Attributes of flow to determine steering domain.
7866  * @param[out] is_rss
7867  *   Is RSS or not.
7868  * @param[out] domain_bitmap
7869  *   Domain bitmap.
 * @param[out] policy_mode
 *   Policy mode.
7872  * @param[out] error
7873  *   Perform verbose error reporting if not NULL. Initialized in case of
7874  *   error only.
7875  *
7876  * @return
7877  *   0 on success, otherwise negative errno value.
7878  */
7879 int
7880 mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
7881 			const struct rte_flow_action *actions[RTE_COLORS],
7882 			struct rte_flow_attr *attr,
7883 			bool *is_rss,
7884 			uint8_t *domain_bitmap,
7885 			uint8_t *policy_mode,
7886 			struct rte_mtr_error *error)
7887 {
7888 	const struct mlx5_flow_driver_ops *fops;
7889 
7890 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7891 	return fops->validate_mtr_acts(dev, actions, attr, is_rss,
7892 				       domain_bitmap, policy_mode, error);
7893 }
7894 
7895 /**
 * Destroy the meter policy actions.
7897  *
7898  * @param[in] dev
7899  *   Pointer to Ethernet device.
7900  * @param[in] mtr_policy
7901  *   Meter policy struct.
7902  */
7903 void
7904 mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
7905 		      struct mlx5_flow_meter_policy *mtr_policy)
7906 {
7907 	const struct mlx5_flow_driver_ops *fops;
7908 
7909 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7910 	fops->destroy_mtr_acts(dev, mtr_policy);
7911 }
7912 
7913 /**
7914  * Create policy action, lock free,
7915  * (mutex should be acquired by caller).
7916  * Dispatcher for action type specific call.
7917  *
7918  * @param[in] dev
7919  *   Pointer to the Ethernet device structure.
7920  * @param[in] mtr_policy
7921  *   Meter policy struct.
7922  * @param[in] action
7923  *   Action specification used to create meter actions.
7924  * @param[in] attr
7925  *   Flow rule attributes.
7926  * @param[out] error
7927  *   Perform verbose error reporting if not NULL. Initialized in case of
7928  *   error only.
7929  *
7930  * @return
7931  *   0 on success, otherwise negative errno value.
7932  */
7933 int
7934 mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
7935 		      struct mlx5_flow_meter_policy *mtr_policy,
7936 		      const struct rte_flow_action *actions[RTE_COLORS],
7937 		      struct rte_flow_attr *attr,
7938 		      struct rte_mtr_error *error)
7939 {
7940 	const struct mlx5_flow_driver_ops *fops;
7941 
7942 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7943 	return fops->create_mtr_acts(dev, mtr_policy, actions, attr, error);
7944 }
7945 
7946 /**
7947  * Create policy rules, lock free,
7948  * (mutex should be acquired by caller).
7949  * Dispatcher for action type specific call.
7950  *
7951  * @param[in] dev
7952  *   Pointer to the Ethernet device structure.
7953  * @param[in] mtr_policy
7954  *   Meter policy struct.
7955  *
7956  * @return
7957  *   0 on success, -1 otherwise.
7958  */
7959 int
7960 mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
7961 			     struct mlx5_flow_meter_policy *mtr_policy)
7962 {
7963 	const struct mlx5_flow_driver_ops *fops;
7964 
7965 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7966 	return fops->create_policy_rules(dev, mtr_policy);
7967 }
7968 
7969 /**
7970  * Destroy policy rules, lock free,
7971  * (mutex should be acquired by caller).
7972  * Dispatcher for action type specific call.
7973  *
7974  * @param[in] dev
7975  *   Pointer to the Ethernet device structure.
7976  * @param[in] mtr_policy
7977  *   Meter policy struct.
7978  */
7979 void
7980 mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
7981 			     struct mlx5_flow_meter_policy *mtr_policy)
7982 {
7983 	const struct mlx5_flow_driver_ops *fops;
7984 
7985 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7986 	fops->destroy_policy_rules(dev, mtr_policy);
7987 }
7988 
7989 /**
7990  * Destroy the default policy table set.
7991  *
7992  * @param[in] dev
7993  *   Pointer to Ethernet device.
7994  */
7995 void
7996 mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
7997 {
7998 	const struct mlx5_flow_driver_ops *fops;
7999 
8000 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8001 	fops->destroy_def_policy(dev);
8002 }
8003 
8004 /**
 * Create the default policy table set.
8006  *
8007  * @param[in] dev
8008  *   Pointer to Ethernet device.
8009  *
8010  * @return
8011  *   0 on success, -1 otherwise.
8012  */
8013 int
8014 mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
8015 {
8016 	const struct mlx5_flow_driver_ops *fops;
8017 
8018 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8019 	return fops->create_def_policy(dev);
8020 }
8021 
8022 /**
8023  * Create the needed meter and suffix tables.
8024  *
8025  * @param[in] dev
8026  *   Pointer to Ethernet device.
8027  *
8028  * @return
8029  *   0 on success, -1 otherwise.
8030  */
8031 int
8032 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
8033 			struct mlx5_flow_meter_info *fm,
8034 			uint32_t mtr_idx,
8035 			uint8_t domain_bitmap)
8036 {
8037 	const struct mlx5_flow_driver_ops *fops;
8038 
8039 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8040 	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
8041 }
8042 
8043 /**
8044  * Destroy the meter table set.
8045  *
8046  * @param[in] dev
8047  *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter info.
8050  */
8051 void
8052 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
8053 			   struct mlx5_flow_meter_info *fm)
8054 {
8055 	const struct mlx5_flow_driver_ops *fops;
8056 
8057 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8058 	fops->destroy_mtr_tbls(dev, fm);
8059 }
8060 
8061 /**
8062  * Destroy the global meter drop table.
8063  *
8064  * @param[in] dev
8065  *   Pointer to Ethernet device.
8066  */
8067 void
8068 mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
8069 {
8070 	const struct mlx5_flow_driver_ops *fops;
8071 
8072 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8073 	fops->destroy_mtr_drop_tbls(dev);
8074 }
8075 
8076 /**
8077  * Destroy the sub policy table with RX queue.
8078  *
8079  * @param[in] dev
8080  *   Pointer to Ethernet device.
8081  * @param[in] mtr_policy
8082  *   Pointer to meter policy table.
8083  */
8084 void
8085 mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
8086 		struct mlx5_flow_meter_policy *mtr_policy)
8087 {
8088 	const struct mlx5_flow_driver_ops *fops;
8089 
8090 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8091 	fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
8092 }
8093 
8094 /**
 * Allocate the needed ASO flow meter ID.
8096  *
8097  * @param[in] dev
8098  *   Pointer to Ethernet device.
8099  *
8100  * @return
 *   Index to the ASO flow meter on success, 0 otherwise.
8102  */
8103 uint32_t
8104 mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
8105 {
8106 	const struct mlx5_flow_driver_ops *fops;
8107 
8108 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8109 	return fops->create_meter(dev);
8110 }
8111 
8112 /**
 * Free the ASO flow meter ID.
8114  *
8115  * @param[in] dev
8116  *   Pointer to Ethernet device.
 * @param[in] mtr_idx
 *   Index to the ASO flow meter to be freed.
8122  */
8123 void
8124 mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
8125 {
8126 	const struct mlx5_flow_driver_ops *fops;
8127 
8128 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8129 	fops->free_meter(dev, mtr_idx);
8130 }
8131 
8132 /**
8133  * Allocate a counter.
8134  *
8135  * @param[in] dev
8136  *   Pointer to Ethernet device structure.
8137  *
8138  * @return
 *   Index to the allocated counter on success, 0 otherwise.
8140  */
8141 uint32_t
8142 mlx5_counter_alloc(struct rte_eth_dev *dev)
8143 {
8144 	const struct mlx5_flow_driver_ops *fops;
8145 	struct rte_flow_attr attr = { .transfer = 0 };
8146 
8147 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
8148 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8149 		return fops->counter_alloc(dev);
8150 	}
8151 	DRV_LOG(ERR,
8152 		"port %u counter allocate is not supported.",
8153 		 dev->data->port_id);
8154 	return 0;
8155 }
8156 
8157 /**
8158  * Free a counter.
8159  *
8160  * @param[in] dev
8161  *   Pointer to Ethernet device structure.
8162  * @param[in] cnt
 *   Index to the counter to be freed.
8164  */
8165 void
8166 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
8167 {
8168 	const struct mlx5_flow_driver_ops *fops;
8169 	struct rte_flow_attr attr = { .transfer = 0 };
8170 
8171 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
8172 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8173 		fops->counter_free(dev, cnt);
8174 		return;
8175 	}
8176 	DRV_LOG(ERR,
8177 		"port %u counter free is not supported.",
8178 		 dev->data->port_id);
8179 }
8180 
8181 /**
8182  * Query counter statistics.
8183  *
8184  * @param[in] dev
8185  *   Pointer to Ethernet device structure.
8186  * @param[in] cnt
8187  *   Index to counter to query.
8188  * @param[in] clear
8189  *   Set to clear counter statistics.
 * @param[out] pkts
 *   Where to store the number of packets that hit the counter.
 * @param[out] bytes
 *   Where to store the number of bytes that hit the counter.
8194  *
8195  * @return
8196  *   0 on success, a negative errno value otherwise.
8197  */
8198 int
8199 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
8200 		   bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
8201 {
8202 	const struct mlx5_flow_driver_ops *fops;
8203 	struct rte_flow_attr attr = { .transfer = 0 };
8204 
8205 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
8206 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8207 		return fops->counter_query(dev, cnt, clear, pkts,
8208 					bytes, action);
8209 	}
8210 	DRV_LOG(ERR,
8211 		"port %u counter query is not supported.",
8212 		 dev->data->port_id);
8213 	return -ENOTSUP;
8214 }
8215 
8216 /**
8217  * Get information about HWS pre-configurable resources.
8218  *
8219  * @param[in] dev
8220  *   Pointer to the rte_eth_dev structure.
8221  * @param[out] port_info
8222  *   Pointer to port information.
8223  * @param[out] queue_info
8224  *   Pointer to queue information.
8225  * @param[out] error
8226  *   Pointer to error structure.
8227  *
8228  * @return
8229  *   0 on success, a negative errno value otherwise and rte_errno is set.
8230  */
8231 static int
8232 mlx5_flow_info_get(struct rte_eth_dev *dev,
8233 		   struct rte_flow_port_info *port_info,
8234 		   struct rte_flow_queue_info *queue_info,
8235 		   struct rte_flow_error *error)
8236 {
8237 	const struct mlx5_flow_driver_ops *fops;
8238 
8239 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8240 		return rte_flow_error_set(error, ENOTSUP,
8241 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8242 				NULL,
8243 				"info get with incorrect steering mode");
8244 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8245 	return fops->info_get(dev, port_info, queue_info, error);
8246 }
8247 
8248 /**
8249  * Configure port HWS resources.
8250  *
8251  * @param[in] dev
8252  *   Pointer to the rte_eth_dev structure.
8253  * @param[in] port_attr
8254  *   Port configuration attributes.
8255  * @param[in] nb_queue
 *   Number of queues.
8257  * @param[in] queue_attr
8258  *   Array that holds attributes for each flow queue.
8259  * @param[out] error
8260  *   Pointer to error structure.
8261  *
8262  * @return
8263  *   0 on success, a negative errno value otherwise and rte_errno is set.
8264  */
8265 static int
8266 mlx5_flow_port_configure(struct rte_eth_dev *dev,
8267 			 const struct rte_flow_port_attr *port_attr,
8268 			 uint16_t nb_queue,
8269 			 const struct rte_flow_queue_attr *queue_attr[],
8270 			 struct rte_flow_error *error)
8271 {
8272 	const struct mlx5_flow_driver_ops *fops;
8273 
8274 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8275 		return rte_flow_error_set(error, ENOTSUP,
8276 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8277 				NULL,
8278 				"port configure with incorrect steering mode");
8279 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8280 	return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
8281 }
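
/*
 * The wrapper above backs the public rte_flow_configure() API. A minimal
 * configuration sketch (one flow queue; the sizes are illustrative only):
 *
 *	struct rte_flow_port_attr port_attr = { .nb_counters = 1 << 13 };
 *	struct rte_flow_queue_attr q_attr = { .size = 64 };
 *	const struct rte_flow_queue_attr *q_attrs[] = { &q_attr };
 *	struct rte_flow_error error;
 *
 *	ret = rte_flow_configure(port_id, &port_attr, 1, q_attrs, &error);
 */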
8282 
8283 /**
8284  * Create flow pattern template.
8285  *
8286  * @param[in] dev
8287  *   Pointer to the rte_eth_dev structure.
8288  * @param[in] attr
8289  *   Pointer to the item template attributes.
8290  * @param[in] items
8291  *   The template item pattern.
8292  * @param[out] error
8293  *   Pointer to error structure.
8294  *
8295  * @return
8296  *   Pattern template on success, NULL otherwise and rte_errno is set.
8297  */
8298 static struct rte_flow_pattern_template *
8299 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
8300 		const struct rte_flow_pattern_template_attr *attr,
8301 		const struct rte_flow_item items[],
8302 		struct rte_flow_error *error)
8303 {
8304 	const struct mlx5_flow_driver_ops *fops;
8305 
8306 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
8307 		rte_flow_error_set(error, ENOTSUP,
8308 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8309 				NULL,
8310 				"pattern create with incorrect steering mode");
8311 		return NULL;
8312 	}
8313 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8314 	return fops->pattern_template_create(dev, attr, items, error);
8315 }
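
/*
 * Sketch of the matching public API call, rte_flow_pattern_template_create().
 * Masks inside the items mark which header fields the per-rule specs will
 * fill later:
 *
 *	struct rte_flow_pattern_template_attr pt_attr = {
 *		.relaxed_matching = 0,
 *		.ingress = 1,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .mask = &(struct rte_flow_item_ipv4){
 *			.hdr.dst_addr = RTE_BE32(0xffffffff) } },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 *	pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern,
 *					      &error);
 */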
8316 
8317 /**
8318  * Destroy flow pattern template.
8319  *
8320  * @param[in] dev
8321  *   Pointer to the rte_eth_dev structure.
8322  * @param[in] template
8323  *   Pointer to the item template to be destroyed.
8324  * @param[out] error
8325  *   Pointer to error structure.
8326  *
8327  * @return
8328  *   0 on success, a negative errno value otherwise and rte_errno is set.
8329  */
8330 static int
8331 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
8332 				   struct rte_flow_pattern_template *template,
8333 				   struct rte_flow_error *error)
8334 {
8335 	const struct mlx5_flow_driver_ops *fops;
8336 
8337 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8338 		return rte_flow_error_set(error, ENOTSUP,
8339 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8340 				NULL,
8341 				"pattern destroy with incorrect steering mode");
8342 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8343 	return fops->pattern_template_destroy(dev, template, error);
8344 }
8345 
8346 /**
8347  * Create flow actions template.
8348  *
8349  * @param[in] dev
8350  *   Pointer to the rte_eth_dev structure.
8351  * @param[in] attr
8352  *   Pointer to the action template attributes.
8353  * @param[in] actions
8354  *   Associated actions (list terminated by the END action).
8355  * @param[in] masks
8356  *   List of actions that mark which of the action members are constant.
8357  * @param[out] error
8358  *   Pointer to error structure.
8359  *
8360  * @return
8361  *   Actions template on success, NULL otherwise and rte_errno is set.
8362  */
8363 static struct rte_flow_actions_template *
8364 mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
8365 			const struct rte_flow_actions_template_attr *attr,
8366 			const struct rte_flow_action actions[],
8367 			const struct rte_flow_action masks[],
8368 			struct rte_flow_error *error)
8369 {
8370 	const struct mlx5_flow_driver_ops *fops;
8371 
8372 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
8373 		rte_flow_error_set(error, ENOTSUP,
8374 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8375 				NULL,
8376 				"action create with incorrect steering mode");
8377 		return NULL;
8378 	}
8379 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8380 	return fops->actions_template_create(dev, attr, actions, masks, error);
8381 }
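
/*
 * Sketch of the matching public API call, rte_flow_actions_template_create().
 * A non-zero field in the masks array marks that field as constant for all
 * rules; a zero field is filled per rule at enqueue time (here the QUEUE
 * index is left per-rule by keeping its conf unset in the mask):
 *
 *	struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	at = rte_flow_actions_template_create(port_id, &at_attr, actions,
 *					      masks, &error);
 */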
8382 
8383 /**
8384  * Destroy flow actions template.
8385  *
8386  * @param[in] dev
8387  *   Pointer to the rte_eth_dev structure.
8388  * @param[in] template
8389  *   Pointer to the action template to be destroyed.
8390  * @param[out] error
8391  *   Pointer to error structure.
8392  *
8393  * @return
8394  *   0 on success, a negative errno value otherwise and rte_errno is set.
8395  */
8396 static int
8397 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
8398 				   struct rte_flow_actions_template *template,
8399 				   struct rte_flow_error *error)
8400 {
8401 	const struct mlx5_flow_driver_ops *fops;
8402 
8403 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8404 		return rte_flow_error_set(error, ENOTSUP,
8405 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8406 				NULL,
8407 				"action destroy with incorrect steering mode");
8408 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8409 	return fops->actions_template_destroy(dev, template, error);
8410 }
8411 
8412 /**
8413  * Create flow table.
8414  *
8415  * @param[in] dev
8416  *   Pointer to the rte_eth_dev structure.
8417  * @param[in] attr
8418  *   Pointer to the table attributes.
8419  * @param[in] item_templates
8420  *   Item template array to be bound to the table.
8421  * @param[in] nb_item_templates
8422  *   Number of item templates.
8423  * @param[in] action_templates
8424  *   Action template array to be bound to the table.
8425  * @param[in] nb_action_templates
8426  *   Number of action templates.
8427  * @param[out] error
8428  *   Pointer to error structure.
8429  *
8430  * @return
8431  *    Table on success, NULL otherwise and rte_errno is set.
8432  */
8433 static struct rte_flow_template_table *
8434 mlx5_flow_table_create(struct rte_eth_dev *dev,
8435 		       const struct rte_flow_template_table_attr *attr,
8436 		       struct rte_flow_pattern_template *item_templates[],
8437 		       uint8_t nb_item_templates,
8438 		       struct rte_flow_actions_template *action_templates[],
8439 		       uint8_t nb_action_templates,
8440 		       struct rte_flow_error *error)
8441 {
8442 	const struct mlx5_flow_driver_ops *fops;
8443 
8444 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
8445 		rte_flow_error_set(error, ENOTSUP,
8446 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8447 				NULL,
8448 				"table create with incorrect steering mode");
8449 		return NULL;
8450 	}
8451 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8452 	return fops->template_table_create(dev,
8453 					   attr,
8454 					   item_templates,
8455 					   nb_item_templates,
8456 					   action_templates,
8457 					   nb_action_templates,
8458 					   error);
8459 }
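
/*
 * Sketch of combining previously created templates into a table via the
 * public rte_flow_template_table_create() ("pt" and "at" are the template
 * pointers from the sketches above; values are illustrative):
 *
 *	struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .group = 1, .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *
 *	tbl = rte_flow_template_table_create(port_id, &tbl_attr,
 *					     &pt, 1, &at, 1, &error);
 */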
8460 
8461 /**
8462  * Destroy flow table.
8463  *
8464  * @param[in] dev
8465  *   Pointer to the rte_eth_dev structure.
8466  * @param[in] table
8467  *   Pointer to the table to be destroyed.
8468  * @param[out] error
8469  *   Pointer to error structure.
8470  *
8471  * @return
8472  *   0 on success, a negative errno value otherwise and rte_errno is set.
8473  */
8474 static int
8475 mlx5_flow_table_destroy(struct rte_eth_dev *dev,
8476 			struct rte_flow_template_table *table,
8477 			struct rte_flow_error *error)
8478 {
8479 	const struct mlx5_flow_driver_ops *fops;
8480 
8481 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8482 		return rte_flow_error_set(error, ENOTSUP,
8483 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8484 				NULL,
8485 				"table destroy with incorrect steering mode");
8486 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8487 	return fops->template_table_destroy(dev, table, error);
8488 }
8489 
8490 /**
8491  * Enqueue flow creation.
8492  *
8493  * @param[in] dev
8494  *   Pointer to the rte_eth_dev structure.
8495  * @param[in] queue_id
8496  *   The queue on which to enqueue the flow creation.
8497  * @param[in] attr
8498  *   Pointer to the flow operation attributes.
 * @param[in] table
 *   Pointer to the template table the flow is created from.
8499  * @param[in] items
8500  *   Pattern items with flow spec values.
8501  * @param[in] pattern_template_index
8502  *   Index of the item pattern template to use from the table.
8503  * @param[in] actions
8504  *   Actions with flow spec values.
8505  * @param[in] action_template_index
8506  *   Index of the action template to use from the table.
8507  * @param[in] user_data
8508  *   Pointer to the user_data.
8509  * @param[out] error
8510  *   Pointer to error structure.
8511  *
8512  * @return
8513  *    Flow pointer on success, NULL otherwise and rte_errno is set.
8514  */
8515 static struct rte_flow *
8516 mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
8517 			    uint32_t queue_id,
8518 			    const struct rte_flow_op_attr *attr,
8519 			    struct rte_flow_template_table *table,
8520 			    const struct rte_flow_item items[],
8521 			    uint8_t pattern_template_index,
8522 			    const struct rte_flow_action actions[],
8523 			    uint8_t action_template_index,
8524 			    void *user_data,
8525 			    struct rte_flow_error *error)
8526 {
8527 	const struct mlx5_flow_driver_ops *fops;
8528 
8529 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
8530 		rte_flow_error_set(error, ENOTSUP,
8531 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8532 				NULL,
8533 				"flow_q create with incorrect steering mode");
8534 		return NULL;
8535 	}
8536 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8537 	return fops->async_flow_create(dev, queue_id, attr, table,
8538 				       items, pattern_template_index,
8539 				       actions, action_template_index,
8540 				       user_data, error);
8541 }
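
/*
 * Sketch of enqueuing a rule creation on queue 0 through the public
 * rte_flow_async_create() ("tbl", "pattern_specs", "action_specs" and
 * "user_ctx" are assumed to come from the application; the indexes select
 * the pattern/actions templates bound to the table):
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *
 *	flow = rte_flow_async_create(port_id, 0, &op_attr, tbl,
 *				     pattern_specs, 0, action_specs, 0,
 *				     user_ctx, &error);
 */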
8542 
8543 /**
8544  * Enqueue flow destruction.
8545  *
8546  * @param[in] dev
8547  *   Pointer to the rte_eth_dev structure.
8548  * @param[in] queue
8549  *   The queue on which to enqueue the flow destruction.
8550  * @param[in] attr
8551  *   Pointer to the flow operation attributes.
8552  * @param[in] flow
8553  *   Pointer to the flow to be destroyed.
8554  * @param[in] user_data
8555  *   Pointer to the user_data.
8556  * @param[out] error
8557  *   Pointer to error structure.
8558  *
8559  * @return
8560  *    0 on success, negative value otherwise and rte_errno is set.
8561  */
8562 static int
8563 mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
8564 			     uint32_t queue,
8565 			     const struct rte_flow_op_attr *attr,
8566 			     struct rte_flow *flow,
8567 			     void *user_data,
8568 			     struct rte_flow_error *error)
8569 {
8570 	const struct mlx5_flow_driver_ops *fops;
8571 
8572 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8573 		return rte_flow_error_set(error, ENOTSUP,
8574 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8575 				NULL,
8576 				"flow_q destroy with incorrect steering mode");
8577 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8578 	return fops->async_flow_destroy(dev, queue, attr, flow,
8579 					user_data, error);
8580 }
8581 
8582 /**
8583  * Pull the enqueued flows.
8584  *
8585  * @param[in] dev
8586  *   Pointer to the rte_eth_dev structure.
8587  * @param[in] queue
8588  *   The queue from which to pull the results.
8589  * @param[in/out] res
8590  *   Array to save the results.
8591  * @param[in] n_res
8592  *   Maximum number of results the array can hold.
8593  * @param[out] error
8594  *   Pointer to error structure.
8595  *
8596  * @return
8597  *    Result number on success, negative value otherwise and rte_errno is set.
8598  */
8599 static int
8600 mlx5_flow_pull(struct rte_eth_dev *dev,
8601 	       uint32_t queue,
8602 	       struct rte_flow_op_result res[],
8603 	       uint16_t n_res,
8604 	       struct rte_flow_error *error)
8605 {
8606 	const struct mlx5_flow_driver_ops *fops;
8607 
8608 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8609 		return rte_flow_error_set(error, ENOTSUP,
8610 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8611 				NULL,
8612 				"flow_q pull with incorrect steering mode");
8613 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8614 	return fops->pull(dev, queue, res, n_res, error);
8615 }
8616 
8617 /**
8618  * Push the enqueued flows.
8619  *
8620  * @param[in] dev
8621  *   Pointer to the rte_eth_dev structure.
8622  * @param[in] queue
8623  *   The queue to push the flows.
8624  * @param[out] error
8625  *   Pointer to error structure.
8626  *
8627  * @return
8628  *    0 on success, negative value otherwise and rte_errno is set.
8629  */
8630 static int
8631 mlx5_flow_push(struct rte_eth_dev *dev,
8632 	       uint32_t queue,
8633 	       struct rte_flow_error *error)
8634 {
8635 	const struct mlx5_flow_driver_ops *fops;
8636 
8637 	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
8638 		return rte_flow_error_set(error, ENOTSUP,
8639 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8640 				NULL,
8641 				"flow_q push with incorrect steering mode");
8642 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8643 	return fops->push(dev, queue, error);
8644 }
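
/*
 * Postponed operations reach the HW only after a push; completions are then
 * drained with pull. A polling sketch over queue 0 (assuming at least one
 * operation was enqueued; "handle_failure" stands for application-defined
 * error handling):
 *
 *	struct rte_flow_op_result res[32];
 *	int i, n;
 *
 *	rte_flow_push(port_id, 0, &error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &error);
 *	} while (n == 0);
 *	for (i = 0; i < n; i++)
 *		if (res[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(res[i].user_data);
 */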
8645 
8646 /**
8647  * Enqueue indirect action creation.
8648  *
8649  * @param[in] dev
8650  *   Pointer to the rte_eth_dev structure.
8651  * @param[in] queue
8652  *   The queue to be used.
8653  * @param[in] attr
8654  *   Operation attribute.
8655  * @param[in] conf
8656  *   Indirect action configuration.
8657  * @param[in] action
8658  *   rte_flow action detail.
8659  * @param[in] user_data
8660  *   Pointer to the user_data.
8661  * @param[out] error
8662  *   Pointer to error structure.
8663  *
8664  * @return
8665  *   Action handle on success, NULL otherwise and rte_errno is set.
8666  */
8667 static struct rte_flow_action_handle *
8668 mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
8669 				 const struct rte_flow_op_attr *attr,
8670 				 const struct rte_flow_indir_action_conf *conf,
8671 				 const struct rte_flow_action *action,
8672 				 void *user_data,
8673 				 struct rte_flow_error *error)
8674 {
8675 	const struct mlx5_flow_driver_ops *fops =
8676 			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8677 
8678 	return fops->async_action_create(dev, queue, attr, conf, action,
8679 					 user_data, error);
8680 }
8681 
8682 /**
8683  * Enqueue indirect action update.
8684  *
8685  * @param[in] dev
8686  *   Pointer to the rte_eth_dev structure.
8687  * @param[in] queue
8688  *   The queue to be used.
8689  * @param[in] attr
8690  *   Operation attribute.
8691  * @param[in] handle
8692  *   Action handle to be updated.
8693  * @param[in] update
8694  *   Update value.
8695  * @param[in] user_data
8696  *   Pointer to the user_data.
8697  * @param[out] error
8698  *   Pointer to error structure.
8699  *
8700  * @return
8701  *   0 on success, negative value otherwise and rte_errno is set.
8702  */
8703 static int
8704 mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
8705 				     const struct rte_flow_op_attr *attr,
8706 				     struct rte_flow_action_handle *handle,
8707 				     const void *update,
8708 				     void *user_data,
8709 				     struct rte_flow_error *error)
8710 {
8711 	const struct mlx5_flow_driver_ops *fops =
8712 			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8713 
8714 	return fops->async_action_update(dev, queue, attr, handle,
8715 					 update, user_data, error);
8716 }
8717 
8718 /**
8719  * Enqueue indirect action destruction.
8720  *
8721  * @param[in] dev
8722  *   Pointer to the rte_eth_dev structure.
8723  * @param[in] queue
8724  *   The queue to be used.
8725  * @param[in] attr
8726  *   Operation attribute.
8727  * @param[in] handle
8728  *   Action handle to be destroyed.
8729  * @param[in] user_data
8730  *   Pointer to the user_data.
8731  * @param[out] error
8732  *   Pointer to error structure.
8733  *
8734  * @return
8735  *   0 on success, negative value otherwise and rte_errno is set.
8736  */
8737 static int
8738 mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
8739 				      const struct rte_flow_op_attr *attr,
8740 				      struct rte_flow_action_handle *handle,
8741 				      void *user_data,
8742 				      struct rte_flow_error *error)
8743 {
8744 	const struct mlx5_flow_driver_ops *fops =
8745 			flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
8746 
8747 	return fops->async_action_destroy(dev, queue, attr, handle,
8748 					  user_data, error);
8749 }
8750 
8751 /**
8752  * Allocate new memory for the counter values, wrapped with all the needed
8753  * management structures.
8754  *
8755  * @param[in] sh
8756  *   Pointer to mlx5_dev_ctx_shared object.
8757  *
8758  * @return
8759  *   0 on success, a negative errno value otherwise.
8760  */
8761 static int
8762 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
8763 {
8764 	struct mlx5_counter_stats_mem_mng *mem_mng;
8765 	volatile struct flow_counter_stats *raw_data;
8766 	int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
8767 	int size = (sizeof(struct flow_counter_stats) *
8768 			MLX5_COUNTERS_PER_POOL +
8769 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
8770 			sizeof(struct mlx5_counter_stats_mem_mng);
8771 	size_t pgsize = rte_mem_page_size();
8772 	uint8_t *mem;
8773 	int ret;
8774 	int i;
8775 
8776 	if (pgsize == (size_t)-1) {
8777 		DRV_LOG(ERR, "Failed to get mem page size");
8778 		rte_errno = ENOMEM;
8779 		return -ENOMEM;
8780 	}
8781 	mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
8782 	if (!mem) {
8783 		rte_errno = ENOMEM;
8784 		return -ENOMEM;
8785 	}
8786 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
8787 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
8788 	ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
8789 					  sh->cdev->pdn, mem, size,
8790 					  &mem_mng->wm);
8791 	if (ret) {
8792 		rte_errno = errno;
8793 		mlx5_free(mem);
8794 		return -rte_errno;
8795 	}
8796 	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
8797 	raw_data = (volatile struct flow_counter_stats *)mem;
8798 	for (i = 0; i < raws_n; ++i) {
8799 		mem_mng->raws[i].mem_mng = mem_mng;
8800 		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
8801 	}
8802 	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
8803 		LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
8804 				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
8805 				 next);
8806 	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
8807 	sh->cmng.mem_mng = mem_mng;
8808 	return 0;
8809 }
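
/*
 * Sketch of the layout produced by the single allocation above:
 *
 *	mem                               mem + size (recomputed)
 *	|                                 |
 *	v                                 v
 *	+---------------------------------+----------------+-----------+
 *	| raw counter data:               | raws[]:        | mem_mng   |
 *	| raws_n * MLX5_COUNTERS_PER_POOL | raws_n entries | structure |
 *	| entries of flow_counter_stats   |                |           |
 *	+---------------------------------+----------------+-----------+
 *
 * Only the raw data area is registered with the wrapped mkey. The last
 * MLX5_MAX_PENDING_QUERIES raws are put on the free list for asynchronous
 * queries; the first MLX5_CNT_CONTAINER_RESIZE raws serve the pools.
 */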
8810 
8811 /**
8812  * Set the statistic memory to the new counter pool.
8813  *
8814  * @param[in] sh
8815  *   Pointer to mlx5_dev_ctx_shared object.
8816  * @param[in] pool
8817  *   Pointer to the pool to set the statistic memory.
8818  *
8819  * @return
8820  *   0 on success, a negative errno value otherwise.
8821  */
8822 static int
8823 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
8824 			       struct mlx5_flow_counter_pool *pool)
8825 {
8826 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
8827 	/* Resize the statistic memory once it is used up. */
8828 	if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
8829 	    mlx5_flow_create_counter_stat_mem_mng(sh)) {
8830 		DRV_LOG(ERR, "Cannot resize counter stat mem.");
8831 		return -1;
8832 	}
8833 	rte_spinlock_lock(&pool->sl);
8834 	pool->raw = cmng->mem_mng->raws + pool->index %
8835 		    MLX5_CNT_CONTAINER_RESIZE;
8836 	rte_spinlock_unlock(&pool->sl);
8837 	pool->raw_hw = NULL;
8838 	return 0;
8839 }
8840 
8841 #define MLX5_POOL_QUERY_FREQ_US 1000000
8842 
8843 /**
8844  * Set the periodic procedure for triggering asynchronous batch queries for all
8845  * the counter pools.
8846  *
8847  * @param[in] sh
8848  *   Pointer to mlx5_dev_ctx_shared object.
8849  */
8850 void
8851 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
8852 {
8853 	uint32_t pools_n, us;
8854 
8855 	pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
8856 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
8857 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
8858 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
8859 		sh->cmng.query_thread_on = 0;
8860 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
8861 	} else {
8862 		sh->cmng.query_thread_on = 1;
8863 	}
8864 }
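
/*
 * Interval arithmetic sketch: with MLX5_POOL_QUERY_FREQ_US = 1000000 and,
 * say, 4 valid pools, the alarm fires every 1000000 / 4 = 250000 us and
 * each expiration advances pool_index by one, so every pool is queried
 * roughly once per second regardless of the pool count (the alarm is
 * expected to be armed only once at least one pool exists, so pools_n
 * should be non-zero here).
 */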
8865 
8866 /**
8867  * The periodic procedure for triggering asynchronous batch queries for all the
8868  * counter pools. This function is expected to be called from the host thread.
8869  *
8870  * @param[in] arg
8871  *   The parameter for the alarm process.
8872  */
8873 void
8874 mlx5_flow_query_alarm(void *arg)
8875 {
8876 	struct mlx5_dev_ctx_shared *sh = arg;
8877 	int ret;
8878 	uint16_t pool_index = sh->cmng.pool_index;
8879 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
8880 	struct mlx5_flow_counter_pool *pool;
8881 	uint16_t n_valid;
8882 
8883 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
8884 		goto set_alarm;
8885 	rte_spinlock_lock(&cmng->pool_update_sl);
8886 	pool = cmng->pools[pool_index];
8887 	n_valid = cmng->n_valid;
8888 	rte_spinlock_unlock(&cmng->pool_update_sl);
8889 	/* Set the statistic memory to the newly created pool. */
8890 	if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
8891 		goto set_alarm;
8892 	if (pool->raw_hw)
8893 		/* There is a pool query in progress. */
8894 		goto set_alarm;
8895 	pool->raw_hw =
8896 		LIST_FIRST(&sh->cmng.free_stat_raws);
8897 	if (!pool->raw_hw)
8898 		/* No free counter statistics raw memory. */
8899 		goto set_alarm;
8900 	/*
8901 	 * Identify more efficiently the counters released between the query
8902 	 * trigger and the query handling. Counters released in this gap
8903 	 * period should wait for a new round of query, as the newly arrived
8904 	 * packets will not be taken into account.
8905 	 */
8906 	pool->query_gen++;
8907 	ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
8908 					       MLX5_COUNTERS_PER_POOL,
8909 					       NULL, NULL,
8910 					       pool->raw_hw->mem_mng->wm.lkey,
8911 					       (void *)(uintptr_t)
8912 					       pool->raw_hw->data,
8913 					       sh->devx_comp,
8914 					       (uint64_t)(uintptr_t)pool);
8915 	if (ret) {
8916 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
8917 			" %d", pool->min_dcs->id);
8918 		pool->raw_hw = NULL;
8919 		goto set_alarm;
8920 	}
8921 	LIST_REMOVE(pool->raw_hw, next);
8922 	sh->cmng.pending_queries++;
8923 	pool_index++;
8924 	if (pool_index >= n_valid)
8925 		pool_index = 0;
8926 set_alarm:
8927 	sh->cmng.pool_index = pool_index;
8928 	mlx5_set_query_alarm(sh);
8929 }
8930 
8931 /**
8932  * Check for newly aged flows in the counter pool and raise the aging event.
8933  *
8934  * @param[in] sh
8935  *   Pointer to mlx5_dev_ctx_shared object.
8936  * @param[in] pool
8937  *   Pointer to the current counter pool.
8938  */
8939 static void
8940 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
8941 		   struct mlx5_flow_counter_pool *pool)
8942 {
8943 	struct mlx5_priv *priv;
8944 	struct mlx5_flow_counter *cnt;
8945 	struct mlx5_age_info *age_info;
8946 	struct mlx5_age_param *age_param;
8947 	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
8948 	struct mlx5_counter_stats_raw *prev = pool->raw;
8949 	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
8950 	const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
8951 	uint16_t expected = AGE_CANDIDATE;
8952 	uint32_t i;
8953 
8954 	pool->time_of_last_age_check = curr_time;
8955 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
8956 		cnt = MLX5_POOL_GET_CNT(pool, i);
8957 		age_param = MLX5_CNT_TO_AGE(cnt);
8958 		if (__atomic_load_n(&age_param->state,
8959 				    __ATOMIC_RELAXED) != AGE_CANDIDATE)
8960 			continue;
8961 		if (cur->data[i].hits != prev->data[i].hits) {
8962 			__atomic_store_n(&age_param->sec_since_last_hit, 0,
8963 					 __ATOMIC_RELAXED);
8964 			continue;
8965 		}
8966 		if (__atomic_add_fetch(&age_param->sec_since_last_hit,
8967 				       time_delta,
8968 				       __ATOMIC_RELAXED) <= age_param->timeout)
8969 			continue;
8970 		/*
8971 		 * Hold the lock first. Otherwise, if the release
8972 		 * happens between setting the AGE_TMOUT state and
8973 		 * the tailq operation, the release procedure may
8974 		 * delete a non-existent tailq node.
8975 		 */
8976 		priv = rte_eth_devices[age_param->port_id].data->dev_private;
8977 		age_info = GET_PORT_AGE_INFO(priv);
8978 		rte_spinlock_lock(&age_info->aged_sl);
8979 		if (__atomic_compare_exchange_n(&age_param->state, &expected,
8980 						AGE_TMOUT, false,
8981 						__ATOMIC_RELAXED,
8982 						__ATOMIC_RELAXED)) {
8983 			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
8984 			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
8985 		}
8986 		rte_spinlock_unlock(&age_info->aged_sl);
8987 	}
8988 	mlx5_age_event_prepare(sh);
8989 }
8990 
8991 /**
8992  * Handler for the HW response with ready values from an asynchronous batch
8993  * query. This function is expected to be called from the host thread.
8994  *
8995  * @param[in] sh
8996  *   The pointer to the shared device context.
8997  * @param[in] async_id
8998  *   The Devx async ID.
8999  * @param[in] status
9000  *   The status of the completion.
9001  */
9002 void
9003 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
9004 				  uint64_t async_id, int status)
9005 {
9006 	struct mlx5_flow_counter_pool *pool =
9007 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
9008 	struct mlx5_counter_stats_raw *raw_to_free;
9009 	uint8_t query_gen = pool->query_gen ^ 1;
9010 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
9011 	enum mlx5_counter_type cnt_type =
9012 		pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
9013 				MLX5_COUNTER_TYPE_ORIGIN;
9014 
9015 	if (unlikely(status)) {
9016 		raw_to_free = pool->raw_hw;
9017 	} else {
9018 		raw_to_free = pool->raw;
9019 		if (pool->is_aged)
9020 			mlx5_flow_aging_check(sh, pool);
9021 		rte_spinlock_lock(&pool->sl);
9022 		pool->raw = pool->raw_hw;
9023 		rte_spinlock_unlock(&pool->sl);
9024 		/* Be sure the new raw counters data is updated in memory. */
9025 		rte_io_wmb();
9026 		if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
9027 			rte_spinlock_lock(&cmng->csl[cnt_type]);
9028 			TAILQ_CONCAT(&cmng->counters[cnt_type],
9029 				     &pool->counters[query_gen], next);
9030 			rte_spinlock_unlock(&cmng->csl[cnt_type]);
9031 		}
9032 	}
9033 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
9034 	pool->raw_hw = NULL;
9035 	sh->cmng.pending_queries--;
9036 }
9037 
9038 static int
9039 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
9040 		    const struct flow_grp_info *grp_info,
9041 		    struct rte_flow_error *error)
9042 {
9043 	if (grp_info->transfer && grp_info->external &&
9044 	    grp_info->fdb_def_rule) {
9045 		if (group == UINT32_MAX)
9046 			return rte_flow_error_set
9047 						(error, EINVAL,
9048 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
9049 						 NULL,
9050 						 "group index not supported");
9051 		*table = group + 1;
9052 	} else {
9053 		*table = group;
9054 	}
9055 	DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
9056 	return 0;
9057 }
9058 
9059 /**
9060  * Translate the rte_flow group index to HW table value.
9061  *
9062  * If tunnel offload is disabled, all group ids are converted to flow table
9063  * ids using the standard method.
9064  * If tunnel offload is enabled, a group id can be converted using either the
9065  * standard or the tunnel conversion method. The conversion method
9066  * selection depends on flags in the `grp_info` parameter:
9067  * - Internal (grp_info.external == 0) groups are converted using the
9068  *   standard method.
9069  * - Group ids in the JUMP action are converted with the tunnel conversion.
9070  * - Group id conversion in the rule attribute depends on the rule type and
9071  *   the group id value:
9072  *   ** non-zero group attributes are converted with the tunnel method;
9073  *   ** a zero group attribute in a non-tunnel rule is converted using the
9074  *      standard method - there's only one root table;
9075  *   ** a zero group attribute in a steer tunnel rule is converted with the
9076  *      standard method - single root table;
9077  *   ** a zero group attribute in a match tunnel rule is a special OvS
9078  *      case: that value is used for portability reasons. That group
9079  *      id is converted with the tunnel conversion method.
9080  *
9081  * @param[in] dev
9082  *   Port device
9083  * @param[in] tunnel
9084  *   PMD tunnel offload object
9085  * @param[in] group
9086  *   rte_flow group index value.
9087  * @param[out] table
9088  *   HW table value.
9089  * @param[in] grp_info
9090  *   flags used for conversion
9091  * @param[out] error
9092  *   Pointer to error structure.
9093  *
9094  * @return
9095  *   0 on success, a negative errno value otherwise and rte_errno is set.
9096  */
9097 int
9098 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
9099 			 const struct mlx5_flow_tunnel *tunnel,
9100 			 uint32_t group, uint32_t *table,
9101 			 const struct flow_grp_info *grp_info,
9102 			 struct rte_flow_error *error)
9103 {
9104 	int ret;
9105 	bool standard_translation;
9106 
9107 	if (!grp_info->skip_scale && grp_info->external &&
9108 	    group < MLX5_MAX_TABLES_EXTERNAL)
9109 		group *= MLX5_FLOW_TABLE_FACTOR;
9110 	if (is_tunnel_offload_active(dev)) {
9111 		standard_translation = !grp_info->external ||
9112 					grp_info->std_tbl_fix;
9113 	} else {
9114 		standard_translation = true;
9115 	}
9116 	DRV_LOG(DEBUG,
9117 		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
9118 		dev->data->port_id, group, grp_info->transfer,
9119 		grp_info->external, grp_info->fdb_def_rule,
9120 		standard_translation ? "STANDARD" : "TUNNEL");
9121 	if (standard_translation)
9122 		ret = flow_group_to_table(dev->data->port_id, group, table,
9123 					  grp_info, error);
9124 	else
9125 		ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
9126 						      table, error);
9127 
9128 	return ret;
9129 }
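
/*
 * Worked examples for the translation above (constants as defined in
 * mlx5_defs.h; values are illustrative):
 * - An external non-transfer rule with group 3 and skip_scale unset is
 *   first scaled to group 3 * MLX5_FLOW_TABLE_FACTOR, then mapped with
 *   the standard method, so table = scaled group.
 * - An external transfer rule with fdb_def_rule set maps group N to
 *   table N + 1, keeping table 0 reserved for the FDB default rule.
 * - group == UINT32_MAX is rejected in that case, since the + 1 shift
 *   would overflow the table id.
 */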
9130 
9131 /**
9132  * Discover availability of metadata reg_c's.
9133  *
9134  * Iteratively use test flows to check availability.
9135  *
9136  * @param[in] dev
9137  *   Pointer to the Ethernet device structure.
9138  *
9139  * @return
9140  *   0 on success, a negative errno value otherwise and rte_errno is set.
9141  */
9142 int
9143 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
9144 {
9145 	struct mlx5_priv *priv = dev->data->dev_private;
9146 	enum modify_reg idx;
9147 	int n = 0;
9148 
9149 	/* reg_c[0] and reg_c[1] are reserved. */
9150 	priv->sh->flow_mreg_c[n++] = REG_C_0;
9151 	priv->sh->flow_mreg_c[n++] = REG_C_1;
9152 	/* Discover availability of other reg_c's. */
9153 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
9154 		struct rte_flow_attr attr = {
9155 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
9156 			.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
9157 			.ingress = 1,
9158 		};
9159 		struct rte_flow_item items[] = {
9160 			[0] = {
9161 				.type = RTE_FLOW_ITEM_TYPE_END,
9162 			},
9163 		};
9164 		struct rte_flow_action actions[] = {
9165 			[0] = {
9166 				.type = (enum rte_flow_action_type)
9167 					MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
9168 				.conf = &(struct mlx5_flow_action_copy_mreg){
9169 					.src = REG_C_1,
9170 					.dst = idx,
9171 				},
9172 			},
9173 			[1] = {
9174 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
9175 				.conf = &(struct rte_flow_action_jump){
9176 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
9177 				},
9178 			},
9179 			[2] = {
9180 				.type = RTE_FLOW_ACTION_TYPE_END,
9181 			},
9182 		};
9183 		uint32_t flow_idx;
9184 		struct rte_flow *flow;
9185 		struct rte_flow_error error;
9186 
9187 		if (!priv->sh->config.dv_flow_en)
9188 			break;
9189 		/* Create internal flow, validation skips copy action. */
9190 		flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
9191 					items, actions, false, &error);
9192 		flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
9193 				      flow_idx);
9194 		if (!flow)
9195 			continue;
9196 		priv->sh->flow_mreg_c[n++] = idx;
9197 		flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
9198 	}
9199 	for (; n < MLX5_MREG_C_NUM; ++n)
9200 		priv->sh->flow_mreg_c[n] = REG_NON;
9201 	priv->sh->metadata_regc_check_flag = 1;
9202 	return 0;
9203 }
9204 
9205 int
9206 save_dump_file(const uint8_t *data, uint32_t size,
9207 	uint32_t type, uint64_t id, void *arg, FILE *file)
9208 {
9209 	char line[BUF_SIZE];
9210 	uint32_t out = 0;
9211 	uint32_t k;
9212 	uint32_t actions_num;
9213 	struct rte_flow_query_count *count;
9214 
9215 	memset(line, 0, BUF_SIZE);
9216 	switch (type) {
9217 	case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
9218 		actions_num = *(uint32_t *)(arg);
9219 		out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
9220 				type, id, actions_num);
9221 		break;
9222 	case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
9223 		out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
9224 				type, id);
9225 		break;
9226 	case DR_DUMP_REC_TYPE_PMD_COUNTER:
9227 		count = (struct rte_flow_query_count *)arg;
9228 		fprintf(file,
9229 			"%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
9230 			type, id, count->hits, count->bytes);
9231 		return 0;
9232 	default:
9233 		return -1;
9234 	}
9235 
9236 	for (k = 0; k < size; k++) {
9237 		/* Make sure we do not overrun the line buffer length. */
9238 		if (out >= BUF_SIZE - 4) {
9239 			line[out] = '\0';
9240 			break;
9241 		}
9242 		out += snprintf(line + out, BUF_SIZE - out, "%02x",
9243 				(data[k]) & 0xff);
9244 	}
9245 	fprintf(file, "%s\n", line);
9246 	return 0;
9247 }
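
/*
 * Sketch of the records emitted above, one per line (values invented):
 *
 *	modify_hdr:   "<type>,0x<id>,<actions_num>,<hex action data>"
 *	pkt_reformat: "<type>,0x<id>,<hex reformat buffer>"
 *	counter:      "<type>,0x<id>,<hits>,<bytes>"
 *
 * where <type> is the DR_DUMP_REC_TYPE_PMD_* value and <id> is the action
 * pointer used as a unique record identifier.
 */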
9248 
9249 int
9250 mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
9251 	struct rte_flow_query_count *count, struct rte_flow_error *error)
9252 {
9253 	struct rte_flow_action action[2];
9254 	enum mlx5_flow_drv_type ftype;
9255 	const struct mlx5_flow_driver_ops *fops;
9256 
9257 	if (!flow) {
9258 		return rte_flow_error_set(error, ENOENT,
9259 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9260 				NULL,
9261 				"invalid flow handle");
9262 	}
9263 	action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
9264 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
9265 	if (flow->counter) {
9266 		memset(count, 0, sizeof(struct rte_flow_query_count));
9267 		ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
9268 		MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
9269 						ftype < MLX5_FLOW_TYPE_MAX);
9270 		fops = flow_get_drv_ops(ftype);
9271 		return fops->query(dev, flow, action, count, error);
9272 	}
9273 	return -1;
9274 }
9275 
9276 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9277 /**
9278  * Dump flow ipool data to file
9279  *
9280  * @param[in] dev
9281  *   The pointer to Ethernet device.
 * @param[in] flow
 *   The pointer to the flow to dump.
9282  * @param[in] file
9283  *   A pointer to a file for output.
9284  * @param[out] error
9285  *   Perform verbose error reporting if not NULL. PMDs initialize this
9286  *   structure in case of error only.
9287  * @return
9288  *   0 on success, a negative value otherwise.
9289  */
9290 int
9291 mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
9292 	struct rte_flow *flow, FILE *file,
9293 	struct rte_flow_error *error)
9294 {
9295 	struct mlx5_priv *priv = dev->data->dev_private;
9296 	struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
9297 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
9298 	uint32_t handle_idx;
9299 	struct mlx5_flow_handle *dh;
9300 	struct rte_flow_query_count count;
9301 	uint32_t actions_num;
9302 	const uint8_t *data;
9303 	size_t size;
9304 	uint64_t id;
9305 	uint32_t type;
9306 	void *action = NULL;
9307 
9308 	if (!flow) {
9309 		return rte_flow_error_set(error, ENOENT,
9310 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9311 				NULL,
9312 				"invalid flow handle");
9313 	}
9314 	handle_idx = flow->dev_handles;
9315 	/* query counter */
9316 	if (flow->counter &&
9317 	(!mlx5_counter_query(dev, flow->counter, false,
9318 	&count.hits, &count.bytes, &action)) && action) {
9319 		id = (uint64_t)(uintptr_t)action;
9320 		type = DR_DUMP_REC_TYPE_PMD_COUNTER;
9321 		save_dump_file(NULL, 0, type,
9322 			id, (void *)&count, file);
9323 	}
9324 
9325 	while (handle_idx) {
9326 		dh = mlx5_ipool_get(priv->sh->ipool
9327 				[MLX5_IPOOL_MLX5_FLOW], handle_idx);
9328 		if (!dh)
9329 			break; /* Stale handle, stop to avoid an endless loop. */
9330 		handle_idx = dh->next.next;
9331 
9332 		/* Get modify_hdr and encap_decap buf from ipools. */
9333 		encap_decap = NULL;
9334 		modify_hdr = dh->dvh.modify_hdr;
9335 
9336 		if (dh->dvh.rix_encap_decap) {
9337 			encap_decap = mlx5_ipool_get(priv->sh->ipool
9338 						[MLX5_IPOOL_DECAP_ENCAP],
9339 						dh->dvh.rix_encap_decap);
9340 		}
9341 		if (modify_hdr) {
9342 			data = (const uint8_t *)modify_hdr->actions;
9343 			size = (size_t)(modify_hdr->actions_num) * 8;
9344 			id = (uint64_t)(uintptr_t)modify_hdr->action;
9345 			actions_num = modify_hdr->actions_num;
9346 			type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
9347 			save_dump_file(data, size, type, id,
9348 						(void *)(&actions_num), file);
9349 		}
9350 		if (encap_decap) {
9351 			data = encap_decap->buf;
9352 			size = encap_decap->size;
9353 			id = (uint64_t)(uintptr_t)encap_decap->action;
9354 			type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
9355 			save_dump_file(data, size, type,
9356 						id, NULL, file);
9357 		}
9358 	}
9359 	return 0;
9360 }
9361 
9362 /**
9363  * Dump the encap_decap/modify_hdr/counter data of all flows to file
9364  *
9365  * @param[in] dev
9366  *   The pointer to Ethernet device.
9367  * @param[in] file
9368  *   A pointer to a file for output.
9369  * @param[out] error
9370  *   Perform verbose error reporting if not NULL. PMDs initialize this
9371  *   structure in case of error only.
9372  * @return
9373  *   0 on success, a negative value otherwise.
9374  */
9375 static int
9376 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
9377 	FILE *file, struct rte_flow_error *error __rte_unused)
9378 {
9379 	struct mlx5_priv *priv = dev->data->dev_private;
9380 	struct mlx5_dev_ctx_shared *sh = priv->sh;
9381 	struct mlx5_hlist *h;
9382 	struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
9383 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
9384 	struct rte_flow_query_count count;
9385 	uint32_t actions_num;
9386 	const uint8_t *data;
9387 	size_t size;
9388 	uint64_t id;
9389 	uint32_t type;
9390 	uint32_t i;
9391 	uint32_t j;
9392 	struct mlx5_list_inconst *l_inconst;
9393 	struct mlx5_list_entry *e;
9394 	int lcore_index;
9395 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
9396 	uint32_t max;
9397 	void *action;
9398 
9399 	/* The encap_decap hlist is lcore-shared; use the global core cache. */
9400 	i = MLX5_LIST_GLOBAL;
9401 	h = sh->encaps_decaps;
9402 	if (h) {
9403 		for (j = 0; j <= h->mask; j++) {
9404 			l_inconst = &h->buckets[j].l;
9405 			if (!l_inconst || !l_inconst->cache[i])
9406 				continue;
9407 
9408 			e = LIST_FIRST(&l_inconst->cache[i]->h);
9409 			while (e) {
9410 				encap_decap =
9411 				(struct mlx5_flow_dv_encap_decap_resource *)e;
9412 				data = encap_decap->buf;
9413 				size = encap_decap->size;
9414 				id = (uint64_t)(uintptr_t)encap_decap->action;
9415 				type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
9416 				save_dump_file(data, size, type,
9417 					id, NULL, file);
9418 				e = LIST_NEXT(e, next);
9419 			}
9420 		}
9421 	}
9422 
9423 	/* get modify_hdr */
9424 	h = sh->modify_cmds;
9425 	if (h) {
9426 		lcore_index = rte_lcore_index(rte_lcore_id());
9427 		if (unlikely(lcore_index == -1)) {
9428 			lcore_index = MLX5_LIST_NLCORE;
9429 			rte_spinlock_lock(&h->l_const.lcore_lock);
9430 		}
9431 		i = lcore_index;
9432 
9433 		for (j = 0; j <= h->mask; j++) {
9434 			l_inconst = &h->buckets[j].l;
9435 			if (!l_inconst || !l_inconst->cache[i])
9436 				continue;
9437 
9438 			e = LIST_FIRST(&l_inconst->cache[i]->h);
9439 			while (e) {
9440 				modify_hdr =
9441 				(struct mlx5_flow_dv_modify_hdr_resource *)e;
9442 				data = (const uint8_t *)modify_hdr->actions;
9443 				size = (size_t)(modify_hdr->actions_num) * 8;
9444 				actions_num = modify_hdr->actions_num;
9445 				id = (uint64_t)(uintptr_t)modify_hdr->action;
9446 				type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
9447 				save_dump_file(data, size, type, id,
9448 						(void *)(&actions_num), file);
9449 				e = LIST_NEXT(e, next);
9450 			}
9451 		}
9452 
9453 		if (unlikely(lcore_index == MLX5_LIST_NLCORE))
9454 			rte_spinlock_unlock(&h->l_const.lcore_lock);
9455 	}
9456 
9457 	/* get counter */
9458 	MLX5_ASSERT(cmng->n_valid <= cmng->n);
9459 	max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
9460 	for (j = 1; j <= max; j++) {
9461 		action = NULL;
9462 		if ((!mlx5_counter_query(dev, j, false, &count.hits,
9463 		&count.bytes, &action)) && action) {
9464 			id = (uint64_t)(uintptr_t)action;
9465 			type = DR_DUMP_REC_TYPE_PMD_COUNTER;
9466 			save_dump_file(NULL, 0, type,
9467 					id, (void *)&count, file);
9468 		}
9469 	}
9470 	return 0;
9471 }
9472 #endif
9473 
9474 /**
9475  * Dump flow raw hw data to file
9476  *
9477  * @param[in] dev
9478  *    The pointer to Ethernet device.
9479  * @param[in] file
9480  *   A pointer to a file for output.
9481  * @param[out] error
9482  *   Perform verbose error reporting if not NULL. PMDs initialize this
9483  *   structure in case of error only.
9484  * @return
9485  *   0 on success, a negative value otherwise.
9486  */
9487 int
9488 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
9489 		   FILE *file,
9490 		   struct rte_flow_error *error __rte_unused)
9491 {
9492 	struct mlx5_priv *priv = dev->data->dev_private;
9493 	struct mlx5_dev_ctx_shared *sh = priv->sh;
9494 	uint32_t handle_idx;
9495 	int ret;
9496 	struct mlx5_flow_handle *dh;
9497 	struct rte_flow *flow;
9498 
9499 	if (!sh->config.dv_flow_en) {
9500 		if (fputs("device dv flow disabled\n", file) <= 0)
9501 			return -errno;
9502 		return -ENOTSUP;
9503 	}
9504 
9505 	/* dump all */
9506 	if (!flow_idx) {
9507 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9508 		if (mlx5_flow_dev_dump_sh_all(dev, file, error))
9509 			return -EINVAL;
9510 #endif
9511 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
9512 					sh->rx_domain,
9513 					sh->tx_domain, file);
9514 	}
9515 	/* dump one */
9516 	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
9517 			(uintptr_t)(void *)flow_idx);
9518 	if (!flow)
9519 		return -EINVAL;
9520 
9521 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9522 	mlx5_flow_dev_dump_ipool(dev, flow, file, error);
9523 #endif
9524 	handle_idx = flow->dev_handles;
9525 	while (handle_idx) {
9526 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9527 				handle_idx);
9528 		if (!dh)
9529 			return -ENOENT;
9530 		if (dh->drv_flow) {
9531 			ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
9532 					file);
9533 			if (ret)
9534 				return -ENOENT;
9535 		}
9536 		handle_idx = dh->next.next;
9537 	}
9538 	return 0;
9539 }
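
/*
 * Usage sketch through the public rte_flow_dev_dump(): a NULL flow pointer
 * selects the "dump all" branch above (the file path is illustrative):
 *
 *	FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
 *
 *	if (f != NULL) {
 *		ret = rte_flow_dev_dump(port_id, NULL, f, &error);
 *		fclose(f);
 *	}
 */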
9540 
9541 /**
9542  * Get aged-out flows.
9543  *
9544  * @param[in] dev
9545  *   Pointer to the Ethernet device structure.
9546  * @param[in] contexts
9547  *   The address of an array of pointers to the aged-out flow contexts.
9548  * @param[in] nb_contexts
9549  *   The length of the context array.
9550  * @param[out] error
9551  *   Perform verbose error reporting if not NULL. Initialized in case of
9552  *   error only.
9553  *
9554  * @return
9555  *   The number of contexts retrieved on success, a negative errno value
9556  *   otherwise. If nb_contexts is 0, return the total number of aged-out
9557  *   contexts. If nb_contexts is not 0, return the number of aged-out
9558  *   flows reported in the context array.
9559  */
9560 int
9561 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
9562 			uint32_t nb_contexts, struct rte_flow_error *error)
9563 {
9564 	const struct mlx5_flow_driver_ops *fops;
9565 	struct rte_flow_attr attr = { .transfer = 0 };
9566 
9567 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
9568 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
9569 		return fops->get_aged_flows(dev, contexts, nb_contexts,
9570 						    error);
9571 	}
9572 	DRV_LOG(ERR,
9573 		"port %u get aged flows is not supported.",
9574 		 dev->data->port_id);
9575 	return -ENOTSUP;
9576 }
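
/*
 * Typical two-step usage through the public rte_flow_get_aged_flows():
 * query the count first, then retrieve the contexts (a sketch; the context
 * values are those given in the AGE action at rule creation):
 *
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 *	if (n > 0) {
 *		void **ctx = malloc(sizeof(void *) * n);
 *
 *		if (ctx != NULL) {
 *			n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *			...
 *			free(ctx);
 *		}
 *	}
 */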
9577 
9578 /* Wrapper for driver action_validate op callback */
9579 static int
9580 flow_drv_action_validate(struct rte_eth_dev *dev,
9581 			 const struct rte_flow_indir_action_conf *conf,
9582 			 const struct rte_flow_action *action,
9583 			 const struct mlx5_flow_driver_ops *fops,
9584 			 struct rte_flow_error *error)
9585 {
9586 	static const char err_msg[] = "indirect action validation unsupported";
9587 
9588 	if (!fops->action_validate) {
9589 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9590 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9591 				   NULL, err_msg);
9592 		return -rte_errno;
9593 	}
9594 	return fops->action_validate(dev, conf, action, error);
9595 }
9596 
9597 /**
9598  * Destroys the shared action by handle.
9599  *
9600  * @param dev
9601  *   Pointer to Ethernet device structure.
9602  * @param[in] handle
9603  *   Handle for the indirect action object to be destroyed.
9604  * @param[out] error
9605  *   Perform verbose error reporting if not NULL. PMDs initialize this
9606  *   structure in case of error only.
9607  *
9608  * @return
9609  *   0 on success, a negative errno value otherwise and rte_errno is set.
9610  *
9611  * @note: wrapper for driver action_destroy op callback.
9612  */
9613 static int
9614 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
9615 			   struct rte_flow_action_handle *handle,
9616 			   struct rte_flow_error *error)
9617 {
9618 	static const char err_msg[] = "indirect action destruction unsupported";
9619 	struct rte_flow_attr attr = { .transfer = 0 };
9620 	const struct mlx5_flow_driver_ops *fops =
9621 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9622 
9623 	if (!fops->action_destroy) {
9624 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9625 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9626 				   NULL, err_msg);
9627 		return -rte_errno;
9628 	}
9629 	return fops->action_destroy(dev, handle, error);
9630 }
9631 
9632 /* Wrapper for driver action_update op callback */
9633 static int
9634 flow_drv_action_update(struct rte_eth_dev *dev,
9635 		       struct rte_flow_action_handle *handle,
9636 		       const void *update,
9637 		       const struct mlx5_flow_driver_ops *fops,
9638 		       struct rte_flow_error *error)
9639 {
9640 	static const char err_msg[] = "indirect action update unsupported";
9641 
9642 	if (!fops->action_update) {
9643 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9644 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9645 				   NULL, err_msg);
9646 		return -rte_errno;
9647 	}
9648 	return fops->action_update(dev, handle, update, error);
9649 }
9650 
9651 /* Wrapper for driver action_query op callback */
9652 static int
9653 flow_drv_action_query(struct rte_eth_dev *dev,
9654 		      const struct rte_flow_action_handle *handle,
9655 		      void *data,
9656 		      const struct mlx5_flow_driver_ops *fops,
9657 		      struct rte_flow_error *error)
9658 {
9659 	static const char err_msg[] = "indirect action query unsupported";
9660 
9661 	if (!fops->action_query) {
9662 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9663 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9664 				   NULL, err_msg);
9665 		return -rte_errno;
9666 	}
9667 	return fops->action_query(dev, handle, data, error);
9668 }
9669 
9670 /**
9671  * Create indirect action for reuse in multiple flow rules.
9672  *
9673  * @param dev
9674  *   Pointer to Ethernet device structure.
9675  * @param conf
9676  *   Pointer to indirect action object configuration.
9677  * @param[in] action
9678  *   Action configuration for indirect action object creation.
9679  * @param[out] error
9680  *   Perform verbose error reporting if not NULL. PMDs initialize this
9681  *   structure in case of error only.
9682  * @return
9683  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
9684  */
9685 static struct rte_flow_action_handle *
9686 mlx5_action_handle_create(struct rte_eth_dev *dev,
9687 			  const struct rte_flow_indir_action_conf *conf,
9688 			  const struct rte_flow_action *action,
9689 			  struct rte_flow_error *error)
9690 {
9691 	static const char err_msg[] = "indirect action creation unsupported";
9692 	struct rte_flow_attr attr = { .transfer = 0 };
9693 	const struct mlx5_flow_driver_ops *fops =
9694 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9695 
9696 	if (flow_drv_action_validate(dev, conf, action, fops, error))
9697 		return NULL;
9698 	if (!fops->action_create) {
9699 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
9700 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
9701 				   NULL, err_msg);
9702 		return NULL;
9703 	}
9704 	return fops->action_create(dev, conf, action, error);
9705 }
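
/*
 * Usage sketch through the public rte_flow_action_handle_create(), here
 * with an indirect COUNT action (hypothetical port_id):
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action_count cnt_conf = { 0 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &cnt_conf,
 *	};
 *
 *	handle = rte_flow_action_handle_create(port_id, &conf, &action,
 *					       &error);
 */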
9706 
9707 /**
9708  * Updates in place the indirect action configuration pointed to by *handle*
9709  * with the configuration provided as the *update* argument.
9710  * Updating the indirect action configuration affects all flow rules
9711  * reusing the action via the handle.
9712  *
9713  * @param dev
9714  *   Pointer to Ethernet device structure.
9715  * @param[in] handle
9716  *   Handle for the indirect action to be updated.
9717  * @param[in] update
9718  *   Action specification used to modify the action pointed to by handle.
9719  *   *update* can be of the same type as the action pointed to by the
9720  *   *handle* argument, or some other structure, like a wrapper, depending
9721  *   on the indirect action type.
9722  * @param[out] error
9723  *   Perform verbose error reporting if not NULL. PMDs initialize this
9724  *   structure in case of error only.
9725  *
9726  * @return
9727  *   0 on success, a negative errno value otherwise and rte_errno is set.
9728  */
9729 static int
9730 mlx5_action_handle_update(struct rte_eth_dev *dev,
9731 		struct rte_flow_action_handle *handle,
9732 		const void *update,
9733 		struct rte_flow_error *error)
9734 {
9735 	struct rte_flow_attr attr = { .transfer = 0 };
9736 	const struct mlx5_flow_driver_ops *fops =
9737 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9738 	int ret;
9739 
9740 	ret = flow_drv_action_validate(dev, NULL,
9741 			(const struct rte_flow_action *)update, fops, error);
9742 	if (ret)
9743 		return ret;
9744 	return flow_drv_action_update(dev, handle, update, fops,
9745 				      error);
9746 }
9747 
9748 /**
9749  * Query the indirect action by handle.
9750  *
9751  * This function allows retrieving action-specific data such as counters.
9752  * Data is gathered by a special action which may be present/referenced in
9753  * more than one flow rule definition.
9754  *
9755  * see @RTE_FLOW_ACTION_TYPE_COUNT
9756  *
9757  * @param dev
9758  *   Pointer to Ethernet device structure.
9759  * @param[in] handle
9760  *   Handle for the indirect action to query.
9761  * @param[in, out] data
9762  *   Pointer to storage for the associated query data type.
9763  * @param[out] error
9764  *   Perform verbose error reporting if not NULL. PMDs initialize this
9765  *   structure in case of error only.
9766  *
9767  * @return
9768  *   0 on success, a negative errno value otherwise and rte_errno is set.
9769  */
9770 static int
9771 mlx5_action_handle_query(struct rte_eth_dev *dev,
9772 			 const struct rte_flow_action_handle *handle,
9773 			 void *data,
9774 			 struct rte_flow_error *error)
9775 {
9776 	struct rte_flow_attr attr = { .transfer = 0 };
9777 	const struct mlx5_flow_driver_ops *fops =
9778 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9779 
9780 	return flow_drv_action_query(dev, handle, data, fops, error);
9781 }
9782 
9783 /**
9784  * Destroy all indirect actions (shared RSS).
9785  *
9786  * @param dev
9787  *   Pointer to Ethernet device.
9788  *
9789  * @return
9790  *   0 on success, a negative errno value otherwise and rte_errno is set.
9791  */
9792 int
9793 mlx5_action_handle_flush(struct rte_eth_dev *dev)
9794 {
9795 	struct rte_flow_error error;
9796 	struct mlx5_priv *priv = dev->data->dev_private;
9797 	struct mlx5_shared_action_rss *shared_rss;
9798 	int ret = 0;
9799 	uint32_t idx;
9800 
9801 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
9802 		      priv->rss_shared_actions, idx, shared_rss, next) {
9803 		ret |= mlx5_action_handle_destroy(dev,
9804 		       (struct rte_flow_action_handle *)(uintptr_t)idx, &error);
9805 	}
9806 	return ret;
9807 }
9808 
9809 /**
9810  * Validate existing indirect actions against current device configuration
9811  * and attach them to device resources.
9812  *
9813  * @param dev
9814  *   Pointer to Ethernet device.
9815  *
9816  * @return
9817  *   0 on success, a negative errno value otherwise and rte_errno is set.
9818  */
9819 int
9820 mlx5_action_handle_attach(struct rte_eth_dev *dev)
9821 {
9822 	struct mlx5_priv *priv = dev->data->dev_private;
9823 	int ret = 0;
9824 	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
9825 
9826 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9827 		const char *message;
9828 		uint32_t queue_idx;
9829 
9830 		ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
9831 					       ind_tbl->queues_n,
9832 					       &message, &queue_idx);
9833 		if (ret != 0) {
9834 			DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
9835 				dev->data->port_id, ind_tbl->queues[queue_idx],
9836 				message);
9837 			break;
9838 		}
9839 	}
9840 	if (ret != 0)
9841 		return ret;
9842 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9843 		ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
9844 		if (ret != 0) {
9845 			DRV_LOG(ERR, "Port %u could not attach "
9846 				"indirection table obj %p",
9847 				dev->data->port_id, (void *)ind_tbl);
9848 			goto error;
9849 		}
9850 	}
9851 
9852 	return 0;
9853 error:
9854 	ind_tbl_last = ind_tbl;
9855 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9856 		if (ind_tbl == ind_tbl_last)
9857 			break;
9858 		if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
9859 			DRV_LOG(CRIT, "Port %u could not detach "
9860 				"indirection table obj %p on rollback",
9861 				dev->data->port_id, (void *)ind_tbl);
9862 	}
9863 	return ret;
9864 }
9865 
9866 /**
9867  * Detach indirect actions of the device from its resources.
9868  *
9869  * @param dev
9870  *   Pointer to Ethernet device.
9871  *
9872  * @return
9873  *   0 on success, a negative errno value otherwise and rte_errno is set.
9874  */
9875 int
9876 mlx5_action_handle_detach(struct rte_eth_dev *dev)
9877 {
9878 	struct mlx5_priv *priv = dev->data->dev_private;
9879 	int ret = 0;
9880 	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
9881 
9882 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9883 		ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
9884 		if (ret != 0) {
9885 			DRV_LOG(ERR, "Port %u could not detach "
9886 				"indirection table obj %p",
9887 				dev->data->port_id, (void *)ind_tbl);
9888 			goto error;
9889 		}
9890 	}
9891 	return 0;
9892 error:
9893 	ind_tbl_last = ind_tbl;
9894 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
9895 		if (ind_tbl == ind_tbl_last)
9896 			break;
9897 		if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
9898 			DRV_LOG(CRIT, "Port %u could not attach "
9899 				"indirection table obj %p on rollback",
9900 				dev->data->port_id, (void *)ind_tbl);
9901 	}
9902 	return ret;
9903 }
9904 
9905 #ifndef HAVE_MLX5DV_DR
9906 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
9907 #else
9908 #define MLX5_DOMAIN_SYNC_FLOW \
9909 	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
9910 #endif
9911 
9912 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
9913 {
9914 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
9915 	const struct mlx5_flow_driver_ops *fops;
9916 	int ret;
9917 	struct rte_flow_attr attr = { .transfer = 0 };
9918 
9919 	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
9920 	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
9921 	if (ret > 0)
9922 		ret = -ret;
9923 	return ret;
9924 }
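
/*
 * Usage sketch (domain bits from rte_pmd_mlx5.h):
 *
 *	ret = rte_pmd_mlx5_sync_flow(port_id,
 *				     MLX5_DOMAIN_BIT_NIC_RX |
 *				     MLX5_DOMAIN_BIT_NIC_TX |
 *				     MLX5_DOMAIN_BIT_FDB);
 */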
9925 
9926 const struct mlx5_flow_tunnel *
9927 mlx5_get_tof(const struct rte_flow_item *item,
9928 	     const struct rte_flow_action *action,
9929 	     enum mlx5_tof_rule_type *rule_type)
9930 {
9931 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
9932 		if (item->type == (typeof(item->type))
9933 				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
9934 			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
9935 			return flow_items_to_tunnel(item);
9936 		}
9937 	}
9938 	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
9939 		if (action->type == (typeof(action->type))
9940 				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
9941 			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
9942 			return flow_actions_to_tunnel(action);
9943 		}
9944 	}
9945 	return NULL;
9946 }
9947 
9948 /**
9949  * Tunnel offload functionality is defined for the DV environment only.
9950  */
9951 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9952 __extension__
9953 union tunnel_offload_mark {
9954 	uint32_t val;
9955 	struct {
9956 		uint32_t app_reserve:8;
9957 		uint32_t table_id:15;
9958 		uint32_t transfer:1;
9959 		uint32_t _unused_:8;
9960 	};
9961 };
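
/*
 * Worked example of the mark layout above (values are illustrative,
 * assuming the little-endian bit-field layout this driver targets):
 * a miss rule that restores table 5 in the FDB domain encodes
 *
 *	union tunnel_offload_mark mark_id = {
 *		.app_reserve = 0,	// bits  0-7, left to the application
 *		.table_id = 5,		// bits  8-22, restore table id
 *		.transfer = 1,		// bit  23, FDB (transfer) domain
 *		._unused_ = 0,		// bits 24-31
 *	};
 *
 * so mark_id.val == (1u << 23) | (5u << 8) == 0x800500, and
 * tunnel_mark_decode() below recovers table_id/transfer from .val.
 */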

static bool
mlx5_access_tunnel_offload_db
	(struct rte_eth_dev *dev,
	 bool (*match)(struct rte_eth_dev *,
		       struct mlx5_flow_tunnel *, const void *),
	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
	 void (*miss)(struct rte_eth_dev *, void *),
	 void *ctx, bool lock_op);

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     const struct mlx5_flow_tunnel *tunnel,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow *dev_flow;
	struct rte_flow_attr miss_attr = *attr;
	const struct rte_flow_item miss_items[2] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = NULL,
			.last = NULL,
			.mask = NULL
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
			.spec = NULL,
			.last = NULL,
			.mask = NULL
		}
	};
	union tunnel_offload_mark mark_id;
	struct rte_flow_action_mark miss_mark;
	struct rte_flow_action miss_actions[3] = {
		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
	};
	const struct rte_flow_action_jump *jump_data;
	uint32_t i, flow_table = 0; /* prevent compilation warning */
	struct flow_grp_info grp_info = {
		.external = 1,
		.transfer = attr->transfer,
		.fdb_def_rule = !!priv->fdb_def_rule,
		.std_tbl_fix = 0,
	};
	int ret;

	if (!attr->transfer) {
		uint32_t q_size;

		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
					 0, SOCKET_ID_ANY);
		if (!ctx->queue)
			return rte_flow_error_set
				(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				NULL, "invalid default miss RSS");
		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
		ctx->action_rss.level = 0;
		ctx->action_rss.types = priv->rss_conf.rss_hf;
		ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
		ctx->action_rss.queue_num = priv->reta_idx_n;
		ctx->action_rss.key = priv->rss_conf.rss_key;
		ctx->action_rss.queue = ctx->queue;
		if (!priv->reta_idx_n || !priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				NULL, "invalid port configuration");
		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
			ctx->action_rss.types = 0;
		for (i = 0; i != priv->reta_idx_n; ++i)
			ctx->queue[i] = (*priv->reta_idx)[i];
	} else {
		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
	}
	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
	/* Advance to the application JUMP action; tunnel set rules have one. */
	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++)
		;
	jump_data = app_actions->conf;
	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
	miss_attr.group = jump_data->group;
	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
				       &flow_table, &grp_info, error);
	if (ret)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "invalid tunnel id");
	mark_id.app_reserve = 0;
	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
	mark_id.transfer = !!attr->transfer;
	mark_id._unused_ = 0;
	miss_mark.id = mark_id.val;
	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
				    miss_items, miss_actions, flow_idx, error);
	if (!dev_flow)
		return -rte_errno;
	dev_flow->flow = flow;
	dev_flow->external = true;
	dev_flow->tunnel = tunnel;
	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
	/* The subflow object was created; it must be included in the list. */
	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
		      dev_flow->handle, next);
	DRV_LOG(DEBUG,
		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
		dev->data->port_id, tunnel->app_tunnel.type,
		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
				 miss_actions, error);
	if (!ret)
		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
						  error);

	return ret;
}

static const struct mlx5_flow_tbl_data_entry *
tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_list_entry *he;
	union tunnel_offload_mark mbits = { .val = mark };
	union mlx5_flow_tbl_key table_key = {
		{
			.level = tunnel_id_to_flow_tbl(mbits.table_id),
			.id = 0,
			.reserved = 0,
			.dummy = 0,
			.is_fdb = !!mbits.transfer,
			.is_egress = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.data = &table_key.v64,
	};

	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
	return he ?
	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}

static void
mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
				   struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
			tunnel_flow_tbl_to_id(tte->flow_table));
	mlx5_free(tte);
}

static int
mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
				  struct mlx5_list_entry *entry, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	union tunnel_tbl_key tbl = {
		.val = *(uint64_t *)(ctx->data),
	};
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

	/* Per the hash list convention: 0 means a match, nonzero a mismatch. */
	return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
}

static struct mlx5_list_entry *
mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct tunnel_tbl_entry *tte;
	union tunnel_tbl_key tbl = {
		.val = *(uint64_t *)(ctx->data),
	};

	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
			  sizeof(*tte), 0,
			  SOCKET_ID_ANY);
	if (!tte)
		goto err;
	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
			  &tte->flow_table);
	if (tte->flow_table >= MLX5_MAX_TABLES) {
		DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the maximum limit.",
			tte->flow_table);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
				tte->flow_table);
		goto err;
	} else if (!tte->flow_table) {
		/* An id of 0 means the ipool allocation failed. */
		goto err;
	}
	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
	tte->tunnel_id = tbl.tunnel_id;
	tte->group = tbl.group;
	return &tte->hash;
err:
	if (tte)
		mlx5_free(tte);
	return NULL;
}

static struct mlx5_list_entry *
mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
				  struct mlx5_list_entry *oentry,
				  void *cb_ctx __rte_unused)
{
	struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
						   0, SOCKET_ID_ANY);

	if (!tte)
		return NULL;
	memcpy(tte, oentry, sizeof(*tte));
	return &tte->hash;
}

static void
mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
				       struct mlx5_list_entry *entry)
{
	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

	mlx5_free(tte);
}
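
/*
 * The five callbacks above form the operation set of an mlx5 hash list:
 * create/match/remove manage the canonical entry while clone/clone_free
 * manage per-lcore copies. A sketch of how they are wired together (the
 * same pattern is used verbatim for tunnel->groups and thub->groups below;
 * sh is the struct mlx5_dev_ctx_shared pointer passed as tool_ctx):
 *
 *	struct mlx5_hlist *h;
 *
 *	h = mlx5_hlist_create("tunnel groups", 64, false, true, sh,
 *			      mlx5_flow_tunnel_grp2tbl_create_cb,
 *			      mlx5_flow_tunnel_grp2tbl_match_cb,
 *			      mlx5_flow_tunnel_grp2tbl_remove_cb,
 *			      mlx5_flow_tunnel_grp2tbl_clone_cb,
 *			      mlx5_flow_tunnel_grp2tbl_clone_free_cb);
 */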

static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
				const struct mlx5_flow_tunnel *tunnel,
				uint32_t group, uint32_t *table,
				struct rte_flow_error *error)
{
	struct mlx5_list_entry *he;
	struct tunnel_tbl_entry *tte;
	union tunnel_tbl_key key = {
		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
		.group = group
	};
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_hlist *group_hash;
	struct mlx5_flow_cb_ctx ctx = {
		.data = &key.val,
	};

	group_hash = tunnel ? tunnel->groups : thub->groups;
	he = mlx5_hlist_register(group_hash, key.val, &ctx);
	if (!he)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "tunnel group index not supported");
	tte = container_of(he, typeof(*tte), hash);
	*table = tte->flow_table;
	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
		dev->data->port_id, key.tunnel_id, group, *table);
	return 0;
}

static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
		      struct mlx5_flow_tunnel *tunnel)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *ipool;

	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
		dev->data->port_id, tunnel->tunnel_id);
	LIST_REMOVE(tunnel, chain);
	mlx5_hlist_destroy(tunnel->groups);
	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
	mlx5_ipool_free(ipool, tunnel->tunnel_id);
}

static bool
mlx5_access_tunnel_offload_db
	(struct rte_eth_dev *dev,
	 bool (*match)(struct rte_eth_dev *,
		       struct mlx5_flow_tunnel *, const void *),
	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
	 void (*miss)(struct rte_eth_dev *, void *),
	 void *ctx, bool lock_op)
{
	bool verdict = false;
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_flow_tunnel *tunnel;

	rte_spinlock_lock(&thub->sl);
	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
		verdict = match(dev, tunnel, (const void *)ctx);
		if (verdict)
			break;
	}
	if (!lock_op)
		rte_spinlock_unlock(&thub->sl);
	if (verdict && hit)
		hit(dev, tunnel, ctx);
	if (!verdict && miss)
		miss(dev, ctx);
	if (lock_op)
		rte_spinlock_unlock(&thub->sl);

	return verdict;
}

struct tunnel_db_find_tunnel_id_ctx {
	uint32_t tunnel_id;
	struct mlx5_flow_tunnel *tunnel;
};

static bool
find_tunnel_id_match(struct rte_eth_dev *dev,
		     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;

	RTE_SET_USED(dev);
	return tunnel->tunnel_id == ctx->tunnel_id;
}

static void
find_tunnel_id_hit(struct rte_eth_dev *dev,
		   struct mlx5_flow_tunnel *tunnel, void *x)
{
	struct tunnel_db_find_tunnel_id_ctx *ctx = x;

	RTE_SET_USED(dev);
	ctx->tunnel = tunnel;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
{
	struct tunnel_db_find_tunnel_id_ctx ctx = {
		.tunnel_id = id,
	};

	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
				      find_tunnel_id_hit, NULL, &ctx, true);

	return ctx.tunnel;
}

static struct mlx5_flow_tunnel *
mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
			  const struct rte_flow_tunnel *app_tunnel)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *ipool;
	struct mlx5_flow_tunnel *tunnel;
	uint32_t id;

	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
	tunnel = mlx5_ipool_zmalloc(ipool, &id);
	if (!tunnel)
		return NULL;
	if (id >= MLX5_MAX_TUNNELS) {
		mlx5_ipool_free(ipool, id);
		DRV_LOG(ERR, "Tunnel ID %d exceeds the maximum limit.", id);
		return NULL;
	}
	tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
					   priv->sh,
					   mlx5_flow_tunnel_grp2tbl_create_cb,
					   mlx5_flow_tunnel_grp2tbl_match_cb,
					   mlx5_flow_tunnel_grp2tbl_remove_cb,
					   mlx5_flow_tunnel_grp2tbl_clone_cb,
					   mlx5_flow_tunnel_grp2tbl_clone_free_cb);
	if (!tunnel->groups) {
		mlx5_ipool_free(ipool, id);
		return NULL;
	}
	/* Initialize the new PMD tunnel. */
	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
	tunnel->tunnel_id = id;
	tunnel->action.type = (typeof(tunnel->action.type))
			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
	tunnel->action.conf = tunnel;
	tunnel->item.type = (typeof(tunnel->item.type))
			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
	tunnel->item.spec = tunnel;
	tunnel->item.last = NULL;
	tunnel->item.mask = NULL;

	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
		dev->data->port_id, tunnel->tunnel_id);

	return tunnel;
}

struct tunnel_db_get_tunnel_ctx {
	const struct rte_flow_tunnel *app_tunnel;
	struct mlx5_flow_tunnel *tunnel;
};

static bool get_tunnel_match(struct rte_eth_dev *dev,
			     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_get_tunnel_ctx *ctx = x;

	RTE_SET_USED(dev);
	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
		       sizeof(*ctx->app_tunnel));
}

static void get_tunnel_hit(struct rte_eth_dev *dev,
			   struct mlx5_flow_tunnel *tunnel, void *x)
{
	/* Called under tunnel spinlock protection. */
	struct tunnel_db_get_tunnel_ctx *ctx = x;

	RTE_SET_USED(dev);
	tunnel->refctn++;
	ctx->tunnel = tunnel;
}

static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
{
	/* Called under tunnel spinlock protection. */
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct tunnel_db_get_tunnel_ctx *ctx = x;

	rte_spinlock_unlock(&thub->sl);
	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
	rte_spinlock_lock(&thub->sl);
	if (ctx->tunnel) {
		ctx->tunnel->refctn = 1;
		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
	}
}

static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
		     const struct rte_flow_tunnel *app_tunnel,
		     struct mlx5_flow_tunnel **tunnel)
{
	struct tunnel_db_get_tunnel_ctx ctx = {
		.app_tunnel = app_tunnel,
	};

	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
				      get_tunnel_miss, &ctx, true);
	*tunnel = ctx.tunnel;
	return ctx.tunnel ? 0 : -ENOMEM;
}
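
/*
 * Lifecycle sketch: mlx5_get_flow_tunnel() either bumps the refcount of an
 * existing tunnel whose app_tunnel matches byte-for-byte, or allocates a
 * fresh one with refctn == 1. The reference is dropped in
 * tunnel_element_release_hit() below, and the last drop frees the tunnel:
 *
 *	struct mlx5_flow_tunnel *tunnel;
 *
 *	if (mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel))
 *		return -ENOMEM;		// allocation failed
 *	// ... use tunnel->action / tunnel->item in PMD rules ...
 *	// released later through mlx5_flow_tunnel_item_release() or
 *	// mlx5_flow_tunnel_action_release()
 */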

void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
{
	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;

	if (!thub)
		return;
	if (!LIST_EMPTY(&thub->tunnels))
		DRV_LOG(WARNING, "port %u tunnels present", port_id);
	mlx5_hlist_destroy(thub->groups);
	mlx5_free(thub);
}

int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
{
	int err;
	struct mlx5_flow_tunnel_hub *thub;

	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
			   0, SOCKET_ID_ANY);
	if (!thub)
		return -ENOMEM;
	LIST_INIT(&thub->tunnels);
	rte_spinlock_init(&thub->sl);
	thub->groups = mlx5_hlist_create("flow groups", 64,
					 false, true, sh,
					 mlx5_flow_tunnel_grp2tbl_create_cb,
					 mlx5_flow_tunnel_grp2tbl_match_cb,
					 mlx5_flow_tunnel_grp2tbl_remove_cb,
					 mlx5_flow_tunnel_grp2tbl_clone_cb,
					 mlx5_flow_tunnel_grp2tbl_clone_free_cb);
	if (!thub->groups) {
		err = -rte_errno;
		goto err;
	}
	sh->tunnel_hub = thub;

	return 0;

err:
	/* thub is valid here and thub->groups is known to be NULL. */
	mlx5_free(thub);
	return err;
}

static inline int
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->sh->config.dv_flow_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "flow DV interface is off");
	if (!is_tunnel_offload_active(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "tunnel offload was not activated");
	if (!tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "no application tunnel");
	switch (tunnel->type) {
	default:
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "unsupported tunnel type");
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_GRE:
	case RTE_FLOW_ITEM_TYPE_NVGRE:
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		break;
	}
	return 0;
}

static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
			   struct rte_flow_tunnel *app_tunnel,
			   struct rte_flow_action **actions,
			   uint32_t *num_of_actions,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_tunnel *tunnel;
	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);

	if (ret)
		return ret;
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		/* Pass a positive errno value to rte_flow_error_set(). */
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "failed to initialize pmd tunnel");
	}
	*actions = &tunnel->action;
	*num_of_actions = 1;
	return 0;
}

static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error)
{
	struct mlx5_flow_tunnel *tunnel;
	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);

	if (ret)
		return ret;
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		/* Pass a positive errno value to rte_flow_error_set(). */
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "failed to initialize pmd tunnel");
	}
	*items = &tunnel->item;
	*num_of_items = 1;
	return 0;
}
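
/*
 * The two functions above back the generic rte_flow tunnel offload API. A
 * hedged application-side sketch (error handling elided; see rte_flow.h for
 * the exact prototypes of rte_flow_tunnel_decap_set() and
 * rte_flow_tunnel_match()):
 *
 *	struct rte_flow_tunnel app_tunnel = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *	};
 *	struct rte_flow_action *pmd_actions;
 *	struct rte_flow_item *pmd_items;
 *	uint32_t n_actions, n_items;
 *	struct rte_flow_error error;
 *
 *	// The PMD returns one action that sets tunnel context on decap rules.
 *	rte_flow_tunnel_decap_set(port_id, &app_tunnel, &pmd_actions,
 *				  &n_actions, &error);
 *	// The PMD returns one item that matches restored tunnel context.
 *	rte_flow_tunnel_match(port_id, &app_tunnel, &pmd_items,
 *			      &n_items, &error);
 */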

struct tunnel_db_element_release_ctx {
	struct rte_flow_item *items;
	struct rte_flow_action *actions;
	uint32_t num_elements;
	struct rte_flow_error *error;
	int ret;
};

static bool
tunnel_element_release_match(struct rte_eth_dev *dev,
			     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_element_release_ctx *ctx = x;

	RTE_SET_USED(dev);
	if (ctx->num_elements != 1)
		return false;
	if (ctx->items)
		return ctx->items == &tunnel->item;
	if (ctx->actions)
		return ctx->actions == &tunnel->action;
	return false;
}

static void
tunnel_element_release_hit(struct rte_eth_dev *dev,
			   struct mlx5_flow_tunnel *tunnel, void *x)
{
	struct tunnel_db_element_release_ctx *ctx = x;

	ctx->ret = 0;
	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
		mlx5_flow_tunnel_free(dev, tunnel);
}

static void
tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
{
	struct tunnel_db_element_release_ctx *ctx = x;

	RTE_SET_USED(dev);
	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				      "invalid argument");
}

static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = pmd_items,
		.actions = NULL,
		.num_elements = num_items,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}

static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = NULL,
		.actions = pmd_actions,
		.num_elements = num_actions,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}

static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err)
{
	uint64_t ol_flags = m->ol_flags;
	const struct mlx5_flow_tbl_data_entry *tble;
	const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;

	if (!is_tunnel_offload_active(dev)) {
		info->flags = 0;
		return 0;
	}

	if ((ol_flags & mask) != mask)
		goto err;
	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
	if (!tble) {
		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
			dev->data->port_id, m->hash.fdir.hi);
		goto err;
	}
	MLX5_ASSERT(tble->tunnel);
	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
	info->group_id = tble->group_id;
	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

	return 0;

err:
	return rte_flow_error_set(err, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to get restore info");
}
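
/*
 * Datapath sketch: applications reach the logic above through
 * rte_flow_get_restore_info(). Packets that missed on a tunnel group carry
 * the PMD mark in mbuf->hash.fdir.hi, which tunnel_mark_decode() translates
 * back to the flow table entry. The handle_tunnel_miss() helper below is
 * hypothetical:
 *
 *	struct rte_flow_restore_info info;
 *	struct rte_flow_error error;
 *
 *	if (!rte_flow_get_restore_info(port_id, mbuf, &info, &error) &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL))
 *		// info.tunnel describes the tunnel; info.flags also reports
 *		// whether the outer headers are still present
 *		// (RTE_FLOW_RESTORE_INFO_ENCAPSULATED).
 *		handle_tunnel_miss(mbuf, &info);
 */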

#else /* HAVE_IBV_FLOW_DV_SUPPORT */
static int
mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
			   __rte_unused struct rte_flow_tunnel *app_tunnel,
			   __rte_unused struct rte_flow_action **actions,
			   __rte_unused uint32_t *num_of_actions,
			   __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
		       __rte_unused struct rte_flow_tunnel *app_tunnel,
		       __rte_unused struct rte_flow_item **items,
		       __rte_unused uint32_t *num_of_items,
		       __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
			      __rte_unused struct rte_flow_item *pmd_items,
			      __rte_unused uint32_t num_items,
			      __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
				__rte_unused struct rte_flow_action *pmd_action,
				__rte_unused uint32_t num_actions,
				__rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
				  __rte_unused struct rte_mbuf *m,
				  __rte_unused struct rte_flow_restore_info *i,
				  __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
			     __rte_unused struct rte_flow *flow,
			     __rte_unused const struct rte_flow_attr *attr,
			     __rte_unused const struct rte_flow_action *actions,
			     __rte_unused uint32_t flow_idx,
			     __rte_unused const struct mlx5_flow_tunnel *tunnel,
			     __rte_unused struct tunnel_default_miss_ctx *ctx,
			     __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused uint32_t id)
{
	return NULL;
}

static void
mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
		      __rte_unused struct mlx5_flow_tunnel *tunnel)
{
}

static uint32_t
tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
				__rte_unused const struct mlx5_flow_tunnel *t,
				__rte_unused uint32_t group,
				__rte_unused uint32_t *table,
				struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "tunnel offload requires DV support");
}

void
mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
			__rte_unused uint16_t port_id)
{
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */

/* Flex flow item API */
static struct rte_flow_item_flex_handle *
mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
			   const struct rte_flow_item_flex_conf *conf,
			   struct rte_flow_error *error)
{
	static const char err_msg[] = "flex item creation unsupported";
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr attr = { .transfer = 0 };
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(flow_get_drv_type(dev, &attr));

	if (!priv->pci_dev) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "create flex item on PF only");
		return NULL;
	}
	switch (priv->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
		break;
	default:
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "flex item available on BlueField ports only");
		return NULL;
	}
	if (!fops->item_create) {
		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, err_msg);
		return NULL;
	}
	return fops->item_create(dev, conf, error);
}

static int
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
			    const struct rte_flow_item_flex_handle *handle,
			    struct rte_flow_error *error)
{
	static const char err_msg[] = "flex item release unsupported";
	struct rte_flow_attr attr = { .transfer = 0 };
	const struct mlx5_flow_driver_ops *fops =
			flow_get_drv_ops(flow_get_drv_type(dev, &attr));

	if (!fops->item_release) {
		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, err_msg);
		return -rte_errno;
	}
	return fops->item_release(dev, handle, error);
}

static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item)
{
	int ret;
	struct rte_flow_error error;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		char *item_name;

		ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
				    sizeof(item_name),
				    (void *)(uintptr_t)item->type, &error);
		if (ret > 0)
			printf("%s ", item_name);
		else
			printf("%d\n", (int)item->type);
	}
	printf("END\n");
}

static int
mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
{
	const struct rte_flow_item_udp *spec = udp_item->spec;
	const struct rte_flow_item_udp *mask = udp_item->mask;
	uint16_t udp_dport = 0;

	if (spec != NULL) {
		if (!mask)
			mask = &rte_flow_item_udp_mask;
		udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
				mask->hdr.dst_port);
	}
	return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
}
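
/*
 * Example of the check above: both UDP items below are treated as a
 * "standard VXLAN port" match; the first matches dst_port 4789 explicitly,
 * the second leaves dst_port unmasked (udp_dport stays 0):
 *
 *	const struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN) },
 *	};
 *	const struct rte_flow_item udp_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &udp_spec,
 *		.mask = &rte_flow_item_udp_mask,
 *	};
 */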

static const struct mlx5_flow_expand_node *
mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
		unsigned int item_idx,
		const struct mlx5_flow_expand_node graph[],
		const struct mlx5_flow_expand_node *node)
{
	const struct rte_flow_item *item = pattern + item_idx, *prev_item;

	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
			node != NULL &&
			node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		/*
		 * The expansion node is VXLAN and it is also the last
		 * expandable item in the pattern, so expansion of the
		 * inner tunnel must continue.
		 */
		MLX5_ASSERT(item_idx > 0);
		prev_item = pattern + item_idx - 1;
		MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
		if (mlx5_flow_is_std_vxlan_port(prev_item))
			return &graph[MLX5_EXPANSION_STD_VXLAN];
		return &graph[MLX5_EXPANSION_L3_VXLAN];
	}
	return node;
}

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/**
 * Discover the number of available flow priorities.
 *
 * @param dev
 *   Ethernet device.
 *
 * @return
 *   On success, number of available flow priorities.
 *   On failure, a negative errno-style code and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	static const uint16_t vprio[] = {8, 16};
	const struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_flow_driver_ops *fops;
	enum mlx5_flow_drv_type type;
	int ret;

	type = mlx5_flow_os_get_type();
	if (type == MLX5_FLOW_TYPE_MAX) {
		type = MLX5_FLOW_TYPE_VERBS;
		if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
			type = MLX5_FLOW_TYPE_DV;
	}
	fops = flow_get_drv_ops(type);
	if (fops->discover_priorities == NULL) {
		DRV_LOG(ERR, "Priority discovery not supported");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
	if (ret < 0)
		return ret;
	switch (ret) {
	case 8:
		ret = RTE_DIM(priority_map_3);
		break;
	case 16:
		ret = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u maximum priority: %d expected 8/16",
			dev->data->port_id, ret);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, ret - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return ret;
}
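
/*
 * Worked example of the probe above: if the kernel/FW accepts 8 Verbs
 * priorities, the function returns RTE_DIM(priority_map_3) == 3 usable rule
 * priorities; 16 Verbs priorities map to RTE_DIM(priority_map_5) == 5. Any
 * other answer is rejected with ENOTSUP.
 */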

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->sh->flow_max_priority) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
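
/*
 * Worked example for mlx5_flow_adjust_priority(): on a device with 16 Verbs
 * priorities (priority_map_5), a rule with base priority 1 and item-derived
 * subpriority 2 lands on Verbs priority priority_map_5[1][2] == 5; with only
 * 8 Verbs priorities it would get priority_map_3[1][2] == 4.
 */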

/**
 * Get the E-Switch Manager vport id.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   The vport id.
 */
int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;

	/* New FW exposes the E-Switch Manager vport ID; use it directly. */
	if (cdev->config.hca_attr.esw_mgr_vport_id_valid)
		return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id;

	if (priv->pci_dev == NULL)
		return 0;
	switch (priv->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
		/*
		 * Old FW does not expose the E-Switch Manager vport ID in its
		 * capabilities, and only the BF embedded CPUs control the
		 * E-Switch Manager port. Hence, the ECPF vport ID is selected
		 * and not the host port (0) in any BF case.
		 */
		return (int16_t)MLX5_ECPF_VPORT_ID;
	default:
		return MLX5_PF_VPORT_ID;
	}
}

/**
 * Parse an item to get the vport id.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   The src port id match item.
 * @param[out] vport_id
 *   Pointer to put the vport id.
 * @param[out] all_ports
 *   Indicate if the item matches all ports.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
				const struct rte_flow_item *item,
				uint16_t *vport_id,
				bool *all_ports,
				struct rte_flow_error *error)
{
	struct mlx5_priv *port_priv;
	const struct rte_flow_item_port_id *pid_v;
	uint32_t esw_mgr_port;

	if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID &&
	    item->type != RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT)
		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL, "Incorrect item type.");
	pid_v = item->spec;
	if (!pid_v) {
		if (all_ports)
			*all_ports = (item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT);
		return 0;
	}
	if (all_ports)
		*all_ports = false;
	esw_mgr_port = (item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) ?
		       MLX5_REPRESENTED_PORT_ESW_MGR : MLX5_PORT_ESW_MGR;
	if (pid_v->id == esw_mgr_port) {
		*vport_id = mlx5_flow_get_esw_manager_vport_id(dev);
	} else {
		port_priv = mlx5_port_to_eswitch_info(pid_v->id, false);
		if (!port_priv)
			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
						  NULL, "Failed to get port info.");
		*vport_id = port_priv->representor_id;
	}

	return 0;
}
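
/*
 * Caller sketch (illustrative): translating a PORT_ID item into the vport
 * id used by the steering engine. The dpdk_port variable is hypothetical;
 * the item layout follows rte_flow.h.
 *
 *	struct rte_flow_item_port_id port_spec = { .id = dpdk_port };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &port_spec,
 *	};
 *	uint16_t vport;
 *	bool all_ports;
 *	struct rte_flow_error error;
 *
 *	if (mlx5_flow_get_item_vport_id(dev, &item, &vport, &all_ports,
 *					&error) < 0)
 *		return -rte_errno;
 */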