/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

struct tunnel_default_miss_ctx {
	uint16_t *queue;
	__extension__
	union {
		struct rte_flow_action_rss action_rss;
		struct rte_flow_action_queue miss_queue;
		struct rte_flow_action_jump miss_jump;
		uint8_t raw[0];
	};
};

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     const struct mlx5_flow_tunnel *tunnel,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
				const struct mlx5_flow_tunnel *tunnel,
				uint32_t group, uint32_t *table,
				struct rte_flow_error *error);

static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
static void mlx5_flow_pop_thread_workspace(void);

/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
	(const int []){ \
		__VA_ARGS__, 0, \
	}

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
	const int *const next;
	/**<
	 * List of next node indexes. A value of 0 is interpreted as
	 * a terminator.
	 */
	const enum rte_flow_item_type type;
	/**< Pattern item type of current node. */
	uint64_t rss_types;
	/**<
	 * RSS types bit-field associated with this node
	 * (see ETH_RSS_* definitions).
	 */
	uint8_t optional;
	/**< Optional expansion field. 0 to expand (default), 1 to not go deeper. */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
	uint32_t entries;
	/**< Number of entries in @p entry: patterns and priorities. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

static void
mlx5_dbg__print_pattern(const struct rte_flow_item *item);

static bool
mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
{
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
	case RTE_FLOW_ITEM_TYPE_VLAN:
	case RTE_FLOW_ITEM_TYPE_IPV4:
	case RTE_FLOW_ITEM_TYPE_IPV6:
	case RTE_FLOW_ITEM_TYPE_UDP:
	case RTE_FLOW_ITEM_TYPE_TCP:
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_NVGRE:
	case RTE_FLOW_ITEM_TYPE_GRE:
	case RTE_FLOW_ITEM_TYPE_GENEVE:
	case RTE_FLOW_ITEM_TYPE_MPLS:
		return true;
	default:
		break;
	}
	return false;
}

static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
						(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
				(item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
						(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
				(item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					(item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				(item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
						(item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				(item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}
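
/*
 * Illustration (editor's sketch, not part of the driver): with a fully
 * masked ETH item whose spec carries the IPv4 ether type, the helper above
 * reports IPv4 as the item type completing the pattern:
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &rte_flow_item_eth_mask, // default .type mask is 0xffff
 *	};
 *	// mlx5_flow_expand_rss_item_complete(&item) == RTE_FLOW_ITEM_TYPE_IPV4
 */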

#define MLX5_RSS_EXP_ELT_N 16

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set; the following errors are defined:
 *
 *   -E2BIG: the expansion depth of @p graph is too large.
 *   -EINVAL: @p size is not large enough for the expanded pattern.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
		     const struct rte_flow_item *pattern, uint64_t types,
		     const struct mlx5_flow_expand_node graph[],
		     int graph_root_index)
{
	const struct rte_flow_item *item;
	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[MLX5_RSS_EXP_ELT_N];
	int stack_pos = 0;
	struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct mlx5_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
		MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
	if (lsize > size)
		return -EINVAL;
	buf->entry[0].priority = 0;
	buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
	buf->entries = 0;
	addr = buf->entry[0].pattern;
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!mlx5_flow_is_rss_expandable_item(item)) {
			user_pattern_size += sizeof(*item);
			continue;
		}
		last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	if (lsize > size)
		return -EINVAL;
	/* Copy the user pattern in the first entry of the buffer. */
	rte_memcpy(addr, pattern, user_pattern_size);
	addr = (void *)(((uintptr_t)addr) + user_pattern_size);
	buf->entries = 1;
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check if the last valid item has spec set; if so, complete the
	 * pattern so it can be used for expansion.
	 */
	missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize > size)
			return -EINVAL;
		if (node->rss_types & types) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			size_t n;
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them.
			 * When stack_pos is 0, there is 1 element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize > size)
				return -EINVAL;
			n = elt * sizeof(*item);
			buf->entry[buf->entries].priority =
				stack_pos + 1 + missed;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) +
					user_pattern_size);
			rte_memcpy(addr, &missed_item,
				   missed * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
				missed * sizeof(*item));
			rte_memcpy(addr, flow_items, n);
			addr = (void *)(((uintptr_t)addr) + n);
		}
		/* Go deeper. */
		if (!node->optional && node->next) {
			next_node = node->next;
			if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	return lsize;
}
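
/*
 * Usage sketch (hypothetical caller, not part of the driver): expanding a
 * bare ETH pattern for ETH_RSS_UDP yields the user pattern itself plus the
 * deeper UDP-terminated variants reachable in the expansion graph, each
 * with a priority offset proportional to its depth:
 *
 *	uint8_t raw[4096];
 *	struct mlx5_flow_expand_rss *exp = (struct mlx5_flow_expand_rss *)raw;
 *	struct rte_flow_item pat[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	int ret = mlx5_flow_expand_rss(exp, sizeof(raw), pat, ETH_RSS_UDP,
 *				       mlx5_support_expansion,
 *				       MLX5_EXPANSION_ROOT);
 *	// On success ret > 0 and exp->entries patterns are available;
 *	// exp->entry[0].pattern is the unmodified user pattern.
 */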

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_NVGRE,
	MLX5_EXPANSION_GRE_KEY,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						  MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
						(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_NVGRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_NVGRE),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_GRE_KEY,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_GRE_KEY] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
		.optional = 1,
	},
	[MLX5_EXPANSION_NVGRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6,
						  MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						  MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						  MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
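
/*
 * Reading the graph (editor's note): every node lists its children by
 * enum index, so one user item can fan out into several deeper patterns.
 * For instance, the children of MLX5_EXPANSION_OUTER_IPV4_UDP are the
 * VXLAN, VXLAN-GPE and MPLS continuations, and can be walked like this:
 *
 *	const struct mlx5_flow_expand_node *node =
 *		&mlx5_support_expansion[MLX5_EXPANSION_OUTER_IPV4_UDP];
 *	const int *child;
 *
 *	for (child = node->next; child && *child; ++child)
 *		process(&mlx5_support_expansion[*child]); // process() is
 *							  // hypothetical
 */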

static struct rte_flow_action_handle *
mlx5_action_handle_create(struct rte_eth_dev *dev,
			  const struct rte_flow_indir_action_conf *conf,
			  const struct rte_flow_action *action,
			  struct rte_flow_error *error);
static int mlx5_action_handle_destroy
				(struct rte_eth_dev *dev,
				 struct rte_flow_action_handle *handle,
				 struct rte_flow_error *error);
static int mlx5_action_handle_update
				(struct rte_eth_dev *dev,
				 struct rte_flow_action_handle *handle,
				 const void *update,
				 struct rte_flow_error *error);
static int mlx5_action_handle_query
				(struct rte_eth_dev *dev,
				 const struct rte_flow_action_handle *handle,
				 void *data,
				 struct rte_flow_error *error);
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
		    struct rte_flow_tunnel *app_tunnel,
		    struct rte_flow_action **actions,
		    uint32_t *num_of_actions,
		    struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error);
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err);
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err);

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
	.get_aged_flows = mlx5_flow_get_aged_flows,
	.action_handle_create = mlx5_action_handle_create,
	.action_handle_destroy = mlx5_action_handle_destroy,
	.action_handle_update = mlx5_action_handle_update,
	.action_handle_query = mlx5_action_handle_query,
	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
	.tunnel_match = mlx5_flow_tunnel_match,
	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
	.tunnel_item_release = mlx5_flow_tunnel_item_release,
	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_ID:
		/*
		 * If meter color and meter id share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
	case MLX5_ASO_FLOW_HIT:
	case MLX5_ASO_CONNTRACK:
		/* All features use the same REG_C. */
		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is used in the meter suffix
		 * sub-flow when a meter is present. It is safe to share the
		 * same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If meter is enabled, it engages a register for color match
		 * and flow match. If meter color match is not using REG_C_2,
		 * the REG_C_x used by meter color match needs to be skipped.
		 * If meter is disabled, all available registers are free to
		 * use.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (uint32_t)(REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (id >= (uint32_t)(REG_C_7 - start_reg))
				return rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
							NULL, "invalid tag id");
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NON)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
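
/*
 * Example (illustrative only): resolving the register backing application
 * tag 0. The result depends on dv_xmeta_en and on whether a meter already
 * occupies one of the REG_C registers:
 *
 *	struct rte_flow_error err;
 *	int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, &err);
 *
 *	if (reg < 0)
 *		; // rte_errno is set and err.message explains the failure
 *	else
 *		; // reg is one of REG_C_2..REG_C_7
 */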

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata register, which could mean,
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Get the lowest priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 *
 * @return
 *   The value of the lowest priority of the flow.
 */
uint32_t
mlx5_get_lowest_priority(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!attr->group && !attr->transfer)
		return priv->config.flow_prio - 2;
	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}

/**
 * Calculate matcher priority of the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The matcher priority of the flow.
 */
uint16_t
mlx5_get_matcher_priority(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  uint32_t subpriority)
{
	uint16_t priority = (uint16_t)attr->priority;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!attr->group && !attr->transfer) {
		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
			priority = priv->config.flow_prio - 1;
		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
	}
	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
	return priority * 3 + subpriority;
}
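
/*
 * Worked example of the formula above (editor's note): a non-root flow
 * (group != 0) with attr->priority == 2 and subpriority == 1 gets matcher
 * priority 2 * 3 + 1 = 7; with the lowest-priority indicator, it gets
 * MLX5_NON_ROOT_FLOW_MAX_PRIO * 3 + 1 instead.
 */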

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  bool range_accepted,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last && !range_accepted) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
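
/*
 * Usage sketch (hypothetical NIC mask): a UDP item passes the check above
 * when its mask only enables bits that the NIC mask enables too:
 *
 *	const struct rte_flow_item_udp nic_mask = {
 *		.hdr = {
 *			.src_port = RTE_BE16(UINT16_MAX),
 *			.dst_port = RTE_BE16(UINT16_MAX),
 *		},
 *	};
 *	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)item->mask,
 *					(const uint8_t *)&nic_mask,
 *					sizeof(nic_mask), false, error);
 */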

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5 RSS descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = rss_desc->level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}
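
/*
 * Illustration (assumed values): for an IPv4 layer under a tunnel with
 * rss_desc->level >= 2 the IPv4 hash fields are kept and tagged as inner;
 * with level <= 1 the same call returns 0 and the layer is not hashed:
 *
 *	fields = mlx5_flow_hashfields_adjust(rss_desc, 1, ETH_RSS_IPV4,
 *					     IBV_RX_HASH_SRC_IPV4 |
 *					     IBV_RX_HASH_DST_IPV4);
 */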

/**
 * Look up and set the ptype in the Rx data part. A single ptype can be used;
 * if several tunnel rules are used on this queue, the tunnel ptype will be
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must be always enabled (metadata may arrive
		 * from another port, not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *data;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		if (!(*priv->rxqs)[i])
			continue;
		data = (*priv->rxqs)[i];
		if (!rte_flow_dynf_metadata_avail()) {
			data->dynf_meta = 0;
			data->flow_meta_mask = 0;
			data->flow_meta_offset = -1;
			data->flow_meta_port_mask = 0;
		} else {
			data->dynf_meta = 1;
			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
			data->flow_meta_port_mask = (uint32_t)~0;
			if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
				data->flow_meta_port_mask >>= 16;
		}
	}
}
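
/*
 * Application-side sketch (generic rte_flow API, not mlx5-specific): the
 * dynamic metadata field consulted above only exists after the application
 * registers it, typically before starting the port:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		; // rte_errno is set, metadata delivery stays unavailable
 *	// After Rx, per-packet metadata can then be read with:
 *	// uint32_t meta = *RTE_FLOW_DYNF_METADATA(mbuf);
 */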

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}
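
/*
 * Usage sketch: picking the RSS action out of an action list, e.g. when
 * splitting a flow (variables are hypothetical):
 *
 *	const struct rte_flow_action *rss_action =
 *		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 *
 *	if (rss_action)
 *		rss_conf = rss_action->conf;
 */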

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_validate_action_rss(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
	unsigned int i;

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
		rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
					struct mlx5_rxq_ctrl, rxq);
		if (i == 0)
			rxq_type = rxq_ctrl->type;
		if (rxq_type != rxq_ctrl->type)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i],
				 "combining hairpin and regular RSS queues is not supported");
	}
	return 0;
}
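
/*
 * Illustration (hypothetical configuration): an RSS action that satisfies
 * the checks above, spreading IPv4/UDP traffic over two configured Rx
 * queues with the default key:
 *
 *	uint16_t queues[] = { 0, 1 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 1,
 *		.types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
 *		.key = NULL,
 *		.key_len = 0,
 *		.queue = queues,
 *		.queue_num = 2,
 *	};
 */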

/*
 * Validate the RSS action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	ret = mlx5_validate_action_rss(dev, action, error);
	if (ret)
		return ret;
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "RSS on eCPRI is not supported now");
	}
	if ((item_flags & MLX5_FLOW_LAYER_MPLS) &&
	    !(item_flags &
	      (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) &&
	    rss->level > 1)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern");
	return 0;
}

/*
 * Validate the default miss action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_default_miss(uint64_t action_flags,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "default miss action not supported "
					  "for egress");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
					  "only group 0 is supported");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	return 0;
}
1723 
/**
1725  * Validate the count action.
1726  *
1727  * @param[in] dev
1728  *   Pointer to the Ethernet device structure.
1729  * @param[in] attr
1730  *   Attributes of flow that includes this action.
1731  * @param[out] error
1732  *   Pointer to error structure.
1733  *
1734  * @return
1735  *   0 on success, a negative errno value otherwise and rte_errno is set.
1736  */
1737 int
1738 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1739 				const struct rte_flow_attr *attr,
1740 				struct rte_flow_error *error)
1741 {
1742 	if (attr->egress)
1743 		return rte_flow_error_set(error, ENOTSUP,
1744 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1745 					  "count action not supported for "
1746 					  "egress");
1747 	return 0;
1748 }
1749 
/**
1751  * Validate the ASO CT action.
1752  *
1753  * @param[in] dev
1754  *   Pointer to the Ethernet device structure.
1755  * @param[in] conntrack
1756  *   Pointer to the CT action profile.
1757  * @param[out] error
1758  *   Pointer to error structure.
1759  *
1760  * @return
1761  *   0 on success, a negative errno value otherwise and rte_errno is set.
1762  */
1763 int
1764 mlx5_validate_action_ct(struct rte_eth_dev *dev,
1765 			const struct rte_flow_action_conntrack *conntrack,
1766 			struct rte_flow_error *error)
1767 {
1768 	RTE_SET_USED(dev);
1769 
1770 	if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT)
1771 		return rte_flow_error_set(error, EINVAL,
1772 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1773 					  "Invalid CT state");
1774 	if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST)
1775 		return rte_flow_error_set(error, EINVAL,
1776 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1777 					  "Invalid last TCP packet flag");
1778 	return 0;
1779 }
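
/*
 * Usage sketch (illustration only, compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): a conntrack action profile that passes
 * both range checks above -- a known TCP state and a valid "last
 * packet" flag. The values are made up.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_conntrack example_ct_conf = {
	.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
	.last_index = RTE_FLOW_CONNTRACK_FLAG_ACK,
};
#endif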
1780 
1781 /**
1782  * Verify the @p attributes will be correctly understood by the NIC and store
1783  * them in the @p flow if everything is correct.
1784  *
1785  * @param[in] dev
1786  *   Pointer to the Ethernet device structure.
1787  * @param[in] attributes
1788  *   Pointer to flow attributes
1789  * @param[out] error
1790  *   Pointer to error structure.
1791  *
1792  * @return
1793  *   0 on success, a negative errno value otherwise and rte_errno is set.
1794  */
1795 int
1796 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1797 			      const struct rte_flow_attr *attributes,
1798 			      struct rte_flow_error *error)
1799 {
1800 	struct mlx5_priv *priv = dev->data->dev_private;
1801 	uint32_t priority_max = priv->config.flow_prio - 1;
1802 
1803 	if (attributes->group)
1804 		return rte_flow_error_set(error, ENOTSUP,
1805 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups are not supported");
1807 	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
1808 	    attributes->priority >= priority_max)
1809 		return rte_flow_error_set(error, ENOTSUP,
1810 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1811 					  NULL, "priority out of range");
1812 	if (attributes->egress)
1813 		return rte_flow_error_set(error, ENOTSUP,
1814 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1815 					  "egress is not supported");
1816 	if (attributes->transfer && !priv->config.dv_esw_en)
1817 		return rte_flow_error_set(error, ENOTSUP,
1818 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1819 					  NULL, "transfer is not supported");
1820 	if (!attributes->ingress)
1821 		return rte_flow_error_set(error, EINVAL,
1822 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1823 					  NULL,
1824 					  "ingress attribute is mandatory");
1825 	return 0;
1826 }
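
/*
 * Usage sketch (illustration only, compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): flow attributes this validator
 * accepts -- ingress only, group 0, priority below flow_prio.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_attr example_flow_attr = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
};
#endif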
1827 
1828 /**
1829  * Validate ICMP6 item.
1830  *
1831  * @param[in] item
1832  *   Item specification.
1833  * @param[in] item_flags
1834  *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1837  * @param[out] error
1838  *   Pointer to error structure.
1839  *
1840  * @return
1841  *   0 on success, a negative errno value otherwise and rte_errno is set.
1842  */
1843 int
1844 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1845 			       uint64_t item_flags,
1846 			       uint8_t target_protocol,
1847 			       struct rte_flow_error *error)
1848 {
1849 	const struct rte_flow_item_icmp6 *mask = item->mask;
1850 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1851 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1852 				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1853 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1854 				      MLX5_FLOW_LAYER_OUTER_L4;
1855 	int ret;
1856 
1857 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1858 		return rte_flow_error_set(error, EINVAL,
1859 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1860 					  "protocol filtering not compatible"
1861 					  " with ICMP6 layer");
1862 	if (!(item_flags & l3m))
1863 		return rte_flow_error_set(error, EINVAL,
1864 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1865 					  "IPv6 is mandatory to filter on"
1866 					  " ICMP6");
1867 	if (item_flags & l4m)
1868 		return rte_flow_error_set(error, EINVAL,
1869 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1870 					  "multiple L4 layers not supported");
1871 	if (!mask)
1872 		mask = &rte_flow_item_icmp6_mask;
1873 	ret = mlx5_flow_item_acceptable
1874 		(item, (const uint8_t *)mask,
1875 		 (const uint8_t *)&rte_flow_item_icmp6_mask,
1876 		 sizeof(struct rte_flow_item_icmp6),
1877 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1878 	if (ret < 0)
1879 		return ret;
1880 	return 0;
1881 }
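
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): the inner/outer mask selection used by
 * this and the following item validators. Once a tunnel item has been
 * seen, subsequent items describe the inner packet, so the inner-layer
 * bits are tested instead of the outer ones.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint64_t
example_l3_ipv6_layer(uint64_t item_flags)
{
	return (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
	       MLX5_FLOW_LAYER_INNER_L3_IPV6 : MLX5_FLOW_LAYER_OUTER_L3_IPV6;
}
#endif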
1882 
1883 /**
1884  * Validate ICMP item.
1885  *
1886  * @param[in] item
1887  *   Item specification.
1888  * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1890  * @param[out] error
1891  *   Pointer to error structure.
1892  *
1893  * @return
1894  *   0 on success, a negative errno value otherwise and rte_errno is set.
1895  */
1896 int
1897 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1898 			     uint64_t item_flags,
1899 			     uint8_t target_protocol,
1900 			     struct rte_flow_error *error)
1901 {
1902 	const struct rte_flow_item_icmp *mask = item->mask;
1903 	const struct rte_flow_item_icmp nic_mask = {
1904 		.hdr.icmp_type = 0xff,
1905 		.hdr.icmp_code = 0xff,
1906 		.hdr.icmp_ident = RTE_BE16(0xffff),
1907 		.hdr.icmp_seq_nb = RTE_BE16(0xffff),
1908 	};
1909 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1910 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1911 				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1912 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1913 				      MLX5_FLOW_LAYER_OUTER_L4;
1914 	int ret;
1915 
1916 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1917 		return rte_flow_error_set(error, EINVAL,
1918 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1919 					  "protocol filtering not compatible"
1920 					  " with ICMP layer");
1921 	if (!(item_flags & l3m))
1922 		return rte_flow_error_set(error, EINVAL,
1923 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1924 					  "IPv4 is mandatory to filter"
1925 					  " on ICMP");
1926 	if (item_flags & l4m)
1927 		return rte_flow_error_set(error, EINVAL,
1928 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1929 					  "multiple L4 layers not supported");
1930 	if (!mask)
1931 		mask = &nic_mask;
1932 	ret = mlx5_flow_item_acceptable
1933 		(item, (const uint8_t *)mask,
1934 		 (const uint8_t *)&nic_mask,
1935 		 sizeof(struct rte_flow_item_icmp),
1936 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1937 	if (ret < 0)
1938 		return ret;
1939 	return 0;
1940 }
1941 
1942 /**
1943  * Validate Ethernet item.
1944  *
1945  * @param[in] item
1946  *   Item specification.
1947  * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
1949  * @param[out] error
1950  *   Pointer to error structure.
1951  *
1952  * @return
1953  *   0 on success, a negative errno value otherwise and rte_errno is set.
1954  */
1955 int
1956 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1957 			    uint64_t item_flags, bool ext_vlan_sup,
1958 			    struct rte_flow_error *error)
1959 {
1960 	const struct rte_flow_item_eth *mask = item->mask;
1961 	const struct rte_flow_item_eth nic_mask = {
1962 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1963 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1964 		.type = RTE_BE16(0xffff),
1965 		.has_vlan = ext_vlan_sup ? 1 : 0,
1966 	};
1967 	int ret;
1968 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1969 	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
1970 				       MLX5_FLOW_LAYER_OUTER_L2;
1971 
1972 	if (item_flags & ethm)
1973 		return rte_flow_error_set(error, ENOTSUP,
1974 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1975 					  "multiple L2 layers not supported");
1976 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1977 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1978 		return rte_flow_error_set(error, EINVAL,
1979 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1980 					  "L2 layer should not follow "
1981 					  "L3 layers");
1982 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1983 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1984 		return rte_flow_error_set(error, EINVAL,
1985 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1986 					  "L2 layer should not follow VLAN");
1987 	if (!mask)
1988 		mask = &rte_flow_item_eth_mask;
1989 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1990 					(const uint8_t *)&nic_mask,
1991 					sizeof(struct rte_flow_item_eth),
1992 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1993 	return ret;
1994 }
1995 
1996 /**
1997  * Validate VLAN item.
1998  *
1999  * @param[in] item
2000  *   Item specification.
2001  * @param[in] item_flags
2002  *   Bit-fields that holds the items detected until now.
2003  * @param[in] dev
2004  *   Ethernet device flow is being created on.
2005  * @param[out] error
2006  *   Pointer to error structure.
2007  *
2008  * @return
2009  *   0 on success, a negative errno value otherwise and rte_errno is set.
2010  */
2011 int
2012 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
2013 			     uint64_t item_flags,
2014 			     struct rte_eth_dev *dev,
2015 			     struct rte_flow_error *error)
2016 {
2017 	const struct rte_flow_item_vlan *spec = item->spec;
2018 	const struct rte_flow_item_vlan *mask = item->mask;
2019 	const struct rte_flow_item_vlan nic_mask = {
2020 		.tci = RTE_BE16(UINT16_MAX),
2021 		.inner_type = RTE_BE16(UINT16_MAX),
2022 	};
2023 	uint16_t vlan_tag = 0;
2024 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2025 	int ret;
2026 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2027 					MLX5_FLOW_LAYER_INNER_L4) :
2028 				       (MLX5_FLOW_LAYER_OUTER_L3 |
2029 					MLX5_FLOW_LAYER_OUTER_L4);
2030 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2031 					MLX5_FLOW_LAYER_OUTER_VLAN;
2032 
2033 	if (item_flags & vlanm)
2034 		return rte_flow_error_set(error, EINVAL,
2035 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2036 					  "multiple VLAN layers not supported");
2037 	else if ((item_flags & l34m) != 0)
2038 		return rte_flow_error_set(error, EINVAL,
2039 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2040 					  "VLAN cannot follow L3/L4 layer");
2041 	if (!mask)
2042 		mask = &rte_flow_item_vlan_mask;
2043 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2044 					(const uint8_t *)&nic_mask,
2045 					sizeof(struct rte_flow_item_vlan),
2046 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2047 	if (ret)
2048 		return ret;
2049 	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2050 		struct mlx5_priv *priv = dev->data->dev_private;
2051 
2052 		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we run in a virtual
			 * machine with SR-IOV enabled and must create a
			 * VLAN interface so the hypervisor sets up the
			 * E-Switch vport context correctly. We avoid
			 * creating multiple VLAN interfaces, so we cannot
			 * support a VLAN tag mask.
			 */
2060 			return rte_flow_error_set(error, EINVAL,
2061 						  RTE_FLOW_ERROR_TYPE_ITEM,
2062 						  item,
2063 						  "VLAN tag mask is not"
2064 						  " supported in virtual"
2065 						  " environment");
2066 		}
2067 	}
2068 	if (spec) {
2069 		vlan_tag = spec->tci;
2070 		vlan_tag &= mask->tci;
2071 	}
2072 	/*
2073 	 * From verbs perspective an empty VLAN is equivalent
2074 	 * to a packet without VLAN layer.
2075 	 */
2076 	if (!vlan_tag)
2077 		return rte_flow_error_set(error, EINVAL,
2078 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2079 					  item->spec,
2080 					  "VLAN cannot be empty");
2081 	return 0;
2082 }
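
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): how the effective VLAN tag is derived
 * above. The TCI is matched as spec AND mask; a zero result is what
 * Verbs treats as "no VLAN header", hence the "VLAN cannot be empty"
 * error.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint16_t
example_effective_tci(const struct rte_flow_item_vlan *spec,
		      const struct rte_flow_item_vlan *mask)
{
	return spec ? (spec->tci & mask->tci) : 0;
}
#endif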
2083 
2084 /**
2085  * Validate IPV4 item.
2086  *
2087  * @param[in] item
2088  *   Item specification.
2089  * @param[in] item_flags
2090  *   Bit-fields that holds the items detected until now.
2091  * @param[in] last_item
2092  *   Previous validated item in the pattern items.
2093  * @param[in] ether_type
2094  *   Type in the ethernet layer header (including dot1q).
2095  * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
2098  * @param[in] range_accepted
2099  *   True if range of values is accepted for specific fields, false otherwise.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 int
2107 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2108 			     uint64_t item_flags,
2109 			     uint64_t last_item,
2110 			     uint16_t ether_type,
2111 			     const struct rte_flow_item_ipv4 *acc_mask,
2112 			     bool range_accepted,
2113 			     struct rte_flow_error *error)
2114 {
2115 	const struct rte_flow_item_ipv4 *mask = item->mask;
2116 	const struct rte_flow_item_ipv4 *spec = item->spec;
2117 	const struct rte_flow_item_ipv4 nic_mask = {
2118 		.hdr = {
2119 			.src_addr = RTE_BE32(0xffffffff),
2120 			.dst_addr = RTE_BE32(0xffffffff),
2121 			.type_of_service = 0xff,
2122 			.next_proto_id = 0xff,
2123 		},
2124 	};
2125 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2126 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2127 				      MLX5_FLOW_LAYER_OUTER_L3;
2128 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2129 				      MLX5_FLOW_LAYER_OUTER_L4;
2130 	int ret;
2131 	uint8_t next_proto = 0xFF;
2132 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2133 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2134 				  MLX5_FLOW_LAYER_INNER_VLAN);
2135 
2136 	if ((last_item & l2_vlan) && ether_type &&
2137 	    ether_type != RTE_ETHER_TYPE_IPV4)
2138 		return rte_flow_error_set(error, EINVAL,
2139 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2140 					  "IPv4 cannot follow L2/VLAN layer "
					  "whose ether type is not IPv4");
2142 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
2143 		if (mask && spec)
2144 			next_proto = mask->hdr.next_proto_id &
2145 				     spec->hdr.next_proto_id;
2146 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2147 			return rte_flow_error_set(error, EINVAL,
2148 						  RTE_FLOW_ERROR_TYPE_ITEM,
2149 						  item,
2150 						  "multiple tunnel "
2151 						  "not supported");
2152 	}
2153 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2154 		return rte_flow_error_set(error, EINVAL,
2155 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2156 					  "wrong tunnel type - IPv6 specified "
2157 					  "but IPv4 item provided");
2158 	if (item_flags & l3m)
2159 		return rte_flow_error_set(error, ENOTSUP,
2160 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2161 					  "multiple L3 layers not supported");
2162 	else if (item_flags & l4m)
2163 		return rte_flow_error_set(error, EINVAL,
2164 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2165 					  "L3 cannot follow an L4 layer.");
2166 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2167 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2168 		return rte_flow_error_set(error, EINVAL,
2169 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2170 					  "L3 cannot follow an NVGRE layer.");
2171 	if (!mask)
2172 		mask = &rte_flow_item_ipv4_mask;
2173 	else if (mask->hdr.next_proto_id != 0 &&
2174 		 mask->hdr.next_proto_id != 0xff)
2175 		return rte_flow_error_set(error, EINVAL,
2176 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2177 					  "partial mask is not supported"
2178 					  " for protocol");
2179 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2180 					acc_mask ? (const uint8_t *)acc_mask
2181 						 : (const uint8_t *)&nic_mask,
2182 					sizeof(struct rte_flow_item_ipv4),
2183 					range_accepted, error);
2184 	if (ret < 0)
2185 		return ret;
2186 	return 0;
2187 }
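
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): the protocol handling above. The
 * effective next protocol is spec AND mask, and the mask itself must be
 * empty (0) or full (0xff) -- the hardware cannot match on a partial
 * protocol mask.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint8_t
example_ipv4_next_proto(const struct rte_flow_item_ipv4 *spec,
			const struct rte_flow_item_ipv4 *mask)
{
	return spec->hdr.next_proto_id & mask->hdr.next_proto_id;
}
#endif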
2188 
2189 /**
2190  * Validate IPV6 item.
2191  *
2192  * @param[in] item
2193  *   Item specification.
2194  * @param[in] item_flags
2195  *   Bit-fields that holds the items detected until now.
2196  * @param[in] last_item
2197  *   Previous validated item in the pattern items.
2198  * @param[in] ether_type
2199  *   Type in the ethernet layer header (including dot1q).
2200  * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
2203  * @param[out] error
2204  *   Pointer to error structure.
2205  *
2206  * @return
2207  *   0 on success, a negative errno value otherwise and rte_errno is set.
2208  */
2209 int
2210 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2211 			     uint64_t item_flags,
2212 			     uint64_t last_item,
2213 			     uint16_t ether_type,
2214 			     const struct rte_flow_item_ipv6 *acc_mask,
2215 			     struct rte_flow_error *error)
2216 {
2217 	const struct rte_flow_item_ipv6 *mask = item->mask;
2218 	const struct rte_flow_item_ipv6 *spec = item->spec;
2219 	const struct rte_flow_item_ipv6 nic_mask = {
2220 		.hdr = {
2221 			.src_addr =
2222 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2223 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2224 			.dst_addr =
2225 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2226 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2227 			.vtc_flow = RTE_BE32(0xffffffff),
2228 			.proto = 0xff,
2229 		},
2230 	};
2231 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2232 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2233 				      MLX5_FLOW_LAYER_OUTER_L3;
2234 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2235 				      MLX5_FLOW_LAYER_OUTER_L4;
2236 	int ret;
2237 	uint8_t next_proto = 0xFF;
2238 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2239 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2240 				  MLX5_FLOW_LAYER_INNER_VLAN);
2241 
2242 	if ((last_item & l2_vlan) && ether_type &&
2243 	    ether_type != RTE_ETHER_TYPE_IPV6)
2244 		return rte_flow_error_set(error, EINVAL,
2245 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2246 					  "IPv6 cannot follow L2/VLAN layer "
					  "whose ether type is not IPv6");
2248 	if (mask && mask->hdr.proto == UINT8_MAX && spec)
2249 		next_proto = spec->hdr.proto;
2250 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
2251 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2252 			return rte_flow_error_set(error, EINVAL,
2253 						  RTE_FLOW_ERROR_TYPE_ITEM,
2254 						  item,
2255 						  "multiple tunnel "
2256 						  "not supported");
2257 	}
2258 	if (next_proto == IPPROTO_HOPOPTS  ||
2259 	    next_proto == IPPROTO_ROUTING  ||
2260 	    next_proto == IPPROTO_FRAGMENT ||
2261 	    next_proto == IPPROTO_ESP	   ||
2262 	    next_proto == IPPROTO_AH	   ||
2263 	    next_proto == IPPROTO_DSTOPTS)
2264 		return rte_flow_error_set(error, EINVAL,
2265 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2266 					  "IPv6 proto (next header) should "
2267 					  "not be set as extension header");
2268 	if (item_flags & MLX5_FLOW_LAYER_IPIP)
2269 		return rte_flow_error_set(error, EINVAL,
2270 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2271 					  "wrong tunnel type - IPv4 specified "
2272 					  "but IPv6 item provided");
2273 	if (item_flags & l3m)
2274 		return rte_flow_error_set(error, ENOTSUP,
2275 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2276 					  "multiple L3 layers not supported");
2277 	else if (item_flags & l4m)
2278 		return rte_flow_error_set(error, EINVAL,
2279 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2280 					  "L3 cannot follow an L4 layer.");
2281 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2282 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2283 		return rte_flow_error_set(error, EINVAL,
2284 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2285 					  "L3 cannot follow an NVGRE layer.");
2286 	if (!mask)
2287 		mask = &rte_flow_item_ipv6_mask;
2288 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2289 					acc_mask ? (const uint8_t *)acc_mask
2290 						 : (const uint8_t *)&nic_mask,
2291 					sizeof(struct rte_flow_item_ipv6),
2292 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2293 	if (ret < 0)
2294 		return ret;
2295 	return 0;
2296 }
2297 
2298 /**
2299  * Validate UDP item.
2300  *
2301  * @param[in] item
2302  *   Item specification.
2303  * @param[in] item_flags
2304  *   Bit-fields that holds the items detected until now.
2305  * @param[in] target_protocol
2306  *   The next protocol in the previous item.
2309  * @param[out] error
2310  *   Pointer to error structure.
2311  *
2312  * @return
2313  *   0 on success, a negative errno value otherwise and rte_errno is set.
2314  */
2315 int
2316 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2317 			    uint64_t item_flags,
2318 			    uint8_t target_protocol,
2319 			    struct rte_flow_error *error)
2320 {
2321 	const struct rte_flow_item_udp *mask = item->mask;
2322 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2323 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2324 				      MLX5_FLOW_LAYER_OUTER_L3;
2325 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2326 				      MLX5_FLOW_LAYER_OUTER_L4;
2327 	int ret;
2328 
2329 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2330 		return rte_flow_error_set(error, EINVAL,
2331 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2332 					  "protocol filtering not compatible"
2333 					  " with UDP layer");
2334 	if (!(item_flags & l3m))
2335 		return rte_flow_error_set(error, EINVAL,
2336 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2337 					  "L3 is mandatory to filter on L4");
2338 	if (item_flags & l4m)
2339 		return rte_flow_error_set(error, EINVAL,
2340 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2341 					  "multiple L4 layers not supported");
2342 	if (!mask)
2343 		mask = &rte_flow_item_udp_mask;
2344 	ret = mlx5_flow_item_acceptable
2345 		(item, (const uint8_t *)mask,
2346 		 (const uint8_t *)&rte_flow_item_udp_mask,
2347 		 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2348 		 error);
2349 	if (ret < 0)
2350 		return ret;
2351 	return 0;
2352 }
2353 
2354 /**
2355  * Validate TCP item.
2356  *
2357  * @param[in] item
2358  *   Item specification.
2359  * @param[in] item_flags
2360  *   Bit-fields that holds the items detected until now.
2361  * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2363  * @param[out] error
2364  *   Pointer to error structure.
2365  *
2366  * @return
2367  *   0 on success, a negative errno value otherwise and rte_errno is set.
2368  */
2369 int
2370 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2371 			    uint64_t item_flags,
2372 			    uint8_t target_protocol,
2373 			    const struct rte_flow_item_tcp *flow_mask,
2374 			    struct rte_flow_error *error)
2375 {
2376 	const struct rte_flow_item_tcp *mask = item->mask;
2377 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2378 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2379 				      MLX5_FLOW_LAYER_OUTER_L3;
2380 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2381 				      MLX5_FLOW_LAYER_OUTER_L4;
2382 	int ret;
2383 
2384 	MLX5_ASSERT(flow_mask);
2385 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2386 		return rte_flow_error_set(error, EINVAL,
2387 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2388 					  "protocol filtering not compatible"
2389 					  " with TCP layer");
2390 	if (!(item_flags & l3m))
2391 		return rte_flow_error_set(error, EINVAL,
2392 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2393 					  "L3 is mandatory to filter on L4");
2394 	if (item_flags & l4m)
2395 		return rte_flow_error_set(error, EINVAL,
2396 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2397 					  "multiple L4 layers not supported");
2398 	if (!mask)
2399 		mask = &rte_flow_item_tcp_mask;
2400 	ret = mlx5_flow_item_acceptable
2401 		(item, (const uint8_t *)mask,
2402 		 (const uint8_t *)flow_mask,
2403 		 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2404 		 error);
2405 	if (ret < 0)
2406 		return ret;
2407 	return 0;
2408 }
2409 
2410 /**
2411  * Validate VXLAN item.
2412  *
2413  * @param[in] item
2414  *   Item specification.
2415  * @param[in] item_flags
2416  *   Bit-fields that holds the items detected until now.
2419  * @param[out] error
2420  *   Pointer to error structure.
2421  *
2422  * @return
2423  *   0 on success, a negative errno value otherwise and rte_errno is set.
2424  */
2425 int
2426 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2427 			      uint64_t item_flags,
2428 			      struct rte_flow_error *error)
2429 {
2430 	const struct rte_flow_item_vxlan *spec = item->spec;
2431 	const struct rte_flow_item_vxlan *mask = item->mask;
2432 	int ret;
2433 	union vni {
2434 		uint32_t vlan_id;
2435 		uint8_t vni[4];
2436 	} id = { .vlan_id = 0, };
	} id = { .vlan_id = 0, };

2440 		return rte_flow_error_set(error, ENOTSUP,
2441 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2442 					  "multiple tunnel layers not"
2443 					  " supported");
2444 	/*
2445 	 * Verify only UDPv4 is present as defined in
2446 	 * https://tools.ietf.org/html/rfc7348
2447 	 */
2448 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2449 		return rte_flow_error_set(error, EINVAL,
2450 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2451 					  "no outer UDP layer found");
2452 	if (!mask)
2453 		mask = &rte_flow_item_vxlan_mask;
2454 	ret = mlx5_flow_item_acceptable
2455 		(item, (const uint8_t *)mask,
2456 		 (const uint8_t *)&rte_flow_item_vxlan_mask,
2457 		 sizeof(struct rte_flow_item_vxlan),
2458 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2459 	if (ret < 0)
2460 		return ret;
	if (spec) {
		unsigned int i;

		/* Combine spec and mask so only requested VNI bits remain. */
		for (i = 0; i < 3; ++i)
			id.vni[i + 1] = spec->vni[i] & mask->vni[i];
	}
2465 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2466 		return rte_flow_error_set(error, ENOTSUP,
2467 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2468 					  "VXLAN tunnel must be fully defined");
2469 	return 0;
2470 }
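
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): the VNI handling above. The 24-bit VNI
 * occupies bytes 1..3 of the 32-bit id union (byte 0 stays zero), and
 * spec is combined with mask so only the requested VNI bits are kept.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint32_t
example_vxlan_vni(const struct rte_flow_item_vxlan *spec,
		  const struct rte_flow_item_vxlan *mask)
{
	union {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	unsigned int i;

	for (i = 0; i < 3; ++i)
		id.vni[i + 1] = spec->vni[i] & mask->vni[i];
	return id.vlan_id;
}
#endif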
2471 
2472 /**
2473  * Validate VXLAN_GPE item.
2474  *
2475  * @param[in] item
2476  *   Item specification.
2477  * @param[in] item_flags
2478  *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
2483  * @param[out] error
2484  *   Pointer to error structure.
2485  *
2486  * @return
2487  *   0 on success, a negative errno value otherwise and rte_errno is set.
2488  */
2489 int
2490 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2491 				  uint64_t item_flags,
2492 				  struct rte_eth_dev *dev,
2493 				  struct rte_flow_error *error)
2494 {
2495 	struct mlx5_priv *priv = dev->data->dev_private;
2496 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2497 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2498 	int ret;
2499 	union vni {
2500 		uint32_t vlan_id;
2501 		uint8_t vni[4];
2502 	} id = { .vlan_id = 0, };
2503 
2504 	if (!priv->config.l3_vxlan_en)
2505 		return rte_flow_error_set(error, ENOTSUP,
2506 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2507 					  "L3 VXLAN is not enabled by device"
2508 					  " parameter and/or not configured in"
2509 					  " firmware");
2510 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2511 		return rte_flow_error_set(error, ENOTSUP,
2512 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2513 					  "multiple tunnel layers not"
2514 					  " supported");
2515 	/*
2516 	 * Verify only UDPv4 is present as defined in
2517 	 * https://tools.ietf.org/html/rfc7348
2518 	 */
2519 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2520 		return rte_flow_error_set(error, EINVAL,
2521 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2522 					  "no outer UDP layer found");
2523 	if (!mask)
2524 		mask = &rte_flow_item_vxlan_gpe_mask;
2525 	ret = mlx5_flow_item_acceptable
2526 		(item, (const uint8_t *)mask,
2527 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2528 		 sizeof(struct rte_flow_item_vxlan_gpe),
2529 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2530 	if (ret < 0)
2531 		return ret;
	if (spec) {
		unsigned int i;

		if (spec->protocol)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VxLAN-GPE protocol"
						  " not supported");
		/* Combine spec and mask so only requested VNI bits remain. */
		for (i = 0; i < 3; ++i)
			id.vni[i + 1] = spec->vni[i] & mask->vni[i];
	}
2542 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2543 		return rte_flow_error_set(error, ENOTSUP,
2544 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2545 					  "VXLAN-GPE tunnel must be fully"
2546 					  " defined");
2547 	return 0;
}

2549 /**
2550  * Validate GRE Key item.
2551  *
2552  * @param[in] item
2553  *   Item specification.
2554  * @param[in] item_flags
2555  *   Bit flags to mark detected items.
2556  * @param[in] gre_item
 *   Pointer to the preceding GRE item.
2558  * @param[out] error
2559  *   Pointer to error structure.
2560  *
2561  * @return
2562  *   0 on success, a negative errno value otherwise and rte_errno is set.
2563  */
2564 int
2565 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2566 				uint64_t item_flags,
2567 				const struct rte_flow_item *gre_item,
2568 				struct rte_flow_error *error)
2569 {
2570 	const rte_be32_t *mask = item->mask;
2571 	int ret = 0;
2572 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2573 	const struct rte_flow_item_gre *gre_spec;
2574 	const struct rte_flow_item_gre *gre_mask;
2575 
2576 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2577 		return rte_flow_error_set(error, ENOTSUP,
2578 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2579 					  "Multiple GRE key not support");
					  "Multiple GRE key not supported");
2581 		return rte_flow_error_set(error, ENOTSUP,
2582 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2583 					  "No preceding GRE header");
2584 	if (item_flags & MLX5_FLOW_LAYER_INNER)
2585 		return rte_flow_error_set(error, ENOTSUP,
2586 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2587 					  "GRE key following a wrong item");
2588 	gre_mask = gre_item->mask;
2589 	if (!gre_mask)
2590 		gre_mask = &rte_flow_item_gre_mask;
2591 	gre_spec = gre_item->spec;
2592 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2593 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2594 		return rte_flow_error_set(error, EINVAL,
2595 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2596 					  "Key bit must be on");
2597 
2598 	if (!mask)
2599 		mask = &gre_key_default_mask;
2600 	ret = mlx5_flow_item_acceptable
2601 		(item, (const uint8_t *)mask,
2602 		 (const uint8_t *)&gre_key_default_mask,
2603 		 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2604 	return ret;
2605 }
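
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): the flag layout behind the
 * RTE_BE16(0x2000) test above. In the big-endian c_rsvd0_ver word,
 * bit 15 is C (checksum), bit 13 is K (key present) and bit 12 is S
 * (sequence); matching on a GRE key therefore requires K to be set.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline bool
example_gre_has_key(const struct rte_flow_item_gre *gre)
{
	return (gre->c_rsvd0_ver & RTE_BE16(0x2000)) != 0;
}
#endif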
2606 
2607 /**
2608  * Validate GRE item.
2609  *
2610  * @param[in] item
2611  *   Item specification.
2612  * @param[in] item_flags
2613  *   Bit flags to mark detected items.
2614  * @param[in] target_protocol
2615  *   The next protocol in the previous item.
2616  * @param[out] error
2617  *   Pointer to error structure.
2618  *
2619  * @return
2620  *   0 on success, a negative errno value otherwise and rte_errno is set.
2621  */
2622 int
2623 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2624 			    uint64_t item_flags,
2625 			    uint8_t target_protocol,
2626 			    struct rte_flow_error *error)
2627 {
2628 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2629 	const struct rte_flow_item_gre *mask = item->mask;
2630 	int ret;
2631 	const struct rte_flow_item_gre nic_mask = {
2632 		.c_rsvd0_ver = RTE_BE16(0xB000),
2633 		.protocol = RTE_BE16(UINT16_MAX),
2634 	};
2635 
2636 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2637 		return rte_flow_error_set(error, EINVAL,
2638 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2639 					  "protocol filtering not compatible"
2640 					  " with this GRE layer");
2641 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2642 		return rte_flow_error_set(error, ENOTSUP,
2643 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2644 					  "multiple tunnel layers not"
2645 					  " supported");
2646 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2647 		return rte_flow_error_set(error, ENOTSUP,
2648 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2649 					  "L3 Layer is missing");
2650 	if (!mask)
2651 		mask = &rte_flow_item_gre_mask;
2652 	ret = mlx5_flow_item_acceptable
2653 		(item, (const uint8_t *)mask,
2654 		 (const uint8_t *)&nic_mask,
2655 		 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2656 		 error);
2657 	if (ret < 0)
2658 		return ret;
2659 #ifndef HAVE_MLX5DV_DR
2660 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2661 	if (spec && (spec->protocol & mask->protocol))
2662 		return rte_flow_error_set(error, ENOTSUP,
2663 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2664 					  "without MPLS support the"
2665 					  " specification cannot be used for"
2666 					  " filtering");
2667 #endif
2668 #endif
2669 	return 0;
2670 }
2671 
2672 /**
2673  * Validate Geneve item.
2674  *
2675  * @param[in] item
2676  *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
2681  * @param[out] error
2682  *   Pointer to error structure.
2683  *
2684  * @return
2685  *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
2689 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2690 			       uint64_t item_flags,
2691 			       struct rte_eth_dev *dev,
2692 			       struct rte_flow_error *error)
2693 {
2694 	struct mlx5_priv *priv = dev->data->dev_private;
2695 	const struct rte_flow_item_geneve *spec = item->spec;
2696 	const struct rte_flow_item_geneve *mask = item->mask;
2697 	int ret;
2698 	uint16_t gbhdr;
2699 	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2700 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2701 	const struct rte_flow_item_geneve nic_mask = {
2702 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2703 		.vni = "\xff\xff\xff",
2704 		.protocol = RTE_BE16(UINT16_MAX),
2705 	};
2706 
2707 	if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2708 		return rte_flow_error_set(error, ENOTSUP,
2709 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2710 					  "L3 Geneve is not enabled by device"
2711 					  " parameter and/or not configured in"
2712 					  " firmware");
2713 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2714 		return rte_flow_error_set(error, ENOTSUP,
2715 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2716 					  "multiple tunnel layers not"
2717 					  " supported");
	/*
	 * Verify only UDPv4 is present as defined in
	 * https://tools.ietf.org/html/rfc8926
	 */
2722 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2723 		return rte_flow_error_set(error, EINVAL,
2724 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2725 					  "no outer UDP layer found");
2726 	if (!mask)
2727 		mask = &rte_flow_item_geneve_mask;
2728 	ret = mlx5_flow_item_acceptable
2729 				  (item, (const uint8_t *)mask,
2730 				   (const uint8_t *)&nic_mask,
2731 				   sizeof(struct rte_flow_item_geneve),
2732 				   MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2733 	if (ret)
2734 		return ret;
2735 	if (spec) {
2736 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2737 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2738 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2739 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2740 			return rte_flow_error_set(error, ENOTSUP,
2741 						  RTE_FLOW_ERROR_TYPE_ITEM,
2742 						  item,
2743 						  "Geneve protocol unsupported"
2744 						  " fields are being used");
2745 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2746 			return rte_flow_error_set
2747 					(error, ENOTSUP,
2748 					 RTE_FLOW_ERROR_TYPE_ITEM,
2749 					 item,
2750 					 "Unsupported Geneve options length");
2751 	}
2752 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2753 		return rte_flow_error_set
2754 				    (error, ENOTSUP,
2755 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
2756 				     "Geneve tunnel must be fully defined");
2757 	return 0;
2758 }
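
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): the Geneve base-header fields checked
 * above. After byte-order conversion, ver_opt_len_o_c_rsvd0 carries the
 * version (2 bits), the options length in 4-byte words (6 bits), then
 * the O and C flags.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint8_t
example_geneve_opt_len(const struct rte_flow_item_geneve *spec)
{
	uint16_t gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);

	return (gbhdr >> 8) & 0x3f; /* options length, in 4-byte words */
}
#endif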
2759 
2760 /**
2761  * Validate Geneve TLV option item.
2762  *
2763  * @param[in] item
2764  *   Item specification.
2765  * @param[in] last_item
2766  *   Previous validated item in the pattern items.
2767  * @param[in] geneve_item
2768  *   Previous GENEVE item specification.
2769  * @param[in] dev
2770  *   Pointer to the rte_eth_dev structure.
2771  * @param[out] error
2772  *   Pointer to error structure.
2773  *
2774  * @return
2775  *   0 on success, a negative errno value otherwise and rte_errno is set.
2776  */
2777 int
2778 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
2779 				   uint64_t last_item,
2780 				   const struct rte_flow_item *geneve_item,
2781 				   struct rte_eth_dev *dev,
2782 				   struct rte_flow_error *error)
2783 {
2784 	struct mlx5_priv *priv = dev->data->dev_private;
2785 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2786 	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
2787 	struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
2788 	uint8_t data_max_supported =
2789 			hca_attr->max_geneve_tlv_option_data_len * 4;
2790 	struct mlx5_dev_config *config = &priv->config;
2791 	const struct rte_flow_item_geneve *geneve_spec;
2792 	const struct rte_flow_item_geneve *geneve_mask;
2793 	const struct rte_flow_item_geneve_opt *spec = item->spec;
2794 	const struct rte_flow_item_geneve_opt *mask = item->mask;
2795 	unsigned int i;
2796 	unsigned int data_len;
2797 	uint8_t tlv_option_len;
2798 	uint16_t optlen_m, optlen_v;
2799 	const struct rte_flow_item_geneve_opt full_mask = {
2800 		.option_class = RTE_BE16(0xffff),
2801 		.option_type = 0xff,
2802 		.option_len = 0x1f,
2803 	};
2804 
2805 	if (!mask)
2806 		mask = &rte_flow_item_geneve_opt_mask;
2807 	if (!spec)
2808 		return rte_flow_error_set
2809 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2810 			"Geneve TLV opt class/type/length must be specified");
2811 	if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
2812 		return rte_flow_error_set
2813 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Geneve TLV opt length exceeds the limit (31)");
2815 	/* Check if class type and length masks are full. */
2816 	if (full_mask.option_class != mask->option_class ||
2817 	    full_mask.option_type != mask->option_type ||
2818 	    full_mask.option_len != (mask->option_len & full_mask.option_len))
2819 		return rte_flow_error_set
2820 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2821 			"Geneve TLV opt class/type/length masks must be full");
2822 	/* Check if length is supported */
2823 	if ((uint32_t)spec->option_len >
2824 			config->hca_attr.max_geneve_tlv_option_data_len)
2825 		return rte_flow_error_set
2826 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2827 			"Geneve TLV opt length not supported");
2828 	if (config->hca_attr.max_geneve_tlv_options > 1)
2829 		DRV_LOG(DEBUG,
2830 			"max_geneve_tlv_options supports more than 1 option");
2831 	/* Check GENEVE item preceding. */
2832 	if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
2833 		return rte_flow_error_set
2834 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Geneve opt item must be preceded by a Geneve item");
2836 	geneve_spec = geneve_item->spec;
2837 	geneve_mask = geneve_item->mask ? geneve_item->mask :
2838 					  &rte_flow_item_geneve_mask;
2839 	/* Check if GENEVE TLV option size doesn't exceed option length */
2840 	if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
2841 			    geneve_spec->ver_opt_len_o_c_rsvd0)) {
2842 		tlv_option_len = spec->option_len & mask->option_len;
2843 		optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
2844 		optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
2845 		optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
2846 		optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
2847 		if ((optlen_v & optlen_m) <= tlv_option_len)
2848 			return rte_flow_error_set
2849 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2850 				 "GENEVE TLV option length exceeds optlen");
2851 	}
2852 	/* Check if length is 0 or data is 0. */
2853 	if (spec->data == NULL || spec->option_len == 0)
2854 		return rte_flow_error_set
2855 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2856 			"Geneve TLV opt with zero data/length not supported");
2857 	/* Check not all data & mask are 0. */
2858 	data_len = spec->option_len * 4;
2859 	if (mask->data == NULL) {
2860 		for (i = 0; i < data_len; i++)
2861 			if (spec->data[i])
2862 				break;
2863 		if (i == data_len)
2864 			return rte_flow_error_set(error, ENOTSUP,
2865 				RTE_FLOW_ERROR_TYPE_ITEM, item,
2866 				"Can't match on Geneve option data 0");
2867 	} else {
2868 		for (i = 0; i < data_len; i++)
2869 			if (spec->data[i] & mask->data[i])
2870 				break;
2871 		if (i == data_len)
2872 			return rte_flow_error_set(error, ENOTSUP,
2873 				RTE_FLOW_ERROR_TYPE_ITEM, item,
2874 				"Can't match on Geneve option data and mask 0");
2875 		/* Check data mask supported. */
		for (i = data_max_supported; i < data_len; i++)
2877 			if (mask->data[i])
2878 				return rte_flow_error_set(error, ENOTSUP,
2879 					RTE_FLOW_ERROR_TYPE_ITEM, item,
2880 					"Data mask is of unsupported size");
2881 	}
2882 	/* Check GENEVE option is supported in NIC. */
2883 	if (!config->hca_attr.geneve_tlv_opt)
2884 		return rte_flow_error_set
2885 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2886 			"Geneve TLV opt not supported");
2887 	/* Check if we already have geneve option with different type/class. */
2888 	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
2889 	geneve_opt_resource = sh->geneve_tlv_option_resource;
2890 	if (geneve_opt_resource != NULL)
2891 		if (geneve_opt_resource->option_class != spec->option_class ||
2892 		    geneve_opt_resource->option_type != spec->option_type ||
2893 		    geneve_opt_resource->length != spec->option_len) {
2894 			rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
2895 			return rte_flow_error_set(error, ENOTSUP,
2896 				RTE_FLOW_ERROR_TYPE_ITEM, item,
2897 				"Only one Geneve TLV option supported");
2898 		}
2899 	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
2900 	return 0;
2901 }
2902 
2903 /**
2904  * Validate MPLS item.
2905  *
2906  * @param[in] dev
2907  *   Pointer to the rte_eth_dev structure.
2908  * @param[in] item
2909  *   Item specification.
2910  * @param[in] item_flags
2911  *   Bit-fields that holds the items detected until now.
2912  * @param[in] prev_layer
2913  *   The protocol layer indicated in previous item.
2914  * @param[out] error
2915  *   Pointer to error structure.
2916  *
2917  * @return
2918  *   0 on success, a negative errno value otherwise and rte_errno is set.
2919  */
2920 int
2921 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2922 			     const struct rte_flow_item *item __rte_unused,
2923 			     uint64_t item_flags __rte_unused,
2924 			     uint64_t prev_layer __rte_unused,
2925 			     struct rte_flow_error *error)
2926 {
2927 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2928 	const struct rte_flow_item_mpls *mask = item->mask;
2929 	struct mlx5_priv *priv = dev->data->dev_private;
2930 	int ret;
2931 
2932 	if (!priv->config.mpls_en)
2933 		return rte_flow_error_set(error, ENOTSUP,
2934 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2935 					  "MPLS not supported or"
2936 					  " disabled in firmware"
2937 					  " configuration.");
2938 	/* MPLS over UDP, GRE is allowed */
2939 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
2940 			    MLX5_FLOW_LAYER_GRE |
2941 			    MLX5_FLOW_LAYER_GRE_KEY)))
2942 		return rte_flow_error_set(error, EINVAL,
2943 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2944 					  "protocol filtering not compatible"
2945 					  " with MPLS layer");
2946 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2947 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2948 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
2949 		return rte_flow_error_set(error, ENOTSUP,
2950 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2951 					  "multiple tunnel layers not"
2952 					  " supported");
2953 	if (!mask)
2954 		mask = &rte_flow_item_mpls_mask;
2955 	ret = mlx5_flow_item_acceptable
2956 		(item, (const uint8_t *)mask,
2957 		 (const uint8_t *)&rte_flow_item_mpls_mask,
2958 		 sizeof(struct rte_flow_item_mpls),
2959 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2960 	if (ret < 0)
2961 		return ret;
2962 	return 0;
2963 #else
2964 	return rte_flow_error_set(error, ENOTSUP,
2965 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
2966 				  "MPLS is not supported by Verbs, please"
2967 				  " update.");
2968 #endif
2969 }
2970 
2971 /**
2972  * Validate NVGRE item.
2973  *
2974  * @param[in] item
2975  *   Item specification.
2976  * @param[in] item_flags
2977  *   Bit flags to mark detected items.
2978  * @param[in] target_protocol
2979  *   The next protocol in the previous item.
2980  * @param[out] error
2981  *   Pointer to error structure.
2982  *
2983  * @return
2984  *   0 on success, a negative errno value otherwise and rte_errno is set.
2985  */
2986 int
2987 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2988 			      uint64_t item_flags,
2989 			      uint8_t target_protocol,
2990 			      struct rte_flow_error *error)
2991 {
2992 	const struct rte_flow_item_nvgre *mask = item->mask;
2993 	int ret;
2994 
2995 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2996 		return rte_flow_error_set(error, EINVAL,
2997 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2998 					  "protocol filtering not compatible"
2999 					  " with this GRE layer");
3000 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3001 		return rte_flow_error_set(error, ENOTSUP,
3002 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3003 					  "multiple tunnel layers not"
3004 					  " supported");
3005 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3006 		return rte_flow_error_set(error, ENOTSUP,
3007 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3008 					  "L3 Layer is missing");
3009 	if (!mask)
3010 		mask = &rte_flow_item_nvgre_mask;
3011 	ret = mlx5_flow_item_acceptable
3012 		(item, (const uint8_t *)mask,
3013 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
3014 		 sizeof(struct rte_flow_item_nvgre),
3015 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3016 	if (ret < 0)
3017 		return ret;
3018 	return 0;
3019 }
3020 
3021 /**
3022  * Validate eCPRI item.
3023  *
3024  * @param[in] item
3025  *   Item specification.
3026  * @param[in] item_flags
3027  *   Bit-fields that holds the items detected until now.
3028  * @param[in] last_item
3029  *   Previous validated item in the pattern items.
3030  * @param[in] ether_type
3031  *   Type in the ethernet layer header (including dot1q).
3032  * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
3035  * @param[out] error
3036  *   Pointer to error structure.
3037  *
3038  * @return
3039  *   0 on success, a negative errno value otherwise and rte_errno is set.
3040  */
3041 int
3042 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
3043 			      uint64_t item_flags,
3044 			      uint64_t last_item,
3045 			      uint16_t ether_type,
3046 			      const struct rte_flow_item_ecpri *acc_mask,
3047 			      struct rte_flow_error *error)
3048 {
3049 	const struct rte_flow_item_ecpri *mask = item->mask;
3050 	const struct rte_flow_item_ecpri nic_mask = {
3051 		.hdr = {
3052 			.common = {
3053 				.u32 =
3054 				RTE_BE32(((const struct rte_ecpri_common_hdr) {
3055 					.type = 0xFF,
3056 					}).u32),
3057 			},
3058 			.dummy[0] = 0xFFFFFFFF,
3059 		},
3060 	};
3061 	const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
3062 					MLX5_FLOW_LAYER_OUTER_VLAN);
3063 	struct rte_flow_item_ecpri mask_lo;
3064 
3065 	if (!(last_item & outer_l2_vlan) &&
3066 	    last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
3067 		return rte_flow_error_set(error, EINVAL,
3068 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3069 					  "eCPRI can only follow L2/VLAN layer or UDP layer");
3070 	if ((last_item & outer_l2_vlan) && ether_type &&
3071 	    ether_type != RTE_ETHER_TYPE_ECPRI)
3072 		return rte_flow_error_set(error, EINVAL,
3073 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI cannot follow L2/VLAN layer whose ether type is not 0xAEFE");
3075 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3076 		return rte_flow_error_set(error, EINVAL,
3077 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3078 					  "eCPRI with tunnel is not supported right now");
3079 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
3080 		return rte_flow_error_set(error, ENOTSUP,
3081 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3082 					  "multiple L3 layers not supported");
3083 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
3084 		return rte_flow_error_set(error, EINVAL,
3085 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3086 					  "eCPRI cannot coexist with a TCP layer");
3087 	/* In specification, eCPRI could be over UDP layer. */
3088 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
3089 		return rte_flow_error_set(error, EINVAL,
3090 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "eCPRI over UDP layer is not supported right now");
3092 	/* Mask for type field in common header could be zero. */
3093 	if (!mask)
3094 		mask = &rte_flow_item_ecpri_mask;
3095 	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
3096 	/* Input mask is in big-endian format. */
3097 	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
3098 		return rte_flow_error_set(error, EINVAL,
3099 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3100 					  "partial mask is not supported for protocol");
3101 	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
3102 		return rte_flow_error_set(error, EINVAL,
3103 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3104 					  "message header mask must be after a type mask");
3105 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
3106 					 acc_mask ? (const uint8_t *)acc_mask
3107 						  : (const uint8_t *)&nic_mask,
3108 					 sizeof(struct rte_flow_item_ecpri),
3109 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3110 }
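
/*
 * Illustration only (compiled out by the hypothetical
 * MLX5_FLOW_DOC_EXAMPLES guard): the mask ordering rule above. The
 * eCPRI common header is matched in big-endian form, so the mask is
 * converted to CPU order first; the message-body mask (dummy[0]) is
 * only meaningful once the type field is fully masked.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline bool
example_ecpri_mask_ok(const struct rte_flow_item_ecpri *mask)
{
	struct rte_flow_item_ecpri mask_lo;

	/* Convert once; the bitfields then index the CPU-order value. */
	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
		return false; /* partial type mask is not supported */
	if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
		return false; /* body mask requires a full type mask */
	return true;
}
#endif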
3111 
3112 /**
 * Release resources related to the QUEUE/RSS action split.
3114  *
3115  * @param dev
3116  *   Pointer to Ethernet device.
3117  * @param flow
 *   Flow to release split flow IDs from.
3119  */
3120 static void
3121 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
3122 			     struct rte_flow *flow)
3123 {
3124 	struct mlx5_priv *priv = dev->data->dev_private;
3125 	uint32_t handle_idx;
3126 	struct mlx5_flow_handle *dev_handle;
3127 
3128 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
3129 		       handle_idx, dev_handle, next)
3130 		if (dev_handle->split_flow_id &&
3131 		    !dev_handle->is_meter_flow_id)
3132 			mlx5_ipool_free(priv->sh->ipool
3133 					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
3134 					dev_handle->split_flow_id);
3135 }
3136 
3137 static int
3138 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
3139 		   const struct rte_flow_attr *attr __rte_unused,
3140 		   const struct rte_flow_item items[] __rte_unused,
3141 		   const struct rte_flow_action actions[] __rte_unused,
3142 		   bool external __rte_unused,
3143 		   int hairpin __rte_unused,
3144 		   struct rte_flow_error *error)
3145 {
3146 	return rte_flow_error_set(error, ENOTSUP,
3147 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3148 }
3149 
3150 static struct mlx5_flow *
3151 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
3152 		  const struct rte_flow_attr *attr __rte_unused,
3153 		  const struct rte_flow_item items[] __rte_unused,
3154 		  const struct rte_flow_action actions[] __rte_unused,
3155 		  struct rte_flow_error *error)
3156 {
3157 	rte_flow_error_set(error, ENOTSUP,
3158 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3159 	return NULL;
3160 }
3161 
3162 static int
3163 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
3164 		    struct mlx5_flow *dev_flow __rte_unused,
3165 		    const struct rte_flow_attr *attr __rte_unused,
3166 		    const struct rte_flow_item items[] __rte_unused,
3167 		    const struct rte_flow_action actions[] __rte_unused,
3168 		    struct rte_flow_error *error)
3169 {
3170 	return rte_flow_error_set(error, ENOTSUP,
3171 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3172 }
3173 
3174 static int
3175 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3176 		struct rte_flow *flow __rte_unused,
3177 		struct rte_flow_error *error)
3178 {
3179 	return rte_flow_error_set(error, ENOTSUP,
3180 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3181 }
3182 
3183 static void
3184 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3185 		 struct rte_flow *flow __rte_unused)
3186 {
3187 }
3188 
3189 static void
3190 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3191 		  struct rte_flow *flow __rte_unused)
3192 {
3193 }
3194 
3195 static int
3196 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3197 		struct rte_flow *flow __rte_unused,
3198 		const struct rte_flow_action *actions __rte_unused,
3199 		void *data __rte_unused,
3200 		struct rte_flow_error *error)
3201 {
3202 	return rte_flow_error_set(error, ENOTSUP,
3203 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3204 }
3205 
3206 static int
3207 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3208 		      uint32_t domains __rte_unused,
3209 		      uint32_t flags __rte_unused)
3210 {
3211 	return 0;
3212 }
3213 
3214 /* Void driver to protect from null pointer reference. */
3215 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3216 	.validate = flow_null_validate,
3217 	.prepare = flow_null_prepare,
3218 	.translate = flow_null_translate,
3219 	.apply = flow_null_apply,
3220 	.remove = flow_null_remove,
3221 	.destroy = flow_null_destroy,
3222 	.query = flow_null_query,
3223 	.sync_domain = flow_null_sync_domain,
3224 };
3225 
3226 /**
3227  * Select flow driver type according to flow attributes and device
3228  * configuration.
3229  *
3230  * @param[in] dev
3231  *   Pointer to the dev structure.
3232  * @param[in] attr
3233  *   Pointer to the flow attributes.
3234  *
3235  * @return
3236  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
3237  */
3238 static enum mlx5_flow_drv_type
3239 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3240 {
3241 	struct mlx5_priv *priv = dev->data->dev_private;
3242 	/* The OS can determine first a specific flow type (DV, VERBS) */
3243 	enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3244 
3245 	if (type != MLX5_FLOW_TYPE_MAX)
3246 		return type;
3247 	/* If no OS specific type - continue with DV/VERBS selection */
3248 	if (attr->transfer && priv->config.dv_esw_en)
3249 		type = MLX5_FLOW_TYPE_DV;
3250 	if (!attr->transfer)
3251 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3252 						 MLX5_FLOW_TYPE_VERBS;
3253 	return type;
3254 }
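
/*
 * Illustrative decision table for the selection above (a sketch, it
 * assumes the OS layer returned MLX5_FLOW_TYPE_MAX and did not force
 * a type):
 *
 *   attr->transfer  dv_esw_en  dv_flow_en  ->  selected type
 *         1             1          -           MLX5_FLOW_TYPE_DV
 *         1             0          -           MLX5_FLOW_TYPE_MAX (null ops)
 *         0             -          1           MLX5_FLOW_TYPE_DV
 *         0             -          0           MLX5_FLOW_TYPE_VERBS
 */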
3255 
3256 #define flow_get_drv_ops(type) flow_drv_ops[type]
3257 
3258 /**
3259  * Flow driver validation API. This abstracts calling driver specific functions.
3260  * The type of flow driver is determined according to flow attributes.
3261  *
3262  * @param[in] dev
3263  *   Pointer to the dev structure.
3264  * @param[in] attr
3265  *   Pointer to the flow attributes.
3266  * @param[in] items
3267  *   Pointer to the list of items.
3268  * @param[in] actions
3269  *   Pointer to the list of actions.
3270  * @param[in] external
3271  *   This flow rule is created by a request external to the PMD.
3272  * @param[in] hairpin
3273  *   Number of hairpin TX actions, 0 means classic flow.
3274  * @param[out] error
3275  *   Pointer to the error structure.
3276  *
3277  * @return
3278  *   0 on success, a negative errno value otherwise and rte_errno is set.
3279  */
3280 static inline int
3281 flow_drv_validate(struct rte_eth_dev *dev,
3282 		  const struct rte_flow_attr *attr,
3283 		  const struct rte_flow_item items[],
3284 		  const struct rte_flow_action actions[],
3285 		  bool external, int hairpin, struct rte_flow_error *error)
3286 {
3287 	const struct mlx5_flow_driver_ops *fops;
3288 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3289 
3290 	fops = flow_get_drv_ops(type);
3291 	return fops->validate(dev, attr, items, actions, external,
3292 			      hairpin, error);
3293 }
3294 
3295 /**
3296  * Flow driver preparation API. This abstracts calling driver specific
3297  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3298  * calculates the size of memory required for device flow, allocates the memory,
3299  * initializes the device flow and returns the pointer.
3300  *
3301  * @note
3302  *   This function initializes device flow structure such as dv or verbs in
3303  *   struct mlx5_flow. However, it is the caller's responsibility to initialize
3304  *   the rest. For example, adding the returned device flow to the flow->dev_flow
3305  *   list and setting the backward reference to the flow should be done outside
3306  *   of this function. The layers field is not filled either.
3307  *
3308  * @param[in] dev
3309  *   Pointer to the dev structure.
3310  * @param[in] attr
3311  *   Pointer to the flow attributes.
3312  * @param[in] items
3313  *   Pointer to the list of items.
3314  * @param[in] actions
3315  *   Pointer to the list of actions.
3316  * @param[in] flow_idx
3317  *   The memory pool index of the flow.
3318  * @param[out] error
3319  *   Pointer to the error structure.
3320  *
3321  * @return
3322  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3323  */
3324 static inline struct mlx5_flow *
3325 flow_drv_prepare(struct rte_eth_dev *dev,
3326 		 const struct rte_flow *flow,
3327 		 const struct rte_flow_attr *attr,
3328 		 const struct rte_flow_item items[],
3329 		 const struct rte_flow_action actions[],
3330 		 uint32_t flow_idx,
3331 		 struct rte_flow_error *error)
3332 {
3333 	const struct mlx5_flow_driver_ops *fops;
3334 	enum mlx5_flow_drv_type type = flow->drv_type;
3335 	struct mlx5_flow *mlx5_flow = NULL;
3336 
3337 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3338 	fops = flow_get_drv_ops(type);
3339 	mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3340 	if (mlx5_flow)
3341 		mlx5_flow->flow_idx = flow_idx;
3342 	return mlx5_flow;
3343 }
3344 
3345 /**
3346  * Flow driver translation API. This abstracts calling driver specific
3347  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3348  * translates a generic flow into a driver flow. flow_drv_prepare() must
3349  * precede.
3350  *
3351  * @note
3352  *   dev_flow->layers could be filled as a result of parsing during translation
3353  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3354  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3355  *   flow->actions could be overwritten even though all the expanded dev_flows
3356  *   have the same actions.
3357  *
3358  * @param[in] dev
3359  *   Pointer to the rte dev structure.
3360  * @param[in, out] dev_flow
3361  *   Pointer to the mlx5 flow.
3362  * @param[in] attr
3363  *   Pointer to the flow attributes.
3364  * @param[in] items
3365  *   Pointer to the list of items.
3366  * @param[in] actions
3367  *   Pointer to the list of actions.
3368  * @param[out] error
3369  *   Pointer to the error structure.
3370  *
3371  * @return
3372  *   0 on success, a negative errno value otherwise and rte_errno is set.
3373  */
3374 static inline int
3375 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3376 		   const struct rte_flow_attr *attr,
3377 		   const struct rte_flow_item items[],
3378 		   const struct rte_flow_action actions[],
3379 		   struct rte_flow_error *error)
3380 {
3381 	const struct mlx5_flow_driver_ops *fops;
3382 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3383 
3384 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3385 	fops = flow_get_drv_ops(type);
3386 	return fops->translate(dev, dev_flow, attr, items, actions, error);
3387 }
3388 
3389 /**
3390  * Flow driver apply API. This abstracts calling driver specific functions.
3391  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3392  * translated driver flows on to device. flow_drv_translate() must precede.
3393  *
3394  * @param[in] dev
3395  *   Pointer to Ethernet device structure.
3396  * @param[in, out] flow
3397  *   Pointer to flow structure.
3398  * @param[out] error
3399  *   Pointer to error structure.
3400  *
3401  * @return
3402  *   0 on success, a negative errno value otherwise and rte_errno is set.
3403  */
3404 static inline int
3405 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3406 	       struct rte_flow_error *error)
3407 {
3408 	const struct mlx5_flow_driver_ops *fops;
3409 	enum mlx5_flow_drv_type type = flow->drv_type;
3410 
3411 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3412 	fops = flow_get_drv_ops(type);
3413 	return fops->apply(dev, flow, error);
3414 }
3415 
3416 /**
3417  * Flow driver destroy API. This abstracts calling driver specific functions.
3418  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3419  * on device and releases resources of the flow.
3420  *
3421  * @param[in] dev
3422  *   Pointer to Ethernet device.
3423  * @param[in, out] flow
3424  *   Pointer to flow structure.
3425  */
3426 static inline void
3427 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3428 {
3429 	const struct mlx5_flow_driver_ops *fops;
3430 	enum mlx5_flow_drv_type type = flow->drv_type;
3431 
3432 	flow_mreg_split_qrss_release(dev, flow);
3433 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3434 	fops = flow_get_drv_ops(type);
3435 	fops->destroy(dev, flow);
3436 }
3437 
3438 /**
3439  * Flow driver find RSS policy tbl API. This abstracts calling driver
3440  * specific functions. Parent flow (rte_flow) should have driver
3441  * type (drv_type). It will find the RSS policy table that has the rss_desc.
3442  *
3443  * @param[in] dev
3444  *   Pointer to Ethernet device.
3445  * @param[in, out] flow
3446  *   Pointer to flow structure.
3447  * @param[in] policy
3448  *   Pointer to meter policy table.
3449  * @param[in] rss_desc
3450  *   Pointer to rss_desc.
3451  */
3452 static struct mlx5_flow_meter_sub_policy *
3453 flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
3454 		struct rte_flow *flow,
3455 		struct mlx5_flow_meter_policy *policy,
3456 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
3457 {
3458 	const struct mlx5_flow_driver_ops *fops;
3459 	enum mlx5_flow_drv_type type = flow->drv_type;
3460 
3461 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3462 	fops = flow_get_drv_ops(type);
3463 	return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
3464 }
3465 
3466 /**
3467  * Get RSS action from the action list.
3468  *
3469  * @param[in] dev
3470  *   Pointer to Ethernet device.
3471  * @param[in] actions
3472  *   Pointer to the list of actions.
3475  *
3476  * @return
3477  *   Pointer to the RSS action if it exists, otherwise NULL.
3478  */
3479 static const struct rte_flow_action_rss*
3480 flow_get_rss_action(struct rte_eth_dev *dev,
3481 		    const struct rte_flow_action actions[])
3482 {
3483 	struct mlx5_priv *priv = dev->data->dev_private;
3484 	const struct rte_flow_action_rss *rss = NULL;
3485 
3486 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3487 		switch (actions->type) {
3488 		case RTE_FLOW_ACTION_TYPE_RSS:
3489 			rss = actions->conf;
3490 			break;
3491 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
3492 		{
3493 			const struct rte_flow_action_sample *sample =
3494 								actions->conf;
3495 			const struct rte_flow_action *act = sample->actions;
3496 			for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
3497 				if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
3498 					rss = act->conf;
3499 			break;
3500 		}
3501 		case RTE_FLOW_ACTION_TYPE_METER:
3502 		{
3503 			uint32_t mtr_idx;
3504 			struct mlx5_flow_meter_info *fm;
3505 			struct mlx5_flow_meter_policy *policy;
3506 			const struct rte_flow_action_meter *mtr = actions->conf;
3507 
3508 			fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx);
3509 			if (fm) {
3510 				policy = mlx5_flow_meter_policy_find(dev,
3511 						fm->policy_id, NULL);
3512 				if (policy && policy->is_rss)
3513 					rss =
3514 				policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
3515 			}
3516 			break;
3517 		}
3518 		default:
3519 			break;
3520 		}
3521 	}
3522 	return rss;
3523 }
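
/*
 * Illustrative example (not part of the driver): given the action list
 *   MARK / SAMPLE{ ..., RSS } / END
 * the lookup above returns the RSS configuration embedded in the
 * SAMPLE sub-action list. An RSS action may also be found directly in
 * the list or through a METER action whose green-color policy is RSS
 * based; the last one found wins.
 */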
3524 
3525 /**
3526  * Get ASO age action by index.
3527  *
3528  * @param[in] dev
3529  *   Pointer to the Ethernet device structure.
3530  * @param[in] age_idx
3531  *   Index to the ASO age action.
3532  *
3533  * @return
3534  *   The specified ASO age action.
3535  */
3536 struct mlx5_aso_age_action*
3537 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
3538 {
3539 	uint16_t pool_idx = age_idx & UINT16_MAX;
3540 	uint16_t offset = (age_idx >> 16) & UINT16_MAX;
3541 	struct mlx5_priv *priv = dev->data->dev_private;
3542 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
3543 	struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
3544 
3545 	return &pool->actions[offset - 1];
3546 }
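
/*
 * Illustrative sketch of the age_idx encoding decoded above (not part
 * of the driver): the lower 16 bits select the pool and the upper
 * 16 bits hold a 1-based offset inside it, e.g.:
 *
 *   uint32_t age_idx = (3u << 16) | 7u;
 *   // pool_idx == 7, offset == 3  ->  &mng->pools[7]->actions[2]
 */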
3547 
3548 /* Maps an indirect action to the translated direct action in some actions array. */
3549 struct mlx5_translated_action_handle {
3550 	struct rte_flow_action_handle *action; /**< Indirect action handle. */
3551 	int index; /**< Index in related array of rte_flow_action. */
3552 };
3553 
3554 /**
3555  * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to the related
3556  * direct actions if translation is possible.
3557  * This functionality is used to run the same execution path for both direct
3558  * and indirect actions on flow create. All necessary preparations for
3559  * indirect action handling should be performed on the *handle* actions list
3560  * returned from this call.
3561  *
3562  * @param[in] dev
3563  *   Pointer to Ethernet device.
3564  * @param[in] actions
3565  *   List of actions to translate.
3566  * @param[out] handle
3567  *   List to store translated indirect action object handles.
3568  * @param[in, out] indir_n
3569  *   Size of the *handle* array. On return it is updated with the number of
3570  *   indirect actions retrieved from the *actions* list.
3571  * @param[out] translated_actions
3572  *   List of actions where all indirect actions were translated to direct
3573  *   if possible. NULL if no translation took place.
3574  * @param[out] error
3575  *   Pointer to the error structure.
3576  *
3577  * @return
3578  *   0 on success, a negative errno value otherwise and rte_errno is set.
3579  */
3580 static int
3581 flow_action_handles_translate(struct rte_eth_dev *dev,
3582 			      const struct rte_flow_action actions[],
3583 			      struct mlx5_translated_action_handle *handle,
3584 			      int *indir_n,
3585 			      struct rte_flow_action **translated_actions,
3586 			      struct rte_flow_error *error)
3587 {
3588 	struct mlx5_priv *priv = dev->data->dev_private;
3589 	struct rte_flow_action *translated = NULL;
3590 	size_t actions_size;
3591 	int n;
3592 	int copied_n = 0;
3593 	struct mlx5_translated_action_handle *handle_end = NULL;
3594 
3595 	for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3596 		if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT)
3597 			continue;
3598 		if (copied_n == *indir_n) {
3599 			return rte_flow_error_set
3600 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3601 				 NULL, "too many shared actions");
3602 		}
3603 		rte_memcpy(&handle[copied_n].action, &actions[n].conf,
3604 			   sizeof(actions[n].conf));
3605 		handle[copied_n].index = n;
3606 		copied_n++;
3607 	}
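	/* Include the terminating END action in the total count. */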
3608 	n++;
3609 	*indir_n = copied_n;
3610 	if (!copied_n)
3611 		return 0;
3612 	actions_size = sizeof(struct rte_flow_action) * n;
3613 	translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3614 	if (!translated) {
3615 		rte_errno = ENOMEM;
3616 		return -ENOMEM;
3617 	}
3618 	memcpy(translated, actions, actions_size);
3619 	for (handle_end = handle + copied_n; handle < handle_end; handle++) {
3620 		struct mlx5_shared_action_rss *shared_rss;
3621 		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
3622 		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3623 		uint32_t idx = act_idx &
3624 			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3625 
3626 		switch (type) {
3627 		case MLX5_INDIRECT_ACTION_TYPE_RSS:
3628 			shared_rss = mlx5_ipool_get
3629 			  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3630 			translated[handle->index].type =
3631 				RTE_FLOW_ACTION_TYPE_RSS;
3632 			translated[handle->index].conf =
3633 				&shared_rss->origin;
3634 			break;
3635 		case MLX5_INDIRECT_ACTION_TYPE_COUNT:
3636 			translated[handle->index].type =
3637 						(enum rte_flow_action_type)
3638 						MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
3639 			translated[handle->index].conf = (void *)(uintptr_t)idx;
3640 			break;
3641 		case MLX5_INDIRECT_ACTION_TYPE_AGE:
3642 			if (priv->sh->flow_hit_aso_en) {
3643 				translated[handle->index].type =
3644 					(enum rte_flow_action_type)
3645 					MLX5_RTE_FLOW_ACTION_TYPE_AGE;
3646 				translated[handle->index].conf =
3647 							 (void *)(uintptr_t)idx;
3648 				break;
3649 			}
3650 			/* Fall-through */
3651 		case MLX5_INDIRECT_ACTION_TYPE_CT:
3652 			if (priv->sh->ct_aso_en) {
3653 				translated[handle->index].type =
3654 					RTE_FLOW_ACTION_TYPE_CONNTRACK;
3655 				translated[handle->index].conf =
3656 							 (void *)(uintptr_t)idx;
3657 				break;
3658 			}
3659 			/* Fall-through */
3660 		default:
3661 			mlx5_free(translated);
3662 			return rte_flow_error_set
3663 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3664 				 NULL, "invalid indirect action type");
3665 		}
3666 	}
3667 	*translated_actions = translated;
3668 	return 0;
3669 }
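
/*
 * Illustrative sketch of the handle encoding decoded above (the
 * offset value 29 is assumed only for the sake of the example; the
 * real one is MLX5_INDIRECT_ACTION_TYPE_OFFSET): the handle is not a
 * real pointer but a 32-bit value carrying the action type in the
 * high bits and the ipool index in the low bits, e.g.:
 *
 *   act_idx = (MLX5_INDIRECT_ACTION_TYPE_RSS << 29) | 5u;
 *   // type == MLX5_INDIRECT_ACTION_TYPE_RSS, idx == 5
 */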
3670 
3671 /**
3672  * Get Shared RSS action from the action list.
3673  *
3674  * @param[in] dev
3675  *   Pointer to Ethernet device.
3676  * @param[in] handle
3677  *   Pointer to the list of translated action handles.
3678  * @param[in] shared_n
3679  *   Handle list length.
3680  *
3681  * @return
3682  *   The MLX5 RSS action ID if it exists, otherwise 0.
3683  */
3684 static uint32_t
3685 flow_get_shared_rss_action(struct rte_eth_dev *dev,
3686 			   struct mlx5_translated_action_handle *handle,
3687 			   int shared_n)
3688 {
3689 	struct mlx5_translated_action_handle *handle_end;
3690 	struct mlx5_priv *priv = dev->data->dev_private;
3691 	struct mlx5_shared_action_rss *shared_rss;
3692 
3694 	for (handle_end = handle + shared_n; handle < handle_end; handle++) {
3695 		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
3696 		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3697 		uint32_t idx = act_idx &
3698 			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3699 		switch (type) {
3700 		case MLX5_INDIRECT_ACTION_TYPE_RSS:
3701 			shared_rss = mlx5_ipool_get
3702 				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
3703 									   idx);
3704 			__atomic_add_fetch(&shared_rss->refcnt, 1,
3705 					   __ATOMIC_RELAXED);
3706 			return idx;
3707 		default:
3708 			break;
3709 		}
3710 	}
3711 	return 0;
3712 }
3713 
3714 static unsigned int
3715 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3716 {
3717 	const struct rte_flow_item *item;
3718 	unsigned int has_vlan = 0;
3719 
3720 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3721 		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3722 			has_vlan = 1;
3723 			break;
3724 		}
3725 	}
3726 	if (has_vlan)
3727 		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3728 				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3729 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3730 			       MLX5_EXPANSION_ROOT_OUTER;
3731 }
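
/*
 * Illustrative example (not part of the driver): for the pattern
 *   ETH / VLAN / IPV4
 * with RSS level 1 (outer hashing) the helper above returns
 * MLX5_EXPANSION_ROOT_ETH_VLAN, while the same pattern with RSS
 * level 2 (inner hashing) selects MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN.
 */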
3732 
3733 /**
3734  *  Get layer flags from the prefix flow.
3735  *
3736  *  Some flows may be split into several subflows; the prefix subflow gets the
3737  *  match items and the suffix subflow gets the actions.
3738  *  Some actions need the user-defined match item flags to get the details for
3739  *  the action.
3740  *  This function helps the suffix flow to get the item layer flags from the
3741  *  prefix subflow.
3742  *
3743  * @param[in] dev_flow
3744  *   Pointer to the created prefix subflow.
3745  *
3746  * @return
3747  *   The layers obtained from the prefix subflow.
3748  */
3749 static inline uint64_t
3750 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3751 {
3752 	uint64_t layers = 0;
3753 
3754 	/*
3755 	 * The layer bits could be cached in a local variable, but usually the
3756 	 * compiler does this optimization on its own.
3757 	 * If there is no decap action, use the layers directly.
3758 	 */
3759 	if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3760 		return dev_flow->handle->layers;
3761 	/* Convert L3 layers with decap action. */
3762 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3763 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3764 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3765 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3766 	/* Convert L4 layers with decap action.  */
3767 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3768 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3769 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3770 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3771 	return layers;
3772 }
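
/*
 * Illustrative example (not part of the driver): with a decap action,
 * the inner layers recorded on the prefix subflow are reported as
 * outer layers for the suffix subflow, e.g.:
 *
 *   in : MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L4_UDP
 *   out: MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP
 */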
3773 
3774 /**
3775  * Get metadata split action information.
3776  *
3777  * @param[in] actions
3778  *   Pointer to the list of actions.
3779  * @param[out] qrss
3780  *   Pointer to the return pointer, set to the QUEUE/RSS action if one is
3781  *   found and left untouched otherwise.
3784  * @param[out] encap_idx
3785  *   Pointer to the index of the encap action if exists, otherwise the last
3786  *   action index.
3787  *
3788  * @return
3789  *   Total number of actions.
3790  */
3791 static int
3792 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3793 				       const struct rte_flow_action **qrss,
3794 				       int *encap_idx)
3795 {
3796 	const struct rte_flow_action_raw_encap *raw_encap;
3797 	int actions_n = 0;
3798 	int raw_decap_idx = -1;
3799 
3800 	*encap_idx = -1;
3801 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3802 		switch (actions->type) {
3803 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3804 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3805 			*encap_idx = actions_n;
3806 			break;
3807 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3808 			raw_decap_idx = actions_n;
3809 			break;
3810 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3811 			raw_encap = actions->conf;
3812 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3813 				*encap_idx = raw_decap_idx != -1 ?
3814 						      raw_decap_idx : actions_n;
3815 			break;
3816 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3817 		case RTE_FLOW_ACTION_TYPE_RSS:
3818 			*qrss = actions;
3819 			break;
3820 		default:
3821 			break;
3822 		}
3823 		actions_n++;
3824 	}
3825 	if (*encap_idx == -1)
3826 		*encap_idx = actions_n;
3827 	/* Count RTE_FLOW_ACTION_TYPE_END. */
3828 	return actions_n + 1;
3829 }
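
/*
 * Illustrative example (not part of the driver): for the action list
 *   RAW_DECAP / RAW_ENCAP(size > decision size) / QUEUE / END
 * the parser above sets *qrss to the QUEUE action, sets *encap_idx
 * to 0 (the RAW_DECAP position) and returns 4 (three actions plus
 * the END action).
 */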
3830 
3831 /**
3832  * Check if the action will change packet.
3833  *
3834  * @param dev
3835  *   Pointer to Ethernet device.
3836  * @param[in] type
3837  *   Action type.
3838  *
3839  * @return
3840  *   true if action will change packet, false otherwise.
3841  */
3842 static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
3843 					  enum rte_flow_action_type type)
3844 {
3845 	struct mlx5_priv *priv = dev->data->dev_private;
3846 
3847 	switch (type) {
3848 	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3849 	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3850 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3851 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3852 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3853 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3854 	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3855 	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3856 	case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3857 	case RTE_FLOW_ACTION_TYPE_SET_TTL:
3858 	case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3859 	case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3860 	case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3861 	case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3862 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
3863 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
3864 	case RTE_FLOW_ACTION_TYPE_SET_META:
3865 	case RTE_FLOW_ACTION_TYPE_SET_TAG:
3866 	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3867 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3868 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3869 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3870 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3871 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3872 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3873 	case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3874 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3875 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3876 	case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3877 		return true;
3878 	case RTE_FLOW_ACTION_TYPE_FLAG:
3879 	case RTE_FLOW_ACTION_TYPE_MARK:
3880 		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
3881 			return true;
3882 		else
3883 			return false;
3884 	default:
3885 		return false;
3886 	}
3887 }
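
/*
 * Illustrative note (not part of the driver): FLAG and MARK only
 * count as packet-modifying actions in the extended metadata modes,
 * where they are implemented by writing a metadata register, e.g.:
 *
 *   dv_xmeta_en == MLX5_XMETA_MODE_LEGACY  ->  false
 *   dv_xmeta_en == MLX5_XMETA_MODE_META16  ->  true
 */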
3888 
3889 /**
3890  * Check meter action from the action list.
3891  *
3892  * @param dev
3893  *   Pointer to Ethernet device.
3894  * @param[in] actions
3895  *   Pointer to the list of actions.
3896  * @param[out] has_mtr
3897  *   Pointer to the meter existence flag.
3898  * @param[out] has_modify
3899  *   Pointer to the flag indicating a packet-modifying action is present.
3900  * @param[out] meter_id
3901  *   Pointer to the meter id.
3902  *
3903  * @return
3904  *   Total number of actions.
3905  */
3906 static int
3907 flow_check_meter_action(struct rte_eth_dev *dev,
3908 			const struct rte_flow_action actions[],
3909 			bool *has_mtr, bool *has_modify, uint32_t *meter_id)
3910 {
3911 	const struct rte_flow_action_meter *mtr = NULL;
3912 	int actions_n = 0;
3913 
3914 	MLX5_ASSERT(has_mtr);
3915 	*has_mtr = false;
3916 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3917 		switch (actions->type) {
3918 		case RTE_FLOW_ACTION_TYPE_METER:
3919 			mtr = actions->conf;
3920 			*meter_id = mtr->mtr_id;
3921 			*has_mtr = true;
3922 			break;
3923 		default:
3924 			break;
3925 		}
3926 		if (!*has_mtr)
3927 			*has_modify |= flow_check_modify_action_type(dev,
3928 								actions->type);
3929 		actions_n++;
3930 	}
3931 	/* Count RTE_FLOW_ACTION_TYPE_END. */
3932 	return actions_n + 1;
3933 }
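
/*
 * Illustrative example (not part of the driver), assuming the caller
 * initialized *has_modify to false: for the action list
 *   SET_IPV4_SRC / METER(mtr_id = 7) / END
 * the check above sets *has_mtr = true, *meter_id = 7 and
 * *has_modify = true (SET_IPV4_SRC precedes the meter), and
 * returns 3.
 */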
3934 
3935 /**
3936  * Check if the flow should be split due to hairpin.
3937  * The reason for the split is that in current HW we can't
3938  * support encap and push-vlan on Rx, so if a flow contains
3939  * these actions we move it to Tx.
3940  *
3941  * @param dev
3942  *   Pointer to Ethernet device.
3943  * @param[in] attr
3944  *   Flow rule attributes.
3945  * @param[in] actions
3946  *   Associated actions (list terminated by the END action).
3947  *
3948  * @return
3949  *   > 0 the number of actions and the flow should be split,
3950  *   0 when no split required.
3951  */
3952 static int
3953 flow_check_hairpin_split(struct rte_eth_dev *dev,
3954 			 const struct rte_flow_attr *attr,
3955 			 const struct rte_flow_action actions[])
3956 {
3957 	int queue_action = 0;
3958 	int action_n = 0;
3959 	int split = 0;
3960 	const struct rte_flow_action_queue *queue;
3961 	const struct rte_flow_action_rss *rss;
3962 	const struct rte_flow_action_raw_encap *raw_encap;
3963 	const struct rte_eth_hairpin_conf *conf;
3964 
3965 	if (!attr->ingress)
3966 		return 0;
3967 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3968 		switch (actions->type) {
3969 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3970 			queue = actions->conf;
3971 			if (queue == NULL)
3972 				return 0;
3973 			conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3974 			if (conf == NULL || conf->tx_explicit != 0)
3975 				return 0;
3976 			queue_action = 1;
3977 			action_n++;
3978 			break;
3979 		case RTE_FLOW_ACTION_TYPE_RSS:
3980 			rss = actions->conf;
3981 			if (rss == NULL || rss->queue_num == 0)
3982 				return 0;
3983 			conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3984 			if (conf == NULL || conf->tx_explicit != 0)
3985 				return 0;
3986 			queue_action = 1;
3987 			action_n++;
3988 			break;
3989 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3990 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3991 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3992 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3993 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3994 			split++;
3995 			action_n++;
3996 			break;
3997 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3998 			raw_encap = actions->conf;
3999 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4000 				split++;
4001 			action_n++;
4002 			break;
4003 		default:
4004 			action_n++;
4005 			break;
4006 		}
4007 	}
4008 	if (split && queue_action)
4009 		return action_n;
4010 	return 0;
4011 }
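
/*
 * Illustrative example (not part of the driver): an ingress flow with
 *   VXLAN_ENCAP / QUEUE(hairpin queue with implicit Tx rule) / END
 * contains both an encap and a queue action, so the check above
 * returns 2 and the caller splits the flow, moving the encap to the
 * Tx side.
 */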
4012 
4013 /* Declare flow create/destroy prototypes in advance. */
4014 static uint32_t
4015 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
4016 		 const struct rte_flow_attr *attr,
4017 		 const struct rte_flow_item items[],
4018 		 const struct rte_flow_action actions[],
4019 		 bool external, struct rte_flow_error *error);
4020 
4021 static void
4022 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
4023 		  uint32_t flow_idx);
4024 
4025 int
4026 flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
4027 		      struct mlx5_hlist_entry *entry,
4028 		      uint64_t key, void *cb_ctx __rte_unused)
4029 {
4030 	struct mlx5_flow_mreg_copy_resource *mcp_res =
4031 		container_of(entry, typeof(*mcp_res), hlist_ent);
4032 
4033 	return mcp_res->mark_id != key;
4034 }
4035 
4036 struct mlx5_hlist_entry *
4037 flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
4038 		       void *cb_ctx)
4039 {
4040 	struct rte_eth_dev *dev = list->ctx;
4041 	struct mlx5_priv *priv = dev->data->dev_private;
4042 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4043 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4044 	struct rte_flow_error *error = ctx->error;
4045 	uint32_t idx = 0;
4046 	int ret;
4047 	uint32_t mark_id = key;
4048 	struct rte_flow_attr attr = {
4049 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4050 		.ingress = 1,
4051 	};
4052 	struct mlx5_rte_flow_item_tag tag_spec = {
4053 		.data = mark_id,
4054 	};
4055 	struct rte_flow_item items[] = {
4056 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
4057 	};
4058 	struct rte_flow_action_mark ftag = {
4059 		.id = mark_id,
4060 	};
4061 	struct mlx5_flow_action_copy_mreg cp_mreg = {
4062 		.dst = REG_B,
4063 		.src = REG_NON,
4064 	};
4065 	struct rte_flow_action_jump jump = {
4066 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
4067 	};
4068 	struct rte_flow_action actions[] = {
4069 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
4070 	};
4071 
4072 	/* Fill the register fields in the flow. */
4073 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
4074 	if (ret < 0)
4075 		return NULL;
4076 	tag_spec.id = ret;
4077 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4078 	if (ret < 0)
4079 		return NULL;
4080 	cp_mreg.src = ret;
4081 	/* Provide the full width of FLAG specific value. */
4082 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
4083 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
4084 	/* Build a new flow. */
4085 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
4086 		items[0] = (struct rte_flow_item){
4087 			.type = (enum rte_flow_item_type)
4088 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4089 			.spec = &tag_spec,
4090 		};
4091 		items[1] = (struct rte_flow_item){
4092 			.type = RTE_FLOW_ITEM_TYPE_END,
4093 		};
4094 		actions[0] = (struct rte_flow_action){
4095 			.type = (enum rte_flow_action_type)
4096 				MLX5_RTE_FLOW_ACTION_TYPE_MARK,
4097 			.conf = &ftag,
4098 		};
4099 		actions[1] = (struct rte_flow_action){
4100 			.type = (enum rte_flow_action_type)
4101 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4102 			.conf = &cp_mreg,
4103 		};
4104 		actions[2] = (struct rte_flow_action){
4105 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4106 			.conf = &jump,
4107 		};
4108 		actions[3] = (struct rte_flow_action){
4109 			.type = RTE_FLOW_ACTION_TYPE_END,
4110 		};
4111 	} else {
4112 		/* Default rule, wildcard match. */
4113 		attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
4114 		items[0] = (struct rte_flow_item){
4115 			.type = RTE_FLOW_ITEM_TYPE_END,
4116 		};
4117 		actions[0] = (struct rte_flow_action){
4118 			.type = (enum rte_flow_action_type)
4119 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4120 			.conf = &cp_mreg,
4121 		};
4122 		actions[1] = (struct rte_flow_action){
4123 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4124 			.conf = &jump,
4125 		};
4126 		actions[2] = (struct rte_flow_action){
4127 			.type = RTE_FLOW_ACTION_TYPE_END,
4128 		};
4129 	}
4130 	/* Build a new entry. */
4131 	mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
4132 	if (!mcp_res) {
4133 		rte_errno = ENOMEM;
4134 		return NULL;
4135 	}
4136 	mcp_res->idx = idx;
4137 	mcp_res->mark_id = mark_id;
4138 	/*
4139 	 * The copy flows are not included in any list. These
4140 	 * are referenced from other flows and cannot be
4141 	 * applied, removed or deleted in arbitrary order
4142 	 * by list traversal.
4143 	 */
4144 	mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
4145 					 actions, false, error);
4146 	if (!mcp_res->rix_flow) {
4147 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
4148 		return NULL;
4149 	}
4150 	return &mcp_res->hlist_ent;
4151 }
4152 
4153 /**
4154  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4155  *
4156  * As mark_id is unique, if there's already a registered flow for the mark_id,
4157  * return by increasing the reference counter of the resource. Otherwise, create
4158  * the resource (mcp_res) and flow.
4159  *
4160  * Flow looks like,
4161  *   - If ingress port is ANY and reg_c[1] is mark_id,
4162  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4163  *
4164  * For default flow (zero mark_id), flow is like,
4165  *   - If ingress port is ANY,
4166  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
4167  *
4168  * @param dev
4169  *   Pointer to Ethernet device.
4170  * @param mark_id
4171  *   ID of MARK action, zero means default flow for META.
4172  * @param[out] error
4173  *   Perform verbose error reporting if not NULL.
4174  *
4175  * @return
4176  *   Associated resource on success, NULL otherwise and rte_errno is set.
4177  */
4178 static struct mlx5_flow_mreg_copy_resource *
4179 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
4180 			  struct rte_flow_error *error)
4181 {
4182 	struct mlx5_priv *priv = dev->data->dev_private;
4183 	struct mlx5_hlist_entry *entry;
4184 	struct mlx5_flow_cb_ctx ctx = {
4185 		.dev = dev,
4186 		.error = error,
4187 	};
4188 
4189 	/* Check if already registered. */
4190 	MLX5_ASSERT(priv->mreg_cp_tbl);
4191 	entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
4192 	if (!entry)
4193 		return NULL;
4194 	return container_of(entry, struct mlx5_flow_mreg_copy_resource,
4195 			    hlist_ent);
4196 }
4197 
4198 void
4199 flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
4200 {
4201 	struct mlx5_flow_mreg_copy_resource *mcp_res =
4202 		container_of(entry, typeof(*mcp_res), hlist_ent);
4203 	struct rte_eth_dev *dev = list->ctx;
4204 	struct mlx5_priv *priv = dev->data->dev_private;
4205 
4206 	MLX5_ASSERT(mcp_res->rix_flow);
4207 	flow_list_destroy(dev, NULL, mcp_res->rix_flow);
4208 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
4209 }
4210 
4211 /**
4212  * Release flow in RX_CP_TBL.
4213  *
4214  * @param dev
4215  *   Pointer to Ethernet device.
4216  * @param flow
4217  *   Parent flow for which copying is provided.
4218  */
4219 static void
4220 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
4221 			  struct rte_flow *flow)
4222 {
4223 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4224 	struct mlx5_priv *priv = dev->data->dev_private;
4225 
4226 	if (!flow->rix_mreg_copy)
4227 		return;
4228 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
4229 				 flow->rix_mreg_copy);
4230 	if (!mcp_res || !priv->mreg_cp_tbl)
4231 		return;
4232 	MLX5_ASSERT(mcp_res->rix_flow);
4233 	mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
4234 	flow->rix_mreg_copy = 0;
4235 }
4236 
4237 /**
4238  * Remove the default copy action from RX_CP_TBL.
4239  *
4240  * This function is called in mlx5_dev_start(). Thread safety is not
4241  * guaranteed.
4242  *
4243  * @param dev
4244  *   Pointer to Ethernet device.
4245  */
4246 static void
4247 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
4248 {
4249 	struct mlx5_hlist_entry *entry;
4250 	struct mlx5_priv *priv = dev->data->dev_private;
4251 
4252 	/* Check if default flow is registered. */
4253 	if (!priv->mreg_cp_tbl)
4254 		return;
4255 	entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
4256 				  MLX5_DEFAULT_COPY_ID, NULL);
4257 	if (!entry)
4258 		return;
4259 	mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
4260 }
4261 
4262 /**
4263  * Add the default copy action in RX_CP_TBL.
4264  *
4265  * This function is called in mlx5_dev_start(). Thread safety is not
4266  * guaranteed.
4267  *
4268  * @param dev
4269  *   Pointer to Ethernet device.
4270  * @param[out] error
4271  *   Perform verbose error reporting if not NULL.
4272  *
4273  * @return
4274  *   0 for success, negative value otherwise and rte_errno is set.
4275  */
4276 static int
4277 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
4278 				  struct rte_flow_error *error)
4279 {
4280 	struct mlx5_priv *priv = dev->data->dev_private;
4281 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4282 
4283 	/* Check whether extensive metadata feature is engaged. */
4284 	if (!priv->config.dv_flow_en ||
4285 	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4286 	    !mlx5_flow_ext_mreg_supported(dev) ||
4287 	    !priv->sh->dv_regc0_mask)
4288 		return 0;
4289 	/*
4290 	 * Adding the default mreg copy flow may be called multiple times,
4291 	 * but it is only removed once, in stop. Avoid registering it twice.
4292 	 */
4293 	if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
4294 		return 0;
4295 	mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
4296 	if (!mcp_res)
4297 		return -rte_errno;
4298 	return 0;
4299 }
4300 
4301 /**
4302  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4303  *
4304  * All the flows having a Q/RSS action should be split by
4305  * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in
4306  * RX_CP_TBL performs the following,
4307  *   - CQE->flow_tag := reg_c[1] (MARK)
4308  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4309  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
4310  * but there should be a flow per each MARK ID set by MARK action.
4311  *
4312  * For the aforementioned reason, if there's a MARK action in flow's action
4313  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
4314  * the MARK ID to CQE's flow_tag like,
4315  *   - If reg_c[1] is mark_id,
4316  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4317  *
4318  * For SET_META action which stores value in reg_c[0], as the destination is
4319  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
4320  * MARK ID means the default flow. The default flow looks like,
4321  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4322  *
4323  * @param dev
4324  *   Pointer to Ethernet device.
4325  * @param flow
4326  *   Pointer to flow structure.
4327  * @param[in] actions
4328  *   Pointer to the list of actions.
4329  * @param[out] error
4330  *   Perform verbose error reporting if not NULL.
4331  *
4332  * @return
4333  *   0 on success, negative value otherwise and rte_errno is set.
4334  */
4335 static int
4336 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
4337 			    struct rte_flow *flow,
4338 			    const struct rte_flow_action *actions,
4339 			    struct rte_flow_error *error)
4340 {
4341 	struct mlx5_priv *priv = dev->data->dev_private;
4342 	struct mlx5_dev_config *config = &priv->config;
4343 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4344 	const struct rte_flow_action_mark *mark;
4345 
4346 	/* Check whether extensive metadata feature is engaged. */
4347 	if (!config->dv_flow_en ||
4348 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4349 	    !mlx5_flow_ext_mreg_supported(dev) ||
4350 	    !priv->sh->dv_regc0_mask)
4351 		return 0;
4352 	/* Find MARK action. */
4353 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4354 		switch (actions->type) {
4355 		case RTE_FLOW_ACTION_TYPE_FLAG:
4356 			mcp_res = flow_mreg_add_copy_action
4357 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
4358 			if (!mcp_res)
4359 				return -rte_errno;
4360 			flow->rix_mreg_copy = mcp_res->idx;
4361 			return 0;
4362 		case RTE_FLOW_ACTION_TYPE_MARK:
4363 			mark = (const struct rte_flow_action_mark *)
4364 				actions->conf;
4365 			mcp_res =
4366 				flow_mreg_add_copy_action(dev, mark->id, error);
4367 			if (!mcp_res)
4368 				return -rte_errno;
4369 			flow->rix_mreg_copy = mcp_res->idx;
4370 			return 0;
4371 		default:
4372 			break;
4373 		}
4374 	}
4375 	return 0;
4376 }
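
/*
 * Illustrative example (not part of the driver): for an action list
 * containing MARK(id = 0x21), a copy flow for mark ID 0x21 is
 * registered in RX_CP_TBL (or its reference count is increased) and
 * flow->rix_mreg_copy records the resource index for later release.
 */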
4377 
4378 #define MLX5_MAX_SPLIT_ACTIONS 24
4379 #define MLX5_MAX_SPLIT_ITEMS 24
4380 
4381 /**
4382  * Split the hairpin flow.
4383  * Since HW can't support encap and push-vlan on Rx, we move these
4384  * actions to Tx.
4385  * If the count action is after the encap then we also
4386  * move the count action. In this case the count will also measure
4387  * the outer bytes.
4388  *
4389  * @param dev
4390  *   Pointer to Ethernet device.
4391  * @param[in] actions
4392  *   Associated actions (list terminated by the END action).
4393  * @param[out] actions_rx
4394  *   Rx flow actions.
4395  * @param[out] actions_tx
4396  *   Tx flow actions.
4397  * @param[out] pattern_tx
4398  *   The pattern items for the Tx flow.
4399  * @param[out] flow_id
4400  *   The flow ID connected to this flow.
4401  *
4402  * @return
4403  *   0 on success.
4404  */
4405 static int
4406 flow_hairpin_split(struct rte_eth_dev *dev,
4407 		   const struct rte_flow_action actions[],
4408 		   struct rte_flow_action actions_rx[],
4409 		   struct rte_flow_action actions_tx[],
4410 		   struct rte_flow_item pattern_tx[],
4411 		   uint32_t flow_id)
4412 {
4413 	const struct rte_flow_action_raw_encap *raw_encap;
4414 	const struct rte_flow_action_raw_decap *raw_decap;
4415 	struct mlx5_rte_flow_action_set_tag *set_tag;
4416 	struct rte_flow_action *tag_action;
4417 	struct mlx5_rte_flow_item_tag *tag_item;
4418 	struct rte_flow_item *item;
4419 	char *addr;
4420 	int encap = 0;
4421 
4422 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4423 		switch (actions->type) {
4424 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4425 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4426 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4427 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4428 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4429 			rte_memcpy(actions_tx, actions,
4430 			       sizeof(struct rte_flow_action));
4431 			actions_tx++;
4432 			break;
4433 		case RTE_FLOW_ACTION_TYPE_COUNT:
4434 			if (encap) {
4435 				rte_memcpy(actions_tx, actions,
4436 					   sizeof(struct rte_flow_action));
4437 				actions_tx++;
4438 			} else {
4439 				rte_memcpy(actions_rx, actions,
4440 					   sizeof(struct rte_flow_action));
4441 				actions_rx++;
4442 			}
4443 			break;
4444 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4445 			raw_encap = actions->conf;
4446 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
4447 				memcpy(actions_tx, actions,
4448 				       sizeof(struct rte_flow_action));
4449 				actions_tx++;
4450 				encap = 1;
4451 			} else {
4452 				rte_memcpy(actions_rx, actions,
4453 					   sizeof(struct rte_flow_action));
4454 				actions_rx++;
4455 			}
4456 			break;
4457 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4458 			raw_decap = actions->conf;
4459 			if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
4460 				memcpy(actions_tx, actions,
4461 				       sizeof(struct rte_flow_action));
4462 				actions_tx++;
4463 			} else {
4464 				rte_memcpy(actions_rx, actions,
4465 					   sizeof(struct rte_flow_action));
4466 				actions_rx++;
4467 			}
4468 			break;
4469 		default:
4470 			rte_memcpy(actions_rx, actions,
4471 				   sizeof(struct rte_flow_action));
4472 			actions_rx++;
4473 			break;
4474 		}
4475 	}
4476 	/* Add set meta action and end action for the Rx flow. */
4477 	tag_action = actions_rx;
4478 	tag_action->type = (enum rte_flow_action_type)
4479 			   MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4480 	actions_rx++;
4481 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4482 	actions_rx++;
4483 	set_tag = (void *)actions_rx;
4484 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
4485 		.id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL),
4486 		.data = flow_id,
4487 	};
4488 	MLX5_ASSERT(set_tag->id > REG_NON);
4489 	tag_action->conf = set_tag;
4490 	/* Create Tx item list. */
4491 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4492 	addr = (void *)&pattern_tx[2];
4493 	item = pattern_tx;
4494 	item->type = (enum rte_flow_item_type)
4495 		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4496 	tag_item = (void *)addr;
4497 	tag_item->data = flow_id;
4498 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4499 	MLX5_ASSERT(set_tag->id > REG_NON);
4500 	item->spec = tag_item;
4501 	addr += sizeof(struct mlx5_rte_flow_item_tag);
4502 	tag_item = (void *)addr;
4503 	tag_item->data = UINT32_MAX;
4504 	tag_item->id = UINT16_MAX;
4505 	item->mask = tag_item;
4506 	item->last = NULL;
4507 	item++;
4508 	item->type = RTE_FLOW_ITEM_TYPE_END;
4509 	return 0;
4510 }
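
/*
 * Illustrative example (not part of the driver): for the input
 *   RAW_ENCAP(size > decision size) / QUEUE / END
 * with flow ID N, the split above yields the Rx action list
 *   QUEUE / TAG(data = N) / END
 * and the Tx action list
 *   RAW_ENCAP / END
 * where the Tx pattern matches on the internal TAG item
 * (spec.data = N, mask.data = UINT32_MAX).
 */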
4511 
4512 /**
4513  * The last stage of the splitting chain; it just creates the subflow
4514  * without any modification.
4515  *
4516  * @param[in] dev
4517  *   Pointer to Ethernet device.
4518  * @param[in] flow
4519  *   Parent flow structure pointer.
4520  * @param[in, out] sub_flow
4521  *   Pointer to return the created subflow, may be NULL.
4522  * @param[in] attr
4523  *   Flow rule attributes.
4524  * @param[in] items
4525  *   Pattern specification (list terminated by the END pattern item).
4526  * @param[in] actions
4527  *   Associated actions (list terminated by the END action).
4528  * @param[in] flow_split_info
4529  *   Pointer to flow split info structure.
4530  * @param[out] error
4531  *   Perform verbose error reporting if not NULL.
4532  * @return
4533  *   0 on success, negative value otherwise
4534  */
4535 static int
4536 flow_create_split_inner(struct rte_eth_dev *dev,
4537 			struct rte_flow *flow,
4538 			struct mlx5_flow **sub_flow,
4539 			const struct rte_flow_attr *attr,
4540 			const struct rte_flow_item items[],
4541 			const struct rte_flow_action actions[],
4542 			struct mlx5_flow_split_info *flow_split_info,
4543 			struct rte_flow_error *error)
4544 {
4545 	struct mlx5_flow *dev_flow;
4546 
4547 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4548 				    flow_split_info->flow_idx, error);
4549 	if (!dev_flow)
4550 		return -rte_errno;
4551 	dev_flow->flow = flow;
4552 	dev_flow->external = flow_split_info->external;
4553 	dev_flow->skip_scale = flow_split_info->skip_scale;
4554 	/* Subflow object was created, we must include it in the list. */
4555 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4556 		      dev_flow->handle, next);
4557 	/*
4558 	 * If dev_flow is one of the suffix flows, some actions in the suffix
4559 	 * flow may need the user-defined item layer flags; pass the metadata
4560 	 * Rx queue mark flag to the suffix flow as well.
4561 	 */
4562 	if (flow_split_info->prefix_layers)
4563 		dev_flow->handle->layers = flow_split_info->prefix_layers;
4564 	if (flow_split_info->prefix_mark)
4565 		dev_flow->handle->mark = 1;
4566 	if (sub_flow)
4567 		*sub_flow = dev_flow;
4568 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
4569 	dev_flow->dv.table_id = flow_split_info->table_id;
4570 #endif
4571 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
4572 }
4573 
4574 /**
4575  * Get the sub policy of a meter.
4576  *
4577  * @param[in] dev
4578  *   Pointer to Ethernet device.
4579  * @param[in] flow
4580  *   Parent flow structure pointer.
4581  * @param[in] policy_id
4582  *   Meter Policy id.
4583  * @param[in] attr
4584  *   Flow rule attributes.
4585  * @param[in] items
4586  *   Pattern specification (list terminated by the END pattern item).
4587  * @param[out] error
4588  *   Perform verbose error reporting if not NULL.
4589  *
4590  * @return
4591  *   Pointer to the meter sub policy, NULL otherwise and rte_errno is set.
4592  */
4593 static struct mlx5_flow_meter_sub_policy *
4594 get_meter_sub_policy(struct rte_eth_dev *dev,
4595 		     struct rte_flow *flow,
4596 		     uint32_t policy_id,
4597 		     const struct rte_flow_attr *attr,
4598 		     const struct rte_flow_item items[],
4599 		     struct rte_flow_error *error)
4600 {
4601 	struct mlx5_flow_meter_policy *policy;
4602 	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
4603 
4604 	policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL);
4605 	if (!policy) {
4606 		rte_flow_error_set(error, EINVAL,
4607 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4608 				   "Failed to find Meter Policy.");
4609 		goto exit;
4610 	}
4611 	if (policy->is_rss ||
4612 		(policy->is_queue &&
4613 	!policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) {
4614 		struct mlx5_flow_workspace *wks =
4615 				mlx5_flow_get_thread_workspace();
4616 		struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
4617 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
4618 		uint32_t i;
4619 
4620 		MLX5_ASSERT(wks);
4621 		/**
4622 		 * This is a tmp dev_flow,
4623 		 * no need to register any matcher for it in translate.
4624 		 */
4625 		wks->skip_matcher_reg = 1;
4626 		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
4627 			struct mlx5_flow dev_flow = {0};
4628 			struct mlx5_flow_handle dev_handle = { {0} };
4629 
4630 			if (policy->is_rss) {
4631 				const void *rss_act =
4632 					policy->act_cnt[i].rss->conf;
4633 				struct rte_flow_action rss_actions[2] = {
4634 					[0] = {
4635 					.type = RTE_FLOW_ACTION_TYPE_RSS,
4636 					.conf = rss_act
4637 					},
4638 					[1] = {
4639 					.type = RTE_FLOW_ACTION_TYPE_END,
4640 					.conf = NULL
4641 					}
4642 				};
4643 
4644 				dev_flow.handle = &dev_handle;
4645 				dev_flow.ingress = attr->ingress;
4646 				dev_flow.flow = flow;
4647 				dev_flow.external = 0;
4648 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
4649 				dev_flow.dv.transfer = attr->transfer;
4650 #endif
4651 				/**
4652 				 * Translate RSS action to get rss hash fields.
4653 				 */
4654 				if (flow_drv_translate(dev, &dev_flow, attr,
4655 						items, rss_actions, error))
4656 					goto exit;
4657 				rss_desc_v[i] = wks->rss_desc;
4658 				rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
4659 				rss_desc_v[i].hash_fields =
4660 						dev_flow.hash_fields;
4661 				rss_desc_v[i].queue_num =
4662 						rss_desc_v[i].hash_fields ?
4663 						rss_desc_v[i].queue_num : 1;
4664 				rss_desc_v[i].tunnel =
4665 					!!(dev_flow.handle->layers &
4666 					MLX5_FLOW_LAYER_TUNNEL);
4667 			} else {
4668 				/* This is queue action. */
4669 				rss_desc_v[i] = wks->rss_desc;
4670 				rss_desc_v[i].key_len = 0;
4671 				rss_desc_v[i].hash_fields = 0;
4672 				rss_desc_v[i].queue =
4673 					&policy->act_cnt[i].queue;
4674 				rss_desc_v[i].queue_num = 1;
4675 			}
4676 			rss_desc[i] = &rss_desc_v[i];
4677 		}
4678 		sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
4679 						flow, policy, rss_desc);
4680 	} else {
4681 		enum mlx5_meter_domain mtr_domain =
4682 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
4683 				attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
4684 					MLX5_MTR_DOMAIN_INGRESS;
4685 		sub_policy = policy->sub_policys[mtr_domain][0];
4686 	}
4687 	if (!sub_policy) {
4688 		rte_flow_error_set(error, EINVAL,
4689 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4690 			"Failed to get meter sub-policy.");
4691 		goto exit;
4692 	}
4693 exit:
4694 	return sub_policy;
4695 }
4696 
4697 /**
4698  * Split the meter flow.
4699  *
4700  * As the meter flow will be split into three subflows, the actions
4701  * other than the meter action only make sense if the meter accepts
4702  * the packet. If it needs to be dropped, no additional actions
4703  * should be taken.
4704  *
4705  * One kind of special action which decapsulates the L3 tunnel
4706  * header will be put in the prefix subflow, so as not to take the
4707  * L3 tunnel header into account.
4708  *
4709  * @param[in] dev
4710  *   Pointer to Ethernet device.
4711  * @param[in] flow
4712  *   Parent flow structure pointer.
4713  * @param[in] fm
4714  *   Pointer to flow meter structure.
4715  * @param[in] attr
4716  *   Flow rule attributes.
4717  * @param[in] items
4718  *   Pattern specification (list terminated by the END pattern item).
4719  * @param[out] sfx_items
4720  *   Suffix flow match items (list terminated by the END pattern item).
4721  * @param[in] actions
4722  *   Associated actions (list terminated by the END action).
4723  * @param[out] actions_sfx
4724  *   Suffix flow actions.
4725  * @param[out] actions_pre
4726  *   Prefix flow actions.
4727  * @param[out] mtr_flow_id
4728  *   Pointer to meter flow id.
4729  * @param[out] error
4730  *   Perform verbose error reporting if not NULL.
4731  *
4732  * @return
4733  *   0 on success, a negative errno value otherwise and rte_errno is set.
4734  */
4735 static int
4736 flow_meter_split_prep(struct rte_eth_dev *dev,
4737 		      struct rte_flow *flow,
4738 		      struct mlx5_flow_meter_info *fm,
4739 		      const struct rte_flow_attr *attr,
4740 		      const struct rte_flow_item items[],
4741 		      struct rte_flow_item sfx_items[],
4742 		      const struct rte_flow_action actions[],
4743 		      struct rte_flow_action actions_sfx[],
4744 		      struct rte_flow_action actions_pre[],
4745 		      uint32_t *mtr_flow_id,
4746 		      struct rte_flow_error *error)
4747 {
4748 	struct mlx5_priv *priv = dev->data->dev_private;
4749 	struct rte_flow_action *tag_action = NULL;
4750 	struct rte_flow_item *tag_item;
4751 	struct mlx5_rte_flow_action_set_tag *set_tag;
4752 	const struct rte_flow_action_raw_encap *raw_encap;
4753 	const struct rte_flow_action_raw_decap *raw_decap;
4754 	struct mlx5_rte_flow_item_tag *tag_item_spec;
4755 	struct mlx5_rte_flow_item_tag *tag_item_mask;
4756 	uint32_t tag_id = 0;
4757 	struct rte_flow_item *vlan_item_dst = NULL;
4758 	const struct rte_flow_item *vlan_item_src = NULL;
4759 	struct rte_flow_action *hw_mtr_action;
4760 	struct rte_flow_action *action_pre_head = NULL;
4761 	int32_t flow_src_port = priv->representor_id;
4762 	bool mtr_first;
4763 	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
4764 	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
4765 				MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
4766 	uint32_t flow_id = 0;
4767 	uint32_t flow_id_reversed = 0;
4768 	uint8_t flow_id_bits = 0;
4769 	int shift;
4770 
4771 	/* Prepare the suffix subflow items. */
4772 	tag_item = sfx_items++;
4773 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4774 		struct mlx5_priv *port_priv;
4775 		const struct rte_flow_item_port_id *pid_v;
4776 		int item_type = items->type;
4777 
4778 		switch (item_type) {
4779 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
4780 			pid_v = items->spec;
4781 			MLX5_ASSERT(pid_v);
4782 			port_priv = mlx5_port_to_eswitch_info(pid_v->id, false);
4783 			if (!port_priv)
4784 				return rte_flow_error_set(error,
4785 						rte_errno,
4786 						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
4787 						pid_v,
4788 						"Failed to get port info.");
4789 			flow_src_port = port_priv->representor_id;
4790 			memcpy(sfx_items, items, sizeof(*sfx_items));
4791 			sfx_items++;
4792 			break;
4793 		case RTE_FLOW_ITEM_TYPE_VLAN:
4794 			/* Determine whether to copy the VLAN item below. */
4795 			vlan_item_src = items;
4796 			vlan_item_dst = sfx_items++;
4797 			vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID;
4798 			break;
4799 		default:
4800 			break;
4801 		}
4802 	}
4803 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4804 	sfx_items++;
4805 	mtr_first = priv->sh->meter_aso_en &&
4806 		(attr->egress || (attr->transfer && flow_src_port != UINT16_MAX));
4807 	/* For ASO meter, meter must be before tag in TX direction. */
4808 	if (mtr_first) {
4809 		action_pre_head = actions_pre++;
4810 		/* Leave space for tag action. */
4811 		tag_action = actions_pre++;
4812 	}
4813 	/* Prepare the actions for prefix and suffix flow. */
4814 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4815 		struct rte_flow_action *action_cur = NULL;
4816 
4817 		switch (actions->type) {
4818 		case RTE_FLOW_ACTION_TYPE_METER:
4819 			if (mtr_first) {
4820 				action_cur = action_pre_head;
4821 			} else {
4822 				/* Leave space for tag action. */
4823 				tag_action = actions_pre++;
4824 				action_cur = actions_pre++;
4825 			}
4826 			break;
4827 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4828 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4829 			action_cur = actions_pre++;
4830 			break;
4831 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4832 			raw_encap = actions->conf;
4833 			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4834 				action_cur = actions_pre++;
4835 			break;
4836 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4837 			raw_decap = actions->conf;
4838 			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4839 				action_cur = actions_pre++;
4840 			break;
4841 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4842 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4843 			if (vlan_item_dst && vlan_item_src) {
4844 				memcpy(vlan_item_dst, vlan_item_src,
4845 					sizeof(*vlan_item_dst));
4846 				/*
4847 				 * Convert to internal match item, it is used
4848 				 * for vlan push and set vid.
4849 				 */
4850 				vlan_item_dst->type = (enum rte_flow_item_type)
4851 						MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4852 			}
4853 			break;
4854 		default:
4855 			break;
4856 		}
4857 		if (!action_cur)
4858 			action_cur = (fm->def_policy) ?
4859 					actions_sfx++ : actions_pre++;
4860 		memcpy(action_cur, actions, sizeof(struct rte_flow_action));
4861 	}
4862 	/* Add end action to the actions. */
4863 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4864 	if (priv->sh->meter_aso_en) {
4865 		/**
4866 		 * For ASO meter, an extra jump action needs to be added
4867 		 * explicitly, to jump from the meter to the policer table.
4868 		 */
4869 		struct mlx5_flow_meter_sub_policy *sub_policy;
4870 		struct mlx5_flow_tbl_data_entry *tbl_data;
4871 
4872 		if (!fm->def_policy) {
4873 			sub_policy = get_meter_sub_policy(dev, flow,
4874 							  fm->policy_id, attr,
4875 							  items, error);
4876 			if (!sub_policy)
4877 				return -rte_errno;
4878 		} else {
4879 			enum mlx5_meter_domain mtr_domain =
4880 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
4881 				attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
4882 					MLX5_MTR_DOMAIN_INGRESS;
4883 
4884 			sub_policy =
4885 			&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
4886 		}
4887 		tbl_data = container_of(sub_policy->tbl_rsc,
4888 					struct mlx5_flow_tbl_data_entry, tbl);
4889 		hw_mtr_action = actions_pre++;
4890 		hw_mtr_action->type = (enum rte_flow_action_type)
4891 				      MLX5_RTE_FLOW_ACTION_TYPE_JUMP;
4892 		hw_mtr_action->conf = tbl_data->jump.action;
4893 	}
4894 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4895 	actions_pre++;
4896 	if (!tag_action)
4897 		return rte_flow_error_set(error, ENOMEM,
4898 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4899 					"No tag action space.");
4900 	if (!mtr_flow_id) {
4901 		tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
4902 		goto exit;
4903 	}
4904 	/* Only the default-policy meter creates the mtr flow id. */
4905 	if (fm->def_policy) {
4906 		mlx5_ipool_malloc(fm->flow_ipool, &tag_id);
4907 		if (!tag_id)
4908 			return rte_flow_error_set(error, ENOMEM,
4909 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4910 					"Failed to allocate meter flow id.");
4911 		flow_id = tag_id - 1;
4912 		flow_id_bits = (!flow_id) ? 1 :
4913 				(MLX5_REG_BITS - __builtin_clz(flow_id));
4914 		if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) >
4915 		    mtr_reg_bits) {
4916 			mlx5_ipool_free(fm->flow_ipool, tag_id);
4917 			return rte_flow_error_set(error, EINVAL,
4918 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4919 					"Meter flow id exceeds max limit.");
4920 		}
4921 		if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits)
4922 			priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits;
4923 	}
4924 	/* Build tag actions and items for meter_id/meter flow_id. */
4925 	set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
4926 	tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4927 	tag_item_mask = tag_item_spec + 1;
4928 	/* Both flow_id and meter_id share the same register. */
4929 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
4930 		.id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
4931 							    0, error),
4932 		.offset = mtr_id_offset,
4933 		.length = mtr_reg_bits,
4934 		.data = flow->meter,
4935 	};
4936 	/*
4937 	 * The color register bits used by flow_id grow from MSB to LSB,
4938 	 * so the flow_id value must be bit-reversed within reg_c.
4939 	 */
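	/*
	 * Illustrative example (not part of the driver logic): with
	 * flow_id = 0x6 (binary 110, flow_id_bits = 3) the loop below
	 * yields flow_id_reversed = 011b = 0x3, which is then shifted
	 * into the most significant bits of the flow_id sub-field:
	 * 0x3 << (mtr_reg_bits - 3).
	 */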
4940 	for (shift = 0; shift < flow_id_bits; shift++)
4941 		flow_id_reversed = (flow_id_reversed << 1) |
4942 				((flow_id >> shift) & 0x1);
4943 	set_tag->data |=
4944 		flow_id_reversed << (mtr_reg_bits - flow_id_bits);
4945 	tag_item_spec->id = set_tag->id;
4946 	tag_item_spec->data = set_tag->data << mtr_id_offset;
4947 	tag_item_mask->data = UINT32_MAX << mtr_id_offset;
4948 	tag_action->type = (enum rte_flow_action_type)
4949 				MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4950 	tag_action->conf = set_tag;
4951 	tag_item->type = (enum rte_flow_item_type)
4952 				MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4953 	tag_item->spec = tag_item_spec;
4954 	tag_item->last = NULL;
4955 	tag_item->mask = tag_item_mask;
4956 exit:
4957 	if (mtr_flow_id)
4958 		*mtr_flow_id = tag_id;
4959 	return 0;
4960 }
4961 
4962 /**
4963  * Split action list having QUEUE/RSS for metadata register copy.
4964  *
4965  * Once a Q/RSS action is detected in the user's action list, the flow
4966  * actions should be split in order to copy the metadata registers, which
4967  * happens in RX_CP_TBL as follows:
4968  *   - CQE->flow_tag := reg_c[1] (MARK)
4969  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4970  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4971  * This is because the last action of each flow must be a terminal action
4972  * (QUEUE, RSS or DROP).
4973  *
4974  * A flow ID must be allocated to identify actions in the RX_ACT_TBL and it
4975  * is stored and kept in the mlx5_flow structure for each sub_flow.
4976  *
4977  * The Q/RSS action is replaced with,
4978  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
4979  * And the following JUMP action is added at the end,
4980  *   - JUMP, to RX_CP_TBL.
4981  *
4982  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL
4983  * by the flow_create_split_metadata() routine. The flow will look like,
4984  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
4985  *
4986  * @param dev
4987  *   Pointer to Ethernet device.
4988  * @param[out] split_actions
4989  *   Pointer to store split actions to jump to CP_TBL.
4990  * @param[in] actions
4991  *   Pointer to the list of original flow actions.
4992  * @param[in] qrss
4993  *   Pointer to the Q/RSS action.
4994  * @param[in] actions_n
4995  *   Number of original actions.
4996  * @param[out] error
4997  *   Perform verbose error reporting if not NULL.
4998  *
4999  * @return
5000  *   Non-zero unique flow_id on success, otherwise 0 and
5001  *   error/rte_errno are set.
5002  */
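/*
 * Layout sketch of the split_actions buffer after this function returns
 * (assuming the caller sized it as in flow_create_split_metadata(), i.e.
 * actions_n + 1 action slots plus trailing set_tag and jump storage):
 *
 *   [0 .. n-3]  original actions, Q/RSS replaced by an internal SET_TAG
 *   [n-2]       JUMP to MLX5_FLOW_MREG_CP_TABLE_GROUP (RX_CP_TBL)
 *   [n-1]       END
 *   trailing    mlx5_rte_flow_action_set_tag and rte_flow_action_jump
 *
 * where n is the incremented actions_n.
 */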
5003 static uint32_t
5004 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
5005 			  struct rte_flow_action *split_actions,
5006 			  const struct rte_flow_action *actions,
5007 			  const struct rte_flow_action *qrss,
5008 			  int actions_n, struct rte_flow_error *error)
5009 {
5010 	struct mlx5_priv *priv = dev->data->dev_private;
5011 	struct mlx5_rte_flow_action_set_tag *set_tag;
5012 	struct rte_flow_action_jump *jump;
5013 	const int qrss_idx = qrss - actions;
5014 	uint32_t flow_id = 0;
5015 	int ret = 0;
5016 
5017 	/*
5018 	 * Given actions will be split
5019 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
5020 	 * - Add jump to mreg CP_TBL.
5021 	 * As a result, there will be one more action.
5022 	 */
5023 	++actions_n;
5024 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
5025 	set_tag = (void *)(split_actions + actions_n);
5026 	/*
5027 	 * If the tag action is not void (i.e. this is not the meter
5028 	 * suffix flow), add the tag action; the meter suffix flow
5029 	 * already has the tag added.
5030 	 */
5031 	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
5032 		/*
5033 		 * Allocate the new subflow ID. This one is unique within
5034 		 * device and not shared with representors. Otherwise,
5035 		 * we would have to resolve multi-thread access synch
5036 		 * issue. Each flow on the shared device is appended
5037 		 * with source vport identifier, so the resulting
5038 		 * flows will be unique in the shared (by master and
5039 		 * representors) domain even if they have coinciding
5040 		 * IDs.
5041 		 */
5042 		mlx5_ipool_malloc(priv->sh->ipool
5043 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
5044 		if (!flow_id)
5045 			return rte_flow_error_set(error, ENOMEM,
5046 						  RTE_FLOW_ERROR_TYPE_ACTION,
5047 						  NULL, "can't allocate id "
5048 						  "for split Q/RSS subflow");
5049 		/* Internal SET_TAG action to set flow ID. */
5050 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
5051 			.data = flow_id,
5052 		};
5053 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
5054 		if (ret < 0)
5055 			return ret;
5056 		set_tag->id = ret;
5057 		/* Construct new actions array. */
5058 		/* Replace QUEUE/RSS action. */
5059 		split_actions[qrss_idx] = (struct rte_flow_action){
5060 			.type = (enum rte_flow_action_type)
5061 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
5062 			.conf = set_tag,
5063 		};
5064 	}
5065 	/* JUMP action to jump to mreg copy table (CP_TBL). */
5066 	jump = (void *)(set_tag + 1);
5067 	*jump = (struct rte_flow_action_jump){
5068 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5069 	};
5070 	split_actions[actions_n - 2] = (struct rte_flow_action){
5071 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
5072 		.conf = jump,
5073 	};
5074 	split_actions[actions_n - 1] = (struct rte_flow_action){
5075 		.type = RTE_FLOW_ACTION_TYPE_END,
5076 	};
5077 	return flow_id;
5078 }
5079 
5080 /**
5081  * Extend the given action list for Tx metadata copy.
5082  *
5083  * Copy the given action list to the ext_actions and add flow metadata register
5084  * copy action in order to copy reg_a set by WQE to reg_c[0].
5085  *
5086  * @param[out] ext_actions
5087  *   Pointer to the extended action list.
5088  * @param[in] actions
5089  *   Pointer to the list of actions.
5090  * @param[in] actions_n
5091  *   Number of actions in the list.
5092  * @param[out] error
5093  *   Perform verbose error reporting if not NULL.
5094  * @param[in] encap_idx
5095  *   The encap action index.
5096  *
5097  * @return
5098  *   0 on success, negative value otherwise
5099  */
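/*
 * Placement sketch (assumption drawn from the code below): cp_mreg is
 * stored past the action slots, at ext_actions + actions_n + 1. The
 * COPY_MREG action referencing it is placed either at the tail (just
 * before END) when the encap action is the last one, or at encap_idx so
 * that it precedes the encap action.
 */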
5100 static int
5101 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
5102 		       struct rte_flow_action *ext_actions,
5103 		       const struct rte_flow_action *actions,
5104 		       int actions_n, struct rte_flow_error *error,
5105 		       int encap_idx)
5106 {
5107 	struct mlx5_flow_action_copy_mreg *cp_mreg =
5108 		(struct mlx5_flow_action_copy_mreg *)
5109 			(ext_actions + actions_n + 1);
5110 	int ret;
5111 
5112 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
5113 	if (ret < 0)
5114 		return ret;
5115 	cp_mreg->dst = ret;
5116 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
5117 	if (ret < 0)
5118 		return ret;
5119 	cp_mreg->src = ret;
5120 	if (encap_idx != 0)
5121 		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
5122 	if (encap_idx == actions_n - 1) {
5123 		ext_actions[actions_n - 1] = (struct rte_flow_action){
5124 			.type = (enum rte_flow_action_type)
5125 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5126 			.conf = cp_mreg,
5127 		};
5128 		ext_actions[actions_n] = (struct rte_flow_action){
5129 			.type = RTE_FLOW_ACTION_TYPE_END,
5130 		};
5131 	} else {
5132 		ext_actions[encap_idx] = (struct rte_flow_action){
5133 			.type = (enum rte_flow_action_type)
5134 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5135 			.conf = cp_mreg,
5136 		};
5137 		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
5138 				sizeof(*ext_actions) * (actions_n - encap_idx));
5139 	}
5140 	return 0;
5141 }
5142 
5143 /**
5144  * Check the match action from the action list.
5145  *
5146  * @param[in] actions
5147  *   Pointer to the list of actions.
5148  * @param[in] attr
5149  *   Flow rule attributes.
5150  * @param[in] action
5151  *   The action to check for in the list.
5152  * @param[out] match_action_pos
5153  *   Pointer to the position of the matched action if it exists, otherwise -1.
5154  * @param[out] qrss_action_pos
5155  *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
5156  * @param[out] modify_after_mirror
5157  *   Pointer to the flag of modify action after FDB mirroring.
5158  *
5159  * @return
5160  *   > 0 the total number of actions.
5161  *   0 if the match action is not found in the action list.
5162  */
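/*
 * Example (illustrative): for actions = { MARK, SAMPLE, QUEUE, END } and
 * action = RTE_FLOW_ACTION_TYPE_SAMPLE on a non-transfer rule, the
 * function returns 4 (three actions plus END), with *match_action_pos = 1
 * and *qrss_action_pos = 2.
 */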
5163 static int
5164 flow_check_match_action(const struct rte_flow_action actions[],
5165 			const struct rte_flow_attr *attr,
5166 			enum rte_flow_action_type action,
5167 			int *match_action_pos, int *qrss_action_pos,
5168 			int *modify_after_mirror)
5169 {
5170 	const struct rte_flow_action_sample *sample;
5171 	int actions_n = 0;
5172 	uint32_t ratio = 0;
5173 	int sub_type = 0;
5174 	int flag = 0;
5175 	int fdb_mirror = 0;
5176 
5177 	*match_action_pos = -1;
5178 	*qrss_action_pos = -1;
5179 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5180 		if (actions->type == action) {
5181 			flag = 1;
5182 			*match_action_pos = actions_n;
5183 		}
5184 		switch (actions->type) {
5185 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5186 		case RTE_FLOW_ACTION_TYPE_RSS:
5187 			*qrss_action_pos = actions_n;
5188 			break;
5189 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
5190 			sample = actions->conf;
5191 			ratio = sample->ratio;
5192 			sub_type = ((const struct rte_flow_action *)
5193 					(sample->actions))->type;
5194 			if (ratio == 1 && attr->transfer)
5195 				fdb_mirror = 1;
5196 			break;
5197 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5198 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5199 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5200 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5201 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5202 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5203 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5204 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5205 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5206 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
5207 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5208 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5209 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5210 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5211 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5212 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5213 		case RTE_FLOW_ACTION_TYPE_FLAG:
5214 		case RTE_FLOW_ACTION_TYPE_MARK:
5215 		case RTE_FLOW_ACTION_TYPE_SET_META:
5216 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
5217 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5218 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5219 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5220 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5221 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5222 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5223 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5224 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
5225 		case RTE_FLOW_ACTION_TYPE_METER:
5226 			if (fdb_mirror)
5227 				*modify_after_mirror = 1;
5228 			break;
5229 		default:
5230 			break;
5231 		}
5232 		actions_n++;
5233 	}
5234 	if (flag && fdb_mirror && !*modify_after_mirror) {
5235 		/* FDB mirroring is implemented with the destination array
5236 		 * instead of the FLOW_SAMPLER object.
5237 		 */
5238 		if (sub_type != RTE_FLOW_ACTION_TYPE_END)
5239 			flag = 0;
5240 	}
5241 	/* Count RTE_FLOW_ACTION_TYPE_END. */
5242 	return flag ? actions_n + 1 : 0;
5243 }
5244 
5245 #define SAMPLE_SUFFIX_ITEM 2
5246 
5247 /**
5248  * Split the sample flow.
5249  *
5250  * As the sample flow will be split into two sub flows, the sample flow
5251  * keeps the sample action and the other actions move to a new suffix flow.
5252  *
5253  * A unique tag id is also added with a tag action in the sample flow;
5254  * the same tag id is used as a match in the suffix flow.
5255  *
5256  * @param dev
5257  *   Pointer to Ethernet device.
5258  * @param[in] add_tag
5259  *   Add extra tag action flag.
5260  * @param[out] sfx_items
5261  *   Suffix flow match items (list terminated by the END pattern item).
5262  * @param[in] actions
5263  *   Associated actions (list terminated by the END action).
5264  * @param[out] actions_sfx
5265  *   Suffix flow actions.
5266  * @param[out] actions_pre
5267  *   Prefix flow actions.
5268  * @param[in] actions_n
5269  *  The total number of actions.
5270  * @param[in] sample_action_pos
5271  *   The sample action position.
5272  * @param[in] qrss_action_pos
5273  *   The Queue/RSS action position.
5274  * @param[in] jump_table
5275  *   Add extra jump action flag.
5276  * @param[out] error
5277  *   Perform verbose error reporting if not NULL.
5278  *
5279  * @return
5280  *   The unique tag id (0 when no tag is added) on success, a negative
5281  *   errno value otherwise and rte_errno is set.
5282  */
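/*
 * Illustrative split (assuming add_tag is set and no Q/RSS action precedes
 * the sample action): original actions { A, SAMPLE, B, END } become
 *   prefix: { A, TAG(unique id), SAMPLE, [JUMP], END }
 *   suffix: { B, END }, matched by a TAG item carrying the same id.
 */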
5283 static int
5284 flow_sample_split_prep(struct rte_eth_dev *dev,
5285 		       int add_tag,
5286 		       struct rte_flow_item sfx_items[],
5287 		       const struct rte_flow_action actions[],
5288 		       struct rte_flow_action actions_sfx[],
5289 		       struct rte_flow_action actions_pre[],
5290 		       int actions_n,
5291 		       int sample_action_pos,
5292 		       int qrss_action_pos,
5293 		       int jump_table,
5294 		       struct rte_flow_error *error)
5295 {
5296 	struct mlx5_priv *priv = dev->data->dev_private;
5297 	struct mlx5_rte_flow_action_set_tag *set_tag;
5298 	struct mlx5_rte_flow_item_tag *tag_spec;
5299 	struct mlx5_rte_flow_item_tag *tag_mask;
5300 	struct rte_flow_action_jump *jump_action;
5301 	uint32_t tag_id = 0;
5302 	int index;
5303 	int append_index = 0;
5304 	int ret;
5305 
5306 	if (sample_action_pos < 0)
5307 		return rte_flow_error_set(error, EINVAL,
5308 					  RTE_FLOW_ERROR_TYPE_ACTION,
5309 					  NULL, "invalid position of sample "
5310 					  "action in list");
5311 	/* Prepare the actions for prefix and suffix flow. */
5312 	if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
5313 		index = qrss_action_pos;
5314 		/* Put the actions preceding Queue/RSS into the prefix flow. */
5315 		if (index != 0)
5316 			memcpy(actions_pre, actions,
5317 			       sizeof(struct rte_flow_action) * index);
5318 		/* Put other actions preceding sample into the prefix flow. */
5319 		if (sample_action_pos > index + 1)
5320 			memcpy(actions_pre + index, actions + index + 1,
5321 			       sizeof(struct rte_flow_action) *
5322 			       (sample_action_pos - index - 1));
5323 		index = sample_action_pos - 1;
5324 		/* Put Queue/RSS action into Suffix flow. */
5325 		memcpy(actions_sfx, actions + qrss_action_pos,
5326 		       sizeof(struct rte_flow_action));
5327 		actions_sfx++;
5328 	} else {
5329 		index = sample_action_pos;
5330 		if (index != 0)
5331 			memcpy(actions_pre, actions,
5332 			       sizeof(struct rte_flow_action) * index);
5333 	}
5334 	/* For CX5, add an extra tag action for NIC-RX and E-Switch ingress.
5335 	 * For CX6DX and above, metadata registers Cx preserve their value,
5336 	 * so add an extra tag action for NIC-RX and the E-Switch domain.
5337 	 */
5338 	if (add_tag) {
5339 		/* Prepare the prefix tag action. */
5340 		append_index++;
5341 		set_tag = (void *)(actions_pre + actions_n + append_index);
5342 		ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
5343 		if (ret < 0)
5344 			return ret;
5345 		mlx5_ipool_malloc(priv->sh->ipool
5346 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
5347 		*set_tag = (struct mlx5_rte_flow_action_set_tag) {
5348 			.id = ret,
5349 			.data = tag_id,
5350 		};
5351 		/* Prepare the suffix subflow items. */
5352 		tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
5353 		tag_spec->data = tag_id;
5354 		tag_spec->id = set_tag->id;
5355 		tag_mask = tag_spec + 1;
5356 		tag_mask->data = UINT32_MAX;
5357 		sfx_items[0] = (struct rte_flow_item){
5358 			.type = (enum rte_flow_item_type)
5359 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5360 			.spec = tag_spec,
5361 			.last = NULL,
5362 			.mask = tag_mask,
5363 		};
5364 		sfx_items[1] = (struct rte_flow_item){
5365 			.type = (enum rte_flow_item_type)
5366 				RTE_FLOW_ITEM_TYPE_END,
5367 		};
5368 		/* Prepare the tag action in prefix subflow. */
5369 		actions_pre[index++] =
5370 			(struct rte_flow_action){
5371 			.type = (enum rte_flow_action_type)
5372 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
5373 			.conf = set_tag,
5374 		};
5375 	}
5376 	memcpy(actions_pre + index, actions + sample_action_pos,
5377 	       sizeof(struct rte_flow_action));
5378 	index += 1;
5379 	/* For a modify action after the sample action in E-Switch mirroring,
5380 	 * add an extra jump action in the prefix subflow to jump into the next
5381 	 * table, then do the modify action in the new table.
5382 	 */
5383 	if (jump_table) {
5384 		/* Prepare the prefix jump action. */
5385 		append_index++;
5386 		jump_action = (void *)(actions_pre + actions_n + append_index);
5387 		jump_action->group = jump_table;
5388 		actions_pre[index++] =
5389 			(struct rte_flow_action){
5390 			.type = (enum rte_flow_action_type)
5391 				RTE_FLOW_ACTION_TYPE_JUMP,
5392 			.conf = jump_action,
5393 		};
5394 	}
5395 	actions_pre[index] = (struct rte_flow_action){
5396 		.type = (enum rte_flow_action_type)
5397 			RTE_FLOW_ACTION_TYPE_END,
5398 	};
5399 	/* Put the actions after the sample action into the suffix flow. */
5400 	memcpy(actions_sfx, actions + sample_action_pos + 1,
5401 	       sizeof(struct rte_flow_action) *
5402 	       (actions_n - sample_action_pos - 1));
5403 	return tag_id;
5404 }
5405 
5406 /**
5407  * The splitting for metadata feature.
5408  *
5409  * - Q/RSS action on NIC Rx should be split in order to pass by
5410  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
5411  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
5412  *
5413  * - All the actions on NIC Tx should have a mreg copy action to
5414  *   copy reg_a from WQE to reg_c[0].
5415  *
5416  * @param dev
5417  *   Pointer to Ethernet device.
5418  * @param[in] flow
5419  *   Parent flow structure pointer.
5420  * @param[in] attr
5421  *   Flow rule attributes.
5422  * @param[in] items
5423  *   Pattern specification (list terminated by the END pattern item).
5424  * @param[in] actions
5425  *   Associated actions (list terminated by the END action).
5426  * @param[in] flow_split_info
5427  *   Pointer to flow split info structure.
5428  * @param[out] error
5429  *   Perform verbose error reporting if not NULL.
5430  * @return
5431  *   0 on success, negative value otherwise
5432  */
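/*
 * Resulting subflow sketch for the Q/RSS case (assumption drawn from the
 * code below):
 *   prefix: original items -> ext_actions (Q/RSS replaced by SET_TAG,
 *           plus JUMP to RX_CP_TBL)
 *   suffix: in group MLX5_FLOW_MREG_ACT_TABLE_GROUP, match the internal
 *           TAG item carrying qrss_id -> the original Q/RSS action.
 */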
5433 static int
5434 flow_create_split_metadata(struct rte_eth_dev *dev,
5435 			   struct rte_flow *flow,
5436 			   const struct rte_flow_attr *attr,
5437 			   const struct rte_flow_item items[],
5438 			   const struct rte_flow_action actions[],
5439 			   struct mlx5_flow_split_info *flow_split_info,
5440 			   struct rte_flow_error *error)
5441 {
5442 	struct mlx5_priv *priv = dev->data->dev_private;
5443 	struct mlx5_dev_config *config = &priv->config;
5444 	const struct rte_flow_action *qrss = NULL;
5445 	struct rte_flow_action *ext_actions = NULL;
5446 	struct mlx5_flow *dev_flow = NULL;
5447 	uint32_t qrss_id = 0;
5448 	int mtr_sfx = 0;
5449 	size_t act_size;
5450 	int actions_n;
5451 	int encap_idx;
5452 	int ret;
5453 
5454 	/* Check whether extensive metadata feature is engaged. */
5455 	if (!config->dv_flow_en ||
5456 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
5457 	    !mlx5_flow_ext_mreg_supported(dev))
5458 		return flow_create_split_inner(dev, flow, NULL, attr, items,
5459 					       actions, flow_split_info, error);
5460 	actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
5461 							   &encap_idx);
5462 	if (qrss) {
5463 		/* Exclude hairpin flows from splitting. */
5464 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
5465 			const struct rte_flow_action_queue *queue;
5466 
5467 			queue = qrss->conf;
5468 			if (mlx5_rxq_get_type(dev, queue->index) ==
5469 			    MLX5_RXQ_TYPE_HAIRPIN)
5470 				qrss = NULL;
5471 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
5472 			const struct rte_flow_action_rss *rss;
5473 
5474 			rss = qrss->conf;
5475 			if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
5476 			    MLX5_RXQ_TYPE_HAIRPIN)
5477 				qrss = NULL;
5478 		}
5479 	}
5480 	if (qrss) {
5481 		/* Check if it is in meter suffix table. */
5482 		mtr_sfx = attr->group == (attr->transfer ?
5483 			  (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
5484 			  MLX5_FLOW_TABLE_LEVEL_METER);
5485 		/*
5486 		 * Q/RSS action on NIC Rx should be split in order to pass by
5487 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
5488 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
5489 		 */
5490 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5491 			   sizeof(struct rte_flow_action_set_tag) +
5492 			   sizeof(struct rte_flow_action_jump);
5493 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5494 					  SOCKET_ID_ANY);
5495 		if (!ext_actions)
5496 			return rte_flow_error_set(error, ENOMEM,
5497 						  RTE_FLOW_ERROR_TYPE_ACTION,
5498 						  NULL, "no memory to split "
5499 						  "metadata flow");
5500 		/*
5501 		 * If we are the suffix flow of a meter, the tag already
5502 		 * exists; set the tag action to void.
5503 		 */
5504 		if (mtr_sfx)
5505 			ext_actions[qrss - actions].type =
5506 						RTE_FLOW_ACTION_TYPE_VOID;
5507 		else
5508 			ext_actions[qrss - actions].type =
5509 						(enum rte_flow_action_type)
5510 						MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5511 		/*
5512 		 * Create the new actions list with removed Q/RSS action
5513 		 * and appended set tag and jump to register copy table
5514 		 * (RX_CP_TBL). We should preallocate unique tag ID here
5515 		 * in advance, because it is needed for set tag action.
5516 		 */
5517 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
5518 						    qrss, actions_n, error);
5519 		if (!mtr_sfx && !qrss_id) {
5520 			ret = -rte_errno;
5521 			goto exit;
5522 		}
5523 	} else if (attr->egress && !attr->transfer) {
5524 		/*
5525 		 * All the actions on NIC Tx should have a metadata register
5526 		 * copy action to copy reg_a from the WQE to reg_c[meta].
5527 		 */
5528 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5529 			   sizeof(struct mlx5_flow_action_copy_mreg);
5530 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5531 					  SOCKET_ID_ANY);
5532 		if (!ext_actions)
5533 			return rte_flow_error_set(error, ENOMEM,
5534 						  RTE_FLOW_ERROR_TYPE_ACTION,
5535 						  NULL, "no memory to split "
5536 						  "metadata flow");
5537 		/* Create the action list appended with copy register. */
5538 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
5539 					     actions_n, error, encap_idx);
5540 		if (ret < 0)
5541 			goto exit;
5542 	}
5543 	/* Add the unmodified original or prefix subflow. */
5544 	ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
5545 				      items, ext_actions ? ext_actions :
5546 				      actions, flow_split_info, error);
5547 	if (ret < 0)
5548 		goto exit;
5549 	MLX5_ASSERT(dev_flow);
5550 	if (qrss) {
5551 		const struct rte_flow_attr q_attr = {
5552 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5553 			.ingress = 1,
5554 		};
5555 		/* Internal PMD action to set register. */
5556 		struct mlx5_rte_flow_item_tag q_tag_spec = {
5557 			.data = qrss_id,
5558 			.id = REG_NON,
5559 		};
5560 		struct rte_flow_item q_items[] = {
5561 			{
5562 				.type = (enum rte_flow_item_type)
5563 					MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5564 				.spec = &q_tag_spec,
5565 				.last = NULL,
5566 				.mask = NULL,
5567 			},
5568 			{
5569 				.type = RTE_FLOW_ITEM_TYPE_END,
5570 			},
5571 		};
5572 		struct rte_flow_action q_actions[] = {
5573 			{
5574 				.type = qrss->type,
5575 				.conf = qrss->conf,
5576 			},
5577 			{
5578 				.type = RTE_FLOW_ACTION_TYPE_END,
5579 			},
5580 		};
5581 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
5582 
5583 		/*
5584 		 * Configure the tag item only if there is no meter subflow.
5585 		 * Since the tag is already set in the meter suffix subflow,
5586 		 * we can just use the meter suffix items as is.
5587 		 */
5588 		if (qrss_id) {
5589 			/* Not meter subflow. */
5590 			MLX5_ASSERT(!mtr_sfx);
5591 			/*
5592 			 * Put the unique id in the prefix flow because it is
5593 			 * destroyed after the suffix flow, and the id is freed
5594 			 * only once no actual flows use it anymore, at which
5595 			 * point identifier reallocation becomes possible (for
5596 			 * example, for other flows in other threads).
5597 			 */
5598 			dev_flow->handle->split_flow_id = qrss_id;
5599 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
5600 						   error);
5601 			if (ret < 0)
5602 				goto exit;
5603 			q_tag_spec.id = ret;
5604 		}
5605 		dev_flow = NULL;
5606 		/* Add suffix subflow to execute Q/RSS. */
5607 		flow_split_info->prefix_layers = layers;
5608 		flow_split_info->prefix_mark = 0;
5609 		ret = flow_create_split_inner(dev, flow, &dev_flow,
5610 					      &q_attr, mtr_sfx ? items :
5611 					      q_items, q_actions,
5612 					      flow_split_info, error);
5613 		if (ret < 0)
5614 			goto exit;
5615 		/* Clear qrss_id so it is freed at exit only on failure. */
5616 		qrss_id = 0;
5617 		MLX5_ASSERT(dev_flow);
5618 	}
5619 
5620 exit:
5621 	/*
5622 	 * We do not destroy the partially created sub_flows in case of error.
5623 	 * These are included in the parent flow list and will be destroyed
5624 	 * by flow_drv_destroy.
5625 	 */
5626 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
5627 			qrss_id);
5628 	mlx5_free(ext_actions);
5629 	return ret;
5630 }
5631 
5632 /**
5633  * Create meter internal drop flow with the original pattern.
5634  *
5635  * @param dev
5636  *   Pointer to Ethernet device.
5637  * @param[in] flow
5638  *   Parent flow structure pointer.
5639  * @param[in] attr
5640  *   Flow rule attributes.
5641  * @param[in] items
5642  *   Pattern specification (list terminated by the END pattern item).
5643  * @param[in] flow_split_info
5644  *   Pointer to flow split info structure.
5645  * @param[in] fm
5646  *   Pointer to flow meter structure.
5647  * @param[out] error
5648  *   Perform verbose error reporting if not NULL.
5649  * @return
5650  *   0 on success, negative value otherwise
5651  */
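/*
 * Sketch of the internal drop rule built below: it reuses the original
 * pattern with actions { internal COUNT(fm->drop_cnt), DROP, END } at the
 * meter table level.
 */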
5652 static uint32_t
5653 flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
5654 			struct rte_flow *flow,
5655 			const struct rte_flow_attr *attr,
5656 			const struct rte_flow_item items[],
5657 			struct mlx5_flow_split_info *flow_split_info,
5658 			struct mlx5_flow_meter_info *fm,
5659 			struct rte_flow_error *error)
5660 {
5661 	struct mlx5_flow *dev_flow = NULL;
5662 	struct rte_flow_attr drop_attr = *attr;
5663 	struct rte_flow_action drop_actions[3];
5664 	struct mlx5_flow_split_info drop_split_info = *flow_split_info;
5665 
5666 	MLX5_ASSERT(fm->drop_cnt);
5667 	drop_actions[0].type =
5668 		(enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
5669 	drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt;
5670 	drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP;
5671 	drop_actions[1].conf = NULL;
5672 	drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END;
5673 	drop_actions[2].conf = NULL;
5674 	drop_split_info.external = false;
5675 	drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT;
5676 	drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP;
5677 	drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER;
5678 	return flow_create_split_inner(dev, flow, &dev_flow,
5679 				&drop_attr, items, drop_actions,
5680 				&drop_split_info, error);
5681 }
5682 
5683 /**
5684  * The splitting for meter feature.
5685  *
5686  * - The meter flow will be split into two flows: a prefix and a
5687  *   suffix flow. Packets are meaningful only if they pass the prefix
5688  *   meter action.
5689  *
5690  * - Reg_C_5 is used for the packet to match between the prefix
5691  *   and suffix flow.
5692  *
5693  * @param dev
5694  *   Pointer to Ethernet device.
5695  * @param[in] flow
5696  *   Parent flow structure pointer.
5697  * @param[in] attr
5698  *   Flow rule attributes.
5699  * @param[in] items
5700  *   Pattern specification (list terminated by the END pattern item).
5701  * @param[in] actions
5702  *   Associated actions (list terminated by the END action).
5703  * @param[in] flow_split_info
5704  *   Pointer to flow split info structure.
5705  * @param[out] error
5706  *   Perform verbose error reporting if not NULL.
5707  * @return
5708  *   0 on success, negative value otherwise
5709  */
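/*
 * Resulting subflow sketch for a default-policy meter (assumption drawn
 * from flow_meter_split_prep()):
 *   prefix: original items -> METER, TAG (meter id / flow id in reg_c),
 *           JUMP to the policer table (ASO meter only), END
 *   suffix: in the meter suffix table, match the same TAG value ->
 *           the remaining original actions.
 */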
5710 static int
5711 flow_create_split_meter(struct rte_eth_dev *dev,
5712 			struct rte_flow *flow,
5713 			const struct rte_flow_attr *attr,
5714 			const struct rte_flow_item items[],
5715 			const struct rte_flow_action actions[],
5716 			struct mlx5_flow_split_info *flow_split_info,
5717 			struct rte_flow_error *error)
5718 {
5719 	struct mlx5_priv *priv = dev->data->dev_private;
5720 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5721 	struct rte_flow_action *sfx_actions = NULL;
5722 	struct rte_flow_action *pre_actions = NULL;
5723 	struct rte_flow_item *sfx_items = NULL;
5724 	struct mlx5_flow *dev_flow = NULL;
5725 	struct rte_flow_attr sfx_attr = *attr;
5726 	struct mlx5_flow_meter_info *fm = NULL;
5727 	uint8_t skip_scale_restore;
5728 	bool has_mtr = false;
5729 	bool has_modify = false;
5730 	bool set_mtr_reg = true;
5731 	uint32_t meter_id = 0;
5732 	uint32_t mtr_idx = 0;
5733 	uint32_t mtr_flow_id = 0;
5734 	size_t act_size;
5735 	size_t item_size;
5736 	int actions_n = 0;
5737 	int ret = 0;
5738 
5739 	if (priv->mtr_en)
5740 		actions_n = flow_check_meter_action(dev, actions, &has_mtr,
5741 						    &has_modify, &meter_id);
5742 	if (has_mtr) {
5743 		if (flow->meter) {
5744 			fm = flow_dv_meter_find_by_idx(priv, flow->meter);
5745 			if (!fm)
5746 				return rte_flow_error_set(error, EINVAL,
5747 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5748 						NULL, "Meter not found.");
5749 		} else {
5750 			fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx);
5751 			if (!fm)
5752 				return rte_flow_error_set(error, EINVAL,
5753 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5754 						NULL, "Meter not found.");
5755 			ret = mlx5_flow_meter_attach(priv, fm,
5756 						     &sfx_attr, error);
5757 			if (ret)
5758 				return -rte_errno;
5759 			flow->meter = mtr_idx;
5760 		}
5761 		MLX5_ASSERT(wks);
5762 		wks->fm = fm;
5763 		/*
5764 		 * If it isn't a default-policy meter, and
5765 		 * 1. there is no action in the flow that changes the
5766 		 *    packet (modify/encap/decap etc.), OR
5767 		 * 2. no drop count is needed for this meter,
5768 		 * then there is no need to use regC to save the meter id.
5769 		 */
5770 		if (!fm->def_policy && (!has_modify || !fm->drop_cnt))
5771 			set_mtr_reg = false;
5772 		/* Prefix actions: meter, decap, encap, tag, jump, end. */
5773 		act_size = sizeof(struct rte_flow_action) * (actions_n + 6) +
5774 			   sizeof(struct mlx5_rte_flow_action_set_tag);
5775 		/* Suffix items: tag, vlan, port id, end. */
5776 #define METER_SUFFIX_ITEM 4
5777 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
5778 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
5779 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
5780 					  0, SOCKET_ID_ANY);
5781 		if (!sfx_actions)
5782 			return rte_flow_error_set(error, ENOMEM,
5783 						  RTE_FLOW_ERROR_TYPE_ACTION,
5784 						  NULL, "no memory to split "
5785 						  "meter flow");
5786 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
5787 			     act_size);
5788 		/* There's no suffix flow for a meter with a non-default policy. */
5789 		if (!fm->def_policy)
5790 			pre_actions = sfx_actions + 1;
5791 		else
5792 			pre_actions = sfx_actions + actions_n;
5793 		ret = flow_meter_split_prep(dev, flow, fm, &sfx_attr,
5794 					    items, sfx_items, actions,
5795 					    sfx_actions, pre_actions,
5796 					    (set_mtr_reg ? &mtr_flow_id : NULL),
5797 					    error);
5798 		if (ret) {
5799 			ret = -rte_errno;
5800 			goto exit;
5801 		}
5802 		/* Add the prefix subflow. */
5803 		flow_split_info->prefix_mark = 0;
5804 		skip_scale_restore = flow_split_info->skip_scale;
5805 		flow_split_info->skip_scale |=
5806 			1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
5807 		ret = flow_create_split_inner(dev, flow, &dev_flow,
5808 					      attr, items, pre_actions,
5809 					      flow_split_info, error);
5810 		flow_split_info->skip_scale = skip_scale_restore;
5811 		if (ret) {
5812 			if (mtr_flow_id)
5813 				mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
5814 			ret = -rte_errno;
5815 			goto exit;
5816 		}
5817 		if (mtr_flow_id) {
5818 			dev_flow->handle->split_flow_id = mtr_flow_id;
5819 			dev_flow->handle->is_meter_flow_id = 1;
5820 		}
5821 		if (!fm->def_policy) {
5822 			if (!set_mtr_reg && fm->drop_cnt)
5823 				ret =
5824 			flow_meter_create_drop_flow_with_org_pattern(dev, flow,
5825 							&sfx_attr, items,
5826 							flow_split_info,
5827 							fm, error);
5828 			goto exit;
5829 		}
5830 		/* Set the sfx group attr. */
5831 		sfx_attr.group = sfx_attr.transfer ?
5832 				(MLX5_FLOW_TABLE_LEVEL_METER - 1) :
5833 				 MLX5_FLOW_TABLE_LEVEL_METER;
5834 		flow_split_info->prefix_layers =
5835 				flow_get_prefix_layer_flags(dev_flow);
5836 		flow_split_info->prefix_mark = dev_flow->handle->mark;
5837 		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
5838 	}
5839 	/* Add the suffix subflow (or the unmodified original flow). */
5840 	ret = flow_create_split_metadata(dev, flow,
5841 					 &sfx_attr, sfx_items ?
5842 					 sfx_items : items,
5843 					 sfx_actions ? sfx_actions : actions,
5844 					 flow_split_info, error);
5845 exit:
5846 	if (sfx_actions)
5847 		mlx5_free(sfx_actions);
5848 	return ret;
5849 }
5850 
5851 /**
5852  * The splitting for sample feature.
5853  *
5854  * Once Sample action is detected in the action list, the flow actions should
5855  * be split into prefix sub flow and suffix sub flow.
5856  *
5857  * The original items remain in the prefix sub flow, all actions preceding the
5858  * sample action and the sample action itself will be copied to the prefix
5859  * sub flow, and the actions following the sample action will be copied to
5860  * the suffix sub flow; the Queue action is always located in the suffix sub flow.
5861  *
5862  * In order to make packets from the prefix sub flow match the suffix sub
5863  * flow, an extra tag action is added into the prefix sub flow, and the
5864  * suffix sub flow uses a tag item with the unique flow id.
5865  *
5866  * @param dev
5867  *   Pointer to Ethernet device.
5868  * @param[in] flow
5869  *   Parent flow structure pointer.
5870  * @param[in] attr
5871  *   Flow rule attributes.
5872  * @param[in] items
5873  *   Pattern specification (list terminated by the END pattern item).
5874  * @param[in] actions
5875  *   Associated actions (list terminated by the END action).
5876  * @param[in] flow_split_info
5877  *   Pointer to flow split info structure.
5878  * @param[out] error
5879  *   Perform verbose error reporting if not NULL.
5880  * @return
5881  *   0 on success, negative value otherwise
5882  */
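/*
 * Buffer note (assumption drawn from the allocation below): one
 * allocation holds both action arrays and the suffix items. sfx_actions
 * gets actions_n * 2 + 1 action slots plus set_tag storage, pre_actions
 * starts at sfx_actions + actions_n, and sfx_items (when a tag is added)
 * starts right past act_size bytes.
 */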
5883 static int
5884 flow_create_split_sample(struct rte_eth_dev *dev,
5885 			 struct rte_flow *flow,
5886 			 const struct rte_flow_attr *attr,
5887 			 const struct rte_flow_item items[],
5888 			 const struct rte_flow_action actions[],
5889 			 struct mlx5_flow_split_info *flow_split_info,
5890 			 struct rte_flow_error *error)
5891 {
5892 	struct mlx5_priv *priv = dev->data->dev_private;
5893 	struct rte_flow_action *sfx_actions = NULL;
5894 	struct rte_flow_action *pre_actions = NULL;
5895 	struct rte_flow_item *sfx_items = NULL;
5896 	struct mlx5_flow *dev_flow = NULL;
5897 	struct rte_flow_attr sfx_attr = *attr;
5898 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5899 	struct mlx5_flow_dv_sample_resource *sample_res;
5900 	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5901 	struct mlx5_flow_tbl_resource *sfx_tbl;
5902 #endif
5903 	size_t act_size;
5904 	size_t item_size;
5905 	uint32_t fdb_tx = 0;
5906 	int32_t tag_id = 0;
5907 	int actions_n = 0;
5908 	int sample_action_pos;
5909 	int qrss_action_pos;
5910 	int add_tag = 0;
5911 	int modify_after_mirror = 0;
5912 	uint16_t jump_table = 0;
5913 	const uint32_t next_ft_step = 1;
5914 	int ret = 0;
5915 
5916 	if (priv->sampler_en)
5917 		actions_n = flow_check_match_action(actions, attr,
5918 					RTE_FLOW_ACTION_TYPE_SAMPLE,
5919 					&sample_action_pos, &qrss_action_pos,
5920 					&modify_after_mirror);
5921 	if (actions_n) {
5922 		/* The prefix actions must include sample, tag, end. */
5923 		act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5924 			   + sizeof(struct mlx5_rte_flow_action_set_tag);
5925 		item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5926 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
5927 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5928 					  item_size), 0, SOCKET_ID_ANY);
5929 		if (!sfx_actions)
5930 			return rte_flow_error_set(error, ENOMEM,
5931 						  RTE_FLOW_ERROR_TYPE_ACTION,
5932 						  NULL, "no memory to split "
5933 						  "sample flow");
5934 		/* The representor_id is UINT16_MAX for uplink. */
5935 		fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX);
5936 		/*
5937 		 * When reg_c_preserve is set, metadata registers Cx preserve
5938 		 * their value even through packet duplication.
5939 		 */
5940 		add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
5941 		if (add_tag)
5942 			sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5943 					+ act_size);
5944 		if (modify_after_mirror)
5945 			jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
5946 				     next_ft_step;
5947 		pre_actions = sfx_actions + actions_n;
5948 		tag_id = flow_sample_split_prep(dev, add_tag, sfx_items,
5949 						actions, sfx_actions,
5950 						pre_actions, actions_n,
5951 						sample_action_pos,
5952 						qrss_action_pos, jump_table,
5953 						error);
5954 		if (tag_id < 0 || (add_tag && !tag_id)) {
5955 			ret = -rte_errno;
5956 			goto exit;
5957 		}
5958 		if (modify_after_mirror)
5959 			flow_split_info->skip_scale =
5960 					1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
5961 		/* Add the prefix subflow. */
5962 		ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
5963 					      items, pre_actions,
5964 					      flow_split_info, error);
5965 		if (ret) {
5966 			ret = -rte_errno;
5967 			goto exit;
5968 		}
5969 		dev_flow->handle->split_flow_id = tag_id;
5970 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5971 		if (!modify_after_mirror) {
5972 			/* Set the sfx group attr. */
5973 			sample_res = (struct mlx5_flow_dv_sample_resource *)
5974 						dev_flow->dv.sample_res;
5975 			sfx_tbl = (struct mlx5_flow_tbl_resource *)
5976 						sample_res->normal_path_tbl;
5977 			sfx_tbl_data = container_of(sfx_tbl,
5978 						struct mlx5_flow_tbl_data_entry,
5979 						tbl);
5980 			sfx_attr.group = sfx_attr.transfer ?
5981 			(sfx_tbl_data->level - 1) : sfx_tbl_data->level;
5982 		} else {
5983 			MLX5_ASSERT(attr->transfer);
5984 			sfx_attr.group = jump_table;
5985 		}
5986 		flow_split_info->prefix_layers =
5987 				flow_get_prefix_layer_flags(dev_flow);
5988 		flow_split_info->prefix_mark = dev_flow->handle->mark;
5989 		/* The suffix group level is already scaled with the factor;
5990 		 * set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid
5991 		 * scaling again in translation.
5992 		 */
5993 		flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
5994 #endif
5995 	}
5996 	/* Add the suffix subflow. */
5997 	ret = flow_create_split_meter(dev, flow, &sfx_attr,
5998 				      sfx_items ? sfx_items : items,
5999 				      sfx_actions ? sfx_actions : actions,
6000 				      flow_split_info, error);
6001 exit:
6002 	if (sfx_actions)
6003 		mlx5_free(sfx_actions);
6004 	return ret;
6005 }
6006 
6007 /**
6008  * Split the flow to subflow set. The splitters might be linked
6009  * in the chain, like this:
6010  * flow_create_split_outer() calls:
6011  *   flow_create_split_meter() calls:
6012  *     flow_create_split_metadata(meter_subflow_0) calls:
6013  *       flow_create_split_inner(metadata_subflow_0)
6014  *       flow_create_split_inner(metadata_subflow_1)
6015  *       flow_create_split_inner(metadata_subflow_2)
6016  *     flow_create_split_metadata(meter_subflow_1) calls:
6017  *       flow_create_split_inner(metadata_subflow_0)
6018  *       flow_create_split_inner(metadata_subflow_1)
6019  *       flow_create_split_inner(metadata_subflow_2)
6020  *
6021  * This provides a flexible way to add new levels of flow splitting.
6022  * All successfully created subflows are included in the parent
6023  * flow's dev_flow list.
6024  *
6025  * @param dev
6026  *   Pointer to Ethernet device.
6027  * @param[in] flow
6028  *   Parent flow structure pointer.
6029  * @param[in] attr
6030  *   Flow rule attributes.
6031  * @param[in] items
6032  *   Pattern specification (list terminated by the END pattern item).
6033  * @param[in] actions
6034  *   Associated actions (list terminated by the END action).
6035  * @param[in] flow_split_info
6036  *   Pointer to flow split info structure.
6037  * @param[out] error
6038  *   Perform verbose error reporting if not NULL.
6039  * @return
6040  *   0 on success, negative value otherwise
6041  */
6042 static int
6043 flow_create_split_outer(struct rte_eth_dev *dev,
6044 			struct rte_flow *flow,
6045 			const struct rte_flow_attr *attr,
6046 			const struct rte_flow_item items[],
6047 			const struct rte_flow_action actions[],
6048 			struct mlx5_flow_split_info *flow_split_info,
6049 			struct rte_flow_error *error)
6050 {
6051 	int ret;
6052 
6053 	ret = flow_create_split_sample(dev, flow, attr, items,
6054 				       actions, flow_split_info, error);
6055 	MLX5_ASSERT(ret <= 0);
6056 	return ret;
6057 }
6058 
6059 static inline struct mlx5_flow_tunnel *
6060 flow_tunnel_from_rule(const struct mlx5_flow *flow)
6061 {
6062 	struct mlx5_flow_tunnel *tunnel;
6063 
6064 #pragma GCC diagnostic push
6065 #pragma GCC diagnostic ignored "-Wcast-qual"
6066 	tunnel = (typeof(tunnel))flow->tunnel;
6067 #pragma GCC diagnostic pop
6068 
6069 	return tunnel;
6070 }
6071 
6072 /**
6073  * Adjust flow RSS workspace if needed.
6074  *
6075  * @param wks
6076  *   Pointer to thread flow work space.
6077  * @param rss_desc
6078  *   Pointer to RSS descriptor.
6079  * @param[in] nrssq_num
6080  *   New RSS queue number.
6081  *
6082  * @return
6083  *   0 on success, -1 otherwise and rte_errno is set.
6084  */
6085 static int
6086 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
6087 			  struct mlx5_flow_rss_desc *rss_desc,
6088 			  uint32_t nrssq_num)
6089 {
6090 	if (likely(nrssq_num <= wks->rssq_num))
6091 		return 0;
6092 	rss_desc->queue = realloc(rss_desc->queue,
6093 			  sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
6094 	if (!rss_desc->queue) {
6095 		rte_errno = ENOMEM;
6096 		return -1;
6097 	}
6098 	wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
6099 	return 0;
6100 }
6101 
6102 /**
6103  * Create a flow and add it to @p list.
6104  *
6105  * @param dev
6106  *   Pointer to Ethernet device.
6107  * @param list
6108  *   Pointer to a TAILQ flow list. If this parameter is NULL,
6109  *   there is no list insertion; the flow is just created and
6110  *   it is the caller's responsibility to track the
6111  *   created flow.
6112  * @param[in] attr
6113  *   Flow rule attributes.
6114  * @param[in] items
6115  *   Pattern specification (list terminated by the END pattern item).
6116  * @param[in] actions
6117  *   Associated actions (list terminated by the END action).
6118  * @param[in] external
6119  *   This flow rule is created by a request external to the PMD.
6120  * @param[out] error
6121  *   Perform verbose error reporting if not NULL.
6122  *
6123  * @return
6124  *   A flow index on success, 0 otherwise and rte_errno is set.
6125  */
6126 static uint32_t
6127 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
6128 		 const struct rte_flow_attr *attr,
6129 		 const struct rte_flow_item items[],
6130 		 const struct rte_flow_action original_actions[],
6131 		 bool external, struct rte_flow_error *error)
6132 {
6133 	struct mlx5_priv *priv = dev->data->dev_private;
6134 	struct rte_flow *flow = NULL;
6135 	struct mlx5_flow *dev_flow;
6136 	const struct rte_flow_action_rss *rss = NULL;
6137 	struct mlx5_translated_action_handle
6138 		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
6139 	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
6140 	union {
6141 		struct mlx5_flow_expand_rss buf;
6142 		uint8_t buffer[2048];
6143 	} expand_buffer;
6144 	union {
6145 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
6146 		uint8_t buffer[2048];
6147 	} actions_rx;
6148 	union {
6149 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
6150 		uint8_t buffer[2048];
6151 	} actions_hairpin_tx;
6152 	union {
6153 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
6154 		uint8_t buffer[2048];
6155 	} items_tx;
6156 	struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
6157 	struct mlx5_flow_rss_desc *rss_desc;
6158 	const struct rte_flow_action *p_actions_rx;
6159 	uint32_t i;
6160 	uint32_t idx = 0;
6161 	int hairpin_flow;
6162 	struct rte_flow_attr attr_tx = { .priority = 0 };
6163 	const struct rte_flow_action *actions;
6164 	struct rte_flow_action *translated_actions = NULL;
6165 	struct mlx5_flow_tunnel *tunnel;
6166 	struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
6167 	struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
6168 	struct mlx5_flow_split_info flow_split_info = {
6169 		.external = !!external,
6170 		.skip_scale = 0,
6171 		.flow_idx = 0,
6172 		.prefix_mark = 0,
6173 		.prefix_layers = 0,
6174 		.table_id = 0
6175 	};
6176 	int ret;
6177 
6178 	MLX5_ASSERT(wks);
6179 	rss_desc = &wks->rss_desc;
6180 	ret = flow_action_handles_translate(dev, original_actions,
6181 					    indir_actions,
6182 					    &indir_actions_n,
6183 					    &translated_actions, error);
6184 	if (ret < 0) {
6185 		MLX5_ASSERT(translated_actions == NULL);
6186 		return 0;
6187 	}
6188 	actions = translated_actions ? translated_actions : original_actions;
6189 	p_actions_rx = actions;
6190 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
6191 	ret = flow_drv_validate(dev, attr, items, p_actions_rx,
6192 				external, hairpin_flow, error);
6193 	if (ret < 0)
6194 		goto error_before_hairpin_split;
6195 	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
6196 	if (!flow) {
6197 		rte_errno = ENOMEM;
6198 		goto error_before_hairpin_split;
6199 	}
6200 	if (hairpin_flow > 0) {
6201 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
6202 			rte_errno = EINVAL;
6203 			goto error_before_hairpin_split;
6204 		}
6205 		flow_hairpin_split(dev, actions, actions_rx.actions,
6206 				   actions_hairpin_tx.actions, items_tx.items,
6207 				   idx);
6208 		p_actions_rx = actions_rx.actions;
6209 	}
6210 	flow_split_info.flow_idx = idx;
6211 	flow->drv_type = flow_get_drv_type(dev, attr);
6212 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
6213 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
6214 	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
6215 	/* RSS Action only works on NIC RX domain */
6216 	if (attr->ingress && !attr->transfer)
6217 		rss = flow_get_rss_action(dev, p_actions_rx);
6218 	if (rss) {
6219 		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
6220 			return 0;
6221 		/*
6222 		 * The following information is required by
6223 		 * mlx5_flow_hashfields_adjust() in advance.
6224 		 */
6225 		rss_desc->level = rss->level;
6226 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
6227 		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
6228 	}
6229 	flow->dev_handles = 0;
6230 	if (rss && rss->types) {
6231 		unsigned int graph_root;
6232 
6233 		graph_root = find_graph_root(items, rss->level);
6234 		ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
6235 					   items, rss->types,
6236 					   mlx5_support_expansion, graph_root);
6237 		MLX5_ASSERT(ret > 0 &&
6238 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
6239 		if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
6240 			for (i = 0; i < buf->entries; ++i)
6241 				mlx5_dbg__print_pattern(buf->entry[i].pattern);
6242 		}
6243 	} else {
6244 		buf->entries = 1;
6245 		buf->entry[0].pattern = (void *)(uintptr_t)items;
6246 	}
6247 	rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
6248 						      indir_actions_n);
6249 	for (i = 0; i < buf->entries; ++i) {
6250 		/* Initialize flow split data. */
6251 		flow_split_info.prefix_layers = 0;
6252 		flow_split_info.prefix_mark = 0;
6253 		flow_split_info.skip_scale = 0;
6254 		/*
6255 		 * The splitter may create multiple dev_flows,
6256 		 * depending on configuration. In the simplest
6257 		 * case it just creates unmodified original flow.
6258 		 */
6259 		ret = flow_create_split_outer(dev, flow, attr,
6260 					      buf->entry[i].pattern,
6261 					      p_actions_rx, &flow_split_info,
6262 					      error);
6263 		if (ret < 0)
6264 			goto error;
6265 		if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
6266 			ret = flow_tunnel_add_default_miss(dev, flow, attr,
6267 							   p_actions_rx,
6268 							   idx,
6269 							   wks->flows[0].tunnel,
6270 							   &default_miss_ctx,
6271 							   error);
6272 			if (ret < 0) {
6273 				mlx5_free(default_miss_ctx.queue);
6274 				goto error;
6275 			}
6276 		}
6277 	}
6278 	/* Create the tx flow. */
6279 	if (hairpin_flow) {
6280 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
6281 		attr_tx.ingress = 0;
6282 		attr_tx.egress = 1;
6283 		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
6284 					 actions_hairpin_tx.actions,
6285 					 idx, error);
6286 		if (!dev_flow)
6287 			goto error;
6288 		dev_flow->flow = flow;
6289 		dev_flow->external = 0;
6290 		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
6291 			      dev_flow->handle, next);
6292 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
6293 					 items_tx.items,
6294 					 actions_hairpin_tx.actions, error);
6295 		if (ret < 0)
6296 			goto error;
6297 	}
6298 	/*
6299 	 * Update the metadata register copy table. If extensive
6300 	 * metadata feature is enabled and registers are supported
6301 	 * we might create the extra rte_flow for each unique
6302 	 * MARK/FLAG action ID.
6303 	 *
6304 	 * The table is updated for ingress Flows only, because
6305 	 * the egress Flows belong to the different device and
6306 	 * copy table should be updated in peer NIC Rx domain.
6307 	 */
6308 	if (attr->ingress &&
6309 	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
6310 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
6311 		if (ret)
6312 			goto error;
6313 	}
6314 	/*
6315 	 * If the flow is external (from the application), OR the device is
6316 	 * started, OR this is the mreg discover flow, then apply immediately.
6317 	 */
6318 	if (external || dev->data->dev_started ||
6319 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
6320 	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
6321 		ret = flow_drv_apply(dev, flow, error);
6322 		if (ret < 0)
6323 			goto error;
6324 	}
6325 	if (list) {
6326 		rte_spinlock_lock(&priv->flow_list_lock);
6327 		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
6328 			     flow, next);
6329 		rte_spinlock_unlock(&priv->flow_list_lock);
6330 	}
6331 	flow_rxq_flags_set(dev, flow);
6332 	rte_free(translated_actions);
6333 	tunnel = flow_tunnel_from_rule(wks->flows);
6334 	if (tunnel) {
6335 		flow->tunnel = 1;
6336 		flow->tunnel_id = tunnel->tunnel_id;
6337 		__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
6338 		mlx5_free(default_miss_ctx.queue);
6339 	}
6340 	mlx5_flow_pop_thread_workspace();
6341 	return idx;
6342 error:
6343 	MLX5_ASSERT(flow);
6344 	ret = rte_errno; /* Save rte_errno before cleanup. */
6345 	flow_mreg_del_copy_action(dev, flow);
6346 	flow_drv_destroy(dev, flow);
6347 	if (rss_desc->shared_rss)
6348 		__atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
6349 			mlx5_ipool_get
6350 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
6351 			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
6352 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
6353 	rte_errno = ret; /* Restore rte_errno. */
6356 	mlx5_flow_pop_thread_workspace();
6357 error_before_hairpin_split:
6358 	rte_free(translated_actions);
6359 	return 0;
6360 }
6361 
6362 /**
6363  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
6364  * incoming packets to table 1.
6365  *
6366  * Other flow rules, requested for group n, will be created in
6367  * e-switch table n+1.
6368  * Jump action to e-switch group n will be created to group n+1.
6369  *
6370  * Used when working in switchdev mode, to utilise advantages of table 1
6371  * and above.
6372  *
6373  * @param dev
6374  *   Pointer to Ethernet device.
6375  *
6376  * @return
6377  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
6378  */
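/*
 * Roughly equivalent testpmd rule (illustrative only, syntax assumed):
 *   flow create <port> ingress transfer group 0
 *        pattern end actions jump group 1 / end
 */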
6379 struct rte_flow *
6380 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
6381 {
6382 	const struct rte_flow_attr attr = {
6383 		.group = 0,
6384 		.priority = 0,
6385 		.ingress = 1,
6386 		.egress = 0,
6387 		.transfer = 1,
6388 	};
6389 	const struct rte_flow_item pattern = {
6390 		.type = RTE_FLOW_ITEM_TYPE_END,
6391 	};
6392 	struct rte_flow_action_jump jump = {
6393 		.group = 1,
6394 	};
6395 	const struct rte_flow_action actions[] = {
6396 		{
6397 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
6398 			.conf = &jump,
6399 		},
6400 		{
6401 			.type = RTE_FLOW_ACTION_TYPE_END,
6402 		},
6403 	};
6404 	struct mlx5_priv *priv = dev->data->dev_private;
6405 	struct rte_flow_error error;
6406 
6407 	return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
6408 						   &attr, &pattern,
6409 						   actions, false, &error);
6410 }
6411 
6412 /**
6413  * Validate a flow supported by the NIC.
6414  *
6415  * @see rte_flow_validate()
6416  * @see rte_flow_ops
6417  */
6418 int
6419 mlx5_flow_validate(struct rte_eth_dev *dev,
6420 		   const struct rte_flow_attr *attr,
6421 		   const struct rte_flow_item items[],
6422 		   const struct rte_flow_action original_actions[],
6423 		   struct rte_flow_error *error)
6424 {
6425 	int hairpin_flow;
6426 	struct mlx5_translated_action_handle
6427 		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
6428 	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
6429 	const struct rte_flow_action *actions;
6430 	struct rte_flow_action *translated_actions = NULL;
6431 	int ret = flow_action_handles_translate(dev, original_actions,
6432 						indir_actions,
6433 						&indir_actions_n,
6434 						&translated_actions, error);
6435 
6436 	if (ret)
6437 		return ret;
6438 	actions = translated_actions ? translated_actions : original_actions;
6439 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
6440 	ret = flow_drv_validate(dev, attr, items, actions,
6441 				true, hairpin_flow, error);
6442 	rte_free(translated_actions);
6443 	return ret;
6444 }
6445 
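/*
 * Editor's note: a minimal usage sketch of the public entry point that
 * dispatches into the validation callback above. The port id, pattern and
 * action set are illustrative assumptions, not part of this driver.
 *
 *	#include <rte_flow.h>
 *
 *	static int
 *	example_validate(uint16_t port_id)
 *	{
 *		const struct rte_flow_attr attr = { .ingress = 1 };
 *		const struct rte_flow_item pattern[] = {
 *			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *			{ .type = RTE_FLOW_ITEM_TYPE_END },
 *		};
 *		const struct rte_flow_action actions[] = {
 *			{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *			{ .type = RTE_FLOW_ACTION_TYPE_END },
 *		};
 *		struct rte_flow_error error;
 *
 *		return rte_flow_validate(port_id, &attr, pattern,
 *					 actions, &error);
 *	}
 */
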
6446 /**
6447  * Create a flow.
6448  *
6449  * @see rte_flow_create()
6450  * @see rte_flow_ops
6451  */
6452 struct rte_flow *
6453 mlx5_flow_create(struct rte_eth_dev *dev,
6454 		 const struct rte_flow_attr *attr,
6455 		 const struct rte_flow_item items[],
6456 		 const struct rte_flow_action actions[],
6457 		 struct rte_flow_error *error)
6458 {
6459 	struct mlx5_priv *priv = dev->data->dev_private;
6460 
6461 	/*
6462 	 * If the device is not started yet, it is not allowed to create a
6463 	 * flow from the application. PMD default flows and traffic control
6464 	 * flows are not affected.
6465 	 */
6466 	if (unlikely(!dev->data->dev_started)) {
6467 		DRV_LOG(DEBUG, "port %u is not started when "
6468 			"inserting a flow", dev->data->port_id);
6469 		rte_flow_error_set(error, ENODEV,
6470 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6471 				   NULL,
6472 				   "port not started");
6473 		return NULL;
6474 	}
6475 
6476 	return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
6477 				  attr, items, actions, true, error);
6478 }
6479 
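/*
 * Editor's note: a hedged example of the expected call order. The callback
 * above rejects insertion on a stopped port with ENODEV, so an application
 * is assumed to start the port before creating rules. The port id and
 * queue index are illustrative.
 *
 *	const struct rte_flow_attr attr = { .ingress = 1 };
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *flow;
 *
 *	if (rte_eth_dev_start(port_id) < 0)
 *		return -1;
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *	if (!flow)
 *		printf("flow creation failed: %s\n",
 *		       error.message ? error.message : "(no message)");
 */
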
6480 /**
6481  * Destroy a flow in a list.
6482  *
6483  * @param dev
6484  *   Pointer to Ethernet device.
6485  * @param list
6486  *   Pointer to the indexed flow list. If this parameter is NULL,
6487  *   the flow is not removed from any list. Note that since the
6488  *   flow is linked into the indexed list, the memory the list
6489  *   head points to may change as flows are destroyed.
6490  * @param[in] flow_idx
6491  *   Index of flow to destroy.
6492  */
6493 static void
6494 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
6495 		  uint32_t flow_idx)
6496 {
6497 	struct mlx5_priv *priv = dev->data->dev_private;
6498 	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6499 					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
6500 
6501 	if (!flow)
6502 		return;
6503 	/*
6504 	 * Update RX queue flags only if port is started, otherwise it is
6505 	 * already clean.
6506 	 */
6507 	if (dev->data->dev_started)
6508 		flow_rxq_flags_trim(dev, flow);
6509 	flow_drv_destroy(dev, flow);
6510 	if (list) {
6511 		rte_spinlock_lock(&priv->flow_list_lock);
6512 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
6513 			     flow_idx, flow, next);
6514 		rte_spinlock_unlock(&priv->flow_list_lock);
6515 	}
6516 	if (flow->tunnel) {
6517 		struct mlx5_flow_tunnel *tunnel;
6518 
6519 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
6520 		RTE_VERIFY(tunnel);
6521 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
6522 			mlx5_flow_tunnel_free(dev, tunnel);
6523 	}
6524 	flow_mreg_del_copy_action(dev, flow);
6525 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
6526 }
6527 
6528 /**
6529  * Destroy all flows.
6530  *
6531  * @param dev
6532  *   Pointer to Ethernet device.
6533  * @param list
6534  *   Pointer to the Indexed flow list.
6535  * @param active
6536  *   If flushing is called actively.
6537  */
6538 void
6539 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
6540 {
6541 	uint32_t num_flushed = 0;
6542 
6543 	while (*list) {
6544 		flow_list_destroy(dev, list, *list);
6545 		num_flushed++;
6546 	}
6547 	if (active) {
6548 		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
6549 			dev->data->port_id, num_flushed);
6550 	}
6551 }
6552 
6553 /**
6554  * Stop all default actions for flows.
6555  *
6556  * @param dev
6557  *   Pointer to Ethernet device.
6558  */
6559 void
6560 mlx5_flow_stop_default(struct rte_eth_dev *dev)
6561 {
6562 	flow_mreg_del_default_copy_action(dev);
6563 	flow_rxq_flags_clear(dev);
6564 }
6565 
6566 /**
6567  * Start all default actions for flows.
6568  *
6569  * @param dev
6570  *   Pointer to Ethernet device.
6571  * @return
6572  *   0 on success, a negative errno value otherwise and rte_errno is set.
6573  */
6574 int
6575 mlx5_flow_start_default(struct rte_eth_dev *dev)
6576 {
6577 	struct rte_flow_error error;
6578 
6579 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
6580 	return flow_mreg_add_default_copy_action(dev, &error);
6581 }
6582 
6583 /**
6584  * Release key of thread specific flow workspace data.
6585  */
6586 void
6587 flow_release_workspace(void *data)
6588 {
6589 	struct mlx5_flow_workspace *wks = data;
6590 	struct mlx5_flow_workspace *next;
6591 
6592 	while (wks) {
6593 		next = wks->next;
6594 		free(wks->rss_desc.queue);
6595 		free(wks);
6596 		wks = next;
6597 	}
6598 }
6599 
6600 /**
6601  * Get thread specific current flow workspace.
6602  *
6603  * @return pointer to thread specific flow workspace data, NULL on error.
6604  */
6605 struct mlx5_flow_workspace*
6606 mlx5_flow_get_thread_workspace(void)
6607 {
6608 	struct mlx5_flow_workspace *data;
6609 
6610 	data = mlx5_flow_os_get_specific_workspace();
6611 	MLX5_ASSERT(data && data->inuse);
6612 	if (!data || !data->inuse)
6613 		DRV_LOG(ERR, "flow workspace not initialized.");
6614 	return data;
6615 }
6616 
6617 /**
6618  * Allocate and init new flow workspace.
6619  *
6620  * @return pointer to flow workspace data, NULL on error.
6621  */
6622 static struct mlx5_flow_workspace*
6623 flow_alloc_thread_workspace(void)
6624 {
6625 	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
6626 
6627 	if (!data) {
6628 		DRV_LOG(ERR, "Failed to allocate flow workspace "
6629 			"memory.");
6630 		return NULL;
6631 	}
6632 	data->rss_desc.queue = calloc(1,
6633 			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
6634 	if (!data->rss_desc.queue)
6635 		goto err;
6636 	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
6637 	return data;
6638 err:
6639 	/* rss_desc.queue is NULL here, so only the workspace needs freeing. */
6640 	free(data);
6642 	return NULL;
6643 }
6644 
6645 /**
6646  * Get new thread specific flow workspace.
6647  *
6648  * If the current workspace is in use, create a new one and set it as current.
6649  *
6650  * @return pointer to thread specific flow workspace data, NULL on error.
6651  */
6652 static struct mlx5_flow_workspace*
6653 mlx5_flow_push_thread_workspace(void)
6654 {
6655 	struct mlx5_flow_workspace *curr;
6656 	struct mlx5_flow_workspace *data;
6657 
6658 	curr = mlx5_flow_os_get_specific_workspace();
6659 	if (!curr) {
6660 		data = flow_alloc_thread_workspace();
6661 		if (!data)
6662 			return NULL;
6663 	} else if (!curr->inuse) {
6664 		data = curr;
6665 	} else if (curr->next) {
6666 		data = curr->next;
6667 	} else {
6668 		data = flow_alloc_thread_workspace();
6669 		if (!data)
6670 			return NULL;
6671 		curr->next = data;
6672 		data->prev = curr;
6673 	}
6674 	data->inuse = 1;
6675 	data->flow_idx = 0;
6676 	/* Set as current workspace */
6677 	if (mlx5_flow_os_set_specific_workspace(data))
6678 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6679 	return data;
6680 }
6681 
6682 /**
6683  * Close current thread specific flow workspace.
6684  *
6685  * If a previous workspace is available, set it as current.
6688  */
6689 static void
6690 mlx5_flow_pop_thread_workspace(void)
6691 {
6692 	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
6693 
6694 	if (!data)
6695 		return;
6696 	if (!data->inuse) {
6697 		DRV_LOG(ERR, "Failed to close unused flow workspace.");
6698 		return;
6699 	}
6700 	data->inuse = 0;
6701 	if (!data->prev)
6702 		return;
6703 	if (mlx5_flow_os_set_specific_workspace(data->prev))
6704 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6705 }
6706 
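/*
 * Editor's note: the functions above implement a small per-thread stack of
 * workspaces, so that flow creation may nest (e.g. a tunnel rule internally
 * creating a default-miss rule). A sketch of the intended pattern inside
 * the PMD, using the names defined above:
 *
 *	struct mlx5_flow_workspace *wks;
 *
 *	wks = mlx5_flow_push_thread_workspace();
 *	if (!wks)
 *		return 0;
 *	// ... translate/apply the rule; a nested creation pushes and
 *	// pops its own workspace on the same thread ...
 *	mlx5_flow_pop_thread_workspace();
 */
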
6707 /**
6708  * Verify the flow list is empty.
6709  *
6710  * @param dev
6711  *  Pointer to Ethernet device.
6712  *
6713  * @return the number of flows not released.
6714  */
6715 int
6716 mlx5_flow_verify(struct rte_eth_dev *dev)
6717 {
6718 	struct mlx5_priv *priv = dev->data->dev_private;
6719 	struct rte_flow *flow;
6720 	uint32_t idx;
6721 	int ret = 0;
6722 
6723 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
6724 		      flow, next) {
6725 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
6726 			dev->data->port_id, (void *)flow);
6727 		++ret;
6728 	}
6729 	return ret;
6730 }
6731 
6732 /**
6733  * Enable default hairpin egress flow.
6734  *
6735  * @param dev
6736  *   Pointer to Ethernet device.
6737  * @param queue
6738  *   The queue index.
6739  *
6740  * @return
6741  *   0 on success, a negative errno value otherwise and rte_errno is set.
6742  */
6743 int
6744 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
6745 			    uint32_t queue)
6746 {
6747 	struct mlx5_priv *priv = dev->data->dev_private;
6748 	const struct rte_flow_attr attr = {
6749 		.egress = 1,
6750 		.priority = 0,
6751 	};
6752 	struct mlx5_rte_flow_item_tx_queue queue_spec = {
6753 		.queue = queue,
6754 	};
6755 	struct mlx5_rte_flow_item_tx_queue queue_mask = {
6756 		.queue = UINT32_MAX,
6757 	};
6758 	struct rte_flow_item items[] = {
6759 		{
6760 			.type = (enum rte_flow_item_type)
6761 				MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
6762 			.spec = &queue_spec,
6763 			.last = NULL,
6764 			.mask = &queue_mask,
6765 		},
6766 		{
6767 			.type = RTE_FLOW_ITEM_TYPE_END,
6768 		},
6769 	};
6770 	struct rte_flow_action_jump jump = {
6771 		.group = MLX5_HAIRPIN_TX_TABLE,
6772 	};
6773 	struct rte_flow_action actions[2];
6774 	uint32_t flow_idx;
6775 	struct rte_flow_error error;
6776 
6777 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
6778 	actions[0].conf = &jump;
6779 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
6780 	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6781 				&attr, items, actions, false, &error);
6782 	if (!flow_idx) {
6783 		DRV_LOG(DEBUG,
6784 			"Failed to create ctrl flow: rte_errno(%d),"
6785 			" type(%d), message(%s)",
6786 			rte_errno, error.type,
6787 			error.message ? error.message : " (no stated reason)");
6788 		return -rte_errno;
6789 	}
6790 	return 0;
6791 }
6792 
6793 /**
6794  * Enable a control flow configured from the control plane.
6795  *
6796  * @param dev
6797  *   Pointer to Ethernet device.
6798  * @param eth_spec
6799  *   An Ethernet flow spec to apply.
6800  * @param eth_mask
6801  *   An Ethernet flow mask to apply.
6802  * @param vlan_spec
6803  *   A VLAN flow spec to apply.
6804  * @param vlan_mask
6805  *   A VLAN flow mask to apply.
6806  *
6807  * @return
6808  *   0 on success, a negative errno value otherwise and rte_errno is set.
6809  */
6810 int
6811 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
6812 		    struct rte_flow_item_eth *eth_spec,
6813 		    struct rte_flow_item_eth *eth_mask,
6814 		    struct rte_flow_item_vlan *vlan_spec,
6815 		    struct rte_flow_item_vlan *vlan_mask)
6816 {
6817 	struct mlx5_priv *priv = dev->data->dev_private;
6818 	const struct rte_flow_attr attr = {
6819 		.ingress = 1,
6820 		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
6821 	};
6822 	struct rte_flow_item items[] = {
6823 		{
6824 			.type = RTE_FLOW_ITEM_TYPE_ETH,
6825 			.spec = eth_spec,
6826 			.last = NULL,
6827 			.mask = eth_mask,
6828 		},
6829 		{
6830 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
6831 					      RTE_FLOW_ITEM_TYPE_END,
6832 			.spec = vlan_spec,
6833 			.last = NULL,
6834 			.mask = vlan_mask,
6835 		},
6836 		{
6837 			.type = RTE_FLOW_ITEM_TYPE_END,
6838 		},
6839 	};
6840 	uint16_t queue[priv->reta_idx_n];
6841 	struct rte_flow_action_rss action_rss = {
6842 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6843 		.level = 0,
6844 		.types = priv->rss_conf.rss_hf,
6845 		.key_len = priv->rss_conf.rss_key_len,
6846 		.queue_num = priv->reta_idx_n,
6847 		.key = priv->rss_conf.rss_key,
6848 		.queue = queue,
6849 	};
6850 	struct rte_flow_action actions[] = {
6851 		{
6852 			.type = RTE_FLOW_ACTION_TYPE_RSS,
6853 			.conf = &action_rss,
6854 		},
6855 		{
6856 			.type = RTE_FLOW_ACTION_TYPE_END,
6857 		},
6858 	};
6859 	uint32_t flow_idx;
6860 	struct rte_flow_error error;
6861 	unsigned int i;
6862 
6863 	if (!priv->reta_idx_n || !priv->rxqs_n)
6864 		return 0;
6866 	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
6867 		action_rss.types = 0;
6868 	for (i = 0; i != priv->reta_idx_n; ++i)
6869 		queue[i] = (*priv->reta_idx)[i];
6870 	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6871 				&attr, items, actions, false, &error);
6872 	if (!flow_idx)
6873 		return -rte_errno;
6874 	return 0;
6875 }
6876 
6877 /**
6878  * Enable a control flow configured from the control plane.
6879  *
6880  * @param dev
6881  *   Pointer to Ethernet device.
6882  * @param eth_spec
6883  *   An Ethernet flow spec to apply.
6884  * @param eth_mask
6885  *   An Ethernet flow mask to apply.
6886  *
6887  * @return
6888  *   0 on success, a negative errno value otherwise and rte_errno is set.
6889  */
6890 int
6891 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6892 	       struct rte_flow_item_eth *eth_spec,
6893 	       struct rte_flow_item_eth *eth_mask)
6894 {
6895 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6896 }
6897 
6898 /**
6899  * Create a default miss flow rule matching LACP traffic.
6900  *
6901  * @param dev
6902  *   Pointer to Ethernet device.
6905  *
6906  * @return
6907  *   0 on success, a negative errno value otherwise and rte_errno is set.
6908  */
6909 int
6910 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6911 {
6912 	struct mlx5_priv *priv = dev->data->dev_private;
6913 	/*
6914 	 * The LACP matching is done by using only the ether type, since using
6915 	 * a multicast dst mac causes the kernel to give low priority to this flow.
6916 	 */
6917 	static const struct rte_flow_item_eth lacp_spec = {
6918 		.type = RTE_BE16(0x8809),
6919 	};
6920 	static const struct rte_flow_item_eth lacp_mask = {
6921 		.type = 0xffff,
6922 	};
6923 	const struct rte_flow_attr attr = {
6924 		.ingress = 1,
6925 	};
6926 	struct rte_flow_item items[] = {
6927 		{
6928 			.type = RTE_FLOW_ITEM_TYPE_ETH,
6929 			.spec = &lacp_spec,
6930 			.mask = &lacp_mask,
6931 		},
6932 		{
6933 			.type = RTE_FLOW_ITEM_TYPE_END,
6934 		},
6935 	};
6936 	struct rte_flow_action actions[] = {
6937 		{
6938 			.type = (enum rte_flow_action_type)
6939 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6940 		},
6941 		{
6942 			.type = RTE_FLOW_ACTION_TYPE_END,
6943 		},
6944 	};
6945 	struct rte_flow_error error;
6946 	uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6947 				&attr, items, actions, false, &error);
6948 
6949 	if (!flow_idx)
6950 		return -rte_errno;
6951 	return 0;
6952 }
6953 
6954 /**
6955  * Destroy a flow.
6956  *
6957  * @see rte_flow_destroy()
6958  * @see rte_flow_ops
6959  */
6960 int
6961 mlx5_flow_destroy(struct rte_eth_dev *dev,
6962 		  struct rte_flow *flow,
6963 		  struct rte_flow_error *error __rte_unused)
6964 {
6965 	struct mlx5_priv *priv = dev->data->dev_private;
6966 
6967 	flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6968 	return 0;
6969 }
6970 
6971 /**
6972  * Destroy all flows.
6973  *
6974  * @see rte_flow_flush()
6975  * @see rte_flow_ops
6976  */
6977 int
6978 mlx5_flow_flush(struct rte_eth_dev *dev,
6979 		struct rte_flow_error *error __rte_unused)
6980 {
6981 	struct mlx5_priv *priv = dev->data->dev_private;
6982 
6983 	mlx5_flow_list_flush(dev, &priv->flows, false);
6984 	return 0;
6985 }
6986 
6987 /**
6988  * Isolated mode.
6989  *
6990  * @see rte_flow_isolate()
6991  * @see rte_flow_ops
6992  */
6993 int
6994 mlx5_flow_isolate(struct rte_eth_dev *dev,
6995 		  int enable,
6996 		  struct rte_flow_error *error)
6997 {
6998 	struct mlx5_priv *priv = dev->data->dev_private;
6999 
7000 	if (dev->data->dev_started) {
7001 		rte_flow_error_set(error, EBUSY,
7002 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7003 				   NULL,
7004 				   "port must be stopped first");
7005 		return -rte_errno;
7006 	}
7007 	priv->isolated = !!enable;
7008 	if (enable)
7009 		dev->dev_ops = &mlx5_dev_ops_isolate;
7010 	else
7011 		dev->dev_ops = &mlx5_dev_ops;
7012 
7013 	dev->rx_descriptor_status = mlx5_rx_descriptor_status;
7014 	dev->tx_descriptor_status = mlx5_tx_descriptor_status;
7015 
7016 	return 0;
7017 }
7018 
7019 /**
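/*
 * Editor's note: a hedged usage sketch. Isolated mode must be requested
 * while the port is stopped (the callback above returns EBUSY otherwise),
 * typically right after configuration. The port id and device
 * configuration are illustrative.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	struct rte_flow_error error;
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		return -1;
 *	if (rte_flow_isolate(port_id, 1, &error) < 0)
 *		return -1;
 *	if (rte_eth_dev_start(port_id) < 0)
 *		return -1;
 */
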
7020  * Query a flow.
7021  *
7022  * @see rte_flow_query()
7023  * @see rte_flow_ops
7024  */
7025 static int
7026 flow_drv_query(struct rte_eth_dev *dev,
7027 	       uint32_t flow_idx,
7028 	       const struct rte_flow_action *actions,
7029 	       void *data,
7030 	       struct rte_flow_error *error)
7031 {
7032 	struct mlx5_priv *priv = dev->data->dev_private;
7033 	const struct mlx5_flow_driver_ops *fops;
7034 	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
7035 					       [MLX5_IPOOL_RTE_FLOW],
7036 					       flow_idx);
7037 	enum mlx5_flow_drv_type ftype;
7038 
7039 	if (!flow) {
7040 		return rte_flow_error_set(error, ENOENT,
7041 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7042 			  NULL,
7043 			  "invalid flow handle");
7044 	}
7045 	ftype = flow->drv_type;
7046 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
7047 	fops = flow_get_drv_ops(ftype);
7048 
7049 	return fops->query(dev, flow, actions, data, error);
7050 }
7051 
7052 /**
7053  * Query a flow.
7054  *
7055  * @see rte_flow_query()
7056  * @see rte_flow_ops
7057  */
7058 int
7059 mlx5_flow_query(struct rte_eth_dev *dev,
7060 		struct rte_flow *flow,
7061 		const struct rte_flow_action *actions,
7062 		void *data,
7063 		struct rte_flow_error *error)
7064 {
7065 	int ret;
7066 
7067 	ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
7068 			     error);
7069 	if (ret < 0)
7070 		return ret;
7071 	return 0;
7072 }
7073 
7074 /**
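/*
 * Editor's note: a hedged example of querying a COUNT action through
 * rte_flow_query(), which lands in the callback above. It assumes "flow"
 * was created on "port_id" with a COUNT action, and that <inttypes.h>
 * is included for PRIu64.
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (!rte_flow_query(port_id, flow, &count_action, &count, &error))
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       count.hits, count.bytes);
 */
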
7075  * Get rte_flow callbacks.
7076  *
7077  * @param dev
7078  *   Pointer to Ethernet device structure.
7079  * @param ops
7080  *   Pointer to operation-specific structure.
7081  *
7082  * @return 0
7083  */
7084 int
7085 mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
7086 		  const struct rte_flow_ops **ops)
7087 {
7088 	*ops = &mlx5_flow_ops;
7089 	return 0;
7090 }
7091 
7092 /**
7093  * Validate meter policy actions.
7094  * Dispatcher for action type specific validation.
7095  *
7096  * @param[in] dev
7097  *   Pointer to the Ethernet device structure.
7098  * @param[in] action
7099  *   The meter policy action object to validate.
7100  * @param[in] attr
7101  *   Attributes of flow to determine steering domain.
7102  * @param[out] is_rss
7103  *   Is RSS or not.
7104  * @param[out] domain_bitmap
7105  *   Domain bitmap.
7106  * @param[out] is_def_policy
7107  *   Is default policy or not.
7108  * @param[out] error
7109  *   Perform verbose error reporting if not NULL. Initialized in case of
7110  *   error only.
7111  *
7112  * @return
7113  *   0 on success, otherwise negative errno value.
7114  */
7115 int
7116 mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
7117 			const struct rte_flow_action *actions[RTE_COLORS],
7118 			struct rte_flow_attr *attr,
7119 			bool *is_rss,
7120 			uint8_t *domain_bitmap,
7121 			bool *is_def_policy,
7122 			struct rte_mtr_error *error)
7123 {
7124 	const struct mlx5_flow_driver_ops *fops;
7125 
7126 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7127 	return fops->validate_mtr_acts(dev, actions, attr,
7128 			is_rss, domain_bitmap, is_def_policy, error);
7129 }
7130 
7131 /**
7132  * Destroy the meter table set.
7133  *
7134  * @param[in] dev
7135  *   Pointer to Ethernet device.
7136  * @param[in] mtr_policy
7137  *   Meter policy struct.
7138  */
7139 void
7140 mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
7141 		      struct mlx5_flow_meter_policy *mtr_policy)
7142 {
7143 	const struct mlx5_flow_driver_ops *fops;
7144 
7145 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7146 	fops->destroy_mtr_acts(dev, mtr_policy);
7147 }
7148 
7149 /**
7150  * Create policy action, lock free,
7151  * (mutex should be acquired by caller).
7152  * Dispatcher for action type specific call.
7153  *
7154  * @param[in] dev
7155  *   Pointer to the Ethernet device structure.
7156  * @param[in] mtr_policy
7157  *   Meter policy struct.
7158  * @param[in] action
7159  *   Action specification used to create meter actions.
7160  * @param[out] error
7161  *   Perform verbose error reporting if not NULL. Initialized in case of
7162  *   error only.
7163  *
7164  * @return
7165  *   0 on success, otherwise negative errno value.
7166  */
7167 int
7168 mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
7169 		      struct mlx5_flow_meter_policy *mtr_policy,
7170 		      const struct rte_flow_action *actions[RTE_COLORS],
7171 		      struct rte_mtr_error *error)
7172 {
7173 	const struct mlx5_flow_driver_ops *fops;
7174 
7175 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7176 	return fops->create_mtr_acts(dev, mtr_policy, actions, error);
7177 }
7178 
7179 /**
7180  * Create policy rules, lock free,
7181  * (mutex should be acquired by caller).
7182  * Dispatcher for action type specific call.
7183  *
7184  * @param[in] dev
7185  *   Pointer to the Ethernet device structure.
7186  * @param[in] mtr_policy
7187  *   Meter policy struct.
7188  *
7189  * @return
7190  *   0 on success, -1 otherwise.
7191  */
7192 int
7193 mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
7194 			     struct mlx5_flow_meter_policy *mtr_policy)
7195 {
7196 	const struct mlx5_flow_driver_ops *fops;
7197 
7198 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7199 	return fops->create_policy_rules(dev, mtr_policy);
7200 }
7201 
7202 /**
7203  * Destroy policy rules, lock free,
7204  * (mutex should be acquired by caller).
7205  * Dispatcher for action type specific call.
7206  *
7207  * @param[in] dev
7208  *   Pointer to the Ethernet device structure.
7209  * @param[in] mtr_policy
7210  *   Meter policy struct.
7211  */
7212 void
7213 mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
7214 			     struct mlx5_flow_meter_policy *mtr_policy)
7215 {
7216 	const struct mlx5_flow_driver_ops *fops;
7217 
7218 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7219 	fops->destroy_policy_rules(dev, mtr_policy);
7220 }
7221 
7222 /**
7223  * Destroy the default policy table set.
7224  *
7225  * @param[in] dev
7226  *   Pointer to Ethernet device.
7227  */
7228 void
7229 mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
7230 {
7231 	const struct mlx5_flow_driver_ops *fops;
7232 
7233 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7234 	fops->destroy_def_policy(dev);
7235 }
7236 
7237 /**
7238  * Create the default policy table set.
7239  *
7240  * @param[in] dev
7241  *   Pointer to Ethernet device.
7242  *
7243  * @return
7244  *   0 on success, -1 otherwise.
7245  */
7246 int
7247 mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
7248 {
7249 	const struct mlx5_flow_driver_ops *fops;
7250 
7251 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7252 	return fops->create_def_policy(dev);
7253 }
7254 
7255 /**
7256  * Create the needed meter and suffix tables.
7257  *
7258  * @param[in] dev
7259  *   Pointer to Ethernet device.
7260  *
7261  * @return
7262  *   0 on success, -1 otherwise.
7263  */
7264 int
7265 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
7266 			struct mlx5_flow_meter_info *fm,
7267 			uint32_t mtr_idx,
7268 			uint8_t domain_bitmap)
7269 {
7270 	const struct mlx5_flow_driver_ops *fops;
7271 
7272 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7273 	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
7274 }
7275 
7276 /**
7277  * Destroy the meter table set.
7278  *
7279  * @param[in] dev
7280  *   Pointer to Ethernet device.
7281  * @param[in] fm
7282  *   Pointer to the flow meter info whose table set is destroyed.
7283  */
7284 void
7285 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
7286 			   struct mlx5_flow_meter_info *fm)
7287 {
7288 	const struct mlx5_flow_driver_ops *fops;
7289 
7290 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7291 	fops->destroy_mtr_tbls(dev, fm);
7292 }
7293 
7294 /**
7295  * Destroy the global meter drop table.
7296  *
7297  * @param[in] dev
7298  *   Pointer to Ethernet device.
7299  */
7300 void
7301 mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
7302 {
7303 	const struct mlx5_flow_driver_ops *fops;
7304 
7305 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7306 	fops->destroy_mtr_drop_tbls(dev);
7307 }
7308 
7309 /**
7310  * Destroy the sub policy table with RX queue.
7311  *
7312  * @param[in] dev
7313  *   Pointer to Ethernet device.
7314  * @param[in] mtr_policy
7315  *   Pointer to meter policy table.
7316  */
7317 void
7318 mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
7319 		struct mlx5_flow_meter_policy *mtr_policy)
7320 {
7321 	const struct mlx5_flow_driver_ops *fops;
7322 
7323 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7324 	fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
7325 }
7326 
7327 /**
7328  * Allocate the needed ASO flow meter id.
7329  *
7330  * @param[in] dev
7331  *   Pointer to Ethernet device.
7332  *
7333  * @return
7334  *   Index to ASO flow meter on success, 0 otherwise.
7335  */
7336 uint32_t
7337 mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
7338 {
7339 	const struct mlx5_flow_driver_ops *fops;
7340 
7341 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7342 	return fops->create_meter(dev);
7343 }
7344 
7345 /**
7346  * Free the ASO flow meter id.
7347  *
7348  * @param[in] dev
7349  *   Pointer to Ethernet device.
7350  * @param[in] mtr_idx
7351  *   Index to the ASO flow meter to be freed.
7352  */
7356 void
7357 mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
7358 {
7359 	const struct mlx5_flow_driver_ops *fops;
7360 
7361 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7362 	fops->free_meter(dev, mtr_idx);
7363 }
7364 
7365 /**
7366  * Allocate a counter.
7367  *
7368  * @param[in] dev
7369  *   Pointer to Ethernet device structure.
7370  *
7371  * @return
7372  *   Index to the allocated counter on success, 0 otherwise.
7373  */
7374 uint32_t
7375 mlx5_counter_alloc(struct rte_eth_dev *dev)
7376 {
7377 	const struct mlx5_flow_driver_ops *fops;
7378 	struct rte_flow_attr attr = { .transfer = 0 };
7379 
7380 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7381 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7382 		return fops->counter_alloc(dev);
7383 	}
7384 	DRV_LOG(ERR,
7385 		"port %u counter allocate is not supported.",
7386 		 dev->data->port_id);
7387 	return 0;
7388 }
7389 
7390 /**
7391  * Free a counter.
7392  *
7393  * @param[in] dev
7394  *   Pointer to Ethernet device structure.
7395  * @param[in] cnt
7396  *   Index to the counter to be freed.
7397  */
7398 void
7399 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
7400 {
7401 	const struct mlx5_flow_driver_ops *fops;
7402 	struct rte_flow_attr attr = { .transfer = 0 };
7403 
7404 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7405 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7406 		fops->counter_free(dev, cnt);
7407 		return;
7408 	}
7409 	DRV_LOG(ERR,
7410 		"port %u counter free is not supported.",
7411 		 dev->data->port_id);
7412 }
7413 
7414 /**
7415  * Query counter statistics.
7416  *
7417  * @param[in] dev
7418  *   Pointer to Ethernet device structure.
7419  * @param[in] cnt
7420  *   Index to counter to query.
7421  * @param[in] clear
7422  *   Set to clear counter statistics.
7423  * @param[out] pkts
7424  *   The counter hits packets number to save.
7425  * @param[out] bytes
7426  *   The counter hits bytes number to save.
7427  *
7428  * @return
7429  *   0 on success, a negative errno value otherwise.
7430  */
7431 int
7432 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
7433 		   bool clear, uint64_t *pkts, uint64_t *bytes)
7434 {
7435 	const struct mlx5_flow_driver_ops *fops;
7436 	struct rte_flow_attr attr = { .transfer = 0 };
7437 
7438 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7439 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7440 		return fops->counter_query(dev, cnt, clear, pkts, bytes);
7441 	}
7442 	DRV_LOG(ERR,
7443 		"port %u counter query is not supported.",
7444 		 dev->data->port_id);
7445 	return -ENOTSUP;
7446 }
7447 
7448 /**
7449  * Allocate new memory for the counter values, wrapped by all the needed
7450  * management structures.
7451  *
7452  * @param[in] sh
7453  *   Pointer to mlx5_dev_ctx_shared object.
7454  *
7455  * @return
7456  *   0 on success, a negative errno value otherwise.
7457  */
7458 static int
7459 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
7460 {
7461 	struct mlx5_devx_mkey_attr mkey_attr;
7462 	struct mlx5_counter_stats_mem_mng *mem_mng;
7463 	volatile struct flow_counter_stats *raw_data;
7464 	int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
7465 	int size = (sizeof(struct flow_counter_stats) *
7466 			MLX5_COUNTERS_PER_POOL +
7467 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
7468 			sizeof(struct mlx5_counter_stats_mem_mng);
7469 	size_t pgsize = rte_mem_page_size();
7470 	uint8_t *mem;
7471 	int i;
7472 
7473 	if (pgsize == (size_t)-1) {
7474 		DRV_LOG(ERR, "Failed to get mem page size");
7475 		rte_errno = ENOMEM;
7476 		return -ENOMEM;
7477 	}
7478 	mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
7479 	if (!mem) {
7480 		rte_errno = ENOMEM;
7481 		return -ENOMEM;
7482 	}
7483 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
7484 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
7485 	mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
7486 						 IBV_ACCESS_LOCAL_WRITE);
7487 	if (!mem_mng->umem) {
7488 		rte_errno = errno;
7489 		mlx5_free(mem);
7490 		return -rte_errno;
7491 	}
7492 	memset(&mkey_attr, 0, sizeof(mkey_attr));
7493 	mkey_attr.addr = (uintptr_t)mem;
7494 	mkey_attr.size = size;
7495 	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
7496 	mkey_attr.pd = sh->pdn;
7497 	mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
7498 	mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
7499 	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
7500 	if (!mem_mng->dm) {
7501 		mlx5_os_umem_dereg(mem_mng->umem);
7502 		rte_errno = errno;
7503 		mlx5_free(mem);
7504 		return -rte_errno;
7505 	}
7506 	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
7507 	raw_data = (volatile struct flow_counter_stats *)mem;
7508 	for (i = 0; i < raws_n; ++i) {
7509 		mem_mng->raws[i].mem_mng = mem_mng;
7510 		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
7511 	}
7512 	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
7513 		LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
7514 				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
7515 				 next);
7516 	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
7517 	sh->cmng.mem_mng = mem_mng;
7518 	return 0;
7519 }
7520 
7521 /**
7522  * Set the statistic memory to the new counter pool.
7523  *
7524  * @param[in] sh
7525  *   Pointer to mlx5_dev_ctx_shared object.
7526  * @param[in] pool
7527  *   Pointer to the pool to set the statistic memory.
7528  *
7529  * @return
7530  *   0 on success, a negative errno value otherwise.
7531  */
7532 static int
7533 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
7534 			       struct mlx5_flow_counter_pool *pool)
7535 {
7536 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7537 	/* Resize the statistic memory once it is used up. */
7538 	if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
7539 	    mlx5_flow_create_counter_stat_mem_mng(sh)) {
7540 		DRV_LOG(ERR, "Cannot resize counter stat mem.");
7541 		return -1;
7542 	}
7543 	rte_spinlock_lock(&pool->sl);
7544 	pool->raw = cmng->mem_mng->raws + pool->index %
7545 		    MLX5_CNT_CONTAINER_RESIZE;
7546 	rte_spinlock_unlock(&pool->sl);
7547 	pool->raw_hw = NULL;
7548 	return 0;
7549 }
7550 
7551 #define MLX5_POOL_QUERY_FREQ_US 1000000
7552 
7553 /**
7554  * Set the periodic procedure for triggering asynchronous batch queries for all
7555  * the counter pools.
7556  *
7557  * @param[in] sh
7558  *   Pointer to mlx5_dev_ctx_shared object.
7559  */
7560 void
7561 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
7562 {
7563 	uint32_t pools_n, us;
7564 
7565 	pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
7566 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
7567 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
7568 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
7569 		sh->cmng.query_thread_on = 0;
7570 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
7571 	} else {
7572 		sh->cmng.query_thread_on = 1;
7573 	}
7574 }
7575 
7576 /**
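/*
 * Editor's note: a worked example of the pacing above. One full sweep of
 * all pools is spread over MLX5_POOL_QUERY_FREQ_US: with 4 valid pools the
 * alarm fires every 1000000 / 4 = 250000 us, so each pool is queried
 * roughly once per second regardless of the pool count.
 */
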
7577  * The periodic procedure for triggering asynchronous batch queries for all the
7578  * counter pools. This function is expected to run in the host thread.
7579  *
7580  * @param[in] arg
7581  *   The parameter for the alarm process.
7582  */
7583 void
7584 mlx5_flow_query_alarm(void *arg)
7585 {
7586 	struct mlx5_dev_ctx_shared *sh = arg;
7587 	int ret;
7588 	uint16_t pool_index = sh->cmng.pool_index;
7589 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7590 	struct mlx5_flow_counter_pool *pool;
7591 	uint16_t n_valid;
7592 
7593 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
7594 		goto set_alarm;
7595 	rte_spinlock_lock(&cmng->pool_update_sl);
7596 	pool = cmng->pools[pool_index];
7597 	n_valid = cmng->n_valid;
7598 	rte_spinlock_unlock(&cmng->pool_update_sl);
7599 	/* Set the statistic memory to the newly created pool. */
7600 	if (!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))
7601 		goto set_alarm;
7602 	if (pool->raw_hw)
7603 		/* There is a pool query in progress. */
7604 		goto set_alarm;
7605 	pool->raw_hw =
7606 		LIST_FIRST(&sh->cmng.free_stat_raws);
7607 	if (!pool->raw_hw)
7608 		/* No free counter statistics raw memory. */
7609 		goto set_alarm;
7610 	/*
7611 	 * Identify the counters released between the query trigger and the
7612 	 * query handler more efficiently. A counter released in this gap
7613 	 * period should wait for a new round of query, as the newly arrived
7614 	 * packets will not be taken into account yet.
7615 	 */
7616 	pool->query_gen++;
7617 	ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
7618 					       MLX5_COUNTERS_PER_POOL,
7619 					       NULL, NULL,
7620 					       pool->raw_hw->mem_mng->dm->id,
7621 					       (void *)(uintptr_t)
7622 					       pool->raw_hw->data,
7623 					       sh->devx_comp,
7624 					       (uint64_t)(uintptr_t)pool);
7625 	if (ret) {
7626 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
7627 			" %d", pool->min_dcs->id);
7628 		pool->raw_hw = NULL;
7629 		goto set_alarm;
7630 	}
7631 	LIST_REMOVE(pool->raw_hw, next);
7632 	sh->cmng.pending_queries++;
7633 	pool_index++;
7634 	if (pool_index >= n_valid)
7635 		pool_index = 0;
7636 set_alarm:
7637 	sh->cmng.pool_index = pool_index;
7638 	mlx5_set_query_alarm(sh);
7639 }
7640 
7641 /**
7642  * Check for new aged flows in the counter pool and raise the aging event.
7643  *
7644  * @param[in] sh
7645  *   Pointer to mlx5_dev_ctx_shared object.
7646  * @param[in] pool
7647  *   Pointer to the current counter pool.
7648  */
7649 static void
7650 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
7651 		   struct mlx5_flow_counter_pool *pool)
7652 {
7653 	struct mlx5_priv *priv;
7654 	struct mlx5_flow_counter *cnt;
7655 	struct mlx5_age_info *age_info;
7656 	struct mlx5_age_param *age_param;
7657 	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
7658 	struct mlx5_counter_stats_raw *prev = pool->raw;
7659 	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
7660 	const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
7661 	uint16_t expected = AGE_CANDIDATE;
7662 	uint32_t i;
7663 
7664 	pool->time_of_last_age_check = curr_time;
7665 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
7666 		cnt = MLX5_POOL_GET_CNT(pool, i);
7667 		age_param = MLX5_CNT_TO_AGE(cnt);
7668 		if (__atomic_load_n(&age_param->state,
7669 				    __ATOMIC_RELAXED) != AGE_CANDIDATE)
7670 			continue;
7671 		if (cur->data[i].hits != prev->data[i].hits) {
7672 			__atomic_store_n(&age_param->sec_since_last_hit, 0,
7673 					 __ATOMIC_RELAXED);
7674 			continue;
7675 		}
7676 		if (__atomic_add_fetch(&age_param->sec_since_last_hit,
7677 				       time_delta,
7678 				       __ATOMIC_RELAXED) <= age_param->timeout)
7679 			continue;
7680 		/*
7681 		 * Hold the lock first. Otherwise, if the release happens
7682 		 * between setting the AGE_TMOUT state and the tailq
7683 		 * operation, the release procedure may delete a
7684 		 * non-existent tailq node.
7685 		 */
7686 		priv = rte_eth_devices[age_param->port_id].data->dev_private;
7687 		age_info = GET_PORT_AGE_INFO(priv);
7688 		rte_spinlock_lock(&age_info->aged_sl);
7689 		if (__atomic_compare_exchange_n(&age_param->state, &expected,
7690 						AGE_TMOUT, false,
7691 						__ATOMIC_RELAXED,
7692 						__ATOMIC_RELAXED)) {
7693 			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
7694 			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
7695 		}
7696 		rte_spinlock_unlock(&age_info->aged_sl);
7697 	}
7698 	mlx5_age_event_prepare(sh);
7699 }
7700 
7701 /**
7702  * Handler for the HW response with ready values from an asynchronous batch
7703  * query. This function is expected to run in the host thread.
7704  *
7705  * @param[in] sh
7706  *   The pointer to the shared device context.
7707  * @param[in] async_id
7708  *   The Devx async ID.
7709  * @param[in] status
7710  *   The status of the completion.
7711  */
7712 void
7713 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
7714 				  uint64_t async_id, int status)
7715 {
7716 	struct mlx5_flow_counter_pool *pool =
7717 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
7718 	struct mlx5_counter_stats_raw *raw_to_free;
7719 	uint8_t query_gen = pool->query_gen ^ 1;
7720 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7721 	enum mlx5_counter_type cnt_type =
7722 		pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
7723 				MLX5_COUNTER_TYPE_ORIGIN;
7724 
7725 	if (unlikely(status)) {
7726 		raw_to_free = pool->raw_hw;
7727 	} else {
7728 		raw_to_free = pool->raw;
7729 		if (pool->is_aged)
7730 			mlx5_flow_aging_check(sh, pool);
7731 		rte_spinlock_lock(&pool->sl);
7732 		pool->raw = pool->raw_hw;
7733 		rte_spinlock_unlock(&pool->sl);
7734 		/* Be sure the new raw counters data is updated in memory. */
7735 		rte_io_wmb();
7736 		if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
7737 			rte_spinlock_lock(&cmng->csl[cnt_type]);
7738 			TAILQ_CONCAT(&cmng->counters[cnt_type],
7739 				     &pool->counters[query_gen], next);
7740 			rte_spinlock_unlock(&cmng->csl[cnt_type]);
7741 		}
7742 	}
7743 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
7744 	pool->raw_hw = NULL;
7745 	sh->cmng.pending_queries--;
7746 }
7747 
7748 static int
7749 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
7750 		    const struct flow_grp_info *grp_info,
7751 		    struct rte_flow_error *error)
7752 {
7753 	if (grp_info->transfer && grp_info->external &&
7754 	    grp_info->fdb_def_rule) {
7755 		if (group == UINT32_MAX)
7756 			return rte_flow_error_set
7757 						(error, EINVAL,
7758 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7759 						 NULL,
7760 						 "group index not supported");
7761 		*table = group + 1;
7762 	} else {
7763 		*table = group;
7764 	}
7765 	DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
7766 	return 0;
7767 }
7768 
7769 /**
7770  * Translate the rte_flow group index to HW table value.
7771  *
7772  * If tunnel offload is disabled, all group ids are converted to flow
7773  * table ids using the standard method.
7774  * If tunnel offload is enabled, group id can be converted using the
7775  * standard or tunnel conversion method. Group conversion method
7776  * selection depends on flags in `grp_info` parameter:
7777  * - Internal (grp_info.external == 0) groups conversion uses the
7778  *   standard method.
7779  * - Group ids in JUMP action converted with the tunnel conversion.
7780  * - Group id in rule attribute conversion depends on a rule type and
7781  *   group id value:
7782  *   ** non zero group attributes converted with the tunnel method
7783  *   ** zero group attribute in non-tunnel rule is converted using the
7784  *      standard method - there's only one root table
7785  *   ** zero group attribute in steer tunnel rule is converted with the
7786  *      standard method - single root table
7787  *   ** zero group attribute in match tunnel rule is a special OvS
7788  *      case: that value is used for portability reasons. That group
7789  *      id is converted with the tunnel conversion method.
7790  *
7791  * @param[in] dev
7792  *   Port device
7793  * @param[in] tunnel
7794  *   PMD tunnel offload object
7795  * @param[in] group
7796  *   rte_flow group index value.
7797  * @param[out] table
7798  *   HW table value.
7799  * @param[in] grp_info
7800  *   flags used for conversion
7801  * @param[out] error
7802  *   Pointer to error structure.
7803  *
7804  * @return
7805  *   0 on success, a negative errno value otherwise and rte_errno is set.
7806  */
7807 int
7808 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
7809 			 const struct mlx5_flow_tunnel *tunnel,
7810 			 uint32_t group, uint32_t *table,
7811 			 const struct flow_grp_info *grp_info,
7812 			 struct rte_flow_error *error)
7813 {
7814 	int ret;
7815 	bool standard_translation;
7816 
7817 	if (!grp_info->skip_scale && grp_info->external &&
7818 	    group < MLX5_MAX_TABLES_EXTERNAL)
7819 		group *= MLX5_FLOW_TABLE_FACTOR;
7820 	if (is_tunnel_offload_active(dev)) {
7821 		standard_translation = !grp_info->external ||
7822 					grp_info->std_tbl_fix;
7823 	} else {
7824 		standard_translation = true;
7825 	}
7826 	DRV_LOG(DEBUG,
7827 		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
7828 		dev->data->port_id, group, grp_info->transfer,
7829 		grp_info->external, grp_info->fdb_def_rule,
7830 		standard_translation ? "STANDARD" : "TUNNEL");
7831 	if (standard_translation)
7832 		ret = flow_group_to_table(dev->data->port_id, group, table,
7833 					  grp_info, error);
7834 	else
7835 		ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
7836 						      table, error);
7837 
7838 	return ret;
7839 }
7840 
7841 /**
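/*
 * Editor's note: a worked example of the standard translation above, under
 * assumed flag values. For an external transfer rule with fdb_def_rule set
 * and skip_scale clear, group 3 is first multiplied by
 * MLX5_FLOW_TABLE_FACTOR (while below MLX5_MAX_TABLES_EXTERNAL) and then
 * shifted past the e-switch root table, i.e.:
 *
 *	table = 3 * MLX5_FLOW_TABLE_FACTOR + 1;
 *
 * An internal rule (external == 0) with the same group is neither scaled
 * nor shifted and maps to table 3.
 */
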
7842  * Discover availability of metadata reg_c's.
7843  *
7844  * Iteratively use test flows to check availability.
7845  *
7846  * @param[in] dev
7847  *   Pointer to the Ethernet device structure.
7848  *
7849  * @return
7850  *   0 on success, a negative errno value otherwise and rte_errno is set.
7851  */
7852 int
7853 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
7854 {
7855 	struct mlx5_priv *priv = dev->data->dev_private;
7856 	struct mlx5_dev_config *config = &priv->config;
7857 	enum modify_reg idx;
7858 	int n = 0;
7859 
7860 	/* reg_c[0] and reg_c[1] are reserved. */
7861 	config->flow_mreg_c[n++] = REG_C_0;
7862 	config->flow_mreg_c[n++] = REG_C_1;
7863 	/* Discover availability of other reg_c's. */
7864 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
7865 		struct rte_flow_attr attr = {
7866 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
7867 			.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
7868 			.ingress = 1,
7869 		};
7870 		struct rte_flow_item items[] = {
7871 			[0] = {
7872 				.type = RTE_FLOW_ITEM_TYPE_END,
7873 			},
7874 		};
7875 		struct rte_flow_action actions[] = {
7876 			[0] = {
7877 				.type = (enum rte_flow_action_type)
7878 					MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
7879 				.conf = &(struct mlx5_flow_action_copy_mreg){
7880 					.src = REG_C_1,
7881 					.dst = idx,
7882 				},
7883 			},
7884 			[1] = {
7885 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
7886 				.conf = &(struct rte_flow_action_jump){
7887 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
7888 				},
7889 			},
7890 			[2] = {
7891 				.type = RTE_FLOW_ACTION_TYPE_END,
7892 			},
7893 		};
7894 		uint32_t flow_idx;
7895 		struct rte_flow *flow;
7896 		struct rte_flow_error error;
7897 
7898 		if (!config->dv_flow_en)
7899 			break;
7900 		/* Create internal flow, validation skips copy action. */
7901 		flow_idx = flow_list_create(dev, NULL, &attr, items,
7902 					    actions, false, &error);
7903 		flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
7904 				      flow_idx);
7905 		if (!flow)
7906 			continue;
7907 		config->flow_mreg_c[n++] = idx;
7908 		flow_list_destroy(dev, NULL, flow_idx);
7909 	}
7910 	for (; n < MLX5_MREG_C_NUM; ++n)
7911 		config->flow_mreg_c[n] = REG_NON;
7912 	return 0;
7913 }
7914 
7915 int
7916 save_dump_file(const uint8_t *data, uint32_t size,
7917 	uint32_t type, uint32_t id, void *arg, FILE *file)
7918 {
7919 	char line[BUF_SIZE];
7920 	uint32_t out = 0;
7921 	uint32_t k;
7922 	uint32_t actions_num;
7923 	struct rte_flow_query_count *count;
7924 
7925 	memset(line, 0, BUF_SIZE);
7926 	switch (type) {
7927 	case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
7928 		actions_num = *(uint32_t *)(arg);
7929 		out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
7930 				type, id, actions_num);
7931 		break;
7932 	case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
7933 		out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
7934 				type, id);
7935 		break;
7936 	case DR_DUMP_REC_TYPE_PMD_COUNTER:
7937 		count = (struct rte_flow_query_count *)arg;
7938 		fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
7939 				id, count->hits, count->bytes);
7940 		return 0;
7941 	default:
7942 		return -1;
7943 	}
7944 
7945 	for (k = 0; k < size; k++) {
7946 		/* Make sure we do not overrun the line buffer length. */
7947 		if (out >= BUF_SIZE - 4) {
7948 			line[out] = '\0';
7949 			break;
7950 		}
7951 		out += snprintf(line + out, BUF_SIZE - out, "%02x",
7952 				(data[k]) & 0xff);
7953 	}
7954 	fprintf(file, "%s\n", line);
7955 	return 0;
7956 }
7957 
7958 int
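/*
 * Editor's note: illustrative shape of the records emitted above (values
 * are made up). One line per record; modify-header and packet-reformat
 * records carry a trailing hex dump of the action data:
 *
 *	<type>,<id>,<actions_num>,<hex...>    modify header
 *	<type>,<id>,<hex...>                  packet reformat
 *	<type>,<id>,<hits>,<bytes>            counter
 */
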
7959 mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
7960 	struct rte_flow_query_count *count, struct rte_flow_error *error)
7961 {
7962 	struct rte_flow_action action[2];
7963 	enum mlx5_flow_drv_type ftype;
7964 	const struct mlx5_flow_driver_ops *fops;
7965 
7966 	if (!flow) {
7967 		return rte_flow_error_set(error, ENOENT,
7968 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7969 				NULL,
7970 				"invalid flow handle");
7971 	}
7972 	action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
7973 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
7974 	if (flow->counter) {
7975 		memset(count, 0, sizeof(struct rte_flow_query_count));
7976 		ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
7977 		MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
7978 						ftype < MLX5_FLOW_TYPE_MAX);
7979 		fops = flow_get_drv_ops(ftype);
7980 		return fops->query(dev, flow, action, count, error);
7981 	}
7982 	return -1;
7983 }
7984 
7985 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7986 /**
7987  * Dump flow ipool data to file.
7988  *
7989  * @param[in] dev
7990  *   The pointer to Ethernet device.
7991  * @param[in] file
7992  *   A pointer to a file for output.
7993  * @param[out] error
7994  *   Perform verbose error reporting if not NULL. PMDs initialize this
7995  *   structure in case of error only.
7996  * @return
7997  *   0 on success, a negative value otherwise.
7998  */
7999 int
8000 mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
8001 	struct rte_flow *flow, FILE *file,
8002 	struct rte_flow_error *error)
8003 {
8004 	struct mlx5_priv *priv = dev->data->dev_private;
8005 	struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
8006 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
8007 	uint32_t handle_idx;
8008 	struct mlx5_flow_handle *dh;
8009 	struct rte_flow_query_count count;
8010 	uint32_t actions_num;
8011 	const uint8_t *data;
8012 	size_t size;
8013 	uint32_t id;
8014 	uint32_t type;
8015 
8016 	if (!flow) {
8017 		return rte_flow_error_set(error, ENOENT,
8018 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8019 			NULL,
8020 			"invalid flow handle");
8021 	}
8022 	handle_idx = flow->dev_handles;
8023 	while (handle_idx) {
8024 		dh = mlx5_ipool_get(priv->sh->ipool
8025 			[MLX5_IPOOL_MLX5_FLOW], handle_idx);
8026 		if (!dh)
8027 			break; /* a NULL handle would loop forever on "continue" */
8028 		handle_idx = dh->next.next;
8029 		id = (uint32_t)(uintptr_t)dh->drv_flow;
8030 
8031 		/* query counter */
8032 		type = DR_DUMP_REC_TYPE_PMD_COUNTER;
8033 		if (!mlx5_flow_query_counter(dev, flow, &count, error))
8034 			save_dump_file(NULL, 0, type,
8035 					id, (void *)&count, file);
8036 
8037 		/* Get modify_hdr and encap_decap buf from ipools. */
8038 		encap_decap = NULL;
8039 		modify_hdr = dh->dvh.modify_hdr;
8040 
8041 		if (dh->dvh.rix_encap_decap) {
8042 			encap_decap = mlx5_ipool_get(priv->sh->ipool
8043 						[MLX5_IPOOL_DECAP_ENCAP],
8044 						dh->dvh.rix_encap_decap);
8045 		}
8046 		if (modify_hdr) {
8047 			data = (const uint8_t *)modify_hdr->actions;
8048 			size = (size_t)(modify_hdr->actions_num) * 8;
8049 			actions_num = modify_hdr->actions_num;
8050 			type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
8051 			save_dump_file(data, size, type, id,
8052 					(void *)(&actions_num), file);
8053 		}
8054 		if (encap_decap) {
8055 			data = encap_decap->buf;
8056 			size = encap_decap->size;
8057 			type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
8058 			save_dump_file(data, size, type,
8059 						id, NULL, file);
8060 		}
8061 	}
8062 	return 0;
8063 }
8064 #endif
8065 
8066 /**
8067  * Dump flow raw HW data to file.
8068  *
8069  * @param[in] dev
8070  *    The pointer to Ethernet device.
8071  * @param[in] file
8072  *   A pointer to a file for output.
8073  * @param[out] error
8074  *   Perform verbose error reporting if not NULL. PMDs initialize this
8075  *   structure in case of error only.
8076  * @return
8077  *   0 on success, a nagative value otherwise.
8078  */
8079 int
8080 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
8081 		   FILE *file,
8082 		   struct rte_flow_error *error __rte_unused)
8083 {
8084 	struct mlx5_priv *priv = dev->data->dev_private;
8085 	struct mlx5_dev_ctx_shared *sh = priv->sh;
8086 	uint32_t handle_idx;
8087 	int ret;
8088 	struct mlx5_flow_handle *dh;
8089 	struct rte_flow *flow;
8090 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
8091 	uint32_t idx;
8092 #endif
8093 
8094 	if (!priv->config.dv_flow_en) {
8095 		if (fputs("device dv flow disabled\n", file) <= 0)
8096 			return -errno;
8097 		return -ENOTSUP;
8098 	}
8099 
8100 	/* dump all */
8101 	if (!flow_idx) {
8102 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
8103 		ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
8104 						priv->flows, idx, flow, next)
8105 			mlx5_flow_dev_dump_ipool(dev, flow, file, error);
8106 #endif
8107 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
8108 					sh->rx_domain,
8109 					sh->tx_domain, file);
8110 	}
8111 	/* dump one */
8112 	flow = mlx5_ipool_get(priv->sh->ipool
8113 			[MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
8114 	if (!flow)
8115 		return -ENOENT;
8116 
8117 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
8118 	mlx5_flow_dev_dump_ipool(dev, flow, file, error);
8119 #endif
8120 	handle_idx = flow->dev_handles;
8121 	while (handle_idx) {
8122 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8123 				handle_idx);
8124 		if (!dh)
8125 			return -ENOENT;
8126 		if (dh->drv_flow) {
8127 			ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
8128 					file);
8129 			if (ret)
8130 				return -ENOENT;
8131 		}
8132 		handle_idx = dh->next.next;
8133 	}
8134 	return 0;
8135 }
8136 
8137 /**
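/*
 * Editor's note: a hedged usage sketch. Passing a NULL flow pointer to
 * rte_flow_dev_dump() reaches the "dump all" branch above, while a
 * specific handle dumps a single rule. The file path is illustrative.
 *
 *	FILE *f = fopen("/tmp/mlx5_flows.txt", "w");
 *	struct rte_flow_error error;
 *
 *	if (f) {
 *		if (rte_flow_dev_dump(port_id, NULL, f, &error) < 0)
 *			printf("flow dump failed\n");
 *		fclose(f);
 *	}
 */
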
8138  * Get aged-out flows.
8139  *
8140  * @param[in] dev
8141  *   Pointer to the Ethernet device structure.
8142  * @param[in] context
8143  *   The address of an array of pointers to the aged-out flow contexts.
8144  * @param[in] nb_contexts
8145  *   The length of the context array.
8146  * @param[out] error
8147  *   Perform verbose error reporting if not NULL. Initialized in case of
8148  *   error only.
8149  *
8150  * @return
8151  *   The number of contexts retrieved on success, otherwise a negative
8152  *   errno value. If nb_contexts is 0, the total number of aged contexts
8153  *   is returned; otherwise, the number of aged flows reported in the
8154  *   context array is returned.
8155  */
8156 int
8157 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
8158 			uint32_t nb_contexts, struct rte_flow_error *error)
8159 {
8160 	const struct mlx5_flow_driver_ops *fops;
8161 	struct rte_flow_attr attr = { .transfer = 0 };
8162 
8163 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
8164 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8165 		return fops->get_aged_flows(dev, contexts, nb_contexts,
8166 						    error);
8167 	}
8168 	DRV_LOG(ERR,
8169 		"port %u get aged flows is not supported.",
8170 		 dev->data->port_id);
8171 	return -ENOTSUP;
8172 }
8173 
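/*
 * Editor's note: a hedged example of the two-step pattern the contract
 * above allows: call once with nb_contexts == 0 to learn the count, then
 * again with an array to fetch the aged contexts.
 *
 *	struct rte_flow_error error;
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 *	if (n > 0) {
 *		void **ctx = calloc(n, sizeof(*ctx));
 *
 *		if (ctx) {
 *			n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *			// ... destroy the rules behind ctx[0..n-1] ...
 *			free(ctx);
 *		}
 *	}
 */
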
8174 /* Wrapper for driver action_validate op callback */
8175 static int
8176 flow_drv_action_validate(struct rte_eth_dev *dev,
8177 			 const struct rte_flow_indir_action_conf *conf,
8178 			 const struct rte_flow_action *action,
8179 			 const struct mlx5_flow_driver_ops *fops,
8180 			 struct rte_flow_error *error)
8181 {
8182 	static const char err_msg[] = "indirect action validation unsupported";
8183 
8184 	if (!fops->action_validate) {
8185 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
8186 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8187 				   NULL, err_msg);
8188 		return -rte_errno;
8189 	}
8190 	return fops->action_validate(dev, conf, action, error);
8191 }
8192 
8193 /**
8194  * Destroys the shared action by handle.
8195  *
8196  * @param dev
8197  *   Pointer to Ethernet device structure.
8198  * @param[in] handle
8199  *   Handle for the indirect action object to be destroyed.
8200  * @param[out] error
8201  *   Perform verbose error reporting if not NULL. PMDs initialize this
8202  *   structure in case of error only.
8203  *
8204  * @return
8205  *   0 on success, a negative errno value otherwise and rte_errno is set.
8206  *
8207  * @note: wrapper for driver action_destroy op callback.
8208  */
8209 static int
8210 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
8211 			   struct rte_flow_action_handle *handle,
8212 			   struct rte_flow_error *error)
8213 {
8214 	static const char err_msg[] = "indirect action destruction unsupported";
8215 	struct rte_flow_attr attr = { .transfer = 0 };
8216 	const struct mlx5_flow_driver_ops *fops =
8217 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8218 
8219 	if (!fops->action_destroy) {
8220 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
8221 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8222 				   NULL, err_msg);
8223 		return -rte_errno;
8224 	}
8225 	return fops->action_destroy(dev, handle, error);
8226 }
8227 
8228 /* Wrapper for driver action_update op callback */
8229 static int
8230 flow_drv_action_update(struct rte_eth_dev *dev,
8231 		       struct rte_flow_action_handle *handle,
8232 		       const void *update,
8233 		       const struct mlx5_flow_driver_ops *fops,
8234 		       struct rte_flow_error *error)
8235 {
8236 	static const char err_msg[] = "indirect action update unsupported";
8237 
8238 	if (!fops->action_update) {
8239 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
8240 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8241 				   NULL, err_msg);
8242 		return -rte_errno;
8243 	}
8244 	return fops->action_update(dev, handle, update, error);
8245 }
8246 
8247 /* Wrapper for driver action_query op callback */
8248 static int
8249 flow_drv_action_query(struct rte_eth_dev *dev,
8250 		      const struct rte_flow_action_handle *handle,
8251 		      void *data,
8252 		      const struct mlx5_flow_driver_ops *fops,
8253 		      struct rte_flow_error *error)
8254 {
8255 	static const char err_msg[] = "indirect action query unsupported";
8256 
8257 	if (!fops->action_query) {
8258 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
8259 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8260 				   NULL, err_msg);
8261 		return -rte_errno;
8262 	}
8263 	return fops->action_query(dev, handle, data, error);
8264 }
8265 
8266 /**
8267  * Create indirect action for reuse in multiple flow rules.
8268  *
8269  * @param dev
8270  *   Pointer to Ethernet device structure.
8271  * @param conf
8272  *   Pointer to indirect action object configuration.
8273  * @param[in] action
8274  *   Action configuration for indirect action object creation.
8275  * @param[out] error
8276  *   Perform verbose error reporting if not NULL. PMDs initialize this
8277  *   structure in case of error only.
8278  * @return
8279  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
8280  */
8281 static struct rte_flow_action_handle *
8282 mlx5_action_handle_create(struct rte_eth_dev *dev,
8283 			  const struct rte_flow_indir_action_conf *conf,
8284 			  const struct rte_flow_action *action,
8285 			  struct rte_flow_error *error)
8286 {
8287 	static const char err_msg[] = "indirect action creation unsupported";
8288 	struct rte_flow_attr attr = { .transfer = 0 };
8289 	const struct mlx5_flow_driver_ops *fops =
8290 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8291 
8292 	if (flow_drv_action_validate(dev, conf, action, fops, error))
8293 		return NULL;
8294 	if (!fops->action_create) {
8295 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
8296 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8297 				   NULL, err_msg);
8298 		return NULL;
8299 	}
8300 	return fops->action_create(dev, conf, action, error);
8301 }
8302 
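/*
 * Usage sketch for the creation wrapper above, through the public rte_flow
 * API (which dispatches into mlx5_action_handle_create() and
 * mlx5_action_handle_destroy()). Illustrative only; "port_id", the queue
 * list and the RSS parameters below are placeholders, not values the PMD
 * requires:
 *
 *	uint16_t queues[2] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.types = ETH_RSS_IP,
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *handle;
 *
 *	handle = rte_flow_action_handle_create(port_id, &conf, &action, &err);
 *	if (!handle)
 *		printf("indirect action create failed: %s\n",
 *		       err.message ? err.message : "unknown");
 *	...
 *	rte_flow_action_handle_destroy(port_id, handle, &err);
 */
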
8303 /**
8304  * Updates in place the indirect action configuration pointed to by
8305  * *handle* with the configuration provided as the *update* argument.
8306  * The update of the indirect action configuration affects all flow rules
8307  * reusing the action via the handle.
8308  *
8309  * @param dev
8310  *   Pointer to Ethernet device structure.
8311  * @param[in] handle
8312  *   Handle for the indirect action to be updated.
8313  * @param[in] update
8314  *   Action specification used to modify the action pointed to by *handle*.
8315  *   *update* can be of the same type as the action pointed to by the
8316  *   *handle* argument, or some other structure such as a wrapper,
8317  *   depending on the indirect action type.
8318  * @param[out] error
8319  *   Perform verbose error reporting if not NULL. PMDs initialize this
8320  *   structure in case of error only.
8321  *
8322  * @return
8323  *   0 on success, a negative errno value otherwise and rte_errno is set.
8324  */
8325 static int
8326 mlx5_action_handle_update(struct rte_eth_dev *dev,
8327 		struct rte_flow_action_handle *handle,
8328 		const void *update,
8329 		struct rte_flow_error *error)
8330 {
8331 	struct rte_flow_attr attr = { .transfer = 0 };
8332 	const struct mlx5_flow_driver_ops *fops =
8333 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8334 	int ret;
8335 
8336 	ret = flow_drv_action_validate(dev, NULL,
8337 			(const struct rte_flow_action *)update, fops, error);
8338 	if (ret)
8339 		return ret;
8340 	return flow_drv_action_update(dev, handle, update, fops,
8341 				      error);
8342 }
8343 
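/*
 * Usage sketch for the update wrapper above. As the validation cast in
 * mlx5_action_handle_update() shows, this PMD expects *update* to be a
 * struct rte_flow_action wrapping the new configuration. Illustrative
 * only; "port_id", "handle" and the new queue list are placeholders:
 *
 *	uint16_t new_queues[4] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss new_rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.types = ETH_RSS_IP,
 *		.queue_num = 4,
 *		.queue = new_queues,
 *	};
 *	struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &new_rss,
 *	};
 *	struct rte_flow_error err;
 *
 *	rte_flow_action_handle_update(port_id, handle, &update, &err);
 */
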
8344 /**
8345  * Query the indirect action by handle.
8346  *
8347  * This function allows retrieving action-specific data such as counters.
8348  * Data is gathered by a special action which may be present in, or
8349  * referenced by, more than one flow rule definition.
8350  *
8351  * @see RTE_FLOW_ACTION_TYPE_COUNT
8352  *
8353  * @param dev
8354  *   Pointer to Ethernet device structure.
8355  * @param[in] handle
8356  *   Handle for the indirect action to query.
8357  * @param[in, out] data
8358  *   Pointer to storage for the associated query data type.
8359  * @param[out] error
8360  *   Perform verbose error reporting if not NULL. PMDs initialize this
8361  *   structure in case of error only.
8362  *
8363  * @return
8364  *   0 on success, a negative errno value otherwise and rte_errno is set.
8365  */
8366 static int
8367 mlx5_action_handle_query(struct rte_eth_dev *dev,
8368 			 const struct rte_flow_action_handle *handle,
8369 			 void *data,
8370 			 struct rte_flow_error *error)
8371 {
8372 	struct rte_flow_attr attr = { .transfer = 0 };
8373 	const struct mlx5_flow_driver_ops *fops =
8374 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8375 
8376 	return flow_drv_action_query(dev, handle, data, fops, error);
8377 }
8378 
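/*
 * Usage sketch for querying an indirect action through the public API
 * (which lands in mlx5_action_handle_query()). The data buffer type must
 * match the action type; for an indirect COUNT action it is
 * struct rte_flow_query_count. Illustrative only; "port_id" and "handle"
 * are placeholders:
 *
 *	struct rte_flow_query_count cnt = { .reset = 0 };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_action_handle_query(port_id, handle, &cnt, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       cnt.hits, cnt.bytes);
 */
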
8379 /**
8380  * Destroy all indirect actions (shared RSS).
8381  *
8382  * @param dev
8383  *   Pointer to Ethernet device.
8384  *
8385  * @return
8386  *   0 on success, a negative errno value otherwise and rte_errno is set.
8387  */
8388 int
8389 mlx5_action_handle_flush(struct rte_eth_dev *dev)
8390 {
8391 	struct rte_flow_error error;
8392 	struct mlx5_priv *priv = dev->data->dev_private;
8393 	struct mlx5_shared_action_rss *shared_rss;
8394 	int ret = 0;
8395 	uint32_t idx;
8396 
8397 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
8398 		      priv->rss_shared_actions, idx, shared_rss, next) {
8399 		ret |= mlx5_action_handle_destroy(dev,
8400 		       (struct rte_flow_action_handle *)(uintptr_t)idx, &error);
8401 	}
8402 	return ret;
8403 }
8404 
8405 #ifndef HAVE_MLX5DV_DR
8406 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
8407 #else
8408 #define MLX5_DOMAIN_SYNC_FLOW \
8409 	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
8410 #endif
8411 
8412 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
8413 {
8414 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
8415 	const struct mlx5_flow_driver_ops *fops;
8416 	int ret;
8417 	struct rte_flow_attr attr = { .transfer = 0 };
8418 
8419 	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8420 	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
8421 	if (ret > 0)
8422 		ret = -ret;
8423 	return ret;
8424 }
8425 
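/*
 * Usage sketch for the mlx5-specific synchronization entry point above.
 * Illustrative only; a real application should verify the port is an mlx5
 * device before calling PMD-specific APIs:
 *
 *	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_NIC_TX |
 *			   MLX5_DOMAIN_BIT_FDB;
 *
 *	if (rte_pmd_mlx5_sync_flow(port_id, domains) < 0)
 *		printf("flow cache sync failed on port %u\n", port_id);
 */
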
8426 const struct mlx5_flow_tunnel *
8427 mlx5_get_tof(const struct rte_flow_item *item,
8428 	     const struct rte_flow_action *action,
8429 	     enum mlx5_tof_rule_type *rule_type)
8430 {
8431 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
8432 		if (item->type == (typeof(item->type))
8433 				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
8434 			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
8435 			return flow_items_to_tunnel(item);
8436 		}
8437 	}
8438 	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
8439 		if (action->type == (typeof(action->type))
8440 				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
8441 			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
8442 			return flow_actions_to_tunnel(action);
8443 		}
8444 	}
8445 	return NULL;
8446 }
8447 
8448 /**
8449  * Tunnel offload functionality is defined for the DV environment only.
8450  */
8451 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
8452 __extension__
8453 union tunnel_offload_mark {
8454 	uint32_t val;
8455 	struct {
8456 		uint32_t app_reserve:8;
8457 		uint32_t table_id:15;
8458 		uint32_t transfer:1;
8459 		uint32_t _unused_:8;
8460 	};
8461 };
8462 
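/*
 * Sketch of how a restored mark value maps onto the union above; the mark
 * is written by the tunnel offload miss rules and read back in
 * tunnel_mark_decode(). "m" is a placeholder mbuf:
 *
 *	union tunnel_offload_mark mbits = { .val = m->hash.fdir.hi };
 *
 * where mbits.app_reserve holds the 8 bits left for application mark
 * values, mbits.table_id the 15-bit tunnel flow table identifier, and
 * mbits.transfer a single bit set for FDB (transfer) tables.
 */
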
8463 static bool
8464 mlx5_access_tunnel_offload_db
8465 	(struct rte_eth_dev *dev,
8466 	 bool (*match)(struct rte_eth_dev *,
8467 		       struct mlx5_flow_tunnel *, const void *),
8468 	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
8469 	 void (*miss)(struct rte_eth_dev *, void *),
8470 	 void *ctx, bool lock_op);
8471 
8472 static int
8473 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
8474 			     struct rte_flow *flow,
8475 			     const struct rte_flow_attr *attr,
8476 			     const struct rte_flow_action *app_actions,
8477 			     uint32_t flow_idx,
8478 			     const struct mlx5_flow_tunnel *tunnel,
8479 			     struct tunnel_default_miss_ctx *ctx,
8480 			     struct rte_flow_error *error)
8481 {
8482 	struct mlx5_priv *priv = dev->data->dev_private;
8483 	struct mlx5_flow *dev_flow;
8484 	struct rte_flow_attr miss_attr = *attr;
8485 	const struct rte_flow_item miss_items[2] = {
8486 		{
8487 			.type = RTE_FLOW_ITEM_TYPE_ETH,
8488 			.spec = NULL,
8489 			.last = NULL,
8490 			.mask = NULL
8491 		},
8492 		{
8493 			.type = RTE_FLOW_ITEM_TYPE_END,
8494 			.spec = NULL,
8495 			.last = NULL,
8496 			.mask = NULL
8497 		}
8498 	};
8499 	union tunnel_offload_mark mark_id;
8500 	struct rte_flow_action_mark miss_mark;
8501 	struct rte_flow_action miss_actions[3] = {
8502 		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
8503 		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
8504 	};
8505 	const struct rte_flow_action_jump *jump_data;
8506 	uint32_t i, flow_table = 0; /* prevent compilation warning */
8507 	struct flow_grp_info grp_info = {
8508 		.external = 1,
8509 		.transfer = attr->transfer,
8510 		.fdb_def_rule = !!priv->fdb_def_rule,
8511 		.std_tbl_fix = 0,
8512 	};
8513 	int ret;
8514 
8515 	if (!attr->transfer) {
8516 		uint32_t q_size;
8517 
8518 		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
8519 		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
8520 		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
8521 					 0, SOCKET_ID_ANY);
8522 		if (!ctx->queue)
8523 			return rte_flow_error_set
8524 				(error, ENOMEM,
8525 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
8526 				NULL, "invalid default miss RSS");
8527 		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
8528 		ctx->action_rss.level = 0;
8529 		ctx->action_rss.types = priv->rss_conf.rss_hf;
8530 		ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
8531 		ctx->action_rss.queue_num = priv->reta_idx_n;
8532 		ctx->action_rss.key = priv->rss_conf.rss_key;
8533 		ctx->action_rss.queue = ctx->queue;
8534 		if (!priv->reta_idx_n || !priv->rxqs_n)
8535 			return rte_flow_error_set
8536 				(error, EINVAL,
8537 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
8538 				NULL, "invalid port configuration");
8539 		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
8540 			ctx->action_rss.types = 0;
8541 		for (i = 0; i != priv->reta_idx_n; ++i)
8542 			ctx->queue[i] = (*priv->reta_idx)[i];
8543 	} else {
8544 		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
8545 		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
8546 	}
8547 	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
8548 	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
8549 	jump_data = app_actions->conf;
8550 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
8551 	miss_attr.group = jump_data->group;
8552 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
8553 				       &flow_table, &grp_info, error);
8554 	if (ret)
8555 		return rte_flow_error_set(error, EINVAL,
8556 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
8557 					  NULL, "invalid tunnel id");
8558 	mark_id.app_reserve = 0;
8559 	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
8560 	mark_id.transfer = !!attr->transfer;
8561 	mark_id._unused_ = 0;
8562 	miss_mark.id = mark_id.val;
8563 	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
8564 				    miss_items, miss_actions, flow_idx, error);
8565 	if (!dev_flow)
8566 		return -rte_errno;
8567 	dev_flow->flow = flow;
8568 	dev_flow->external = true;
8569 	dev_flow->tunnel = tunnel;
8570 	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
8571 	/* A subflow object was created; it must be included in the list. */
8572 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
8573 		      dev_flow->handle, next);
8574 	DRV_LOG(DEBUG,
8575 		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
8576 		dev->data->port_id, tunnel->app_tunnel.type,
8577 		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
8578 	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
8579 				  miss_actions, error);
8580 	if (!ret)
8581 		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
8582 						  error);
8583 
8584 	return ret;
8585 }
8586 
8587 static const struct mlx5_flow_tbl_data_entry  *
8588 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
8589 {
8590 	struct mlx5_priv *priv = dev->data->dev_private;
8591 	struct mlx5_dev_ctx_shared *sh = priv->sh;
8592 	struct mlx5_hlist_entry *he;
8593 	union tunnel_offload_mark mbits = { .val = mark };
8594 	union mlx5_flow_tbl_key table_key = {
8595 		{
8596 			.level = tunnel_id_to_flow_tbl(mbits.table_id),
8597 			.id = 0,
8598 			.reserved = 0,
8599 			.dummy = 0,
8600 			.is_fdb = !!mbits.transfer,
8601 			.is_egress = 0,
8602 		}
8603 	};
8604 	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
8605 	return he ?
8606 	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
8607 }
8608 
8609 static void
8610 mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
8611 				   struct mlx5_hlist_entry *entry)
8612 {
8613 	struct mlx5_dev_ctx_shared *sh = list->ctx;
8614 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
8615 
8616 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8617 			tunnel_flow_tbl_to_id(tte->flow_table));
8618 	mlx5_free(tte);
8619 }
8620 
8621 static int
8622 mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
8623 				  struct mlx5_hlist_entry *entry,
8624 				  uint64_t key, void *cb_ctx __rte_unused)
8625 {
8626 	union tunnel_tbl_key tbl = {
8627 		.val = key,
8628 	};
8629 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
8630 
8631 	return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
8632 }
8633 
8634 static struct mlx5_hlist_entry *
8635 mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
8636 				   void *ctx __rte_unused)
8637 {
8638 	struct mlx5_dev_ctx_shared *sh = list->ctx;
8639 	struct tunnel_tbl_entry *tte;
8640 	union tunnel_tbl_key tbl = {
8641 		.val = key,
8642 	};
8643 
8644 	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
8645 			  sizeof(*tte), 0,
8646 			  SOCKET_ID_ANY);
8647 	if (!tte)
8648 		goto err;
8649 	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8650 			  &tte->flow_table);
8651 	if (tte->flow_table >= MLX5_MAX_TABLES) {
8652 		DRV_LOG(ERR, "Tunnel TBL ID %u exceeds the maximum limit.",
8653 			tte->flow_table);
8654 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8655 				tte->flow_table);
8656 		goto err;
8657 	} else if (!tte->flow_table) {
8658 		goto err;
8659 	}
8660 	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
8661 	tte->tunnel_id = tbl.tunnel_id;
8662 	tte->group = tbl.group;
8663 	return &tte->hash;
8664 err:
8665 	if (tte)
8666 		mlx5_free(tte);
8667 	return NULL;
8668 }
8669 
8670 static uint32_t
8671 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
8672 				const struct mlx5_flow_tunnel *tunnel,
8673 				uint32_t group, uint32_t *table,
8674 				struct rte_flow_error *error)
8675 {
8676 	struct mlx5_hlist_entry *he;
8677 	struct tunnel_tbl_entry *tte;
8678 	union tunnel_tbl_key key = {
8679 		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
8680 		.group = group
8681 	};
8682 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
8683 	struct mlx5_hlist *group_hash;
8684 
8685 	group_hash = tunnel ? tunnel->groups : thub->groups;
8686 	he = mlx5_hlist_register(group_hash, key.val, NULL);
8687 	if (!he)
8688 		return rte_flow_error_set(error, EINVAL,
8689 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
8690 					  NULL,
8691 					  "tunnel group index not supported");
8692 	tte = container_of(he, typeof(*tte), hash);
8693 	*table = tte->flow_table;
8694 	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
8695 		dev->data->port_id, key.tunnel_id, group, *table);
8696 	return 0;
8697 }
8698 
8699 static void
8700 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
8701 		      struct mlx5_flow_tunnel *tunnel)
8702 {
8703 	struct mlx5_priv *priv = dev->data->dev_private;
8704 	struct mlx5_indexed_pool *ipool;
8705 
8706 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
8707 		dev->data->port_id, tunnel->tunnel_id);
8708 	LIST_REMOVE(tunnel, chain);
8709 	mlx5_hlist_destroy(tunnel->groups);
8710 	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
8711 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
8712 }
8713 
8714 static bool
8715 mlx5_access_tunnel_offload_db
8716 	(struct rte_eth_dev *dev,
8717 	 bool (*match)(struct rte_eth_dev *,
8718 		       struct mlx5_flow_tunnel *, const void *),
8719 	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
8720 	 void (*miss)(struct rte_eth_dev *, void *),
8721 	 void *ctx, bool lock_op)
8722 {
8723 	bool verdict = false;
8724 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
8725 	struct mlx5_flow_tunnel *tunnel;
8726 
8727 	rte_spinlock_lock(&thub->sl);
8728 	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
8729 		verdict = match(dev, tunnel, (const void *)ctx);
8730 		if (verdict)
8731 			break;
8732 	}
8733 	if (!lock_op)
8734 		rte_spinlock_unlock(&thub->sl);
8735 	if (verdict && hit)
8736 		hit(dev, tunnel, ctx);
8737 	if (!verdict && miss)
8738 		miss(dev, ctx);
8739 	if (lock_op)
8740 		rte_spinlock_unlock(&thub->sl);
8741 
8742 	return verdict;
8743 }
8744 
8745 struct tunnel_db_find_tunnel_id_ctx {
8746 	uint32_t tunnel_id;
8747 	struct mlx5_flow_tunnel *tunnel;
8748 };
8749 
8750 static bool
8751 find_tunnel_id_match(struct rte_eth_dev *dev,
8752 		     struct mlx5_flow_tunnel *tunnel, const void *x)
8753 {
8754 	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
8755 
8756 	RTE_SET_USED(dev);
8757 	return tunnel->tunnel_id == ctx->tunnel_id;
8758 }
8759 
8760 static void
8761 find_tunnel_id_hit(struct rte_eth_dev *dev,
8762 		   struct mlx5_flow_tunnel *tunnel, void *x)
8763 {
8764 	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
8765 	RTE_SET_USED(dev);
8766 	ctx->tunnel = tunnel;
8767 }
8768 
8769 static struct mlx5_flow_tunnel *
8770 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
8771 {
8772 	struct tunnel_db_find_tunnel_id_ctx ctx = {
8773 		.tunnel_id = id,
8774 	};
8775 
8776 	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
8777 				      find_tunnel_id_hit, NULL, &ctx, true);
8778 
8779 	return ctx.tunnel;
8780 }
8781 
8782 static struct mlx5_flow_tunnel *
8783 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
8784 			  const struct rte_flow_tunnel *app_tunnel)
8785 {
8786 	struct mlx5_priv *priv = dev->data->dev_private;
8787 	struct mlx5_indexed_pool *ipool;
8788 	struct mlx5_flow_tunnel *tunnel;
8789 	uint32_t id;
8790 
8791 	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
8792 	tunnel = mlx5_ipool_zmalloc(ipool, &id);
8793 	if (!tunnel)
8794 		return NULL;
8795 	if (id >= MLX5_MAX_TUNNELS) {
8796 		mlx5_ipool_free(ipool, id);
8797 		DRV_LOG(ERR, "Tunnel ID %u exceeds the maximum limit.", id);
8798 		return NULL;
8799 	}
8800 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
8801 					   mlx5_flow_tunnel_grp2tbl_create_cb,
8802 					   mlx5_flow_tunnel_grp2tbl_match_cb,
8803 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
8804 	if (!tunnel->groups) {
8805 		mlx5_ipool_free(ipool, id);
8806 		return NULL;
8807 	}
8808 	tunnel->groups->ctx = priv->sh;
8809 	/* Initialize the new PMD tunnel. */
8810 	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
8811 	tunnel->tunnel_id = id;
8812 	tunnel->action.type = (typeof(tunnel->action.type))
8813 			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
8814 	tunnel->action.conf = tunnel;
8815 	tunnel->item.type = (typeof(tunnel->item.type))
8816 			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
8817 	tunnel->item.spec = tunnel;
8818 	tunnel->item.last = NULL;
8819 	tunnel->item.mask = NULL;
8820 
8821 	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
8822 		dev->data->port_id, tunnel->tunnel_id);
8823 
8824 	return tunnel;
8825 }
8826 
8827 struct tunnel_db_get_tunnel_ctx {
8828 	const struct rte_flow_tunnel *app_tunnel;
8829 	struct mlx5_flow_tunnel *tunnel;
8830 };
8831 
8832 static bool get_tunnel_match(struct rte_eth_dev *dev,
8833 			     struct mlx5_flow_tunnel *tunnel, const void *x)
8834 {
8835 	const struct tunnel_db_get_tunnel_ctx *ctx = x;
8836 
8837 	RTE_SET_USED(dev);
8838 	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
8839 		       sizeof(*ctx->app_tunnel));
8840 }
8841 
8842 static void get_tunnel_hit(struct rte_eth_dev *dev,
8843 			   struct mlx5_flow_tunnel *tunnel, void *x)
8844 {
8845 	/* called under tunnel spinlock protection */
8846 	struct tunnel_db_get_tunnel_ctx *ctx = x;
8847 
8848 	RTE_SET_USED(dev);
8849 	tunnel->refctn++;
8850 	ctx->tunnel = tunnel;
8851 }
8852 
8853 static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
8854 {
8855 	/* called under tunnel spinlock protection */
8856 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
8857 	struct tunnel_db_get_tunnel_ctx *ctx = x;
8858 
8859 	rte_spinlock_unlock(&thub->sl);
8860 	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
8861 	rte_spinlock_lock(&thub->sl);
8862 	if (ctx->tunnel) {
8863 		ctx->tunnel->refctn = 1;
8864 		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
8865 	}
8866 }
8867 
8868 
8869 static int
8870 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
8871 		     const struct rte_flow_tunnel *app_tunnel,
8872 		     struct mlx5_flow_tunnel **tunnel)
8873 {
8874 	struct tunnel_db_get_tunnel_ctx ctx = {
8875 		.app_tunnel = app_tunnel,
8876 	};
8877 
8878 	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
8879 				      get_tunnel_miss, &ctx, true);
8880 	*tunnel = ctx.tunnel;
8881 	return ctx.tunnel ? 0 : -ENOMEM;
8882 }
8883 
8884 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
8885 {
8886 	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8887 
8888 	if (!thub)
8889 		return;
8890 	if (!LIST_EMPTY(&thub->tunnels))
8891 		DRV_LOG(WARNING, "port %u tunnels present", port_id);
8892 	mlx5_hlist_destroy(thub->groups);
8893 	mlx5_free(thub);
8894 }
8895 
8896 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
8897 {
8898 	int err;
8899 	struct mlx5_flow_tunnel_hub *thub;
8900 
8901 	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
8902 			   0, SOCKET_ID_ANY);
8903 	if (!thub)
8904 		return -ENOMEM;
8905 	LIST_INIT(&thub->tunnels);
8906 	rte_spinlock_init(&thub->sl);
8907 	thub->groups = mlx5_hlist_create("flow groups",
8908 					 rte_align32pow2(MLX5_MAX_TABLES), 0,
8909 					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
8910 					 mlx5_flow_tunnel_grp2tbl_match_cb,
8911 					 mlx5_flow_tunnel_grp2tbl_remove_cb);
8912 	if (!thub->groups) {
8913 		err = -rte_errno;
8914 		goto err;
8915 	}
8916 	thub->groups->ctx = sh;
8917 	sh->tunnel_hub = thub;
8918 
8919 	return 0;
8920 
8921 err:
8922 	if (thub->groups)
8923 		mlx5_hlist_destroy(thub->groups);
8924 	mlx5_free(thub);
8926 	return err;
8927 }
8928 
8929 static inline bool
8930 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
8931 			  struct rte_flow_tunnel *tunnel,
8932 			  const char **err_msg)
8933 {
8934 	*err_msg = NULL;
8935 	if (!is_tunnel_offload_active(dev)) {
8936 		*err_msg = "tunnel offload was not activated";
8937 		goto out;
8938 	} else if (!tunnel) {
8939 		*err_msg = "no application tunnel";
8940 		goto out;
8941 	}
8942 
8943 	switch (tunnel->type) {
8944 	default:
8945 		*err_msg = "unsupported tunnel type";
8946 		goto out;
8947 	case RTE_FLOW_ITEM_TYPE_VXLAN:
8948 		break;
8949 	}
8950 
8951 out:
8952 	return !*err_msg;
8953 }
8954 
8955 static int
8956 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
8957 		    struct rte_flow_tunnel *app_tunnel,
8958 		    struct rte_flow_action **actions,
8959 		    uint32_t *num_of_actions,
8960 		    struct rte_flow_error *error)
8961 {
8962 	int ret;
8963 	struct mlx5_flow_tunnel *tunnel;
8964 	const char *err_msg = NULL;
8965 	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
8966 
8967 	if (!verdict)
8968 		return rte_flow_error_set(error, EINVAL,
8969 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
8970 					  err_msg);
8971 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
8972 	if (ret < 0) {
8973 		return rte_flow_error_set(error, -ret,
8974 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
8975 					  "failed to initialize pmd tunnel");
8976 	}
8977 	*actions = &tunnel->action;
8978 	*num_of_actions = 1;
8979 	return 0;
8980 }
8981 
8982 static int
8983 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
8984 		       struct rte_flow_tunnel *app_tunnel,
8985 		       struct rte_flow_item **items,
8986 		       uint32_t *num_of_items,
8987 		       struct rte_flow_error *error)
8988 {
8989 	int ret;
8990 	struct mlx5_flow_tunnel *tunnel;
8991 	const char *err_msg = NULL;
8992 	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
8993 
8994 	if (!verdict)
8995 		return rte_flow_error_set(error, EINVAL,
8996 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
8997 					  err_msg);
8998 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
8999 	if (ret < 0) {
9000 		return rte_flow_error_set(error, -ret,
9001 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
9002 					  "failed to initialize pmd tunnel");
9003 	}
9004 	*items = &tunnel->item;
9005 	*num_of_items = 1;
9006 	return 0;
9007 }
9008 
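/*
 * Usage sketch for the two helpers above through the generic rte_flow
 * tunnel API. Illustrative only; rule creation is elided and the VXLAN
 * tunnel description is an assumption:
 *
 *	struct rte_flow_tunnel tun = { .type = RTE_FLOW_ITEM_TYPE_VXLAN };
 *	struct rte_flow_action *pmd_actions;
 *	struct rte_flow_item *pmd_items;
 *	uint32_t n_actions, n_items;
 *	struct rte_flow_error err;
 *
 *	rte_flow_tunnel_decap_set(port_id, &tun, &pmd_actions,
 *				  &n_actions, &err);
 *	rte_flow_tunnel_match(port_id, &tun, &pmd_items, &n_items, &err);
 *	... prepend pmd_actions/pmd_items to the application rules ...
 *	rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
 *					     n_actions, &err);
 *	rte_flow_tunnel_item_release(port_id, pmd_items, n_items, &err);
 */
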
9009 struct tunnel_db_element_release_ctx {
9010 	struct rte_flow_item *items;
9011 	struct rte_flow_action *actions;
9012 	uint32_t num_elements;
9013 	struct rte_flow_error *error;
9014 	int ret;
9015 };
9016 
9017 static bool
9018 tunnel_element_release_match(struct rte_eth_dev *dev,
9019 			     struct mlx5_flow_tunnel *tunnel, const void *x)
9020 {
9021 	const struct tunnel_db_element_release_ctx *ctx = x;
9022 
9023 	RTE_SET_USED(dev);
9024 	if (ctx->num_elements != 1)
9025 		return false;
9026 	else if (ctx->items)
9027 		return ctx->items == &tunnel->item;
9028 	else if (ctx->actions)
9029 		return ctx->actions == &tunnel->action;
9030 
9031 	return false;
9032 }
9033 
9034 static void
9035 tunnel_element_release_hit(struct rte_eth_dev *dev,
9036 			   struct mlx5_flow_tunnel *tunnel, void *x)
9037 {
9038 	struct tunnel_db_element_release_ctx *ctx = x;
9039 	ctx->ret = 0;
9040 	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
9041 		mlx5_flow_tunnel_free(dev, tunnel);
9042 }
9043 
9044 static void
9045 tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
9046 {
9047 	struct tunnel_db_element_release_ctx *ctx = x;
9048 	RTE_SET_USED(dev);
9049 	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
9050 				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
9051 				      "invalid argument");
9052 }
9053 
9054 static int
9055 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
9056 		       struct rte_flow_item *pmd_items,
9057 		       uint32_t num_items, struct rte_flow_error *err)
9058 {
9059 	struct tunnel_db_element_release_ctx ctx = {
9060 		.items = pmd_items,
9061 		.actions = NULL,
9062 		.num_elements = num_items,
9063 		.error = err,
9064 	};
9065 
9066 	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
9067 				      tunnel_element_release_hit,
9068 				      tunnel_element_release_miss, &ctx, false);
9069 
9070 	return ctx.ret;
9071 }
9072 
9073 static int
9074 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
9075 			 struct rte_flow_action *pmd_actions,
9076 			 uint32_t num_actions, struct rte_flow_error *err)
9077 {
9078 	struct tunnel_db_element_release_ctx ctx = {
9079 		.items = NULL,
9080 		.actions = pmd_actions,
9081 		.num_elements = num_actions,
9082 		.error = err,
9083 	};
9084 
9085 	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
9086 				      tunnel_element_release_hit,
9087 				      tunnel_element_release_miss, &ctx, false);
9088 
9089 	return ctx.ret;
9090 }
9091 
9092 static int
9093 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
9094 				  struct rte_mbuf *m,
9095 				  struct rte_flow_restore_info *info,
9096 				  struct rte_flow_error *err)
9097 {
9098 	uint64_t ol_flags = m->ol_flags;
9099 	const struct mlx5_flow_tbl_data_entry *tble;
9100 	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
9101 
9102 	if (!is_tunnel_offload_active(dev)) {
9103 		info->flags = 0;
9104 		return 0;
9105 	}
9106 
9107 	if ((ol_flags & mask) != mask)
9108 		goto err;
9109 	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
9110 	if (!tble) {
9111 		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
9112 			dev->data->port_id, m->hash.fdir.hi);
9113 		goto err;
9114 	}
9115 	MLX5_ASSERT(tble->tunnel);
9116 	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
9117 	info->group_id = tble->group_id;
9118 	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
9119 		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
9120 		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
9121 
9122 	return 0;
9123 
9124 err:
9125 	return rte_flow_error_set(err, EINVAL,
9126 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9127 				  "failed to get restore info");
9128 }
9129 
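/*
 * Usage sketch for restoring tunnel metadata on a packet received after a
 * tunnel offload miss, via the generic API that reaches the helper above.
 * Illustrative only; handle_tunnel_packet() is a hypothetical application
 * callback:
 *
 *	struct rte_flow_restore_info info;
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_get_restore_info(port_id, m, &info, &err) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL))
 *		handle_tunnel_packet(m, &info);
 */
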
9130 #else /* HAVE_IBV_FLOW_DV_SUPPORT */
9131 static int
9132 mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
9133 			   __rte_unused struct rte_flow_tunnel *app_tunnel,
9134 			   __rte_unused struct rte_flow_action **actions,
9135 			   __rte_unused uint32_t *num_of_actions,
9136 			   __rte_unused struct rte_flow_error *error)
9137 {
9138 	return -ENOTSUP;
9139 }
9140 
9141 static int
9142 mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
9143 		       __rte_unused struct rte_flow_tunnel *app_tunnel,
9144 		       __rte_unused struct rte_flow_item **items,
9145 		       __rte_unused uint32_t *num_of_items,
9146 		       __rte_unused struct rte_flow_error *error)
9147 {
9148 	return -ENOTSUP;
9149 }
9150 
9151 static int
9152 mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
9153 			      __rte_unused struct rte_flow_item *pmd_items,
9154 			      __rte_unused uint32_t num_items,
9155 			      __rte_unused struct rte_flow_error *err)
9156 {
9157 	return -ENOTSUP;
9158 }
9159 
9160 static int
9161 mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
9162 				__rte_unused struct rte_flow_action *pmd_action,
9163 				__rte_unused uint32_t num_actions,
9164 				__rte_unused struct rte_flow_error *err)
9165 {
9166 	return -ENOTSUP;
9167 }
9168 
9169 static int
9170 mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
9171 				  __rte_unused struct rte_mbuf *m,
9172 				  __rte_unused struct rte_flow_restore_info *i,
9173 				  __rte_unused struct rte_flow_error *err)
9174 {
9175 	return -ENOTSUP;
9176 }
9177 
9178 static int
9179 flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
9180 			     __rte_unused struct rte_flow *flow,
9181 			     __rte_unused const struct rte_flow_attr *attr,
9182 			     __rte_unused const struct rte_flow_action *actions,
9183 			     __rte_unused uint32_t flow_idx,
9184 			     __rte_unused const struct mlx5_flow_tunnel *tunnel,
9185 			     __rte_unused struct tunnel_default_miss_ctx *ctx,
9186 			     __rte_unused struct rte_flow_error *error)
9187 {
9188 	return -ENOTSUP;
9189 }
9190 
9191 static struct mlx5_flow_tunnel *
9192 mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
9193 		    __rte_unused uint32_t id)
9194 {
9195 	return NULL;
9196 }
9197 
9198 static void
9199 mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
9200 		      __rte_unused struct mlx5_flow_tunnel *tunnel)
9201 {
9202 }
9203 
9204 static uint32_t
9205 tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
9206 				__rte_unused const struct mlx5_flow_tunnel *t,
9207 				__rte_unused uint32_t group,
9208 				__rte_unused uint32_t *table,
9209 				struct rte_flow_error *error)
9210 {
9211 	return rte_flow_error_set(error, ENOTSUP,
9212 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9213 				  "tunnel offload requires DV support");
9214 }
9215 
9216 void
9217 mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
9218 			__rte_unused  uint16_t port_id)
9219 {
9220 }
9221 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
9222 
9223 static void
9224 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
9225 {
9226 	int ret;
9227 	struct rte_flow_error error;
9228 
9229 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
9230 		char *item_name;
9231 		ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
9232 				    sizeof(item_name),
9233 				    (void *)(uintptr_t)item->type, &error);
9234 		if (ret > 0)
9235 			printf("%s ", item_name);
9236 		else
9237 			printf("%d\n", (int)item->type);
9238 	}
9239 	printf("END\n");
9240 }
9241