/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static const struct mlx5_flow_tbl_data_entry *
tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
		     const struct rte_flow_tunnel *app_tunnel,
		     struct mlx5_flow_tunnel **tunnel);


/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
	(const int []){ \
		__VA_ARGS__, 0, \
	}
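
/*
 * For illustration, a node list such as
 * MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6)
 * expands to the compound literal
 *   (const int []){ MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, 0, }
 * i.e. a zero-terminated array of node indexes, which is why index 0 is
 * interpreted as the list terminator below.
 */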

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
	const int *const next;
	/**<
	 * List of next node indexes. Index 0 is interpreted as a terminator.
	 */
	const enum rte_flow_item_type type;
	/**< Pattern item type of current node. */
	uint64_t rss_types;
	/**<
	 * RSS types bit-field associated with this node
	 * (see ETH_RSS_* definitions).
	 */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
	uint32_t entries;
	/**< Number of valid entries in the @p entry[] array below. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

/**
 * Infer the item type that logically follows the last user item, based on a
 * fully-masked spec field (ethertype or IP next protocol), so that RSS
 * expansion can continue from a complete pattern.
 */
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
						(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
				(item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
						(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
				(item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					(item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				(item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
						(item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				(item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}
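
/*
 * Example (illustrative only): given a fully-masked Ethernet item,
 *
 * @code{.c}
 * struct rte_flow_item_eth spec = { .type = RTE_BE16(RTE_ETHER_TYPE_IPV4) };
 * struct rte_flow_item_eth mask = { .type = RTE_BE16(0xffff) };
 * struct rte_flow_item item = {
 *	.type = RTE_FLOW_ITEM_TYPE_ETH,
 *	.spec = &spec,
 *	.mask = &mask,
 * };
 * @endcode
 *
 * mlx5_flow_expand_rss_item_complete(&item) returns
 * RTE_FLOW_ITEM_TYPE_IPV4, telling the expansion code which item type
 * logically follows the last user-provided item.
 */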

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the resulting expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set. The following errors are defined:
 *
 *   -E2BIG: the expansion graph @p graph is too deep.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
		     const struct rte_flow_item *pattern, uint64_t types,
		     const struct mlx5_flow_expand_node graph[],
		     int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct mlx5_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check whether the last valid item has a spec set; if so, the
	 * pattern needs to be completed and can then be used for expansion.
	 */
	missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy it.
			 * When stack_pos is 0, there is one element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
					missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			/*
			 * Keep room in flow_items[] for this level's item
			 * plus the trailing END entry, so neither stack[]
			 * nor flow_items[] can overflow.
			 */
			if (stack_pos + 2 >= elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[++stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/*
	 * No expanded flows, but we have a missed item; create one rule
	 * for it.
	 */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	return lsize;
}
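
/*
 * Minimal usage sketch (illustrative; buffer sizing simplified, the
 * create_one_flow() helper and the pattern/rss/priority variables are
 * hypothetical). The caller provides a flat buffer; entry[0] always holds
 * the original user pattern and subsequent entries hold the expanded
 * patterns with their priority offsets:
 *
 * @code{.c}
 * uint8_t raw[4096];
 * struct mlx5_flow_expand_rss *exp_buf = (void *)raw;
 * int ret = mlx5_flow_expand_rss(exp_buf, sizeof(raw), pattern, rss->types,
 *				  mlx5_support_expansion,
 *				  MLX5_EXPANSION_ROOT);
 * if (ret > 0 && (size_t)ret <= sizeof(raw)) {
 *	for (uint32_t i = 0; i < exp_buf->entries; ++i)
 *		create_one_flow(exp_buf->entry[i].pattern,
 *				priority + exp_buf->entry[i].priority);
 * }
 * @endcode
 */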

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						  MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
						(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						  MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						  MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
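
/*
 * Expansion walk-through (illustrative): with the graph above, a user
 * pattern of ETH / IPV4 / END and RSS types including
 * ETH_RSS_NONFRAG_IPV4_UDP and ETH_RSS_NONFRAG_IPV4_TCP, starting from
 * MLX5_EXPANSION_ROOT, yields the original pattern plus the expanded
 * patterns ETH / IPV4 / UDP / END and ETH / IPV4 / TCP / END, each tagged
 * with a priority offset the driver uses to order the resulting sub-flows.
 */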

static struct rte_flow_shared_action *
mlx5_shared_action_create(struct rte_eth_dev *dev,
			  const struct rte_flow_shared_action_conf *conf,
			  const struct rte_flow_action *action,
			  struct rte_flow_error *error);
static int mlx5_shared_action_destroy
				(struct rte_eth_dev *dev,
				 struct rte_flow_shared_action *shared_action,
				 struct rte_flow_error *error);
static int mlx5_shared_action_update
				(struct rte_eth_dev *dev,
				 struct rte_flow_shared_action *shared_action,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error);
static int mlx5_shared_action_query
				(struct rte_eth_dev *dev,
				 const struct rte_flow_shared_action *action,
				 void *data,
				 struct rte_flow_error *error);

/*
 * Validate an application tunnel descriptor; on failure, the reason is
 * returned to the caller through @p err_msg.
 */
static inline bool
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
			  struct rte_flow_tunnel *tunnel,
			  const char **err_msg)
{
	*err_msg = NULL;
	if (!is_tunnel_offload_active(dev)) {
		*err_msg = "tunnel offload was not activated";
		goto out;
	} else if (!tunnel) {
		*err_msg = "no application tunnel";
		goto out;
	}

	switch (tunnel->type) {
	default:
		*err_msg = "unsupported tunnel type";
		goto out;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		break;
	}

out:
	return !*err_msg;
}


static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
		    struct rte_flow_tunnel *app_tunnel,
		    struct rte_flow_action **actions,
		    uint32_t *num_of_actions,
		    struct rte_flow_error *error)
{
	int ret;
	struct mlx5_flow_tunnel *tunnel;
	const char *err_msg = NULL;
	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

	if (!verdict)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  err_msg);
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		return rte_flow_error_set(error, ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "failed to initialize pmd tunnel");
	}
	*actions = &tunnel->action;
	*num_of_actions = 1;
	return 0;
}
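
/*
 * Application-side usage sketch for the callback above (illustrative;
 * error handling trimmed, port_id assumed valid). The PMD hands back a
 * single pre-built tunnel-set action that the application prepends to its
 * own actions when creating the tunnel-offload rule:
 *
 * @code{.c}
 * struct rte_flow_tunnel tunnel = {
 *	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *	.tun_id = 42,
 * };
 * struct rte_flow_action *pmd_actions;
 * uint32_t num_of_actions;
 * struct rte_flow_error error;
 *
 * if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
 *				 &num_of_actions, &error) == 0) {
 *	// Prepend pmd_actions[0..num_of_actions - 1] to the flow actions.
 * }
 * @endcode
 */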
636 
637 static int
638 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
639 		       struct rte_flow_tunnel *app_tunnel,
640 		       struct rte_flow_item **items,
641 		       uint32_t *num_of_items,
642 		       struct rte_flow_error *error)
643 {
644 	int ret;
645 	struct mlx5_flow_tunnel *tunnel;
646 	const char *err_msg = NULL;
647 	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
648 
649 	if (!verdict)
650 		return rte_flow_error_set(error, EINVAL,
651 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
652 					  err_msg);
653 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
654 	if (ret < 0) {
655 		return rte_flow_error_set(error, ret,
656 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
657 					  "failed to initialize pmd tunnel");
658 	}
659 	*items = &tunnel->item;
660 	*num_of_items = 1;
661 	return 0;
662 }
663 

static int
mlx5_flow_item_release(struct rte_eth_dev *dev,
		       struct rte_flow_item *pmd_items,
		       uint32_t num_items, struct rte_flow_error *err)
{
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_flow_tunnel *tun;

	LIST_FOREACH(tun, &thub->tunnels, chain) {
		if (&tun->item == pmd_items)
			break;
	}
	if (!tun || num_items != 1)
		return rte_flow_error_set(err, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid argument");
	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
		mlx5_flow_tunnel_free(dev, tun);
	return 0;
}

static int
mlx5_flow_action_release(struct rte_eth_dev *dev,
			 struct rte_flow_action *pmd_actions,
			 uint32_t num_actions, struct rte_flow_error *err)
{
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_flow_tunnel *tun;

	LIST_FOREACH(tun, &thub->tunnels, chain) {
		if (&tun->action == pmd_actions)
			break;
	}
	if (!tun || num_actions != 1)
		return rte_flow_error_set(err, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid argument");
	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
		mlx5_flow_tunnel_free(dev, tun);

	return 0;
}

static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err)
{
	uint64_t ol_flags = m->ol_flags;
	const struct mlx5_flow_tbl_data_entry *tble;
	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;

	if ((ol_flags & mask) != mask)
		goto err;
	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
	if (!tble) {
		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
			dev->data->port_id, m->hash.fdir.hi);
		goto err;
	}
	MLX5_ASSERT(tble->tunnel);
	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
	info->group_id = tble->group_id;
	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

	return 0;

err:
	return rte_flow_error_set(err, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to get restore info");
}
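
/*
 * Datapath usage sketch (illustrative; port_id and mbuf assumed from the
 * application's Rx loop): after a partially offloaded packet misses in
 * hardware, the application recovers the tunnel context from the mark
 * carried by the mbuf:
 *
 * @code{.c}
 * struct rte_flow_restore_info info;
 * struct rte_flow_error error;
 *
 * if (rte_flow_get_restore_info(port_id, mbuf, &info, &error) == 0 &&
 *     (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL)) {
 *	// info.tunnel describes the tunnel; if
 *	// RTE_FLOW_RESTORE_INFO_ENCAPSULATED is also set, the packet
 *	// still carries its outer headers.
 * }
 * @endcode
 */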

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
	.get_aged_flows = mlx5_flow_get_aged_flows,
	.shared_action_create = mlx5_shared_action_create,
	.shared_action_destroy = mlx5_shared_action_destroy,
	.shared_action_update = mlx5_shared_action_update,
	.shared_action_query = mlx5_shared_action_query,
	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
	.tunnel_match = mlx5_flow_tunnel_match,
	.tunnel_action_decap_release = mlx5_flow_action_release,
	.tunnel_item_release = mlx5_flow_item_release,
	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/* Key of thread specific flow workspace data. */
static pthread_key_t key_workspace;

/* One-time initialization control for the thread specific flow workspace. */
static pthread_once_t key_workspace_init;


/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata copy mark register is only used in the meter
		 * suffix sub flow when a meter is present, so it is safe to
		 * share the same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If meter is enabled, it engages a register for color match
		 * and flow match. If meter color match is not using REG_C_2,
		 * the REG_C_x used by meter color match must be skipped.
		 * If meter is disabled, all available registers can be used.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means meter is using a REG_C_x greater than
		 * REG_C_2. Take care not to conflict with meter color
		 * REG_C_x. If the available index REG_C_y >= REG_C_x,
		 * skip the color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (id >= (REG_C_7 - start_reg))
				return rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
							NULL, "invalid tag id");
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NON)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
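
/*
 * Example (illustrative): with dv_xmeta_en == MLX5_XMETA_MODE_META16, the
 * mapping above routes MLX5_FLOW_MARK to REG_C_1, so
 * mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error) returns REG_C_1,
 * while MLX5_METADATA_RX returns REG_C_0 in the same mode.
 */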

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata registers, which could mean,
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-mask size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  bool range_accepted,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last && !range_accepted) {
		uint8_t spec[size];
		uint8_t last[size];
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
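
/*
 * Example (illustrative): the default rte_flow_item_tcp_mask covers only
 * the TCP ports, and is a typical nic_mask here. A user mask enabling any
 * other header field is rejected by the first check above:
 *
 * @code{.c}
 * struct rte_flow_item_tcp user_mask = {
 *	.hdr.src_port = RTE_BE16(0xffff),
 *	.hdr.sent_seq = RTE_BE32(0xffffffff), // outside nic_mask -> ENOTSUP
 * };
 * @endcode
 */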

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the RSS descriptor of the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = rss_desc->level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}
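
/*
 * Example (illustrative): for the inner IPv4 part of a tunneled flow with
 * rss_desc->level == 2 and rss_desc->types including ETH_RSS_IPV4,
 * mlx5_flow_hashfields_adjust(rss_desc, 1, ETH_RSS_IPV4,
 * IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4) returns those fields with
 * IBV_RX_HASH_INNER added; with level <= 1 the tunnel part contributes no
 * hash fields and 0 is returned.
 */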

/**
 * Look up and set the tunnel ptype in the Rx queue data. Only a single
 * tunnel ptype can be used; if several tunnel rules are attached to this
 * queue, the tunnel ptype is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_hrxq *hrxq;
	unsigned int i;

	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
		return;
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
	if (!hrxq)
		return;
	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
		int idx = hrxq->ind_table->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must be always enabled (metadata may arrive
		 * from other port - not from local flows only).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_hrxq *hrxq;
	unsigned int i;

	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
		return;
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
	if (!hrxq)
		return;
	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
		int idx = hrxq->ind_table->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *data;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		if (!(*priv->rxqs)[i])
			continue;
		data = (*priv->rxqs)[i];
		if (!rte_flow_dynf_metadata_avail()) {
			data->dynf_meta = 0;
			data->flow_meta_mask = 0;
			data->flow_meta_offset = -1;
		} else {
			data->dynf_meta = 1;
			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
		}
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the rss action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_validate_action_rss(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	unsigned int i;

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	return 0;
}
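
/*
 * Example of an action configuration that passes the checks above
 * (illustrative; queue indexes assumed valid for the port):
 *
 * @code{.c}
 * uint16_t queues[] = { 0, 1, 2, 3 };
 * struct rte_flow_action_rss rss_conf = {
 *	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *	.level = 1,			// outer headers
 *	.types = ETH_RSS_IP | ETH_RSS_UDP,
 *	.key_len = 0,			// use the default RSS key
 *	.key = NULL,
 *	.queue_num = RTE_DIM(queues),
 *	.queue = queues,
 * };
 * @endcode
 */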

/*
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	ret = mlx5_validate_action_rss(dev, action, error);
	if (ret)
		return ret;
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "RSS on eCPRI is not supported now");
	}
	return 0;
}

/*
 * Validate the default miss action.
 *
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_default_miss(uint64_t action_flags,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "default miss action not supported "
					  "for egress");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
					  "only group 0 is supported");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	return 0;
}
1723 
1724 /**
1725  * Validate the count action.
1726  *
1727  * @param[in] dev
1728  *   Pointer to the Ethernet device structure.
1729  * @param[in] attr
1730  *   Attributes of flow that includes this action.
1731  * @param[out] error
1732  *   Pointer to error structure.
1733  *
1734  * @return
1735  *   0 on success, a negative errno value otherwise and rte_errno is set.
1736  */
1737 int
1738 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1739 				const struct rte_flow_attr *attr,
1740 				struct rte_flow_error *error)
1741 {
1742 	if (attr->egress)
1743 		return rte_flow_error_set(error, ENOTSUP,
1744 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1745 					  "count action not supported for "
1746 					  "egress");
1747 	return 0;
1748 }
1749 
1750 /**
1751  * Verify the @p attributes will be correctly understood by the NIC and store
1752  * them in the @p flow if everything is correct.
1753  *
1754  * @param[in] dev
1755  *   Pointer to the Ethernet device structure.
1756  * @param[in] attributes
1757  *   Pointer to flow attributes
1758  * @param[out] error
1759  *   Pointer to error structure.
1760  *
1761  * @return
1762  *   0 on success, a negative errno value otherwise and rte_errno is set.
1763  */
1764 int
1765 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1766 			      const struct rte_flow_attr *attributes,
1767 			      struct rte_flow_error *error)
1768 {
1769 	struct mlx5_priv *priv = dev->data->dev_private;
1770 	uint32_t priority_max = priv->config.flow_prio - 1;
1771 
1772 	if (attributes->group)
1773 		return rte_flow_error_set(error, ENOTSUP,
1774 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1775 					  NULL, "groups are not supported");
1776 	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1777 	    attributes->priority >= priority_max)
1778 		return rte_flow_error_set(error, ENOTSUP,
1779 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1780 					  NULL, "priority out of range");
1781 	if (attributes->egress)
1782 		return rte_flow_error_set(error, ENOTSUP,
1783 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1784 					  "egress is not supported");
1785 	if (attributes->transfer && !priv->config.dv_esw_en)
1786 		return rte_flow_error_set(error, ENOTSUP,
1787 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1788 					  NULL, "transfer is not supported");
1789 	if (!attributes->ingress)
1790 		return rte_flow_error_set(error, EINVAL,
1791 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1792 					  NULL,
1793 					  "ingress attribute is mandatory");
1794 	return 0;
1795 }
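
/*
 * Illustrative sketch only: a set of flow attributes accepted by the check
 * above. Only ingress rules in group 0 with a priority below the configured
 * priv->config.flow_prio pass; transfer additionally requires dv_esw_en.
 */
static const struct rte_flow_attr example_valid_attributes __rte_unused = {
	.group = 0,	/* Non-zero groups are rejected here. */
	.priority = 0,	/* Must stay below the configured flow_prio. */
	.ingress = 1,	/* Mandatory; egress is rejected. */
};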
1796 
1797 /**
1798  * Validate ICMP6 item.
1799  *
1800  * @param[in] item
1801  *   Item specification.
1802  * @param[in] item_flags
1803  *   Bit-fields that hold the items detected until now.
1804  * @param[in] target_protocol
1805  *   The next protocol in the previous item.
1806  * @param[out] error
1807  *   Pointer to error structure.
1808  *
1809  * @return
1810  *   0 on success, a negative errno value otherwise and rte_errno is set.
1811  */
1812 int
1813 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1814 			       uint64_t item_flags,
1815 			       uint8_t target_protocol,
1816 			       struct rte_flow_error *error)
1817 {
1818 	const struct rte_flow_item_icmp6 *mask = item->mask;
1819 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1820 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1821 				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1822 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1823 				      MLX5_FLOW_LAYER_OUTER_L4;
1824 	int ret;
1825 
1826 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1827 		return rte_flow_error_set(error, EINVAL,
1828 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1829 					  "protocol filtering not compatible"
1830 					  " with ICMP6 layer");
1831 	if (!(item_flags & l3m))
1832 		return rte_flow_error_set(error, EINVAL,
1833 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1834 					  "IPv6 is mandatory to filter on"
1835 					  " ICMP6");
1836 	if (item_flags & l4m)
1837 		return rte_flow_error_set(error, EINVAL,
1838 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1839 					  "multiple L4 layers not supported");
1840 	if (!mask)
1841 		mask = &rte_flow_item_icmp6_mask;
1842 	ret = mlx5_flow_item_acceptable
1843 		(item, (const uint8_t *)mask,
1844 		 (const uint8_t *)&rte_flow_item_icmp6_mask,
1845 		 sizeof(struct rte_flow_item_icmp6),
1846 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1847 	if (ret < 0)
1848 		return ret;
1849 	return 0;
1850 }
1851 
1852 /**
1853  * Validate ICMP item.
1854  *
1855  * @param[in] item
1856  *   Item specification.
1857  * @param[in] item_flags
1858  *   Bit-fields that hold the items detected until now.
1859  * @param[out] error
1860  *   Pointer to error structure.
1861  *
1862  * @return
1863  *   0 on success, a negative errno value otherwise and rte_errno is set.
1864  */
1865 int
1866 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1867 			     uint64_t item_flags,
1868 			     uint8_t target_protocol,
1869 			     struct rte_flow_error *error)
1870 {
1871 	const struct rte_flow_item_icmp *mask = item->mask;
1872 	const struct rte_flow_item_icmp nic_mask = {
1873 		.hdr.icmp_type = 0xff,
1874 		.hdr.icmp_code = 0xff,
1875 		.hdr.icmp_ident = RTE_BE16(0xffff),
1876 		.hdr.icmp_seq_nb = RTE_BE16(0xffff),
1877 	};
1878 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1879 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1880 				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1881 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1882 				      MLX5_FLOW_LAYER_OUTER_L4;
1883 	int ret;
1884 
1885 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1886 		return rte_flow_error_set(error, EINVAL,
1887 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1888 					  "protocol filtering not compatible"
1889 					  " with ICMP layer");
1890 	if (!(item_flags & l3m))
1891 		return rte_flow_error_set(error, EINVAL,
1892 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1893 					  "IPv4 is mandatory to filter"
1894 					  " on ICMP");
1895 	if (item_flags & l4m)
1896 		return rte_flow_error_set(error, EINVAL,
1897 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1898 					  "multiple L4 layers not supported");
1899 	if (!mask)
1900 		mask = &nic_mask;
1901 	ret = mlx5_flow_item_acceptable
1902 		(item, (const uint8_t *)mask,
1903 		 (const uint8_t *)&nic_mask,
1904 		 sizeof(struct rte_flow_item_icmp),
1905 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1906 	if (ret < 0)
1907 		return ret;
1908 	return 0;
1909 }
1910 
1911 /**
1912  * Validate Ethernet item.
1913  *
1914  * @param[in] item
1915  *   Item specification.
1916  * @param[in] item_flags
1917  *   Bit-fields that hold the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
1918  * @param[out] error
1919  *   Pointer to error structure.
1920  *
1921  * @return
1922  *   0 on success, a negative errno value otherwise and rte_errno is set.
1923  */
1924 int
1925 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1926 			    uint64_t item_flags, bool ext_vlan_sup,
1927 			    struct rte_flow_error *error)
1928 {
1929 	const struct rte_flow_item_eth *mask = item->mask;
1930 	const struct rte_flow_item_eth nic_mask = {
1931 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1932 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1933 		.type = RTE_BE16(0xffff),
1934 		.has_vlan = ext_vlan_sup ? 1 : 0,
1935 	};
1936 	int ret;
1937 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1938 	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
1939 				       MLX5_FLOW_LAYER_OUTER_L2;
1940 
1941 	if (item_flags & ethm)
1942 		return rte_flow_error_set(error, ENOTSUP,
1943 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1944 					  "multiple L2 layers not supported");
1945 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1946 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1947 		return rte_flow_error_set(error, EINVAL,
1948 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1949 					  "L2 layer should not follow "
1950 					  "L3 layers");
1951 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1952 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1953 		return rte_flow_error_set(error, EINVAL,
1954 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1955 					  "L2 layer should not follow VLAN");
1956 	if (!mask)
1957 		mask = &rte_flow_item_eth_mask;
1958 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1959 					(const uint8_t *)&nic_mask,
1960 					sizeof(struct rte_flow_item_eth),
1961 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1962 	return ret;
1963 }
1964 
1965 /**
1966  * Validate VLAN item.
1967  *
1968  * @param[in] item
1969  *   Item specification.
1970  * @param[in] item_flags
1971  *   Bit-fields that hold the items detected until now.
1972  * @param[in] dev
1973  *   Ethernet device flow is being created on.
1974  * @param[out] error
1975  *   Pointer to error structure.
1976  *
1977  * @return
1978  *   0 on success, a negative errno value otherwise and rte_errno is set.
1979  */
1980 int
1981 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1982 			     uint64_t item_flags,
1983 			     struct rte_eth_dev *dev,
1984 			     struct rte_flow_error *error)
1985 {
1986 	const struct rte_flow_item_vlan *spec = item->spec;
1987 	const struct rte_flow_item_vlan *mask = item->mask;
1988 	const struct rte_flow_item_vlan nic_mask = {
1989 		.tci = RTE_BE16(UINT16_MAX),
1990 		.inner_type = RTE_BE16(UINT16_MAX),
1991 	};
1992 	uint16_t vlan_tag = 0;
1993 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1994 	int ret;
1995 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1996 					MLX5_FLOW_LAYER_INNER_L4) :
1997 				       (MLX5_FLOW_LAYER_OUTER_L3 |
1998 					MLX5_FLOW_LAYER_OUTER_L4);
1999 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2000 					MLX5_FLOW_LAYER_OUTER_VLAN;
2001 
2002 	if (item_flags & vlanm)
2003 		return rte_flow_error_set(error, EINVAL,
2004 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2005 					  "multiple VLAN layers not supported");
2006 	else if ((item_flags & l34m) != 0)
2007 		return rte_flow_error_set(error, EINVAL,
2008 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2009 					  "VLAN cannot follow L3/L4 layer");
2010 	if (!mask)
2011 		mask = &rte_flow_item_vlan_mask;
2012 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2013 					(const uint8_t *)&nic_mask,
2014 					sizeof(struct rte_flow_item_vlan),
2015 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2016 	if (ret)
2017 		return ret;
2018 	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2019 		struct mlx5_priv *priv = dev->data->dev_private;
2020 
2021 		if (priv->vmwa_context) {
2022 			/*
2023 			 * A non-NULL context means we have a virtual machine
2024 			 * with SR-IOV enabled and must create a VLAN interface
2025 			 * to make the hypervisor set up the E-Switch vport
2026 			 * context correctly. We avoid creating multiple
2027 			 * VLAN interfaces, so we cannot support a VLAN tag mask.
2028 			 */
2029 			return rte_flow_error_set(error, EINVAL,
2030 						  RTE_FLOW_ERROR_TYPE_ITEM,
2031 						  item,
2032 						  "VLAN tag mask is not"
2033 						  " supported in virtual"
2034 						  " environment");
2035 		}
2036 	}
2037 	if (spec) {
2038 		vlan_tag = spec->tci;
2039 		vlan_tag &= mask->tci;
2040 	}
2041 	/*
2042 	 * From verbs perspective an empty VLAN is equivalent
2043 	 * to a packet without VLAN layer.
2044 	 */
2045 	if (!vlan_tag)
2046 		return rte_flow_error_set(error, EINVAL,
2047 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2048 					  item->spec,
2049 					  "VLAN cannot be empty");
2050 	return 0;
2051 }
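
/*
 * Illustrative sketch only: a VLAN item that satisfies the checks above.
 * The TCI spec/mask are example values; a TCI that is all-zero after
 * masking would be rejected as an "empty" VLAN.
 */
static int __rte_unused
example_validate_vlan_item(struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan spec = {
		.tci = RTE_BE16(100),		/* VLAN ID 100, example value. */
	};
	const struct rte_flow_item_vlan mask = {
		.tci = RTE_BE16(0x0fff),	/* Match the VLAN ID bits only. */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.spec = &spec,
		.mask = &mask,
	};

	/* An outer L2 header was already seen; no L3/L4/VLAN yet. */
	return mlx5_flow_validate_item_vlan(&item, MLX5_FLOW_LAYER_OUTER_L2,
					    dev, error);
}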
2052 
2053 /**
2054  * Validate IPV4 item.
2055  *
2056  * @param[in] item
2057  *   Item specification.
2058  * @param[in] item_flags
2059  *   Bit-fields that hold the items detected until now.
2060  * @param[in] last_item
2061  *   Previous validated item in the pattern items.
2062  * @param[in] ether_type
2063  *   Type in the ethernet layer header (including dot1q).
2064  * @param[in] acc_mask
2065  *   Acceptable mask; if NULL, the default internal mask
2066  *   will be used to check whether item fields are supported.
2067  * @param[in] range_accepted
2068  *   True if range of values is accepted for specific fields, false otherwise.
2069  * @param[out] error
2070  *   Pointer to error structure.
2071  *
2072  * @return
2073  *   0 on success, a negative errno value otherwise and rte_errno is set.
2074  */
2075 int
2076 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2077 			     uint64_t item_flags,
2078 			     uint64_t last_item,
2079 			     uint16_t ether_type,
2080 			     const struct rte_flow_item_ipv4 *acc_mask,
2081 			     bool range_accepted,
2082 			     struct rte_flow_error *error)
2083 {
2084 	const struct rte_flow_item_ipv4 *mask = item->mask;
2085 	const struct rte_flow_item_ipv4 *spec = item->spec;
2086 	const struct rte_flow_item_ipv4 nic_mask = {
2087 		.hdr = {
2088 			.src_addr = RTE_BE32(0xffffffff),
2089 			.dst_addr = RTE_BE32(0xffffffff),
2090 			.type_of_service = 0xff,
2091 			.next_proto_id = 0xff,
2092 		},
2093 	};
2094 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2095 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2096 				      MLX5_FLOW_LAYER_OUTER_L3;
2097 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2098 				      MLX5_FLOW_LAYER_OUTER_L4;
2099 	int ret;
2100 	uint8_t next_proto = 0xFF;
2101 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2102 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2103 				  MLX5_FLOW_LAYER_INNER_VLAN);
2104 
2105 	if ((last_item & l2_vlan) && ether_type &&
2106 	    ether_type != RTE_ETHER_TYPE_IPV4)
2107 		return rte_flow_error_set(error, EINVAL,
2108 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2109 					  "IPv4 cannot follow L2/VLAN layer "
2110 					  "whose ether type is not IPv4");
2111 	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2112 		if (mask && spec)
2113 			next_proto = mask->hdr.next_proto_id &
2114 				     spec->hdr.next_proto_id;
2115 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2116 			return rte_flow_error_set(error, EINVAL,
2117 						  RTE_FLOW_ERROR_TYPE_ITEM,
2118 						  item,
2119 						  "multiple tunnel "
2120 						  "not supported");
2121 	}
2122 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2123 		return rte_flow_error_set(error, EINVAL,
2124 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2125 					  "wrong tunnel type - IPv6 specified "
2126 					  "but IPv4 item provided");
2127 	if (item_flags & l3m)
2128 		return rte_flow_error_set(error, ENOTSUP,
2129 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2130 					  "multiple L3 layers not supported");
2131 	else if (item_flags & l4m)
2132 		return rte_flow_error_set(error, EINVAL,
2133 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2134 					  "L3 cannot follow an L4 layer.");
2135 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2136 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2137 		return rte_flow_error_set(error, EINVAL,
2138 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2139 					  "L3 cannot follow an NVGRE layer.");
2140 	if (!mask)
2141 		mask = &rte_flow_item_ipv4_mask;
2142 	else if (mask->hdr.next_proto_id != 0 &&
2143 		 mask->hdr.next_proto_id != 0xff)
2144 		return rte_flow_error_set(error, EINVAL,
2145 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2146 					  "partial mask is not supported"
2147 					  " for protocol");
2148 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2149 					acc_mask ? (const uint8_t *)acc_mask
2150 						 : (const uint8_t *)&nic_mask,
2151 					sizeof(struct rte_flow_item_ipv4),
2152 					range_accepted, error);
2153 	if (ret < 0)
2154 		return ret;
2155 	return 0;
2156 }
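
/*
 * Illustrative sketch only: an IPv4 mask accepted by the check above.
 * Address prefixes may be partial, but next_proto_id must be masked
 * either fully (0xff) or not at all, as enforced above.
 */
static const struct rte_flow_item_ipv4 example_ipv4_mask __rte_unused = {
	.hdr = {
		.src_addr = RTE_BE32(0xffffff00),	/* /24 source prefix. */
		.next_proto_id = 0xff,			/* Full protocol mask. */
	},
};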
2157 
2158 /**
2159  * Validate IPV6 item.
2160  *
2161  * @param[in] item
2162  *   Item specification.
2163  * @param[in] item_flags
2164  *   Bit-fields that hold the items detected until now.
2165  * @param[in] last_item
2166  *   Previous validated item in the pattern items.
2167  * @param[in] ether_type
2168  *   Type in the ethernet layer header (including dot1q).
2169  * @param[in] acc_mask
2170  *   Acceptable mask; if NULL, the default internal mask
2171  *   will be used to check whether item fields are supported.
2172  * @param[out] error
2173  *   Pointer to error structure.
2174  *
2175  * @return
2176  *   0 on success, a negative errno value otherwise and rte_errno is set.
2177  */
2178 int
2179 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2180 			     uint64_t item_flags,
2181 			     uint64_t last_item,
2182 			     uint16_t ether_type,
2183 			     const struct rte_flow_item_ipv6 *acc_mask,
2184 			     struct rte_flow_error *error)
2185 {
2186 	const struct rte_flow_item_ipv6 *mask = item->mask;
2187 	const struct rte_flow_item_ipv6 *spec = item->spec;
2188 	const struct rte_flow_item_ipv6 nic_mask = {
2189 		.hdr = {
2190 			.src_addr =
2191 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2192 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2193 			.dst_addr =
2194 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2195 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2196 			.vtc_flow = RTE_BE32(0xffffffff),
2197 			.proto = 0xff,
2198 		},
2199 	};
2200 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2201 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2202 				      MLX5_FLOW_LAYER_OUTER_L3;
2203 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2204 				      MLX5_FLOW_LAYER_OUTER_L4;
2205 	int ret;
2206 	uint8_t next_proto = 0xFF;
2207 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2208 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2209 				  MLX5_FLOW_LAYER_INNER_VLAN);
2210 
2211 	if ((last_item & l2_vlan) && ether_type &&
2212 	    ether_type != RTE_ETHER_TYPE_IPV6)
2213 		return rte_flow_error_set(error, EINVAL,
2214 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2215 					  "IPv6 cannot follow L2/VLAN layer "
2216 					  "whose ether type is not IPv6");
2217 	if (mask && mask->hdr.proto == UINT8_MAX && spec)
2218 		next_proto = spec->hdr.proto;
2219 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
2220 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2221 			return rte_flow_error_set(error, EINVAL,
2222 						  RTE_FLOW_ERROR_TYPE_ITEM,
2223 						  item,
2224 						  "multiple tunnel "
2225 						  "not supported");
2226 	}
2227 	if (next_proto == IPPROTO_HOPOPTS  ||
2228 	    next_proto == IPPROTO_ROUTING  ||
2229 	    next_proto == IPPROTO_FRAGMENT ||
2230 	    next_proto == IPPROTO_ESP	   ||
2231 	    next_proto == IPPROTO_AH	   ||
2232 	    next_proto == IPPROTO_DSTOPTS)
2233 		return rte_flow_error_set(error, EINVAL,
2234 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2235 					  "IPv6 proto (next header) should "
2236 					  "not be set as extension header");
2237 	if (item_flags & MLX5_FLOW_LAYER_IPIP)
2238 		return rte_flow_error_set(error, EINVAL,
2239 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2240 					  "wrong tunnel type - IPv4 specified "
2241 					  "but IPv6 item provided");
2242 	if (item_flags & l3m)
2243 		return rte_flow_error_set(error, ENOTSUP,
2244 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2245 					  "multiple L3 layers not supported");
2246 	else if (item_flags & l4m)
2247 		return rte_flow_error_set(error, EINVAL,
2248 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2249 					  "L3 cannot follow an L4 layer.");
2250 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2251 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2252 		return rte_flow_error_set(error, EINVAL,
2253 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2254 					  "L3 cannot follow an NVGRE layer.");
2255 	if (!mask)
2256 		mask = &rte_flow_item_ipv6_mask;
2257 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2258 					acc_mask ? (const uint8_t *)acc_mask
2259 						 : (const uint8_t *)&nic_mask,
2260 					sizeof(struct rte_flow_item_ipv6),
2261 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2262 	if (ret < 0)
2263 		return ret;
2264 	return 0;
2265 }
2266 
2267 /**
2268  * Validate UDP item.
2269  *
2270  * @param[in] item
2271  *   Item specification.
2272  * @param[in] item_flags
2273  *   Bit-fields that hold the items detected until now.
2274  * @param[in] target_protocol
2275  *   The next protocol in the previous item.
2278  * @param[out] error
2279  *   Pointer to error structure.
2280  *
2281  * @return
2282  *   0 on success, a negative errno value otherwise and rte_errno is set.
2283  */
2284 int
2285 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2286 			    uint64_t item_flags,
2287 			    uint8_t target_protocol,
2288 			    struct rte_flow_error *error)
2289 {
2290 	const struct rte_flow_item_udp *mask = item->mask;
2291 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2292 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2293 				      MLX5_FLOW_LAYER_OUTER_L3;
2294 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2295 				      MLX5_FLOW_LAYER_OUTER_L4;
2296 	int ret;
2297 
2298 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2299 		return rte_flow_error_set(error, EINVAL,
2300 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2301 					  "protocol filtering not compatible"
2302 					  " with UDP layer");
2303 	if (!(item_flags & l3m))
2304 		return rte_flow_error_set(error, EINVAL,
2305 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2306 					  "L3 is mandatory to filter on L4");
2307 	if (item_flags & l4m)
2308 		return rte_flow_error_set(error, EINVAL,
2309 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2310 					  "multiple L4 layers not supported");
2311 	if (!mask)
2312 		mask = &rte_flow_item_udp_mask;
2313 	ret = mlx5_flow_item_acceptable
2314 		(item, (const uint8_t *)mask,
2315 		 (const uint8_t *)&rte_flow_item_udp_mask,
2316 		 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2317 		 error);
2318 	if (ret < 0)
2319 		return ret;
2320 	return 0;
2321 }
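
/*
 * Illustrative sketch only: item_flags that allow a UDP item on the outer
 * stack; an L3 layer must already be present and no other L4 layer may
 * have been seen, per the checks above.
 */
static const uint64_t example_udp_ok_item_flags __rte_unused =
	MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3_IPV4;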
2322 
2323 /**
2324  * Validate TCP item.
2325  *
2326  * @param[in] item
2327  *   Item specification.
2328  * @param[in] item_flags
2329  *   Bit-fields that hold the items detected until now.
2330  * @param[in] target_protocol
2331  *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2332  * @param[out] error
2333  *   Pointer to error structure.
2334  *
2335  * @return
2336  *   0 on success, a negative errno value otherwise and rte_errno is set.
2337  */
2338 int
2339 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2340 			    uint64_t item_flags,
2341 			    uint8_t target_protocol,
2342 			    const struct rte_flow_item_tcp *flow_mask,
2343 			    struct rte_flow_error *error)
2344 {
2345 	const struct rte_flow_item_tcp *mask = item->mask;
2346 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2347 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2348 				      MLX5_FLOW_LAYER_OUTER_L3;
2349 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2350 				      MLX5_FLOW_LAYER_OUTER_L4;
2351 	int ret;
2352 
2353 	MLX5_ASSERT(flow_mask);
2354 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2355 		return rte_flow_error_set(error, EINVAL,
2356 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2357 					  "protocol filtering not compatible"
2358 					  " with TCP layer");
2359 	if (!(item_flags & l3m))
2360 		return rte_flow_error_set(error, EINVAL,
2361 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2362 					  "L3 is mandatory to filter on L4");
2363 	if (item_flags & l4m)
2364 		return rte_flow_error_set(error, EINVAL,
2365 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2366 					  "multiple L4 layers not supported");
2367 	if (!mask)
2368 		mask = &rte_flow_item_tcp_mask;
2369 	ret = mlx5_flow_item_acceptable
2370 		(item, (const uint8_t *)mask,
2371 		 (const uint8_t *)flow_mask,
2372 		 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2373 		 error);
2374 	if (ret < 0)
2375 		return ret;
2376 	return 0;
2377 }
2378 
2379 /**
2380  * Validate VXLAN item.
2381  *
2382  * @param[in] item
2383  *   Item specification.
2384  * @param[in] item_flags
2385  *   Bit-fields that hold the items detected until now.
2388  * @param[out] error
2389  *   Pointer to error structure.
2390  *
2391  * @return
2392  *   0 on success, a negative errno value otherwise and rte_errno is set.
2393  */
2394 int
2395 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2396 			      uint64_t item_flags,
2397 			      struct rte_flow_error *error)
2398 {
2399 	const struct rte_flow_item_vxlan *spec = item->spec;
2400 	const struct rte_flow_item_vxlan *mask = item->mask;
2401 	int ret;
2402 	union vni {
2403 		uint32_t vlan_id;
2404 		uint8_t vni[4];
2405 	} id = { .vlan_id = 0, };
2406 
2408 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2409 		return rte_flow_error_set(error, ENOTSUP,
2410 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2411 					  "multiple tunnel layers not"
2412 					  " supported");
2413 	/*
2414 	 * Verify only UDPv4 is present as defined in
2415 	 * https://tools.ietf.org/html/rfc7348
2416 	 */
2417 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2418 		return rte_flow_error_set(error, EINVAL,
2419 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2420 					  "no outer UDP layer found");
2421 	if (!mask)
2422 		mask = &rte_flow_item_vxlan_mask;
2423 	ret = mlx5_flow_item_acceptable
2424 		(item, (const uint8_t *)mask,
2425 		 (const uint8_t *)&rte_flow_item_vxlan_mask,
2426 		 sizeof(struct rte_flow_item_vxlan),
2427 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2428 	if (ret < 0)
2429 		return ret;
2430 	if (spec) {
2431 		memcpy(&id.vni[1], spec->vni, 3);
		/* Apply the mask to the spec VNI instead of overwriting it. */
		id.vni[1] &= mask->vni[0];
		id.vni[2] &= mask->vni[1];
		id.vni[3] &= mask->vni[2];
2433 	}
2434 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2435 		return rte_flow_error_set(error, ENOTSUP,
2436 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2437 					  "VXLAN tunnel must be fully defined");
2438 	return 0;
2439 }
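
/*
 * Illustrative sketch only: the canonical pattern accepted by the VXLAN
 * validation above; the outer stack must be fully defined down to UDP.
 */
static const enum rte_flow_item_type example_vxlan_pattern[] __rte_unused = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_END,
};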
2440 
2441 /**
2442  * Validate VXLAN_GPE item.
2443  *
2444  * @param[in] item
2445  *   Item specification.
2446  * @param[in] item_flags
2447  *   Bit-fields that hold the items detected until now.
2448  * @param[in] dev
2449  *   Pointer to the Ethernet device structure.
2452  * @param[out] error
2453  *   Pointer to error structure.
2454  *
2455  * @return
2456  *   0 on success, a negative errno value otherwise and rte_errno is set.
2457  */
2458 int
2459 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2460 				  uint64_t item_flags,
2461 				  struct rte_eth_dev *dev,
2462 				  struct rte_flow_error *error)
2463 {
2464 	struct mlx5_priv *priv = dev->data->dev_private;
2465 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2466 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2467 	int ret;
2468 	union vni {
2469 		uint32_t vlan_id;
2470 		uint8_t vni[4];
2471 	} id = { .vlan_id = 0, };
2472 
2473 	if (!priv->config.l3_vxlan_en)
2474 		return rte_flow_error_set(error, ENOTSUP,
2475 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2476 					  "L3 VXLAN is not enabled by device"
2477 					  " parameter and/or not configured in"
2478 					  " firmware");
2479 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2480 		return rte_flow_error_set(error, ENOTSUP,
2481 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2482 					  "multiple tunnel layers not"
2483 					  " supported");
2484 	/*
2485 	 * Verify only UDP is present as required by the VXLAN-GPE
2486 	 * specification (draft-ietf-nvo3-vxlan-gpe).
2487 	 */
2488 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2489 		return rte_flow_error_set(error, EINVAL,
2490 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2491 					  "no outer UDP layer found");
2492 	if (!mask)
2493 		mask = &rte_flow_item_vxlan_gpe_mask;
2494 	ret = mlx5_flow_item_acceptable
2495 		(item, (const uint8_t *)mask,
2496 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2497 		 sizeof(struct rte_flow_item_vxlan_gpe),
2498 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2499 	if (ret < 0)
2500 		return ret;
2501 	if (spec) {
2502 		if (spec->protocol)
2503 			return rte_flow_error_set(error, ENOTSUP,
2504 						  RTE_FLOW_ERROR_TYPE_ITEM,
2505 						  item,
2506 						  "VXLAN-GPE protocol"
2507 						  " not supported");
2508 		memcpy(&id.vni[1], spec->vni, 3);
		/* Apply the mask to the spec VNI instead of overwriting it. */
		id.vni[1] &= mask->vni[0];
		id.vni[2] &= mask->vni[1];
		id.vni[3] &= mask->vni[2];
2510 	}
2511 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2512 		return rte_flow_error_set(error, ENOTSUP,
2513 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2514 					  "VXLAN-GPE tunnel must be fully"
2515 					  " defined");
2516 	return 0;
2517 }

2518 /**
2519  * Validate GRE Key item.
2520  *
2521  * @param[in] item
2522  *   Item specification.
2523  * @param[in] item_flags
2524  *   Bit flags to mark detected items.
2525  * @param[in] gre_item
2526  *   Pointer to the GRE item.
2527  * @param[out] error
2528  *   Pointer to error structure.
2529  *
2530  * @return
2531  *   0 on success, a negative errno value otherwise and rte_errno is set.
2532  */
2533 int
2534 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2535 				uint64_t item_flags,
2536 				const struct rte_flow_item *gre_item,
2537 				struct rte_flow_error *error)
2538 {
2539 	const rte_be32_t *mask = item->mask;
2540 	int ret = 0;
2541 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2542 	const struct rte_flow_item_gre *gre_spec;
2543 	const struct rte_flow_item_gre *gre_mask;
2544 
2545 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2546 		return rte_flow_error_set(error, ENOTSUP,
2547 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2548 					  "Multiple GRE keys not supported");
2549 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2550 		return rte_flow_error_set(error, ENOTSUP,
2551 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2552 					  "No preceding GRE header");
2553 	if (item_flags & MLX5_FLOW_LAYER_INNER)
2554 		return rte_flow_error_set(error, ENOTSUP,
2555 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2556 					  "GRE key following a wrong item");
2557 	gre_mask = gre_item->mask;
2558 	if (!gre_mask)
2559 		gre_mask = &rte_flow_item_gre_mask;
2560 	gre_spec = gre_item->spec;
2561 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2562 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2563 		return rte_flow_error_set(error, EINVAL,
2564 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2565 					  "Key bit must be on");
2566 
2567 	if (!mask)
2568 		mask = &gre_key_default_mask;
2569 	ret = mlx5_flow_item_acceptable
2570 		(item, (const uint8_t *)mask,
2571 		 (const uint8_t *)&gre_key_default_mask,
2572 		 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2573 	return ret;
2574 }
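
/*
 * Illustrative sketch only: a GRE item compatible with a following GRE key
 * item; the K bit (0x2000 in c_rsvd0_ver) must be present in the spec
 * whenever the mask selects it, as checked above. The payload type is an
 * example value.
 */
static const struct rte_flow_item_gre example_gre_with_key __rte_unused = {
	.c_rsvd0_ver = RTE_BE16(0x2000),	/* K bit: a key is present. */
	.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),	/* Example payload. */
};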
2575 
2576 /**
2577  * Validate GRE item.
2578  *
2579  * @param[in] item
2580  *   Item specification.
2581  * @param[in] item_flags
2582  *   Bit flags to mark detected items.
2583  * @param[in] target_protocol
2584  *   The next protocol in the previous item.
2585  * @param[out] error
2586  *   Pointer to error structure.
2587  *
2588  * @return
2589  *   0 on success, a negative errno value otherwise and rte_errno is set.
2590  */
2591 int
2592 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2593 			    uint64_t item_flags,
2594 			    uint8_t target_protocol,
2595 			    struct rte_flow_error *error)
2596 {
2597 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2598 	const struct rte_flow_item_gre *mask = item->mask;
2599 	int ret;
2600 	const struct rte_flow_item_gre nic_mask = {
2601 		.c_rsvd0_ver = RTE_BE16(0xB000),
2602 		.protocol = RTE_BE16(UINT16_MAX),
2603 	};
2604 
2605 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2606 		return rte_flow_error_set(error, EINVAL,
2607 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2608 					  "protocol filtering not compatible"
2609 					  " with this GRE layer");
2610 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2611 		return rte_flow_error_set(error, ENOTSUP,
2612 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2613 					  "multiple tunnel layers not"
2614 					  " supported");
2615 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2616 		return rte_flow_error_set(error, ENOTSUP,
2617 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2618 					  "L3 Layer is missing");
2619 	if (!mask)
2620 		mask = &rte_flow_item_gre_mask;
2621 	ret = mlx5_flow_item_acceptable
2622 		(item, (const uint8_t *)mask,
2623 		 (const uint8_t *)&nic_mask,
2624 		 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2625 		 error);
2626 	if (ret < 0)
2627 		return ret;
2628 #ifndef HAVE_MLX5DV_DR
2629 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2630 	if (spec && (spec->protocol & mask->protocol))
2631 		return rte_flow_error_set(error, ENOTSUP,
2632 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2633 					  "without MPLS support the"
2634 					  " specification cannot be used for"
2635 					  " filtering");
2636 #endif
2637 #endif
2638 	return 0;
2639 }
2640 
2641 /**
2642  * Validate Geneve item.
2643  *
2644  * @param[in] item
2645  *   Item specification.
2646  * @param[in] item_flags
2647  *   Bit-fields that hold the items detected until now.
2648  * @param[in] dev
2649  *   Pointer to the Ethernet device structure.
2650  * @param[out] error
2651  *   Pointer to error structure.
2652  *
2653  * @return
2654  *   0 on success, a negative errno value otherwise and rte_errno is set.
2655  */
2657 int
2658 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2659 			       uint64_t item_flags,
2660 			       struct rte_eth_dev *dev,
2661 			       struct rte_flow_error *error)
2662 {
2663 	struct mlx5_priv *priv = dev->data->dev_private;
2664 	const struct rte_flow_item_geneve *spec = item->spec;
2665 	const struct rte_flow_item_geneve *mask = item->mask;
2666 	int ret;
2667 	uint16_t gbhdr;
2668 	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2669 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2670 	const struct rte_flow_item_geneve nic_mask = {
2671 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2672 		.vni = "\xff\xff\xff",
2673 		.protocol = RTE_BE16(UINT16_MAX),
2674 	};
2675 
2676 	if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2677 		return rte_flow_error_set(error, ENOTSUP,
2678 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2679 					  "L3 Geneve is not enabled by device"
2680 					  " parameter and/or not configured in"
2681 					  " firmware");
2682 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2683 		return rte_flow_error_set(error, ENOTSUP,
2684 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2685 					  "multiple tunnel layers not"
2686 					  " supported");
2687 	/*
2688 	 * Verify only UDP is present as required by the
2689 	 * Geneve specification (RFC 8926).
2690 	 */
2691 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2692 		return rte_flow_error_set(error, EINVAL,
2693 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2694 					  "no outer UDP layer found");
2695 	if (!mask)
2696 		mask = &rte_flow_item_geneve_mask;
2697 	ret = mlx5_flow_item_acceptable
2698 				  (item, (const uint8_t *)mask,
2699 				   (const uint8_t *)&nic_mask,
2700 				   sizeof(struct rte_flow_item_geneve),
2701 				   MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2702 	if (ret)
2703 		return ret;
2704 	if (spec) {
2705 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2706 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2707 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2708 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2709 			return rte_flow_error_set(error, ENOTSUP,
2710 						  RTE_FLOW_ERROR_TYPE_ITEM,
2711 						  item,
2712 						  "unsupported Geneve protocol"
2713 						  " fields are being used");
2714 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2715 			return rte_flow_error_set
2716 					(error, ENOTSUP,
2717 					 RTE_FLOW_ERROR_TYPE_ITEM,
2718 					 item,
2719 					 "Unsupported Geneve options length");
2720 	}
2721 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2722 		return rte_flow_error_set
2723 				    (error, ENOTSUP,
2724 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
2725 				     "Geneve tunnel must be fully defined");
2726 	return 0;
2727 }
2728 
2729 /**
2730  * Validate MPLS item.
2731  *
2732  * @param[in] dev
2733  *   Pointer to the rte_eth_dev structure.
2734  * @param[in] item
2735  *   Item specification.
2736  * @param[in] item_flags
2737  *   Bit-fields that hold the items detected until now.
2738  * @param[in] prev_layer
2739  *   The protocol layer indicated in previous item.
2740  * @param[out] error
2741  *   Pointer to error structure.
2742  *
2743  * @return
2744  *   0 on success, a negative errno value otherwise and rte_errno is set.
2745  */
2746 int
2747 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2748 			     const struct rte_flow_item *item __rte_unused,
2749 			     uint64_t item_flags __rte_unused,
2750 			     uint64_t prev_layer __rte_unused,
2751 			     struct rte_flow_error *error)
2752 {
2753 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2754 	const struct rte_flow_item_mpls *mask = item->mask;
2755 	struct mlx5_priv *priv = dev->data->dev_private;
2756 	int ret;
2757 
2758 	if (!priv->config.mpls_en)
2759 		return rte_flow_error_set(error, ENOTSUP,
2760 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2761 					  "MPLS not supported or"
2762 					  " disabled in firmware"
2763 					  " configuration.");
2764 	/* MPLS over IP, UDP, GRE is allowed */
2765 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2766 			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
2767 			    MLX5_FLOW_LAYER_GRE)))
2768 		return rte_flow_error_set(error, EINVAL,
2769 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2770 					  "protocol filtering not compatible"
2771 					  " with MPLS layer");
2772 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2773 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2774 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
2775 		return rte_flow_error_set(error, ENOTSUP,
2776 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2777 					  "multiple tunnel layers not"
2778 					  " supported");
2779 	if (!mask)
2780 		mask = &rte_flow_item_mpls_mask;
2781 	ret = mlx5_flow_item_acceptable
2782 		(item, (const uint8_t *)mask,
2783 		 (const uint8_t *)&rte_flow_item_mpls_mask,
2784 		 sizeof(struct rte_flow_item_mpls),
2785 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2786 	if (ret < 0)
2787 		return ret;
2788 	return 0;
2789 #else
2790 	return rte_flow_error_set(error, ENOTSUP,
2791 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
2792 				  "MPLS is not supported by Verbs, please"
2793 				  " update.");
2794 #endif
2795 }
2796 
2797 /**
2798  * Validate NVGRE item.
2799  *
2800  * @param[in] item
2801  *   Item specification.
2802  * @param[in] item_flags
2803  *   Bit flags to mark detected items.
2804  * @param[in] target_protocol
2805  *   The next protocol in the previous item.
2806  * @param[out] error
2807  *   Pointer to error structure.
2808  *
2809  * @return
2810  *   0 on success, a negative errno value otherwise and rte_errno is set.
2811  */
2812 int
2813 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2814 			      uint64_t item_flags,
2815 			      uint8_t target_protocol,
2816 			      struct rte_flow_error *error)
2817 {
2818 	const struct rte_flow_item_nvgre *mask = item->mask;
2819 	int ret;
2820 
2821 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2822 		return rte_flow_error_set(error, EINVAL,
2823 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2824 					  "protocol filtering not compatible"
2825 					  " with this GRE layer");
2826 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2827 		return rte_flow_error_set(error, ENOTSUP,
2828 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2829 					  "multiple tunnel layers not"
2830 					  " supported");
2831 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2832 		return rte_flow_error_set(error, ENOTSUP,
2833 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2834 					  "L3 Layer is missing");
2835 	if (!mask)
2836 		mask = &rte_flow_item_nvgre_mask;
2837 	ret = mlx5_flow_item_acceptable
2838 		(item, (const uint8_t *)mask,
2839 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
2840 		 sizeof(struct rte_flow_item_nvgre),
2841 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2842 	if (ret < 0)
2843 		return ret;
2844 	return 0;
2845 }
2846 
2847 /**
2848  * Validate eCPRI item.
2849  *
2850  * @param[in] item
2851  *   Item specification.
2852  * @param[in] item_flags
2853  *   Bit-fields that hold the items detected until now.
2854  * @param[in] last_item
2855  *   Previous validated item in the pattern items.
2856  * @param[in] ether_type
2857  *   Type in the ethernet layer header (including dot1q).
2858  * @param[in] acc_mask
2859  *   Acceptable mask; if NULL, the default internal mask
2860  *   will be used to check whether item fields are supported.
2861  * @param[out] error
2862  *   Pointer to error structure.
2863  *
2864  * @return
2865  *   0 on success, a negative errno value otherwise and rte_errno is set.
2866  */
2867 int
2868 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2869 			      uint64_t item_flags,
2870 			      uint64_t last_item,
2871 			      uint16_t ether_type,
2872 			      const struct rte_flow_item_ecpri *acc_mask,
2873 			      struct rte_flow_error *error)
2874 {
2875 	const struct rte_flow_item_ecpri *mask = item->mask;
2876 	const struct rte_flow_item_ecpri nic_mask = {
2877 		.hdr = {
2878 			.common = {
2879 				.u32 =
2880 				RTE_BE32(((const struct rte_ecpri_common_hdr) {
2881 					.type = 0xFF,
2882 					}).u32),
2883 			},
2884 			.dummy[0] = 0xFFFFFFFF,
2885 		},
2886 	};
2887 	const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
2888 					MLX5_FLOW_LAYER_OUTER_VLAN);
2889 	struct rte_flow_item_ecpri mask_lo;
2890 
2891 	if ((last_item & outer_l2_vlan) && ether_type &&
2892 	    ether_type != RTE_ETHER_TYPE_ECPRI)
2893 		return rte_flow_error_set(error, EINVAL,
2894 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2895 					  "eCPRI cannot follow L2/VLAN layer "
2896 					  "whose ether type is not 0xAEFE.");
2897 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2898 		return rte_flow_error_set(error, EINVAL,
2899 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2900 					  "eCPRI with tunnel is not supported "
2901 					  "right now.");
2902 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
2903 		return rte_flow_error_set(error, ENOTSUP,
2904 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2905 					  "multiple L3 layers not supported");
2906 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
2907 		return rte_flow_error_set(error, EINVAL,
2908 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2909 					  "eCPRI cannot follow a TCP layer.");
2910 	/* Per the specification, eCPRI may be carried over a UDP layer. */
2911 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
2912 		return rte_flow_error_set(error, EINVAL,
2913 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2914 					  "eCPRI over UDP layer is not"
2915 					  " supported yet.");
2916 	/* Mask for type field in common header could be zero. */
2917 	if (!mask)
2918 		mask = &rte_flow_item_ecpri_mask;
2919 	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
2920 	/* Input mask is in big-endian format. */
2921 	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
2922 		return rte_flow_error_set(error, EINVAL,
2923 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2924 					  "partial mask is not supported "
2925 					  "for protocol");
2926 	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
2927 		return rte_flow_error_set(error, EINVAL,
2928 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2929 					  "message header mask must be after "
2930 					  "a type mask");
2931 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2932 					 acc_mask ? (const uint8_t *)acc_mask
2933 						  : (const uint8_t *)&nic_mask,
2934 					 sizeof(struct rte_flow_item_ecpri),
2935 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2936 }
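
/*
 * Illustrative sketch only: building an eCPRI mask accepted above. The
 * common-header type must be masked fully (0xff) or not at all, and the
 * message body may only be masked when the type is masked too.
 */
static void __rte_unused
example_build_ecpri_mask(struct rte_flow_item_ecpri *mask)
{
	const struct rte_flow_item_ecpri m = {
		.hdr = {
			.common = {
				.u32 =
				RTE_BE32(((const struct rte_ecpri_common_hdr) {
					.type = 0xFF,	/* Full type mask. */
					}).u32),
			},
			.dummy[0] = 0xFFFFFFFF,	/* First message word. */
		},
	};

	*mask = m;
}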
2937 
2938 /**
2939  * Release resource related QUEUE/RSS action split.
2940  *
2941  * @param dev
2942  *   Pointer to Ethernet device.
2943  * @param flow
2944  *   Flow to release id's from.
2945  */
2946 static void
2947 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2948 			     struct rte_flow *flow)
2949 {
2950 	struct mlx5_priv *priv = dev->data->dev_private;
2951 	uint32_t handle_idx;
2952 	struct mlx5_flow_handle *dev_handle;
2953 
2954 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2955 		       handle_idx, dev_handle, next)
2956 		if (dev_handle->split_flow_id)
2957 			mlx5_ipool_free(priv->sh->ipool
2958 					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
2959 					dev_handle->split_flow_id);
2960 }
2961 
2962 static int
2963 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2964 		   const struct rte_flow_attr *attr __rte_unused,
2965 		   const struct rte_flow_item items[] __rte_unused,
2966 		   const struct rte_flow_action actions[] __rte_unused,
2967 		   bool external __rte_unused,
2968 		   int hairpin __rte_unused,
2969 		   struct rte_flow_error *error)
2970 {
2971 	return rte_flow_error_set(error, ENOTSUP,
2972 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2973 }
2974 
2975 static struct mlx5_flow *
2976 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
2977 		  const struct rte_flow_attr *attr __rte_unused,
2978 		  const struct rte_flow_item items[] __rte_unused,
2979 		  const struct rte_flow_action actions[] __rte_unused,
2980 		  struct rte_flow_error *error)
2981 {
2982 	rte_flow_error_set(error, ENOTSUP,
2983 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2984 	return NULL;
2985 }
2986 
2987 static int
2988 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2989 		    struct mlx5_flow *dev_flow __rte_unused,
2990 		    const struct rte_flow_attr *attr __rte_unused,
2991 		    const struct rte_flow_item items[] __rte_unused,
2992 		    const struct rte_flow_action actions[] __rte_unused,
2993 		    struct rte_flow_error *error)
2994 {
2995 	return rte_flow_error_set(error, ENOTSUP,
2996 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2997 }
2998 
2999 static int
3000 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3001 		struct rte_flow *flow __rte_unused,
3002 		struct rte_flow_error *error)
3003 {
3004 	return rte_flow_error_set(error, ENOTSUP,
3005 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3006 }
3007 
3008 static void
3009 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3010 		 struct rte_flow *flow __rte_unused)
3011 {
3012 }
3013 
3014 static void
3015 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3016 		  struct rte_flow *flow __rte_unused)
3017 {
3018 }
3019 
3020 static int
3021 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3022 		struct rte_flow *flow __rte_unused,
3023 		const struct rte_flow_action *actions __rte_unused,
3024 		void *data __rte_unused,
3025 		struct rte_flow_error *error)
3026 {
3027 	return rte_flow_error_set(error, ENOTSUP,
3028 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3029 }
3030 
3031 static int
3032 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3033 		      uint32_t domains __rte_unused,
3034 		      uint32_t flags __rte_unused)
3035 {
3036 	return 0;
3037 }
3038 
3039 /* Void driver to protect against NULL pointer dereference. */
3040 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3041 	.validate = flow_null_validate,
3042 	.prepare = flow_null_prepare,
3043 	.translate = flow_null_translate,
3044 	.apply = flow_null_apply,
3045 	.remove = flow_null_remove,
3046 	.destroy = flow_null_destroy,
3047 	.query = flow_null_query,
3048 	.sync_domain = flow_null_sync_domain,
3049 };
3050 
3051 /**
3052  * Select flow driver type according to flow attributes and device
3053  * configuration.
3054  *
3055  * @param[in] dev
3056  *   Pointer to the dev structure.
3057  * @param[in] attr
3058  *   Pointer to the flow attributes.
3059  *
3060  * @return
3061  *   Flow driver type on success, MLX5_FLOW_TYPE_MAX otherwise.
3062  */
3063 static enum mlx5_flow_drv_type
3064 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3065 {
3066 	struct mlx5_priv *priv = dev->data->dev_private;
3067 	/* The OS can first determine a specific flow type (DV, VERBS). */
3068 	enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3069 
3070 	if (type != MLX5_FLOW_TYPE_MAX)
3071 		return type;
3072 	/* If no OS specific type - continue with DV/VERBS selection */
3073 	if (attr->transfer && priv->config.dv_esw_en)
3074 		type = MLX5_FLOW_TYPE_DV;
3075 	if (!attr->transfer)
3076 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3077 						 MLX5_FLOW_TYPE_VERBS;
3078 	return type;
3079 }
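
/*
 * Illustrative sketch only: resolving the driver ops table from the flow
 * attributes before dispatching, which is what the flow_drv_*() wrappers
 * below do through the flow_get_drv_ops() macro.
 */
static int __rte_unused
example_dispatch_validate(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item items[],
			  const struct rte_flow_action actions[],
			  struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
		flow_drv_ops[flow_get_drv_type(dev, attr)];

	return fops->validate(dev, attr, items, actions, false, 0, error);
}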
3080 
3081 #define flow_get_drv_ops(type) flow_drv_ops[type]
3082 
3083 /**
3084  * Flow driver validation API. This abstracts calling driver specific functions.
3085  * The type of flow driver is determined according to flow attributes.
3086  *
3087  * @param[in] dev
3088  *   Pointer to the dev structure.
3089  * @param[in] attr
3090  *   Pointer to the flow attributes.
3091  * @param[in] items
3092  *   Pointer to the list of items.
3093  * @param[in] actions
3094  *   Pointer to the list of actions.
3095  * @param[in] external
3096  *   This flow rule is created by a request external to the PMD.
3097  * @param[in] hairpin
3098  *   Number of hairpin TX actions, 0 means classic flow.
3099  * @param[out] error
3100  *   Pointer to the error structure.
3101  *
3102  * @return
3103  *   0 on success, a negative errno value otherwise and rte_errno is set.
3104  */
3105 static inline int
3106 flow_drv_validate(struct rte_eth_dev *dev,
3107 		  const struct rte_flow_attr *attr,
3108 		  const struct rte_flow_item items[],
3109 		  const struct rte_flow_action actions[],
3110 		  bool external, int hairpin, struct rte_flow_error *error)
3111 {
3112 	const struct mlx5_flow_driver_ops *fops;
3113 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3114 
3115 	fops = flow_get_drv_ops(type);
3116 	return fops->validate(dev, attr, items, actions, external,
3117 			      hairpin, error);
3118 }
3119 
3120 /**
3121  * Flow driver preparation API. This abstracts calling driver specific
3122  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3123  * calculates the size of memory required for device flow, allocates the memory,
3124  * initializes the device flow and returns the pointer.
3125  *
3126  * @note
3127  *   This function initializes device flow structure such as dv or verbs in
3128  *   struct mlx5_flow. However, it is the caller's responsibility to initialize
3129  *   the rest. For example, adding the returned device flow to the
3130  *   flow->dev_handles list and setting the backward reference to the flow
3131  *   should be done outside of this function. The layers field is not filled either.
3132  *
3133  * @param[in] dev
3134  *   Pointer to the dev structure.
3135  * @param[in] attr
3136  *   Pointer to the flow attributes.
3137  * @param[in] items
3138  *   Pointer to the list of items.
3139  * @param[in] actions
3140  *   Pointer to the list of actions.
3141  * @param[in] flow_idx
3142  *   The memory pool index of the flow.
3143  * @param[out] error
3144  *   Pointer to the error structure.
3145  *
3146  * @return
3147  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3148  */
3149 static inline struct mlx5_flow *
3150 flow_drv_prepare(struct rte_eth_dev *dev,
3151 		 const struct rte_flow *flow,
3152 		 const struct rte_flow_attr *attr,
3153 		 const struct rte_flow_item items[],
3154 		 const struct rte_flow_action actions[],
3155 		 uint32_t flow_idx,
3156 		 struct rte_flow_error *error)
3157 {
3158 	const struct mlx5_flow_driver_ops *fops;
3159 	enum mlx5_flow_drv_type type = flow->drv_type;
3160 	struct mlx5_flow *mlx5_flow = NULL;
3161 
3162 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3163 	fops = flow_get_drv_ops(type);
3164 	mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3165 	if (mlx5_flow)
3166 		mlx5_flow->flow_idx = flow_idx;
3167 	return mlx5_flow;
3168 }
3169 
3170 /**
3171  * Flow driver translation API. This abstracts calling driver specific
3172  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3173  * translates a generic flow into a driver flow. flow_drv_prepare() must
3174  * precede.
3175  *
3176  * @note
3177  *   dev_flow->layers could be filled as a result of parsing during translation
3178  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3179  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3180  *   flow->actions could be overwritten even though all the expanded dev_flows
3181  *   have the same actions.
3182  *
3183  * @param[in] dev
3184  *   Pointer to the rte dev structure.
3185  * @param[in, out] dev_flow
3186  *   Pointer to the mlx5 flow.
3187  * @param[in] attr
3188  *   Pointer to the flow attributes.
3189  * @param[in] items
3190  *   Pointer to the list of items.
3191  * @param[in] actions
3192  *   Pointer to the list of actions.
3193  * @param[out] error
3194  *   Pointer to the error structure.
3195  *
3196  * @return
3197  *   0 on success, a negative errno value otherwise and rte_errno is set.
3198  */
3199 static inline int
3200 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3201 		   const struct rte_flow_attr *attr,
3202 		   const struct rte_flow_item items[],
3203 		   const struct rte_flow_action actions[],
3204 		   struct rte_flow_error *error)
3205 {
3206 	const struct mlx5_flow_driver_ops *fops;
3207 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3208 
3209 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3210 	fops = flow_get_drv_ops(type);
3211 	return fops->translate(dev, dev_flow, attr, items, actions, error);
3212 }
3213 
3214 /**
3215  * Flow driver apply API. This abstracts calling driver specific functions.
3216  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3217  * translated driver flows on to device. flow_drv_translate() must precede.
3218  *
3219  * @param[in] dev
3220  *   Pointer to Ethernet device structure.
3221  * @param[in, out] flow
3222  *   Pointer to flow structure.
3223  * @param[out] error
3224  *   Pointer to error structure.
3225  *
3226  * @return
3227  *   0 on success, a negative errno value otherwise and rte_errno is set.
3228  */
3229 static inline int
3230 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3231 	       struct rte_flow_error *error)
3232 {
3233 	const struct mlx5_flow_driver_ops *fops;
3234 	enum mlx5_flow_drv_type type = flow->drv_type;
3235 
3236 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3237 	fops = flow_get_drv_ops(type);
3238 	return fops->apply(dev, flow, error);
3239 }
3240 
3241 /**
3242  * Flow driver remove API. This abstracts calling driver specific functions.
3243  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3244  * on device. All the resources of the flow should be freed by calling
3245  * flow_drv_destroy().
3246  *
3247  * @param[in] dev
3248  *   Pointer to Ethernet device.
3249  * @param[in, out] flow
3250  *   Pointer to flow structure.
3251  */
3252 static inline void
3253 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3254 {
3255 	const struct mlx5_flow_driver_ops *fops;
3256 	enum mlx5_flow_drv_type type = flow->drv_type;
3257 
3258 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3259 	fops = flow_get_drv_ops(type);
3260 	fops->remove(dev, flow);
3261 }
3262 
3263 /**
3264  * Flow driver destroy API. This abstracts calling driver specific functions.
3265  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3266  * on device and releases resources of the flow.
3267  *
3268  * @param[in] dev
3269  *   Pointer to Ethernet device.
3270  * @param[in, out] flow
3271  *   Pointer to flow structure.
3272  */
3273 static inline void
3274 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3275 {
3276 	const struct mlx5_flow_driver_ops *fops;
3277 	enum mlx5_flow_drv_type type = flow->drv_type;
3278 
3279 	flow_mreg_split_qrss_release(dev, flow);
3280 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3281 	fops = flow_get_drv_ops(type);
3282 	fops->destroy(dev, flow);
3283 }
3284 
3285 /**
3286  * Get RSS action from the action list.
3287  *
3288  * @param[in] actions
3289  *   Pointer to the list of actions.
3290  *
3291  * @return
3292  *   Pointer to the RSS action if it exists, otherwise NULL.
3293  */
3294 static const struct rte_flow_action_rss *
3295 flow_get_rss_action(const struct rte_flow_action actions[])
3296 {
3297 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3298 		switch (actions->type) {
3299 		case RTE_FLOW_ACTION_TYPE_RSS:
3300 			return (const struct rte_flow_action_rss *)
3301 			       actions->conf;
3302 		default:
3303 			break;
3304 		}
3305 	}
3306 	return NULL;
3307 }
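
/*
 * Illustrative use of the scan above (not part of the driver): given an
 * application-built action list, the RSS configuration is pulled out of
 * the RSS action, or NULL is returned when the fate action is, e.g., QUEUE.
 *
 * @code
 * static const uint16_t queues[] = { 0, 1, 2, 3 };
 * static const struct rte_flow_action_rss rss_conf = {
 *	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *	.types = ETH_RSS_IP,
 *	.queue_num = RTE_DIM(queues),
 *	.queue = queues,
 * };
 * const struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * const struct rte_flow_action_rss *rss = flow_get_rss_action(actions);
 * // rss == &rss_conf here; a list without RSS would yield NULL.
 * @endcode
 */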
3308 
3309 /* Maps a shared action to the translated non-shared action in an array. */
3310 struct mlx5_translated_shared_action {
3311 	struct rte_flow_shared_action *action; /**< Shared action */
3312 	int index; /**< Index in related array of rte_flow_action */
3313 };
3314 
3315 /**
3316  * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to the related
3317  * non-shared action if translation is possible.
3318  * This functionality is used to run the same execution path for both shared
3319  * and non-shared actions on flow create. All necessary preparations for
3320  * shared action handling should be performed on the *shared* actions list
3321  * returned from this call.
3322  *
3323  * @param[in] actions
3324  *   List of actions to translate.
3325  * @param[out] shared
3326  *   List to store translated shared actions.
3327  * @param[in, out] shared_n
3328  *   Size of the *shared* array. On return it is updated with the number of
3329  *   shared actions retrieved from the *actions* list.
3330  * @param[out] translated_actions
3331  *   List of actions where all shared actions were translated to non-shared
3332  *   if possible. NULL if no translation took place.
3333  * @param[out] error
3334  *   Pointer to the error structure.
3335  *
3336  * @return
3337  *   0 on success, a negative errno value otherwise and rte_errno is set.
3338  */
3339 static int
3340 flow_shared_actions_translate(const struct rte_flow_action actions[],
3341 	struct mlx5_translated_shared_action *shared,
3342 	int *shared_n,
3343 	struct rte_flow_action **translated_actions,
3344 	struct rte_flow_error *error)
3345 {
3346 	struct rte_flow_action *translated = NULL;
3347 	size_t actions_size;
3348 	int n;
3349 	int copied_n = 0;
3350 	struct mlx5_translated_shared_action *shared_end = NULL;
3351 
3352 	for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3353 		if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
3354 			continue;
3355 		if (copied_n == *shared_n) {
3356 			return rte_flow_error_set
3357 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3358 				 NULL, "too many shared actions");
3359 		}
3360 		rte_memcpy(&shared[copied_n].action, &actions[n].conf,
3361 			   sizeof(actions[n].conf));
3362 		shared[copied_n].index = n;
3363 		copied_n++;
3364 	}
3365 	n++;
3366 	*shared_n = copied_n;
3367 	if (!copied_n)
3368 		return 0;
3369 	actions_size = sizeof(struct rte_flow_action) * n;
3370 	translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3371 	if (!translated) {
3372 		rte_errno = ENOMEM;
3373 		return -ENOMEM;
3374 	}
3375 	memcpy(translated, actions, actions_size);
3376 	for (shared_end = shared + copied_n; shared < shared_end; shared++) {
3377 		const struct rte_flow_shared_action *shared_action;
3378 
3379 		shared_action = shared->action;
3380 		switch (shared_action->type) {
3381 		case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
3382 			translated[shared->index].type =
3383 				RTE_FLOW_ACTION_TYPE_RSS;
3384 			translated[shared->index].conf =
3385 				&shared_action->rss.origin;
3386 			break;
3387 		default:
3388 			mlx5_free(translated);
3389 			return rte_flow_error_set
3390 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3391 				 NULL, "invalid shared action type");
3392 		}
3393 	}
3394 	*translated_actions = translated;
3395 	return 0;
3396 }
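
/*
 * Sketch of the translation above (illustrative only, with a hypothetical
 * bound of 8 shared actions): on success, *translated* (when not NULL)
 * holds the same list with every SHARED entry rewritten as a plain RSS
 * action pointing at the origin configuration stored in the handle, so
 * the rest of flow creation is type-agnostic.
 *
 * @code
 * struct mlx5_translated_shared_action shared[8];
 * int shared_n = RTE_DIM(shared);
 * struct rte_flow_action *translated = NULL;
 * struct rte_flow_error error;
 *
 * if (!flow_shared_actions_translate(actions, shared, &shared_n,
 *				      &translated, &error)) {
 *	// shared[0..shared_n-1] records which positions were rewritten.
 *	// use translated ? translated : actions;
 *	mlx5_free(translated);
 * }
 * @endcode
 */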
3397 
3398 /**
3399  * Get Shared RSS action from the action list.
3400  *
3401  * @param[in] shared
3402  *   Pointer to the list of translated shared actions.
3403  * @param[in] shared_n
3404  *   Translated shared actions list length.
3405  *
3406  * @return
3407  *   Pointer to the MLX5 RSS action if it exists, otherwise NULL.
3408  */
3409 static struct mlx5_shared_action_rss *
3410 flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared,
3411 			   int shared_n)
3412 {
3413 	struct mlx5_translated_shared_action *shared_end;
3414 
3415 	for (shared_end = shared + shared_n; shared < shared_end; shared++) {
3416 		struct rte_flow_shared_action *shared_action;
3417 
3418 		shared_action = shared->action;
3419 		switch (shared_action->type) {
3420 		case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
3421 			__atomic_add_fetch(&shared_action->refcnt, 1,
3422 					   __ATOMIC_RELAXED);
3423 			return &shared_action->rss;
3424 		default:
3425 			break;
3426 		}
3427 	}
3428 	return NULL;
3429 }
3430 
3431 struct rte_flow_shared_action *
3432 mlx5_flow_get_shared_rss(struct rte_flow *flow)
3433 {
3434 	if (flow->shared_rss)
3435 		return container_of(flow->shared_rss,
3436 				    struct rte_flow_shared_action, rss);
3437 	else
3438 		return NULL;
3439 }
3440 
3441 static unsigned int
3442 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3443 {
3444 	const struct rte_flow_item *item;
3445 	unsigned int has_vlan = 0;
3446 
3447 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3448 		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3449 			has_vlan = 1;
3450 			break;
3451 		}
3452 	}
3453 	if (has_vlan)
3454 		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3455 				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3456 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3457 			       MLX5_EXPANSION_ROOT_OUTER;
3458 }
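
/*
 * Worked example of the root selection above (illustrative):
 *
 * @code
 * const struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * // rss_level 0/1 (outer hash): MLX5_EXPANSION_ROOT_ETH_VLAN.
 * // rss_level >= 2 (inner hash): MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN.
 * unsigned int root = find_graph_root(pattern, 2);
 * // Without the VLAN item the plain MLX5_EXPANSION_ROOT(_OUTER)
 * // roots are selected instead.
 * @endcode
 */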
3459 
3460 /**
3461  *  Get layer flags from the prefix flow.
3462  *
3463  *  Some flows may be split into several subflows: the prefix subflow gets the
3464  *  match items and the suffix subflow gets the actions.
3465  *  Some actions need the user-defined match item flags to get the details for
3466  *  the action.
3467  *  This function helps the suffix flow to get the item layer flags from the
3468  *  prefix subflow.
3469  *
3470  * @param[in] dev_flow
3471  *   Pointer to the created prefix subflow.
3472  *
3473  * @return
3474  *   The layers retrieved from the prefix subflow.
3475  */
3476 static inline uint64_t
3477 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3478 {
3479 	uint64_t layers = 0;
3480 
3481 	/*
3482 	 * The layer bits could be cached locally, but usually the compiler
3483 	 * will optimize this access for us.
3484 	 * If there is no decap action, use the layers directly.
3485 	 */
3486 	if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3487 		return dev_flow->handle->layers;
3488 	/* Convert L3 layers with decap action. */
3489 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3490 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3491 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3492 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3493 	/* Convert L4 layers with decap action.  */
3494 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3495 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3496 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3497 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3498 	return layers;
3499 }
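
/*
 * Worked example (illustrative): a prefix subflow matching
 * eth / ipv4 / udp / vxlan / eth / ipv4 / tcp with a decap action has
 * MLX5_FLOW_LAYER_INNER_L3_IPV4 and MLX5_FLOW_LAYER_INNER_L4_TCP set in
 * its handle->layers. After decap those inner headers are what the
 * suffix subflow sees as outer ones, so the helper above reports
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_TCP.
 */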
3500 
3501 /**
3502  * Get metadata split action information.
3503  *
3504  * @param[in] actions
3505  *   Pointer to the list of actions.
3506  * @param[out] qrss
3507  *   Pointer to store the detected QUEUE/RSS action; left untouched if no
3508  *   QUEUE/RSS action is found.
3511  * @param[out] encap_idx
3512  *   Pointer to the index of the encap action if it exists, otherwise the last
3513  *   action index.
3514  *
3515  * @return
3516  *   Total number of actions.
3517  */
3518 static int
3519 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3520 				       const struct rte_flow_action **qrss,
3521 				       int *encap_idx)
3522 {
3523 	const struct rte_flow_action_raw_encap *raw_encap;
3524 	int actions_n = 0;
3525 	int raw_decap_idx = -1;
3526 
3527 	*encap_idx = -1;
3528 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3529 		switch (actions->type) {
3530 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3531 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3532 			*encap_idx = actions_n;
3533 			break;
3534 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3535 			raw_decap_idx = actions_n;
3536 			break;
3537 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3538 			raw_encap = actions->conf;
3539 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3540 				*encap_idx = raw_decap_idx != -1 ?
3541 						      raw_decap_idx : actions_n;
3542 			break;
3543 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3544 		case RTE_FLOW_ACTION_TYPE_RSS:
3545 			*qrss = actions;
3546 			break;
3547 		default:
3548 			break;
3549 		}
3550 		actions_n++;
3551 	}
3552 	if (*encap_idx == -1)
3553 		*encap_idx = actions_n;
3554 	/* Count RTE_FLOW_ACTION_TYPE_END. */
3555 	return actions_n + 1;
3556 }
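
/*
 * Worked example (illustrative): for the action list
 *   { RAW_DECAP, RAW_ENCAP(size > MLX5_ENCAPSULATION_DECISION_SIZE),
 *     QUEUE, END }
 * the helper sets *qrss to the QUEUE entry, *encap_idx to 0 (the
 * RAW_DECAP position, since a large encap preceded by a decap is
 * treated as a tunnel rewrite starting at the decap) and returns 4,
 * the END action included.
 */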
3557 
3558 /**
3559  * Check meter action from the action list.
3560  *
3561  * @param[in] actions
3562  *   Pointer to the list of actions.
3563  * @param[out] mtr
3564  *   Pointer to the meter existence flag.
3565  *
3566  * @return
3567  *   Total number of actions.
3568  */
3569 static int
3570 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
3571 {
3572 	int actions_n = 0;
3573 
3574 	MLX5_ASSERT(mtr);
3575 	*mtr = 0;
3576 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3577 		switch (actions->type) {
3578 		case RTE_FLOW_ACTION_TYPE_METER:
3579 			*mtr = 1;
3580 			break;
3581 		default:
3582 			break;
3583 		}
3584 		actions_n++;
3585 	}
3586 	/* Count RTE_FLOW_ACTION_TYPE_END. */
3587 	return actions_n + 1;
3588 }
3589 
3590 /**
3591  * Check if the flow should be split due to hairpin.
3592  * The reason for the split is that current HW can't support
3593  * encap and push-VLAN on Rx, so if a flow contains
3594  * these actions we move them to Tx.
3595  *
3596  * @param dev
3597  *   Pointer to Ethernet device.
3598  * @param[in] attr
3599  *   Flow rule attributes.
3600  * @param[in] actions
3601  *   Associated actions (list terminated by the END action).
3602  *
3603  * @return
3604  *   > 0 the number of actions and the flow should be split,
3605  *   0 when no split is required.
3606  */
3607 static int
3608 flow_check_hairpin_split(struct rte_eth_dev *dev,
3609 			 const struct rte_flow_attr *attr,
3610 			 const struct rte_flow_action actions[])
3611 {
3612 	int queue_action = 0;
3613 	int action_n = 0;
3614 	int split = 0;
3615 	const struct rte_flow_action_queue *queue;
3616 	const struct rte_flow_action_rss *rss;
3617 	const struct rte_flow_action_raw_encap *raw_encap;
3618 	const struct rte_eth_hairpin_conf *conf;
3619 
3620 	if (!attr->ingress)
3621 		return 0;
3622 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3623 		switch (actions->type) {
3624 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3625 			queue = actions->conf;
3626 			if (queue == NULL)
3627 				return 0;
3628 			conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3629 			if (conf != NULL && !!conf->tx_explicit)
3630 				return 0;
3631 			queue_action = 1;
3632 			action_n++;
3633 			break;
3634 		case RTE_FLOW_ACTION_TYPE_RSS:
3635 			rss = actions->conf;
3636 			if (rss == NULL || rss->queue_num == 0)
3637 				return 0;
3638 			conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3639 			if (conf != NULL && !!conf->tx_explicit)
3640 				return 0;
3641 			queue_action = 1;
3642 			action_n++;
3643 			break;
3644 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3645 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3646 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3647 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3648 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3649 			split++;
3650 			action_n++;
3651 			break;
3652 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3653 			raw_encap = actions->conf;
3654 			if (raw_encap->size >
3655 			    (sizeof(struct rte_flow_item_eth) +
3656 			     sizeof(struct rte_flow_item_ipv4)))
3657 				split++;
3658 			action_n++;
3659 			break;
3660 		default:
3661 			action_n++;
3662 			break;
3663 		}
3664 	}
3665 	if (split && queue_action)
3666 		return action_n;
3667 	return 0;
3668 }
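
/*
 * Illustrative decision (not part of the driver): an ingress flow with
 * actions { VXLAN_ENCAP, QUEUE, END } targeting a hairpin Rx queue both
 * queues to hairpin and carries an encap Rx hardware cannot apply, so
 * the helper returns 2 (the action count) and the caller splits the
 * encap off to the Tx side. The same list on a hairpin queue configured
 * with explicit Tx rules, or a list without any encap/push-VLAN action,
 * returns 0.
 */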
3669 
3670 /* Declare flow create/destroy prototype in advance. */
3671 static uint32_t
3672 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
3673 		 const struct rte_flow_attr *attr,
3674 		 const struct rte_flow_item items[],
3675 		 const struct rte_flow_action actions[],
3676 		 bool external, struct rte_flow_error *error);
3677 
3678 static void
3679 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
3680 		  uint32_t flow_idx);
3681 
3682 /**
3683  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3684  *
3685  * As mark_id is unique, if there's already a registered flow for the mark_id,
3686  * return it after increasing the reference counter of the resource. Otherwise,
3687  * create the resource (mcp_res) and flow.
3688  *
3689  * Flow looks like,
3690  *   - If ingress port is ANY and reg_c[1] is mark_id,
3691  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3692  *
3693  * For default flow (zero mark_id), flow is like,
3694  *   - If ingress port is ANY,
3695  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
3696  *
3697  * @param dev
3698  *   Pointer to Ethernet device.
3699  * @param mark_id
3700  *   ID of MARK action, zero means default flow for META.
3701  * @param[out] error
3702  *   Perform verbose error reporting if not NULL.
3703  *
3704  * @return
3705  *   Associated resource on success, NULL otherwise and rte_errno is set.
3706  */
3707 static struct mlx5_flow_mreg_copy_resource *
3708 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
3709 			  struct rte_flow_error *error)
3710 {
3711 	struct mlx5_priv *priv = dev->data->dev_private;
3712 	struct rte_flow_attr attr = {
3713 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3714 		.ingress = 1,
3715 	};
3716 	struct mlx5_rte_flow_item_tag tag_spec = {
3717 		.data = mark_id,
3718 	};
3719 	struct rte_flow_item items[] = {
3720 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
3721 	};
3722 	struct rte_flow_action_mark ftag = {
3723 		.id = mark_id,
3724 	};
3725 	struct mlx5_flow_action_copy_mreg cp_mreg = {
3726 		.dst = REG_B,
3727 		.src = REG_NON,
3728 	};
3729 	struct rte_flow_action_jump jump = {
3730 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3731 	};
3732 	struct rte_flow_action actions[] = {
3733 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
3734 	};
3735 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3736 	uint32_t idx = 0;
3737 	int ret;
3738 
3739 	/* Fill the register fields in the flow. */
3740 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3741 	if (ret < 0)
3742 		return NULL;
3743 	tag_spec.id = ret;
3744 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3745 	if (ret < 0)
3746 		return NULL;
3747 	cp_mreg.src = ret;
3748 	/* Check if already registered. */
3749 	MLX5_ASSERT(priv->mreg_cp_tbl);
3750 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
3751 	if (mcp_res) {
3752 		/* For non-default rule. */
3753 		if (mark_id != MLX5_DEFAULT_COPY_ID)
3754 			mcp_res->refcnt++;
3755 		MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
3756 			    mcp_res->refcnt == 1);
3757 		return mcp_res;
3758 	}
3759 	/* Provide the full width of FLAG specific value. */
3760 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
3761 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3762 	/* Build a new flow. */
3763 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
3764 		items[0] = (struct rte_flow_item){
3765 			.type = (enum rte_flow_item_type)
3766 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3767 			.spec = &tag_spec,
3768 		};
3769 		items[1] = (struct rte_flow_item){
3770 			.type = RTE_FLOW_ITEM_TYPE_END,
3771 		};
3772 		actions[0] = (struct rte_flow_action){
3773 			.type = (enum rte_flow_action_type)
3774 				MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3775 			.conf = &ftag,
3776 		};
3777 		actions[1] = (struct rte_flow_action){
3778 			.type = (enum rte_flow_action_type)
3779 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3780 			.conf = &cp_mreg,
3781 		};
3782 		actions[2] = (struct rte_flow_action){
3783 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3784 			.conf = &jump,
3785 		};
3786 		actions[3] = (struct rte_flow_action){
3787 			.type = RTE_FLOW_ACTION_TYPE_END,
3788 		};
3789 	} else {
3790 		/* Default rule, wildcard match. */
3791 		attr.priority = MLX5_FLOW_PRIO_RSVD;
3792 		items[0] = (struct rte_flow_item){
3793 			.type = RTE_FLOW_ITEM_TYPE_END,
3794 		};
3795 		actions[0] = (struct rte_flow_action){
3796 			.type = (enum rte_flow_action_type)
3797 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3798 			.conf = &cp_mreg,
3799 		};
3800 		actions[1] = (struct rte_flow_action){
3801 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3802 			.conf = &jump,
3803 		};
3804 		actions[2] = (struct rte_flow_action){
3805 			.type = RTE_FLOW_ACTION_TYPE_END,
3806 		};
3807 	}
3808 	/* Build a new entry. */
3809 	mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3810 	if (!mcp_res) {
3811 		rte_errno = ENOMEM;
3812 		return NULL;
3813 	}
3814 	mcp_res->idx = idx;
3815 	/*
3816 	 * The copy flows are not included in any list. They
3817 	 * are referenced from other flows and cannot
3818 	 * be applied, removed or deleted in arbitrary order
3819 	 * by list traversing.
3820 	 */
3821 	mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
3822 					 actions, false, error);
3823 	if (!mcp_res->rix_flow)
3824 		goto error;
3825 	mcp_res->refcnt++;
3826 	mcp_res->hlist_ent.key = mark_id;
3827 	ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
3828 				&mcp_res->hlist_ent);
3829 	MLX5_ASSERT(!ret);
3830 	if (ret)
3831 		goto error;
3832 	return mcp_res;
3833 error:
3834 	if (mcp_res->rix_flow)
3835 		flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3836 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3837 	return NULL;
3838 }
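
/*
 * Shape of the rule installed above for a non-default mark_id
 * (illustrative, using the internal item/action types):
 *
 * @code
 * // group MLX5_FLOW_MREG_CP_TABLE_GROUP, ingress
 * // pattern: TAG(reg_c[1] == mark_id)
 * // actions: MARK(mark_id) / COPY_MREG(reg_b := reg_c[0]) /
 * //          JUMP(MLX5_FLOW_MREG_ACT_TABLE_GROUP) / END
 * @endcode
 *
 * The default (MLX5_DEFAULT_COPY_ID) entry drops the TAG match and the
 * MARK action, keeping only the register copy and the jump.
 */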
3839 
3840 /**
3841  * Release flow in RX_CP_TBL.
3842  *
3843  * @param dev
3844  *   Pointer to Ethernet device.
3845  * @param flow
3846  *   Parent flow for which copying is provided.
3847  */
3848 static void
3849 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3850 			  struct rte_flow *flow)
3851 {
3852 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3853 	struct mlx5_priv *priv = dev->data->dev_private;
3854 
3855 	if (!flow->rix_mreg_copy)
3856 		return;
3857 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3858 				 flow->rix_mreg_copy);
3859 	if (!mcp_res || !priv->mreg_cp_tbl)
3860 		return;
3861 	if (flow->copy_applied) {
3862 		MLX5_ASSERT(mcp_res->appcnt);
3863 		flow->copy_applied = 0;
3864 		--mcp_res->appcnt;
3865 		if (!mcp_res->appcnt) {
3866 			struct rte_flow *mcp_flow = mlx5_ipool_get
3867 					(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
3868 					mcp_res->rix_flow);
3869 
3870 			if (mcp_flow)
3871 				flow_drv_remove(dev, mcp_flow);
3872 		}
3873 	}
3874 	/*
3875 	 * We do not check availability of metadata registers here,
3876 	 * because copy resources are not allocated in this case.
3877 	 */
3878 	if (--mcp_res->refcnt)
3879 		return;
3880 	MLX5_ASSERT(mcp_res->rix_flow);
3881 	flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3882 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3883 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3884 	flow->rix_mreg_copy = 0;
3885 }
3886 
3887 /**
3888  * Start flow in RX_CP_TBL.
3889  *
3890  * @param dev
3891  *   Pointer to Ethernet device.
3892  * @param flow
3893  *   Parent flow for which copying is provided.
3894  *
3895  * @return
3896  *   0 on success, a negative errno value otherwise and rte_errno is set.
3897  */
3898 static int
3899 flow_mreg_start_copy_action(struct rte_eth_dev *dev,
3900 			    struct rte_flow *flow)
3901 {
3902 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3903 	struct mlx5_priv *priv = dev->data->dev_private;
3904 	int ret;
3905 
3906 	if (!flow->rix_mreg_copy || flow->copy_applied)
3907 		return 0;
3908 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3909 				 flow->rix_mreg_copy);
3910 	if (!mcp_res)
3911 		return 0;
3912 	if (!mcp_res->appcnt) {
3913 		struct rte_flow *mcp_flow = mlx5_ipool_get
3914 				(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
3915 				mcp_res->rix_flow);
3916 
3917 		if (mcp_flow) {
3918 			ret = flow_drv_apply(dev, mcp_flow, NULL);
3919 			if (ret)
3920 				return ret;
3921 		}
3922 	}
3923 	++mcp_res->appcnt;
3924 	flow->copy_applied = 1;
3925 	return 0;
3926 }
3927 
3928 /**
3929  * Stop flow in RX_CP_TBL.
3930  *
3931  * @param dev
3932  *   Pointer to Ethernet device.
3933  * @param flow
3934  *   Parent flow for which copying is provided.
3935  */
3936 static void
3937 flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
3938 			   struct rte_flow *flow)
3939 {
3940 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3941 	struct mlx5_priv *priv = dev->data->dev_private;
3942 
3943 	if (!flow->rix_mreg_copy || !flow->copy_applied)
3944 		return;
3945 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3946 				 flow->rix_mreg_copy);
3947 	if (!mcp_res)
3948 		return;
3949 	MLX5_ASSERT(mcp_res->appcnt);
3950 	--mcp_res->appcnt;
3951 	flow->copy_applied = 0;
3952 	if (!mcp_res->appcnt) {
3953 		struct rte_flow *mcp_flow = mlx5_ipool_get
3954 				(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
3955 				mcp_res->rix_flow);
3956 
3957 		if (mcp_flow)
3958 			flow_drv_remove(dev, mcp_flow);
3959 	}
3960 }
3961 
3962 /**
3963  * Remove the default copy action from RX_CP_TBL.
3964  *
3965  * @param dev
3966  *   Pointer to Ethernet device.
3967  */
3968 static void
3969 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3970 {
3971 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3972 	struct mlx5_priv *priv = dev->data->dev_private;
3973 
3974 	/* Check if default flow is registered. */
3975 	if (!priv->mreg_cp_tbl)
3976 		return;
3977 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
3978 					    MLX5_DEFAULT_COPY_ID);
3979 	if (!mcp_res)
3980 		return;
3981 	MLX5_ASSERT(mcp_res->rix_flow);
3982 	flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3983 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3984 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3985 }
3986 
3987 /**
3988  * Add the default copy action in RX_CP_TBL.
3989  *
3990  * @param dev
3991  *   Pointer to Ethernet device.
3992  * @param[out] error
3993  *   Perform verbose error reporting if not NULL.
3994  *
3995  * @return
3996  *   0 for success, negative value otherwise and rte_errno is set.
3997  */
3998 static int
3999 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
4000 				  struct rte_flow_error *error)
4001 {
4002 	struct mlx5_priv *priv = dev->data->dev_private;
4003 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4004 
4005 	/* Check whether extensive metadata feature is engaged. */
4006 	if (!priv->config.dv_flow_en ||
4007 	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4008 	    !mlx5_flow_ext_mreg_supported(dev) ||
4009 	    !priv->sh->dv_regc0_mask)
4010 		return 0;
4011 	mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
4012 	if (!mcp_res)
4013 		return -rte_errno;
4014 	return 0;
4015 }
4016 
4017 /**
4018  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4019  *
4020  * All the flows having a Q/RSS action should be split by
4021  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
4022  * performs the following,
4023  *   - CQE->flow_tag := reg_c[1] (MARK)
4024  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4025  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
4026  * but there should be a flow for each MARK ID set by the MARK action.
4027  *
4028  * For the aforementioned reason, if there's a MARK action in flow's action
4029  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
4030  * the MARK ID to CQE's flow_tag like,
4031  *   - If reg_c[1] is mark_id,
4032  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4033  *
4034  * For SET_META action which stores value in reg_c[0], as the destination is
4035  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
4036  * MARK ID means the default flow. The default flow looks like,
4037  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4038  *
4039  * @param dev
4040  *   Pointer to Ethernet device.
4041  * @param flow
4042  *   Pointer to flow structure.
4043  * @param[in] actions
4044  *   Pointer to the list of actions.
4045  * @param[out] error
4046  *   Perform verbose error reporting if not NULL.
4047  *
4048  * @return
4049  *   0 on success, negative value otherwise and rte_errno is set.
4050  */
4051 static int
4052 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
4053 			    struct rte_flow *flow,
4054 			    const struct rte_flow_action *actions,
4055 			    struct rte_flow_error *error)
4056 {
4057 	struct mlx5_priv *priv = dev->data->dev_private;
4058 	struct mlx5_dev_config *config = &priv->config;
4059 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4060 	const struct rte_flow_action_mark *mark;
4061 
4062 	/* Check whether extensive metadata feature is engaged. */
4063 	if (!config->dv_flow_en ||
4064 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4065 	    !mlx5_flow_ext_mreg_supported(dev) ||
4066 	    !priv->sh->dv_regc0_mask)
4067 		return 0;
4068 	/* Find MARK action. */
4069 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4070 		switch (actions->type) {
4071 		case RTE_FLOW_ACTION_TYPE_FLAG:
4072 			mcp_res = flow_mreg_add_copy_action
4073 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
4074 			if (!mcp_res)
4075 				return -rte_errno;
4076 			flow->rix_mreg_copy = mcp_res->idx;
4077 			if (dev->data->dev_started) {
4078 				mcp_res->appcnt++;
4079 				flow->copy_applied = 1;
4080 			}
4081 			return 0;
4082 		case RTE_FLOW_ACTION_TYPE_MARK:
4083 			mark = (const struct rte_flow_action_mark *)
4084 				actions->conf;
4085 			mcp_res =
4086 				flow_mreg_add_copy_action(dev, mark->id, error);
4087 			if (!mcp_res)
4088 				return -rte_errno;
4089 			flow->rix_mreg_copy = mcp_res->idx;
4090 			if (dev->data->dev_started) {
4091 				mcp_res->appcnt++;
4092 				flow->copy_applied = 1;
4093 			}
4094 			return 0;
4095 		default:
4096 			break;
4097 		}
4098 	}
4099 	return 0;
4100 }
4101 
4102 #define MLX5_MAX_SPLIT_ACTIONS 24
4103 #define MLX5_MAX_SPLIT_ITEMS 24
4104 
4105 /**
4106  * Split the hairpin flow.
4107  * Since HW can't support encap and push-vlan on Rx, we move these
4108  * actions to Tx.
4109  * If the count action is after the encap then we also
4110  * move the count action. In this case the count will also measure
4111  * the outer bytes.
4112  *
4113  * @param dev
4114  *   Pointer to Ethernet device.
4115  * @param[in] actions
4116  *   Associated actions (list terminated by the END action).
4117  * @param[out] actions_rx
4118  *   Rx flow actions.
4119  * @param[out] actions_tx
4120  *   Tx flow actions.
4121  * @param[out] pattern_tx
4122  *   The pattern items for the Tx flow.
4123  * @param[out] flow_id
4124  *   The flow ID connected to this flow.
4125  *
4126  * @return
4127  *   0 on success.
4128  */
4129 static int
4130 flow_hairpin_split(struct rte_eth_dev *dev,
4131 		   const struct rte_flow_action actions[],
4132 		   struct rte_flow_action actions_rx[],
4133 		   struct rte_flow_action actions_tx[],
4134 		   struct rte_flow_item pattern_tx[],
4135 		   uint32_t flow_id)
4136 {
4137 	const struct rte_flow_action_raw_encap *raw_encap;
4138 	const struct rte_flow_action_raw_decap *raw_decap;
4139 	struct mlx5_rte_flow_action_set_tag *set_tag;
4140 	struct rte_flow_action *tag_action;
4141 	struct mlx5_rte_flow_item_tag *tag_item;
4142 	struct rte_flow_item *item;
4143 	char *addr;
4144 	int encap = 0;
4145 
4146 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4147 		switch (actions->type) {
4148 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4149 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4150 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4151 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4152 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4153 			rte_memcpy(actions_tx, actions,
4154 			       sizeof(struct rte_flow_action));
4155 			actions_tx++;
4156 			break;
4157 		case RTE_FLOW_ACTION_TYPE_COUNT:
4158 			if (encap) {
4159 				rte_memcpy(actions_tx, actions,
4160 					   sizeof(struct rte_flow_action));
4161 				actions_tx++;
4162 			} else {
4163 				rte_memcpy(actions_rx, actions,
4164 					   sizeof(struct rte_flow_action));
4165 				actions_rx++;
4166 			}
4167 			break;
4168 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4169 			raw_encap = actions->conf;
4170 			if (raw_encap->size >
4171 			    (sizeof(struct rte_flow_item_eth) +
4172 			     sizeof(struct rte_flow_item_ipv4))) {
4173 				memcpy(actions_tx, actions,
4174 				       sizeof(struct rte_flow_action));
4175 				actions_tx++;
4176 				encap = 1;
4177 			} else {
4178 				rte_memcpy(actions_rx, actions,
4179 					   sizeof(struct rte_flow_action));
4180 				actions_rx++;
4181 			}
4182 			break;
4183 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4184 			raw_decap = actions->conf;
4185 			if (raw_decap->size <
4186 			    (sizeof(struct rte_flow_item_eth) +
4187 			     sizeof(struct rte_flow_item_ipv4))) {
4188 				memcpy(actions_tx, actions,
4189 				       sizeof(struct rte_flow_action));
4190 				actions_tx++;
4191 			} else {
4192 				rte_memcpy(actions_rx, actions,
4193 					   sizeof(struct rte_flow_action));
4194 				actions_rx++;
4195 			}
4196 			break;
4197 		default:
4198 			rte_memcpy(actions_rx, actions,
4199 				   sizeof(struct rte_flow_action));
4200 			actions_rx++;
4201 			break;
4202 		}
4203 	}
4204 	/* Add the set tag action and the end action for the Rx flow. */
4205 	tag_action = actions_rx;
4206 	tag_action->type = (enum rte_flow_action_type)
4207 			   MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4208 	actions_rx++;
4209 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4210 	actions_rx++;
4211 	set_tag = (void *)actions_rx;
4212 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
4213 	MLX5_ASSERT(set_tag->id > REG_NON);
4214 	set_tag->data = flow_id;
4215 	tag_action->conf = set_tag;
4216 	/* Create Tx item list. */
4217 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4218 	addr = (void *)&pattern_tx[2];
4219 	item = pattern_tx;
4220 	item->type = (enum rte_flow_item_type)
4221 		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4222 	tag_item = (void *)addr;
4223 	tag_item->data = flow_id;
4224 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4225 	MLX5_ASSERT(tag_item->id > REG_NON);
4226 	item->spec = tag_item;
4227 	addr += sizeof(struct mlx5_rte_flow_item_tag);
4228 	tag_item = (void *)addr;
4229 	tag_item->data = UINT32_MAX;
4230 	tag_item->id = UINT16_MAX;
4231 	item->mask = tag_item;
4232 	item->last = NULL;
4233 	item++;
4234 	item->type = RTE_FLOW_ITEM_TYPE_END;
4235 	return 0;
4236 }
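
/*
 * Worked example of the split above (illustrative): for an ingress flow
 * with actions { RAW_ENCAP(large), COUNT, QUEUE, END } on a hairpin
 * queue, the helper produces:
 *   Rx: { QUEUE, TAG(register := flow_id), END }
 *   Tx: { RAW_ENCAP, COUNT, END }, matched on the internal TAG item
 *       carrying the same flow_id,
 * so the encap, and the count that follows it, are performed on the Tx
 * side of the hairpin only.
 */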
4237 
4238 __extension__
4239 union tunnel_offload_mark {
4240 	uint32_t val;
4241 	struct {
4242 		uint32_t app_reserve:8;
4243 		uint32_t table_id:15;
4244 		uint32_t transfer:1;
4245 		uint32_t _unused_:8;
4246 	};
4247 };
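
/*
 * Packing sketch for the union above (illustrative): the 32-bit MARK
 * value set on a tunnel-offload miss carries the restore information.
 *
 * @code
 * union tunnel_offload_mark mark_id = {
 *	.app_reserve = 0,	// low 8 bits are left to the application
 *	.table_id = 42,		// tunnel flow table id, 15 bits
 *	.transfer = 1,		// rule resides in the E-Switch (FDB) domain
 *	._unused_ = 0,
 * };
 * uint32_t raw = mark_id.val;	// value programmed into the MARK action
 * @endcode
 */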
4248 
4249 struct tunnel_default_miss_ctx {
4250 	uint16_t *queue;
4251 	__extension__
4252 	union {
4253 		struct rte_flow_action_rss action_rss;
4254 		struct rte_flow_action_queue miss_queue;
4255 		struct rte_flow_action_jump miss_jump;
4256 		uint8_t raw[0];
4257 	};
4258 };
4259 
4260 static int
4261 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
4262 			     struct rte_flow *flow,
4263 			     const struct rte_flow_attr *attr,
4264 			     const struct rte_flow_action *app_actions,
4265 			     uint32_t flow_idx,
4266 			     struct tunnel_default_miss_ctx *ctx,
4267 			     struct rte_flow_error *error)
4268 {
4269 	struct mlx5_priv *priv = dev->data->dev_private;
4270 	struct mlx5_flow *dev_flow;
4271 	struct rte_flow_attr miss_attr = *attr;
4272 	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
4273 	const struct rte_flow_item miss_items[2] = {
4274 		{
4275 			.type = RTE_FLOW_ITEM_TYPE_ETH,
4276 			.spec = NULL,
4277 			.last = NULL,
4278 			.mask = NULL
4279 		},
4280 		{
4281 			.type = RTE_FLOW_ITEM_TYPE_END,
4282 			.spec = NULL,
4283 			.last = NULL,
4284 			.mask = NULL
4285 		}
4286 	};
4287 	union tunnel_offload_mark mark_id;
4288 	struct rte_flow_action_mark miss_mark;
4289 	struct rte_flow_action miss_actions[3] = {
4290 		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
4291 		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
4292 	};
4293 	const struct rte_flow_action_jump *jump_data;
4294 	uint32_t i, flow_table = 0; /* prevent compilation warning */
4295 	struct flow_grp_info grp_info = {
4296 		.external = 1,
4297 		.transfer = attr->transfer,
4298 		.fdb_def_rule = !!priv->fdb_def_rule,
4299 		.std_tbl_fix = 0,
4300 	};
4301 	int ret;
4302 
4303 	if (!attr->transfer) {
4304 		uint32_t q_size;
4305 
4306 		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
4307 		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
4308 		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
4309 					 0, SOCKET_ID_ANY);
4310 		if (!ctx->queue)
4311 			return rte_flow_error_set
4312 				(error, ENOMEM,
4313 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4314 				NULL, "invalid default miss RSS");
4315 		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4316 		ctx->action_rss.level = 0;
4317 		ctx->action_rss.types = priv->rss_conf.rss_hf;
4318 		ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
4319 		ctx->action_rss.queue_num = priv->reta_idx_n;
4320 		ctx->action_rss.key = priv->rss_conf.rss_key;
4321 		ctx->action_rss.queue = ctx->queue;
4322 		if (!priv->reta_idx_n || !priv->rxqs_n)
4323 			return rte_flow_error_set
4324 				(error, EINVAL,
4325 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4326 				NULL, "invalid port configuration");
4327 		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
4328 			ctx->action_rss.types = 0;
4329 		for (i = 0; i != priv->reta_idx_n; ++i)
4330 			ctx->queue[i] = (*priv->reta_idx)[i];
4331 	} else {
4332 		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
4333 		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
4334 	}
4335 	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
4336 	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
4337 	jump_data = app_actions->conf;
4338 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
4339 	miss_attr.group = jump_data->group;
4340 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
4341 				       &flow_table, grp_info, error);
4342 	if (ret)
4343 		return rte_flow_error_set(error, EINVAL,
4344 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4345 					  NULL, "invalid tunnel id");
4346 	mark_id.app_reserve = 0;
4347 	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
4348 	mark_id.transfer = !!attr->transfer;
4349 	mark_id._unused_ = 0;
4350 	miss_mark.id = mark_id.val;
4351 	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
4352 				    miss_items, miss_actions, flow_idx, error);
4353 	if (!dev_flow)
4354 		return -rte_errno;
4355 	dev_flow->flow = flow;
4356 	dev_flow->external = true;
4357 	dev_flow->tunnel = tunnel;
4358 	/* Subflow object was created, we must include it in the list. */
4359 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4360 		      dev_flow->handle, next);
4361 	DRV_LOG(DEBUG,
4362 		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
4363 		dev->data->port_id, tunnel->app_tunnel.type,
4364 		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
4365 	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
4366 				  miss_actions, error);
4367 	if (!ret)
4368 		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
4369 						  error);
4370 
4371 	return ret;
4372 }
4373 
4374 /**
4375  * The last stage of splitting chain, just creates the subflow
4376  * without any modification.
4377  *
4378  * @param[in] dev
4379  *   Pointer to Ethernet device.
4380  * @param[in] flow
4381  *   Parent flow structure pointer.
4382  * @param[in, out] sub_flow
4383  *   Pointer to return the created subflow, may be NULL.
4384  * @param[in] prefix_layers
4385  *   Prefix subflow layers, may be 0.
4386  * @param[in] prefix_mark
4387  *   Prefix subflow mark flag, may be 0.
4388  * @param[in] attr
4389  *   Flow rule attributes.
4390  * @param[in] items
4391  *   Pattern specification (list terminated by the END pattern item).
4392  * @param[in] actions
4393  *   Associated actions (list terminated by the END action).
4394  * @param[in] external
4395  *   This flow rule is created by a request external to the PMD.
4396  * @param[in] flow_idx
4397  *   The memory pool index of the flow.
4398  * @param[out] error
4399  *   Perform verbose error reporting if not NULL.
4400  * @return
4401  *   0 on success, negative value otherwise
4402  */
4403 static int
4404 flow_create_split_inner(struct rte_eth_dev *dev,
4405 			struct rte_flow *flow,
4406 			struct mlx5_flow **sub_flow,
4407 			uint64_t prefix_layers,
4408 			uint32_t prefix_mark,
4409 			const struct rte_flow_attr *attr,
4410 			const struct rte_flow_item items[],
4411 			const struct rte_flow_action actions[],
4412 			bool external, uint32_t flow_idx,
4413 			struct rte_flow_error *error)
4414 {
4415 	struct mlx5_flow *dev_flow;
4416 
4417 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4418 		flow_idx, error);
4419 	if (!dev_flow)
4420 		return -rte_errno;
4421 	dev_flow->flow = flow;
4422 	dev_flow->external = external;
4423 	/* Subflow object was created, we must include it in the list. */
4424 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4425 		      dev_flow->handle, next);
4426 	/*
4427 	 * If dev_flow is one of the suffix flows, some actions in the suffix
4428 	 * flow may need user-defined item layer flags; pass the
4429 	 * metadata Rx queue mark flag to the suffix flow as well.
4430 	 */
4431 	if (prefix_layers)
4432 		dev_flow->handle->layers = prefix_layers;
4433 	if (prefix_mark)
4434 		dev_flow->handle->mark = 1;
4435 	if (sub_flow)
4436 		*sub_flow = dev_flow;
4437 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
4438 }
4439 
4440 /**
4441  * Split the meter flow.
4442  *
4443  * As the meter flow will be split into three subflows, the other
4444  * actions besides the meter action only make sense if the meter
4445  * accepts the packet. If the packet is to be dropped, no additional
4446  * actions should be taken.
4447  *
4448  * One kind of special action, which decapsulates the L3 tunnel
4449  * header, will be put in the prefix subflow, so as not to take the
4450  * L3 tunnel header into account.
4451  *
4452  * @param dev
4453  *   Pointer to Ethernet device.
4454  * @param[in] items
4455  *   Pattern specification (list terminated by the END pattern item).
4456  * @param[out] sfx_items
4457  *   Suffix flow match items (list terminated by the END pattern item).
4458  * @param[in] actions
4459  *   Associated actions (list terminated by the END action).
4460  * @param[out] actions_sfx
4461  *   Suffix flow actions.
4462  * @param[out] actions_pre
4463  *   Prefix flow actions.
4468  *
4469  * @return
4470  *   The allocated flow ID on success, 0 otherwise.
4471  */
4472 static int
4473 flow_meter_split_prep(struct rte_eth_dev *dev,
4474 		 const struct rte_flow_item items[],
4475 		 struct rte_flow_item sfx_items[],
4476 		 const struct rte_flow_action actions[],
4477 		 struct rte_flow_action actions_sfx[],
4478 		 struct rte_flow_action actions_pre[])
4479 {
4480 	struct mlx5_priv *priv = dev->data->dev_private;
4481 	struct rte_flow_action *tag_action = NULL;
4482 	struct rte_flow_item *tag_item;
4483 	struct mlx5_rte_flow_action_set_tag *set_tag;
4484 	struct rte_flow_error error;
4485 	const struct rte_flow_action_raw_encap *raw_encap;
4486 	const struct rte_flow_action_raw_decap *raw_decap;
4487 	struct mlx5_rte_flow_item_tag *tag_spec;
4488 	struct mlx5_rte_flow_item_tag *tag_mask;
4489 	uint32_t tag_id = 0;
4490 	bool copy_vlan = false;
4491 
4492 	/* Prepare the actions for prefix and suffix flow. */
4493 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4494 		struct rte_flow_action **action_cur = NULL;
4495 
4496 		switch (actions->type) {
4497 		case RTE_FLOW_ACTION_TYPE_METER:
4498 			/* Add the extra tag action first. */
4499 			tag_action = actions_pre;
4500 			tag_action->type = (enum rte_flow_action_type)
4501 					   MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4502 			actions_pre++;
4503 			action_cur = &actions_pre;
4504 			break;
4505 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4506 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4507 			action_cur = &actions_pre;
4508 			break;
4509 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4510 			raw_encap = actions->conf;
4511 			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4512 				action_cur = &actions_pre;
4513 			break;
4514 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4515 			raw_decap = actions->conf;
4516 			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4517 				action_cur = &actions_pre;
4518 			break;
4519 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4520 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4521 			copy_vlan = true;
4522 			break;
4523 		default:
4524 			break;
4525 		}
4526 		if (!action_cur)
4527 			action_cur = &actions_sfx;
4528 		memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
4529 		(*action_cur)++;
4530 	}
4531 	/* Add end action to the actions. */
4532 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4533 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4534 	actions_pre++;
4535 	/* Set the tag. */
4536 	set_tag = (void *)actions_pre;
4537 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4538 	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
4539 			  &tag_id);
4540 	if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
4541 		DRV_LOG(ERR, "Port %u meter flow id exceed max limit.",
4542 			dev->data->port_id);
4543 		mlx5_ipool_free(priv->sh->ipool
4544 				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
4545 		return 0;
4546 	} else if (!tag_id) {
4547 		return 0;
4548 	}
4549 	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
4550 	MLX5_ASSERT(tag_action);
4551 	tag_action->conf = set_tag;
4552 	/* Prepare the suffix subflow items. */
4553 	tag_item = sfx_items++;
4554 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4555 		int item_type = items->type;
4556 
4557 		switch (item_type) {
4558 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
4559 			memcpy(sfx_items, items, sizeof(*sfx_items));
4560 			sfx_items++;
4561 			break;
4562 		case RTE_FLOW_ITEM_TYPE_VLAN:
4563 			if (copy_vlan) {
4564 				memcpy(sfx_items, items, sizeof(*sfx_items));
4565 				/*
4566 				 * Convert to internal match item, it is used
4567 				 * for vlan push and set vid.
4568 				 */
4569 				sfx_items->type = (enum rte_flow_item_type)
4570 						  MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4571 				sfx_items++;
4572 			}
4573 			break;
4574 		default:
4575 			break;
4576 		}
4577 	}
4578 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4579 	sfx_items++;
4580 	tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4581 	tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
4582 	tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4583 	tag_mask = tag_spec + 1;
4584 	tag_mask->data = 0xffffff00;
4585 	tag_item->type = (enum rte_flow_item_type)
4586 			 MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4587 	tag_item->spec = tag_spec;
4588 	tag_item->last = NULL;
4589 	tag_item->mask = tag_mask;
4590 	return tag_id;
4591 }
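
/*
 * Worked example of the meter split above (illustrative): for actions
 * { METER, RSS, END } the helper produces:
 *   prefix: { TAG(register := tag_id << MLX5_MTR_COLOR_BITS), METER,
 *             END }
 *   suffix: { RSS, END }, matching on the same TAG value,
 * so the RSS fate in the suffix subflow is reached only by packets the
 * meter lets through.
 */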
4592 
4593 /**
4594  * Split action list having QUEUE/RSS for metadata register copy.
4595  *
4596  * Once Q/RSS action is detected in user's action list, the flow action
4597  * should be split in order to copy metadata registers, which will happen in
4598  * RX_CP_TBL like,
4599  *   - CQE->flow_tag := reg_c[1] (MARK)
4600  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4601  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4602  * This is because the last action of each flow must be a terminal action
4603  * (QUEUE, RSS or DROP).
4604  *
4605  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
4606  * stored and kept in the mlx5_flow structure per each sub_flow.
4607  *
4608  * The Q/RSS action is replaced with,
4609  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
4610  * And the following JUMP action is added at the end,
4611  *   - JUMP, to RX_CP_TBL.
4612  *
4613  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
4614  * flow_create_split_metadata() routine. The flow will look like,
4615  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
4616  *
4617  * @param dev
4618  *   Pointer to Ethernet device.
4619  * @param[out] split_actions
4620  *   Pointer to store split actions to jump to CP_TBL.
4621  * @param[in] actions
4622  *   Pointer to the list of original flow actions.
4623  * @param[in] qrss
4624  *   Pointer to the Q/RSS action.
4625  * @param[in] actions_n
4626  *   Number of original actions.
4627  * @param[out] error
4628  *   Perform verbose error reporting if not NULL.
4629  *
4630  * @return
4631  *   non-zero unique flow_id on success, otherwise 0 and
4632  *   error/rte_error are set.
4633  */
4634 static uint32_t
4635 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
4636 			  struct rte_flow_action *split_actions,
4637 			  const struct rte_flow_action *actions,
4638 			  const struct rte_flow_action *qrss,
4639 			  int actions_n, struct rte_flow_error *error)
4640 {
4641 	struct mlx5_priv *priv = dev->data->dev_private;
4642 	struct mlx5_rte_flow_action_set_tag *set_tag;
4643 	struct rte_flow_action_jump *jump;
4644 	const int qrss_idx = qrss - actions;
4645 	uint32_t flow_id = 0;
4646 	int ret = 0;
4647 
4648 	/*
4649 	 * Given actions will be split
4650 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
4651 	 * - Add jump to mreg CP_TBL.
4652 	 * As a result, there will be one more action.
4653 	 */
4654 	++actions_n;
4655 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
4656 	set_tag = (void *)(split_actions + actions_n);
4657 	/*
4658 	 * If tag action is not set to void(it means we are not the meter
4659 	 * suffix flow), add the tag action. Since meter suffix flow already
4660 	 * has the tag added.
4661 	 */
4662 	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
4663 		/*
4664 		 * Allocate the new subflow ID. This one is unique within
4665 		 * device and not shared with representors. Otherwise,
4666 		 * we would have to resolve multi-thread access synchronization
4667 		 * issues. Each flow on the shared device is appended
4668 		 * with source vport identifier, so the resulting
4669 		 * flows will be unique in the shared (by master and
4670 		 * representors) domain even if they have coinciding
4671 		 * IDs.
4672 		 */
4673 		mlx5_ipool_malloc(priv->sh->ipool
4674 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
4675 		if (!flow_id)
4676 			return rte_flow_error_set(error, ENOMEM,
4677 						  RTE_FLOW_ERROR_TYPE_ACTION,
4678 						  NULL, "can't allocate id "
4679 						  "for split Q/RSS subflow");
4680 		/* Internal SET_TAG action to set flow ID. */
4681 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
4682 			.data = flow_id,
4683 		};
4684 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
4685 		if (ret < 0)
4686 			return ret;
4687 		set_tag->id = ret;
4688 		/* Construct new actions array. */
4689 		/* Replace QUEUE/RSS action. */
4690 		split_actions[qrss_idx] = (struct rte_flow_action){
4691 			.type = (enum rte_flow_action_type)
4692 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4693 			.conf = set_tag,
4694 		};
4695 	}
4696 	/* JUMP action to jump to mreg copy table (CP_TBL). */
4697 	jump = (void *)(set_tag + 1);
4698 	*jump = (struct rte_flow_action_jump){
4699 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4700 	};
4701 	split_actions[actions_n - 2] = (struct rte_flow_action){
4702 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
4703 		.conf = jump,
4704 	};
4705 	split_actions[actions_n - 1] = (struct rte_flow_action){
4706 		.type = RTE_FLOW_ACTION_TYPE_END,
4707 	};
4708 	return flow_id;
4709 }
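
/*
 * Worked example of the Q/RSS split above (illustrative): given
 * { MARK, QUEUE, END } (actions_n = 3), the prepared list becomes
 * { MARK, TAG(reg_c[2] := flow_id), JUMP(RX_CP_TBL), END } and the
 * returned flow_id is later matched in RX_ACT_TBL, where the original
 * QUEUE action is finally performed.
 */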
4710 
4711 /**
4712  * Extend the given action list for Tx metadata copy.
4713  *
4714  * Copy the given action list to the ext_actions and add flow metadata register
4715  * copy action in order to copy reg_a set by WQE to reg_c[0].
4716  *
4717  * @param[out] ext_actions
4718  *   Pointer to the extended action list.
4719  * @param[in] actions
4720  *   Pointer to the list of actions.
4721  * @param[in] actions_n
4722  *   Number of actions in the list.
4723  * @param[out] error
4724  *   Perform verbose error reporting if not NULL.
4725  * @param[in] encap_idx
4726  *   The encap action index.
4727  *
4728  * @return
4729  *   0 on success, negative value otherwise
4730  */
4731 static int
4732 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
4733 		       struct rte_flow_action *ext_actions,
4734 		       const struct rte_flow_action *actions,
4735 		       int actions_n, struct rte_flow_error *error,
4736 		       int encap_idx)
4737 {
4738 	struct mlx5_flow_action_copy_mreg *cp_mreg =
4739 		(struct mlx5_flow_action_copy_mreg *)
4740 			(ext_actions + actions_n + 1);
4741 	int ret;
4742 
4743 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4744 	if (ret < 0)
4745 		return ret;
4746 	cp_mreg->dst = ret;
4747 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
4748 	if (ret < 0)
4749 		return ret;
4750 	cp_mreg->src = ret;
4751 	if (encap_idx != 0)
4752 		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
4753 	if (encap_idx == actions_n - 1) {
4754 		ext_actions[actions_n - 1] = (struct rte_flow_action){
4755 			.type = (enum rte_flow_action_type)
4756 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4757 			.conf = cp_mreg,
4758 		};
4759 		ext_actions[actions_n] = (struct rte_flow_action){
4760 			.type = RTE_FLOW_ACTION_TYPE_END,
4761 		};
4762 	} else {
4763 		ext_actions[encap_idx] = (struct rte_flow_action){
4764 			.type = (enum rte_flow_action_type)
4765 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4766 			.conf = cp_mreg,
4767 		};
4768 		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
4769 				sizeof(*ext_actions) * (actions_n - encap_idx));
4770 	}
4771 	return 0;
4772 }
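
/*
 * Worked example of the Tx metadata extension above (illustrative): for
 * egress actions { SET_META, VXLAN_ENCAP, END } (actions_n = 3,
 * encap_idx = 1) the extended list is
 * { SET_META, COPY_MREG(reg_c[0] := reg_a), VXLAN_ENCAP, END },
 * i.e. the metadata set by the WQE is copied into reg_c[0] before the
 * packet is encapsulated.
 */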
4773 
4774 /**
4775  * Check the match action from the action list.
4776  *
4777  * @param[in] actions
4778  *   Pointer to the list of actions.
4779  * @param[in] attr
4780  *   Flow rule attributes.
4781  * @param[in] action
4782  *   The action to be checked for existence.
4783  * @param[out] match_action_pos
4784  *   Pointer to the position of the matched action if it exists, otherwise -1.
4785  * @param[out] qrss_action_pos
4786  *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
4787  *
4788  * @return
4789  *   > 0 the total number of actions.
4790  *   0 if the match action is not found in the action list.
4791  */
4792 static int
4793 flow_check_match_action(const struct rte_flow_action actions[],
4794 			const struct rte_flow_attr *attr,
4795 			enum rte_flow_action_type action,
4796 			int *match_action_pos, int *qrss_action_pos)
4797 {
4798 	const struct rte_flow_action_sample *sample;
4799 	int actions_n = 0;
4800 	int jump_flag = 0;
4801 	uint32_t ratio = 0;
4802 	int sub_type = 0;
4803 	int flag = 0;
4804 
4805 	*match_action_pos = -1;
4806 	*qrss_action_pos = -1;
4807 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4808 		if (actions->type == action) {
4809 			flag = 1;
4810 			*match_action_pos = actions_n;
4811 		}
4812 		if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
4813 		    actions->type == RTE_FLOW_ACTION_TYPE_RSS)
4814 			*qrss_action_pos = actions_n;
4815 		if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
4816 			jump_flag = 1;
4817 		if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
4818 			sample = actions->conf;
4819 			ratio = sample->ratio;
4820 			sub_type = ((const struct rte_flow_action *)
4821 					(sample->actions))->type;
4822 		}
4823 		actions_n++;
4824 	}
4825 	if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
4826 		if (ratio == 1) {
4827 			/* The JUMP action is not supported for mirroring;
4828 			 * mirroring supports multi-destination.
4829 			 */
4830 			if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
4831 				flag = 0;
4832 		}
4833 	}
4834 	/* Count RTE_FLOW_ACTION_TYPE_END. */
4835 	return flag ? actions_n + 1 : 0;
4836 }
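
/*
 * Illustrative use of the helper above: locating a SAMPLE action before
 * splitting (a non-transfer attribute is assumed, so the mirroring
 * checks do not apply).
 *
 * @code
 * int sample_pos, qrss_pos;
 * int n = flow_check_match_action(actions, attr,
 *				   RTE_FLOW_ACTION_TYPE_SAMPLE,
 *				   &sample_pos, &qrss_pos);
 * // For { COUNT, SAMPLE, QUEUE, END }: n == 4 (END included),
 * // sample_pos == 1, qrss_pos == 2; n == 0 means no split is needed.
 * @endcode
 */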
4837 
4838 #define SAMPLE_SUFFIX_ITEM 2
4839 
4840 /**
4841  * Split the sample flow.
4842  *
4843  * As the sample flow will be split into two subflows, the prefix
4844  * subflow keeps the sample action while the other actions move to a
4845  * new suffix flow.
4846  *
4847  * A unique tag ID is also added with a tag action in the sample flow;
4848  * the same tag ID is used as a match in the suffix flow.
4848  *
4849  * @param dev
4850  *   Pointer to Ethernet device.
4851  * @param[in] fdb_tx
4852  *   FDB egress flow flag.
4853  * @param[out] sfx_items
4854  *   Suffix flow match items (list terminated by the END pattern item).
4855  * @param[in] actions
4856  *   Associated actions (list terminated by the END action).
4857  * @param[out] actions_sfx
4858  *   Suffix flow actions.
4859  * @param[out] actions_pre
4860  *   Prefix flow actions.
4861  * @param[in] actions_n
4862  *   The total number of actions.
4863  * @param[in] sample_action_pos
4864  *   The sample action position.
4865  * @param[in] qrss_action_pos
4866  *   The Queue/RSS action position.
4867  * @param[out] error
4868  *   Perform verbose error reporting if not NULL.
4869  *
4870  * @return
4871  *   0 or a unique flow_id on success, a negative errno value
4872  *   otherwise and rte_errno is set.
4873  */
4874 static int
4875 flow_sample_split_prep(struct rte_eth_dev *dev,
4876 		       uint32_t fdb_tx,
4877 		       struct rte_flow_item sfx_items[],
4878 		       const struct rte_flow_action actions[],
4879 		       struct rte_flow_action actions_sfx[],
4880 		       struct rte_flow_action actions_pre[],
4881 		       int actions_n,
4882 		       int sample_action_pos,
4883 		       int qrss_action_pos,
4884 		       struct rte_flow_error *error)
4885 {
4886 	struct mlx5_priv *priv = dev->data->dev_private;
4887 	struct mlx5_rte_flow_action_set_tag *set_tag;
4888 	struct mlx5_rte_flow_item_tag *tag_spec;
4889 	struct mlx5_rte_flow_item_tag *tag_mask;
4890 	uint32_t tag_id = 0;
4891 	int index;
4892 	int ret;
4893 
4894 	if (sample_action_pos < 0)
4895 		return rte_flow_error_set(error, EINVAL,
4896 					  RTE_FLOW_ERROR_TYPE_ACTION,
4897 					  NULL, "invalid position of sample "
4898 					  "action in list");
4899 	if (!fdb_tx) {
4900 		/* Prepare the prefix tag action. */
4901 		set_tag = (void *)(actions_pre + actions_n + 1);
4902 		ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
4903 		if (ret < 0)
4904 			return ret;
4905 		set_tag->id = ret;
4906 		mlx5_ipool_malloc(priv->sh->ipool
4907 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
4908 		set_tag->data = tag_id;
4909 		/* Prepare the suffix subflow items. */
4910 		tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
4911 		tag_spec->data = tag_id;
4912 		tag_spec->id = set_tag->id;
4913 		tag_mask = tag_spec + 1;
4914 		tag_mask->data = UINT32_MAX;
4915 		sfx_items[0] = (struct rte_flow_item){
4916 			.type = (enum rte_flow_item_type)
4917 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4918 			.spec = tag_spec,
4919 			.last = NULL,
4920 			.mask = tag_mask,
4921 		};
4922 		sfx_items[1] = (struct rte_flow_item){
4923 			.type = (enum rte_flow_item_type)
4924 				RTE_FLOW_ITEM_TYPE_END,
4925 		};
4926 	}
4927 	/* Prepare the actions for prefix and suffix flow. */
4928 	if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
4929 		index = qrss_action_pos;
4930 		/* Put the actions preceding the Queue/RSS one into prefix flow. */
4931 		if (index != 0)
4932 			memcpy(actions_pre, actions,
4933 			       sizeof(struct rte_flow_action) * index);
4934 		/* Put others preceding the sample action into prefix flow. */
4935 		if (sample_action_pos > index + 1)
4936 			memcpy(actions_pre + index, actions + index + 1,
4937 			       sizeof(struct rte_flow_action) *
4938 			       (sample_action_pos - index - 1));
4939 		index = sample_action_pos - 1;
4940 		/* Put Queue/RSS action into Suffix flow. */
4941 		memcpy(actions_sfx, actions + qrss_action_pos,
4942 		       sizeof(struct rte_flow_action));
4943 		actions_sfx++;
4944 	} else {
4945 		index = sample_action_pos;
4946 		if (index != 0)
4947 			memcpy(actions_pre, actions,
4948 			       sizeof(struct rte_flow_action) * index);
4949 	}
4950 	/* Add the extra tag action for NIC-RX and E-Switch ingress. */
4951 	if (!fdb_tx) {
4952 		actions_pre[index++] =
4953 			(struct rte_flow_action){
4954 			.type = (enum rte_flow_action_type)
4955 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4956 			.conf = set_tag,
4957 		};
4958 	}
4959 	memcpy(actions_pre + index, actions + sample_action_pos,
4960 	       sizeof(struct rte_flow_action));
4961 	index += 1;
4962 	actions_pre[index] = (struct rte_flow_action){
4963 		.type = (enum rte_flow_action_type)
4964 			RTE_FLOW_ACTION_TYPE_END,
4965 	};
4966 	/* Put the actions after sample into Suffix flow. */
4967 	memcpy(actions_sfx, actions + sample_action_pos + 1,
4968 	       sizeof(struct rte_flow_action) *
4969 	       (actions_n - sample_action_pos - 1));
4970 	return tag_id;
4971 }
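
/*
 * Illustrative sketch (not part of the driver): assuming an original NIC-RX
 * action list of { MARK, SAMPLE, QUEUE, END } (actions_n = 4 counting END,
 * sample_action_pos = 1, qrss_action_pos = 2), the routine above produces:
 *
 *     actions_pre = { MARK, TAG(tag_id), SAMPLE, END }
 *     actions_sfx = { QUEUE, END }
 *     sfx_items   = { TAG(spec = tag_id, mask = UINT32_MAX), END }
 *
 * so only packets that were tagged by the prefix flow can match the suffix
 * flow. A Queue/RSS action that precedes the sample action is likewise
 * relocated into the suffix flow.
 */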
4972 
4973 /**
4974  * The splitting for metadata feature.
4975  *
4976  * - Q/RSS action on NIC Rx should be split in order to pass by
4977  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
4978  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
4979  *
4980  * - All the actions on NIC Tx should have a mreg copy action to
4981  *   copy reg_a from WQE to reg_c[0].
4982  *
4983  * @param dev
4984  *   Pointer to Ethernet device.
4985  * @param[in] flow
4986  *   Parent flow structure pointer.
4987  * @param[in] prefix_layers
4988  *   Prefix flow layer flags.
4989  * @param[in] prefix_mark
4990  *   Prefix subflow mark flag, may be 0.
4991  * @param[in] attr
4992  *   Flow rule attributes.
4993  * @param[in] items
4994  *   Pattern specification (list terminated by the END pattern item).
4995  * @param[in] actions
4996  *   Associated actions (list terminated by the END action).
4997  * @param[in] external
4998  *   This flow rule is created by request external to PMD.
4999  * @param[in] flow_idx
5000  *   This memory pool index to the flow.
5001  * @param[out] error
5002  *   Perform verbose error reporting if not NULL.
5003  * @return
5004  *   0 on success, negative value otherwise
5005  */
5006 static int
5007 flow_create_split_metadata(struct rte_eth_dev *dev,
5008 			   struct rte_flow *flow,
5009 			   uint64_t prefix_layers,
5010 			   uint32_t prefix_mark,
5011 			   const struct rte_flow_attr *attr,
5012 			   const struct rte_flow_item items[],
5013 			   const struct rte_flow_action actions[],
5014 			   bool external, uint32_t flow_idx,
5015 			   struct rte_flow_error *error)
5016 {
5017 	struct mlx5_priv *priv = dev->data->dev_private;
5018 	struct mlx5_dev_config *config = &priv->config;
5019 	const struct rte_flow_action *qrss = NULL;
5020 	struct rte_flow_action *ext_actions = NULL;
5021 	struct mlx5_flow *dev_flow = NULL;
5022 	uint32_t qrss_id = 0;
5023 	int mtr_sfx = 0;
5024 	size_t act_size;
5025 	int actions_n;
5026 	int encap_idx;
5027 	int ret;
5028 
5029 	/* Check whether extensive metadata feature is engaged. */
5030 	if (!config->dv_flow_en ||
5031 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
5032 	    !mlx5_flow_ext_mreg_supported(dev))
5033 		return flow_create_split_inner(dev, flow, NULL, prefix_layers,
5034 					       prefix_mark, attr, items,
5035 					       actions, external, flow_idx,
5036 					       error);
5037 	actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
5038 							   &encap_idx);
5039 	if (qrss) {
5040 		/* Exclude hairpin flows from splitting. */
5041 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
5042 			const struct rte_flow_action_queue *queue;
5043 
5044 			queue = qrss->conf;
5045 			if (mlx5_rxq_get_type(dev, queue->index) ==
5046 			    MLX5_RXQ_TYPE_HAIRPIN)
5047 				qrss = NULL;
5048 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
5049 			const struct rte_flow_action_rss *rss;
5050 
5051 			rss = qrss->conf;
5052 			if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
5053 			    MLX5_RXQ_TYPE_HAIRPIN)
5054 				qrss = NULL;
5055 		}
5056 	}
5057 	if (qrss) {
5058 		/* Check if it is in meter suffix table. */
5059 		mtr_sfx = attr->group == (attr->transfer ?
5060 			  (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5061 			  MLX5_FLOW_TABLE_LEVEL_SUFFIX);
5062 		/*
5063 		 * Q/RSS action on NIC Rx should be split in order to pass by
5064 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
5065 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
5066 		 */
5067 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5068 			   sizeof(struct rte_flow_action_set_tag) +
5069 			   sizeof(struct rte_flow_action_jump);
5070 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5071 					  SOCKET_ID_ANY);
5072 		if (!ext_actions)
5073 			return rte_flow_error_set(error, ENOMEM,
5074 						  RTE_FLOW_ERROR_TYPE_ACTION,
5075 						  NULL, "no memory to split "
5076 						  "metadata flow");
5077 		/*
5078 		 * If this is the suffix flow of a meter, the tag already exists.
5079 		 * Set the tag action to void.
5080 		 */
5081 		if (mtr_sfx)
5082 			ext_actions[qrss - actions].type =
5083 						RTE_FLOW_ACTION_TYPE_VOID;
5084 		else
5085 			ext_actions[qrss - actions].type =
5086 						(enum rte_flow_action_type)
5087 						MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5088 		/*
5089 		 * Create the new actions list with removed Q/RSS action
5090 		 * and appended set tag and jump to register copy table
5091 		 * (RX_CP_TBL). We should preallocate unique tag ID here
5092 		 * in advance, because it is needed for set tag action.
5093 		 */
5094 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
5095 						    qrss, actions_n, error);
5096 		if (!mtr_sfx && !qrss_id) {
5097 			ret = -rte_errno;
5098 			goto exit;
5099 		}
5100 	} else if (attr->egress && !attr->transfer) {
5101 		/*
5102 		 * All the actions on NIC Tx should have a metadata register
5103 		 * copy action to copy reg_a from WQE to reg_c[meta].
5104 		 */
5105 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5106 			   sizeof(struct mlx5_flow_action_copy_mreg);
5107 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5108 					  SOCKET_ID_ANY);
5109 		if (!ext_actions)
5110 			return rte_flow_error_set(error, ENOMEM,
5111 						  RTE_FLOW_ERROR_TYPE_ACTION,
5112 						  NULL, "no memory to split "
5113 						  "metadata flow");
5114 		/* Create the action list appended with copy register. */
5115 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
5116 					     actions_n, error, encap_idx);
5117 		if (ret < 0)
5118 			goto exit;
5119 	}
5120 	/* Add the unmodified original or prefix subflow. */
5121 	ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers,
5122 				      prefix_mark, attr,
5123 				      items, ext_actions ? ext_actions :
5124 				      actions, external, flow_idx, error);
5125 	if (ret < 0)
5126 		goto exit;
5127 	MLX5_ASSERT(dev_flow);
5128 	if (qrss) {
5129 		const struct rte_flow_attr q_attr = {
5130 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5131 			.ingress = 1,
5132 		};
5133 		/* Internal PMD action to set register. */
5134 		struct mlx5_rte_flow_item_tag q_tag_spec = {
5135 			.data = qrss_id,
5136 			.id = REG_NON,
5137 		};
5138 		struct rte_flow_item q_items[] = {
5139 			{
5140 				.type = (enum rte_flow_item_type)
5141 					MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5142 				.spec = &q_tag_spec,
5143 				.last = NULL,
5144 				.mask = NULL,
5145 			},
5146 			{
5147 				.type = RTE_FLOW_ITEM_TYPE_END,
5148 			},
5149 		};
5150 		struct rte_flow_action q_actions[] = {
5151 			{
5152 				.type = qrss->type,
5153 				.conf = qrss->conf,
5154 			},
5155 			{
5156 				.type = RTE_FLOW_ACTION_TYPE_END,
5157 			},
5158 		};
5159 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
5160 
5161 		/*
5162 		 * Configure the tag item only if there is no meter subflow.
5163 		 * Since the tag is already set in the meter suffix subflow,
5164 		 * we can just use the meter suffix items as-is.
5165 		 */
5166 		if (qrss_id) {
5167 			/* Not meter subflow. */
5168 			MLX5_ASSERT(!mtr_sfx);
5169 			/*
5170 			 * Put the unique id in the prefix flow, since it is
5171 			 * destroyed after the suffix flow. The id is freed
5172 			 * only when no actual flow uses it anymore, at which
5173 			 * point identifier reallocation becomes possible (for
5174 			 * example, for other flows in other threads).
5175 			 */
5176 			dev_flow->handle->split_flow_id = qrss_id;
5177 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
5178 						   error);
5179 			if (ret < 0)
5180 				goto exit;
5181 			q_tag_spec.id = ret;
5182 		}
5183 		dev_flow = NULL;
5184 		/* Add suffix subflow to execute Q/RSS. */
5185 		ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0,
5186 					      &q_attr, mtr_sfx ? items :
5187 					      q_items, q_actions,
5188 					      external, flow_idx, error);
5189 		if (ret < 0)
5190 			goto exit;
5191 		/* Clear qrss_id: it is freed in the exit path only on failure. */
5192 		qrss_id = 0;
5193 		MLX5_ASSERT(dev_flow);
5194 	}
5195 
5196 exit:
5197 	/*
5198 	 * We do not destroy the partially created sub_flows in case of error.
5199 	 * They are included in the parent flow list and will be destroyed
5200 	 * by flow_drv_destroy.
5201 	 */
5202 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
5203 			qrss_id);
5204 	mlx5_free(ext_actions);
5205 	return ret;
5206 }
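
/*
 * A simplified sketch of the metadata split above (not part of the driver):
 * with extensive metadata enabled, a NIC-RX rule such as
 *
 *     pattern { ETH / END }, actions { MARK / RSS / END }
 *
 * becomes two subflows:
 *
 *     prefix: pattern { ETH / END }
 *             actions { MARK / SET_TAG(qrss_id) / JUMP -> RX_CP_TBL / END }
 *     suffix: pattern { TAG(qrss_id) / END } in RX_ACT_TBL
 *             actions { RSS / END }
 *
 * The prefix traverses the register copy table so reg_c contents survive,
 * while the suffix finally executes the Q/RSS action.
 */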
5207 
5208 /**
5209  * The splitting for meter feature.
5210  *
5211  * - The meter flow will be split into two flows: a prefix and a
5212  *   suffix flow. Packets are processed further only if they pass
5213  *   the prefix meter action.
5214  *
5215  * - Reg_C_5 is used to match the packet between the prefix and
5216  *   suffix flows.
5217  *
5218  * @param dev
5219  *   Pointer to Ethernet device.
5220  * @param[in] flow
5221  *   Parent flow structure pointer.
5222  * @param[in] prefix_layers
5223  *   Prefix subflow layers, may be 0.
5224  * @param[in] prefix_mark
5225  *   Prefix subflow mark flag, may be 0.
5226  * @param[in] attr
5227  *   Flow rule attributes.
5228  * @param[in] items
5229  *   Pattern specification (list terminated by the END pattern item).
5230  * @param[in] actions
5231  *   Associated actions (list terminated by the END action).
5232  * @param[in] external
5233  *   This flow rule is created by request external to PMD.
5234  * @param[in] flow_idx
5235  *   This memory pool index to the flow.
5236  * @param[out] error
5237  *   Perform verbose error reporting if not NULL.
5238  * @return
5239  *   0 on success, negative value otherwise
5240  */
5241 static int
5242 flow_create_split_meter(struct rte_eth_dev *dev,
5243 			struct rte_flow *flow,
5244 			uint64_t prefix_layers,
5245 			uint32_t prefix_mark,
5246 			const struct rte_flow_attr *attr,
5247 			const struct rte_flow_item items[],
5248 			const struct rte_flow_action actions[],
5249 			bool external, uint32_t flow_idx,
5250 			struct rte_flow_error *error)
5251 {
5252 	struct mlx5_priv *priv = dev->data->dev_private;
5253 	struct rte_flow_action *sfx_actions = NULL;
5254 	struct rte_flow_action *pre_actions = NULL;
5255 	struct rte_flow_item *sfx_items = NULL;
5256 	struct mlx5_flow *dev_flow = NULL;
5257 	struct rte_flow_attr sfx_attr = *attr;
5258 	uint32_t mtr = 0;
5259 	uint32_t mtr_tag_id = 0;
5260 	size_t act_size;
5261 	size_t item_size;
5262 	int actions_n = 0;
5263 	int ret;
5264 
5265 	if (priv->mtr_en)
5266 		actions_n = flow_check_meter_action(actions, &mtr);
5267 	if (mtr) {
5268 		/* The five prefix actions: meter, decap, encap, tag, end. */
5269 		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
5270 			   sizeof(struct mlx5_rte_flow_action_set_tag);
5271 		/* tag, vlan, port id, end. */
5272 #define METER_SUFFIX_ITEM 4
5273 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
5274 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
5275 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
5276 					  0, SOCKET_ID_ANY);
5277 		if (!sfx_actions)
5278 			return rte_flow_error_set(error, ENOMEM,
5279 						  RTE_FLOW_ERROR_TYPE_ACTION,
5280 						  NULL, "no memory to split "
5281 						  "meter flow");
5282 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
5283 			     act_size);
5284 		pre_actions = sfx_actions + actions_n;
5285 		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
5286 						   actions, sfx_actions,
5287 						   pre_actions);
5288 		if (!mtr_tag_id) {
5289 			ret = -rte_errno;
5290 			goto exit;
5291 		}
5292 		/* Add the prefix subflow. */
5293 		ret = flow_create_split_inner(dev, flow, &dev_flow,
5294 					      prefix_layers, 0,
5295 					      attr, items,
5296 					      pre_actions, external,
5297 					      flow_idx, error);
5298 		if (ret) {
5299 			ret = -rte_errno;
5300 			goto exit;
5301 		}
5302 		dev_flow->handle->split_flow_id = mtr_tag_id;
5303 		/* Set the sfx group attr. */
5304 		sfx_attr.group = sfx_attr.transfer ?
5305 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5306 				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
5307 	}
5308 	/* Add the suffix subflow (the original flow when no meter is used). */
5309 	ret = flow_create_split_metadata(dev, flow, dev_flow ?
5310 					 flow_get_prefix_layer_flags(dev_flow) :
5311 					 prefix_layers, dev_flow ?
5312 					 dev_flow->handle->mark : prefix_mark,
5313 					 &sfx_attr, sfx_items ?
5314 					 sfx_items : items,
5315 					 sfx_actions ? sfx_actions : actions,
5316 					 external, flow_idx, error);
5317 exit:
5318 	if (sfx_actions)
5319 		mlx5_free(sfx_actions);
5320 	return ret;
5321 }
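
/*
 * Layout sketch of the single allocation made above: sfx_actions points at
 * the start, pre_actions at entry actions_n of the same action array, and
 * sfx_items right behind the act_size bytes reserved for actions:
 *
 *     sfx_actions          pre_actions              sfx_items
 *     |                    |                        |
 *     [suffix actions.....][prefix actions + tag...][suffix items + tag specs]
 *     |<-------------- act_size ------------------>|<------ item_size ----->|
 *
 * The single mlx5_free(sfx_actions) in the exit path therefore releases all
 * three arrays at once.
 */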
5322 
5323 /**
5324  * The splitting for sample feature.
5325  *
5326  * Once Sample action is detected in the action list, the flow actions should
5327  * be split into prefix sub flow and suffix sub flow.
5328  *
5329  * The original items remain in the prefix sub flow. All actions preceding
5330  * the sample action, and the sample action itself, are copied to the prefix
5331  * sub flow; the actions following the sample action are copied to the
5332  * suffix sub flow, and the Queue action is always located in the suffix sub flow.
5333  *
5334  * In order to make packets from the prefix sub flow match the suffix sub
5335  * flow, an extra tag action is added to the prefix sub flow, and the suffix
5336  * sub flow uses a tag item with the unique flow id.
5337  *
5338  * @param dev
5339  *   Pointer to Ethernet device.
5340  * @param[in] flow
5341  *   Parent flow structure pointer.
5342  * @param[in] attr
5343  *   Flow rule attributes.
5344  * @param[in] items
5345  *   Pattern specification (list terminated by the END pattern item).
5346  * @param[in] actions
5347  *   Associated actions (list terminated by the END action).
5348  * @param[in] external
5349  *   This flow rule is created by request external to PMD.
5350  * @param[in] flow_idx
5351  *   This memory pool index to the flow.
5352  * @param[out] error
5353  *   Perform verbose error reporting if not NULL.
5354  * @return
5355  *   0 on success, negative value otherwise
5356  */
5357 static int
5358 flow_create_split_sample(struct rte_eth_dev *dev,
5359 			 struct rte_flow *flow,
5360 			 const struct rte_flow_attr *attr,
5361 			 const struct rte_flow_item items[],
5362 			 const struct rte_flow_action actions[],
5363 			 bool external, uint32_t flow_idx,
5364 			 struct rte_flow_error *error)
5365 {
5366 	struct mlx5_priv *priv = dev->data->dev_private;
5367 	struct rte_flow_action *sfx_actions = NULL;
5368 	struct rte_flow_action *pre_actions = NULL;
5369 	struct rte_flow_item *sfx_items = NULL;
5370 	struct mlx5_flow *dev_flow = NULL;
5371 	struct rte_flow_attr sfx_attr = *attr;
5372 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5373 	struct mlx5_flow_dv_sample_resource *sample_res;
5374 	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5375 	struct mlx5_flow_tbl_resource *sfx_tbl;
5376 	union mlx5_flow_tbl_key sfx_table_key;
5377 #endif
5378 	size_t act_size;
5379 	size_t item_size;
5380 	uint32_t fdb_tx = 0;
5381 	int32_t tag_id = 0;
5382 	int actions_n = 0;
5383 	int sample_action_pos;
5384 	int qrss_action_pos;
5385 	int ret = 0;
5386 
5387 	if (priv->sampler_en)
5388 		actions_n = flow_check_match_action(actions, attr,
5389 					RTE_FLOW_ACTION_TYPE_SAMPLE,
5390 					&sample_action_pos, &qrss_action_pos);
5391 	if (actions_n) {
5392 		/* The prefix actions must include sample, tag, end. */
5393 		act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5394 			   + sizeof(struct mlx5_rte_flow_action_set_tag);
5395 		item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5396 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
5397 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5398 					  item_size), 0, SOCKET_ID_ANY);
5399 		if (!sfx_actions)
5400 			return rte_flow_error_set(error, ENOMEM,
5401 						  RTE_FLOW_ERROR_TYPE_ACTION,
5402 						  NULL, "no memory to split "
5403 						  "sample flow");
5404 		/* The representor_id is -1 for uplink. */
5405 		fdb_tx = (attr->transfer && priv->representor_id != -1);
5406 		if (!fdb_tx)
5407 			sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5408 					+ act_size);
5409 		pre_actions = sfx_actions + actions_n;
5410 		tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
5411 						actions, sfx_actions,
5412 						pre_actions, actions_n,
5413 						sample_action_pos,
5414 						qrss_action_pos, error);
5415 		if (tag_id < 0 || (!fdb_tx && !tag_id)) {
5416 			ret = -rte_errno;
5417 			goto exit;
5418 		}
5419 		/* Add the prefix subflow. */
5420 		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr,
5421 					      items, pre_actions, external,
5422 					      flow_idx, error);
5423 		if (ret) {
5424 			ret = -rte_errno;
5425 			goto exit;
5426 		}
5427 		dev_flow->handle->split_flow_id = tag_id;
5428 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5429 		/* Set the sfx group attr. */
5430 		sample_res = (struct mlx5_flow_dv_sample_resource *)
5431 					dev_flow->dv.sample_res;
5432 		sfx_tbl = (struct mlx5_flow_tbl_resource *)
5433 					sample_res->normal_path_tbl;
5434 		sfx_tbl_data = container_of(sfx_tbl,
5435 					struct mlx5_flow_tbl_data_entry, tbl);
5436 		sfx_table_key.v64 = sfx_tbl_data->entry.key;
5437 		sfx_attr.group = sfx_attr.transfer ?
5438 					(sfx_table_key.table_id - 1) :
5439 					 sfx_table_key.table_id;
5440 #endif
5441 	}
5442 	/* Add the suffix subflow. */
5443 	ret = flow_create_split_meter(dev, flow, dev_flow ?
5444 				 flow_get_prefix_layer_flags(dev_flow) : 0,
5445 				 dev_flow ? dev_flow->handle->mark : 0,
5446 				 &sfx_attr, sfx_items ? sfx_items : items,
5447 				 sfx_actions ? sfx_actions : actions,
5448 				 external, flow_idx, error);
5449 exit:
5450 	if (sfx_actions)
5451 		mlx5_free(sfx_actions);
5452 	return ret;
5453 }
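
/*
 * Usage sketch (application side, illustrative): a rule mirroring one out of
 * every eight packets to a dedicated queue exercises the sample splitter
 * above. The queue indexes and the ratio are assumptions.
 *
 * @code
 * struct rte_flow_action_queue mirror_q = { .index = 1 };
 * struct rte_flow_action sub_actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mirror_q },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action_sample sample = {
 *	.ratio = 8,		// sample 1/8th of the matching traffic
 *	.actions = sub_actions,
 * };
 * struct rte_flow_action_queue rx_q = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &rx_q },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */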
5454 
5455 /**
5456  * Split the flow into a subflow set. The splitters might be linked
5457  * in a chain, like this:
5458  * flow_create_split_outer() calls:
5459  *   flow_create_split_meter() calls:
5460  *     flow_create_split_metadata(meter_subflow_0) calls:
5461  *       flow_create_split_inner(metadata_subflow_0)
5462  *       flow_create_split_inner(metadata_subflow_1)
5463  *       flow_create_split_inner(metadata_subflow_2)
5464  *     flow_create_split_metadata(meter_subflow_1) calls:
5465  *       flow_create_split_inner(metadata_subflow_0)
5466  *       flow_create_split_inner(metadata_subflow_1)
5467  *       flow_create_split_inner(metadata_subflow_2)
5468  *
5469  * This provides a flexible way to add new levels of flow splitting.
5470  * All successfully created subflows are included in the
5471  * parent flow dev_flow list.
5472  *
5473  * @param dev
5474  *   Pointer to Ethernet device.
5475  * @param[in] flow
5476  *   Parent flow structure pointer.
5477  * @param[in] attr
5478  *   Flow rule attributes.
5479  * @param[in] items
5480  *   Pattern specification (list terminated by the END pattern item).
5481  * @param[in] actions
5482  *   Associated actions (list terminated by the END action).
5483  * @param[in] external
5484  *   This flow rule is created by request external to PMD.
5485  * @param[in] flow_idx
5486  *   This memory pool index to the flow.
5487  * @param[out] error
5488  *   Perform verbose error reporting if not NULL.
5489  * @return
5490  *   0 on success, negative value otherwise
5491  */
5492 static int
5493 flow_create_split_outer(struct rte_eth_dev *dev,
5494 			struct rte_flow *flow,
5495 			const struct rte_flow_attr *attr,
5496 			const struct rte_flow_item items[],
5497 			const struct rte_flow_action actions[],
5498 			bool external, uint32_t flow_idx,
5499 			struct rte_flow_error *error)
5500 {
5501 	int ret;
5502 
5503 	ret = flow_create_split_sample(dev, flow, attr, items,
5504 				       actions, external, flow_idx, error);
5505 	MLX5_ASSERT(ret <= 0);
5506 	return ret;
5507 }
5508 
5509 static struct mlx5_flow_tunnel *
5510 flow_tunnel_from_rule(struct rte_eth_dev *dev,
5511 		      const struct rte_flow_attr *attr,
5512 		      const struct rte_flow_item items[],
5513 		      const struct rte_flow_action actions[])
5514 {
5515 	struct mlx5_flow_tunnel *tunnel;
5516 
5517 #pragma GCC diagnostic push
5518 #pragma GCC diagnostic ignored "-Wcast-qual"
5519 	if (is_flow_tunnel_match_rule(dev, attr, items, actions))
5520 		tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
5521 	else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
5522 		tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
5523 	else
5524 		tunnel = NULL;
5525 #pragma GCC diagnostic pop
5526 
5527 	return tunnel;
5528 }
5529 
5530 /**
5531  * Adjust flow RSS workspace if needed.
5532  *
5533  * @param wks
5534  *   Pointer to thread flow work space.
5535  * @param rss_desc
5536  *   Pointer to RSS descriptor.
5537  * @param[in] nrssq_num
5538  *   New RSS queue number.
5539  *
5540  * @return
5541  *   0 on success, -1 otherwise and rte_errno is set.
5542  */
5543 static int
5544 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
5545 			  struct mlx5_flow_rss_desc *rss_desc,
5546 			  uint32_t nrssq_num)
5547 {
5548 	bool fidx = !!wks->flow_idx;
5549 
5550 	if (likely(nrssq_num <= wks->rssq_num[fidx]))
5551 		return 0;
5552 	rss_desc->queue = realloc(rss_desc->queue,
5553 			  sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
5554 	if (!rss_desc->queue) {
5555 		rte_errno = ENOMEM;
5556 		return -1;
5557 	}
5558 	wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
5559 	return 0;
5560 }
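
/*
 * A minimal sketch of the growth policy above: the per-thread queue array
 * only grows and is rounded up to an even count, so successive calls with
 * nrssq_num = 3, 5 and 4 leave wks->rssq_num[fidx] at 4, then 6, then 6,
 * the third call being satisfied without any reallocation.
 */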
5561 
5562 /**
5563  * Create a flow and add it to @p list.
5564  *
5565  * @param dev
5566  *   Pointer to Ethernet device.
5567  * @param list
5568  *   Pointer to a TAILQ flow list. If this parameter is NULL,
5569  *   no list insertion occurs: the flow is just created and
5570  *   it is the caller's responsibility to track the
5571  *   created flow.
5572  * @param[in] attr
5573  *   Flow rule attributes.
5574  * @param[in] items
5575  *   Pattern specification (list terminated by the END pattern item).
5576  * @param[in] actions
5577  *   Associated actions (list terminated by the END action).
5578  * @param[in] external
5579  *   This flow rule is created by request external to PMD.
5580  * @param[out] error
5581  *   Perform verbose error reporting if not NULL.
5582  *
5583  * @return
5584  *   A flow index on success, 0 otherwise and rte_errno is set.
5585  */
5586 static uint32_t
5587 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
5588 		 const struct rte_flow_attr *attr,
5589 		 const struct rte_flow_item items[],
5590 		 const struct rte_flow_action original_actions[],
5591 		 bool external, struct rte_flow_error *error)
5592 {
5593 	struct mlx5_priv *priv = dev->data->dev_private;
5594 	struct rte_flow *flow = NULL;
5595 	struct mlx5_flow *dev_flow;
5596 	const struct rte_flow_action_rss *rss;
5597 	struct mlx5_translated_shared_action
5598 		shared_actions[MLX5_MAX_SHARED_ACTIONS];
5599 	int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5600 	union {
5601 		struct mlx5_flow_expand_rss buf;
5602 		uint8_t buffer[2048];
5603 	} expand_buffer;
5604 	union {
5605 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5606 		uint8_t buffer[2048];
5607 	} actions_rx;
5608 	union {
5609 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5610 		uint8_t buffer[2048];
5611 	} actions_hairpin_tx;
5612 	union {
5613 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
5614 		uint8_t buffer[2048];
5615 	} items_tx;
5616 	struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
5617 	struct mlx5_flow_rss_desc *rss_desc;
5618 	const struct rte_flow_action *p_actions_rx;
5619 	uint32_t i;
5620 	uint32_t idx = 0;
5621 	int hairpin_flow;
5622 	struct rte_flow_attr attr_tx = { .priority = 0 };
5623 	struct rte_flow_attr attr_factor = {0};
5624 	const struct rte_flow_action *actions;
5625 	struct rte_flow_action *translated_actions = NULL;
5626 	struct mlx5_flow_tunnel *tunnel;
5627 	struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
5628 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5629 	bool fidx = !!wks->flow_idx;
5630 	int ret;
5631 
5632 	MLX5_ASSERT(wks);
5633 	rss_desc = &wks->rss_desc[fidx];
5634 	ret = flow_shared_actions_translate(original_actions,
5635 					    shared_actions,
5636 					    &shared_actions_n,
5637 					    &translated_actions, error);
5638 	if (ret < 0) {
5639 		MLX5_ASSERT(translated_actions == NULL);
5640 		return 0;
5641 	}
5642 	actions = translated_actions ? translated_actions : original_actions;
5643 	memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
5644 	p_actions_rx = actions;
5645 	hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
5646 	ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
5647 				external, hairpin_flow, error);
5648 	if (ret < 0)
5649 		goto error_before_hairpin_split;
5650 	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
5651 	if (!flow) {
5652 		rte_errno = ENOMEM;
5653 		goto error_before_hairpin_split;
5654 	}
5655 	if (hairpin_flow > 0) {
5656 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
5657 			rte_errno = EINVAL;
5658 			goto error_before_hairpin_split;
5659 		}
5660 		flow_hairpin_split(dev, actions, actions_rx.actions,
5661 				   actions_hairpin_tx.actions, items_tx.items,
5662 				   idx);
5663 		p_actions_rx = actions_rx.actions;
5664 	}
5665 	flow->drv_type = flow_get_drv_type(dev, &attr_factor);
5666 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
5667 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
5668 	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
5669 	rss = flow_get_rss_action(p_actions_rx);
5670 	if (rss) {
5671 		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
5672 			return 0;
5673 		/*
5674 		 * The following information is required by
5675 		 * mlx5_flow_hashfields_adjust() in advance.
5676 		 */
5677 		rss_desc->level = rss->level;
5678 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
5679 		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
5680 	}
5681 	flow->dev_handles = 0;
5682 	if (rss && rss->types) {
5683 		unsigned int graph_root;
5684 
5685 		graph_root = find_graph_root(items, rss->level);
5686 		ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
5687 					   items, rss->types,
5688 					   mlx5_support_expansion, graph_root);
5689 		MLX5_ASSERT(ret > 0 &&
5690 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
5691 	} else {
5692 		buf->entries = 1;
5693 		buf->entry[0].pattern = (void *)(uintptr_t)items;
5694 	}
5695 	flow->shared_rss = flow_get_shared_rss_action(shared_actions,
5696 						      shared_actions_n);
5697 	/*
5698 	 * Record the start index when there is a nested call. All sub-flows
5699 	 * need to be translated before another call is made.
5700 	 * A ping-pong buffer is not used here in order to save memory.
5701 	 */
5702 	if (fidx) {
5703 		MLX5_ASSERT(!wks->flow_nested_idx);
5704 		wks->flow_nested_idx = fidx;
5705 	}
5706 	for (i = 0; i < buf->entries; ++i) {
5707 		/*
5708 		 * The splitter may create multiple dev_flows,
5709 		 * depending on configuration. In the simplest
5710 		 * case it just creates unmodified original flow.
5711 		 */
5712 		ret = flow_create_split_outer(dev, flow, &attr_factor,
5713 					      buf->entry[i].pattern,
5714 					      p_actions_rx, external, idx,
5715 					      error);
5716 		if (ret < 0)
5717 			goto error;
5718 		if (is_flow_tunnel_steer_rule(dev, attr,
5719 					      buf->entry[i].pattern,
5720 					      p_actions_rx)) {
5721 			ret = flow_tunnel_add_default_miss(dev, flow, attr,
5722 							   p_actions_rx,
5723 							   idx,
5724 							   &default_miss_ctx,
5725 							   error);
5726 			if (ret < 0) {
5727 				mlx5_free(default_miss_ctx.queue);
5728 				goto error;
5729 			}
5730 		}
5731 	}
5732 	/* Create the tx flow. */
5733 	if (hairpin_flow) {
5734 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
5735 		attr_tx.ingress = 0;
5736 		attr_tx.egress = 1;
5737 		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
5738 					 actions_hairpin_tx.actions,
5739 					 idx, error);
5740 		if (!dev_flow)
5741 			goto error;
5742 		dev_flow->flow = flow;
5743 		dev_flow->external = 0;
5744 		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5745 			      dev_flow->handle, next);
5746 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
5747 					 items_tx.items,
5748 					 actions_hairpin_tx.actions, error);
5749 		if (ret < 0)
5750 			goto error;
5751 	}
5752 	/*
5753 	 * Update the metadata register copy table. If the extensive
5754 	 * metadata feature is enabled and registers are supported
5755 	 * we might create an extra rte_flow for each unique
5756 	 * MARK/FLAG action ID.
5757 	 *
5758 	 * The table is updated for ingress Flows only, because
5759 	 * the egress Flows belong to a different device and the
5760 	 * copy table should be updated in the peer NIC Rx domain.
5761 	 */
5762 	if (attr_factor.ingress &&
5763 	    (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
5764 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
5765 		if (ret)
5766 			goto error;
5767 	}
5768 	/*
5769 	 * If the flow is external (from application) or the device is
5770 	 * started, then the flow will be applied immediately.
5771 	 */
5772 	if (external || dev->data->dev_started) {
5773 		ret = flow_drv_apply(dev, flow, error);
5774 		if (ret < 0)
5775 			goto error;
5776 	}
5777 	if (list) {
5778 		rte_spinlock_lock(&priv->flow_list_lock);
5779 		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
5780 			     flow, next);
5781 		rte_spinlock_unlock(&priv->flow_list_lock);
5782 	}
5783 	flow_rxq_flags_set(dev, flow);
5784 	rte_free(translated_actions);
5785 	/* Nested flow creation index recovery. */
5786 	wks->flow_idx = wks->flow_nested_idx;
5787 	if (wks->flow_nested_idx)
5788 		wks->flow_nested_idx = 0;
5789 	tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
5790 	if (tunnel) {
5791 		flow->tunnel = 1;
5792 		flow->tunnel_id = tunnel->tunnel_id;
5793 		__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
5794 		mlx5_free(default_miss_ctx.queue);
5795 	}
5796 	return idx;
5797 error:
5798 	MLX5_ASSERT(flow);
5799 	ret = rte_errno; /* Save rte_errno before cleanup. */
5800 	flow_mreg_del_copy_action(dev, flow);
5801 	flow_drv_destroy(dev, flow);
5802 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
5803 	rte_errno = ret; /* Restore rte_errno. */
5806 	wks->flow_idx = wks->flow_nested_idx;
5807 	if (wks->flow_nested_idx)
5808 		wks->flow_nested_idx = 0;
5809 error_before_hairpin_split:
5810 	rte_free(translated_actions);
5811 	return 0;
5812 }
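
/*
 * Illustrative note (simplified): when the RSS action carries hash types,
 * the pattern is expanded before splitting. For instance, items
 * { ETH / IPV4 / END } with rss->types = ETH_RSS_TCP | ETH_RSS_UDP may be
 * expanded by mlx5_flow_expand_rss() into
 * { ETH / IPV4 / END }, { ETH / IPV4 / TCP / END } and
 * { ETH / IPV4 / UDP / END }, and flow_create_split_outer() then runs once
 * per expanded entry.
 */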
5813 
5814 /**
5815  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
5816  * incoming packets to table 1.
5817  *
5818  * Other flow rules, requested for group n, will be created in
5819  * e-switch table n+1.
5820  * A jump action to e-switch group n will actually be created to jump to group n+1.
5821  *
5822  * Used when working in switchdev mode, to utilise advantages of table 1
5823  * and above.
5824  *
5825  * @param dev
5826  *   Pointer to Ethernet device.
5827  *
5828  * @return
5829  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
5830  */
5831 struct rte_flow *
5832 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
5833 {
5834 	const struct rte_flow_attr attr = {
5835 		.group = 0,
5836 		.priority = 0,
5837 		.ingress = 1,
5838 		.egress = 0,
5839 		.transfer = 1,
5840 	};
5841 	const struct rte_flow_item pattern = {
5842 		.type = RTE_FLOW_ITEM_TYPE_END,
5843 	};
5844 	struct rte_flow_action_jump jump = {
5845 		.group = 1,
5846 	};
5847 	const struct rte_flow_action actions[] = {
5848 		{
5849 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5850 			.conf = &jump,
5851 		},
5852 		{
5853 			.type = RTE_FLOW_ACTION_TYPE_END,
5854 		},
5855 	};
5856 	struct mlx5_priv *priv = dev->data->dev_private;
5857 	struct rte_flow_error error;
5858 
5859 	return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
5860 						   &attr, &pattern,
5861 						   actions, false, &error);
5862 }
5863 
5864 /**
5865  * Validate a flow supported by the NIC.
5866  *
5867  * @see rte_flow_validate()
5868  * @see rte_flow_ops
5869  */
5870 int
5871 mlx5_flow_validate(struct rte_eth_dev *dev,
5872 		   const struct rte_flow_attr *attr,
5873 		   const struct rte_flow_item items[],
5874 		   const struct rte_flow_action original_actions[],
5875 		   struct rte_flow_error *error)
5876 {
5877 	int hairpin_flow;
5878 	struct mlx5_translated_shared_action
5879 		shared_actions[MLX5_MAX_SHARED_ACTIONS];
5880 	int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5881 	const struct rte_flow_action *actions;
5882 	struct rte_flow_action *translated_actions = NULL;
5883 	int ret = flow_shared_actions_translate(original_actions,
5884 						shared_actions,
5885 						&shared_actions_n,
5886 						&translated_actions, error);
5887 
5888 	if (ret)
5889 		return ret;
5890 	actions = translated_actions ? translated_actions : original_actions;
5891 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5892 	ret = flow_drv_validate(dev, attr, items, actions,
5893 				true, hairpin_flow, error);
5894 	rte_free(translated_actions);
5895 	return ret;
5896 }
5897 
5898 /**
5899  * Create a flow.
5900  *
5901  * @see rte_flow_create()
5902  * @see rte_flow_ops
5903  */
5904 struct rte_flow *
5905 mlx5_flow_create(struct rte_eth_dev *dev,
5906 		 const struct rte_flow_attr *attr,
5907 		 const struct rte_flow_item items[],
5908 		 const struct rte_flow_action actions[],
5909 		 struct rte_flow_error *error)
5910 {
5911 	struct mlx5_priv *priv = dev->data->dev_private;
5912 
5913 	/*
5914 	 * If the device is not started yet, it is not allowed to create a
5915 	 * flow from the application. PMD default flows and traffic control flows
5916 	 * are not affected.
5917 	 */
5918 	if (unlikely(!dev->data->dev_started)) {
5919 		DRV_LOG(DEBUG, "port %u is not started when "
5920 			"inserting a flow", dev->data->port_id);
5921 		rte_flow_error_set(error, ENODEV,
5922 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5923 				   NULL,
5924 				   "port not started");
5925 		return NULL;
5926 	}
5927 
5928 	return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
5929 				  attr, items, actions, true, error);
5930 }
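
/*
 * Usage sketch (application side): the entry point above is reached through
 * the generic rte_flow API; port_id and the queue index are assumptions.
 *
 * @code
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *				        actions, &err);
 *
 * if (f == NULL)
 *	printf("flow rule rejected: %s\n",
 *	       err.message ? err.message : "(no stated reason)");
 * @endcode
 *
 * The port must be started first, as enforced by the check above.
 */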
5931 
5932 /**
5933  * Destroy a flow in a list.
5934  *
5935  * @param dev
5936  *   Pointer to Ethernet device.
5937  * @param list
5938  *   Pointer to the indexed flow list. If this parameter is NULL,
5939  *   there is no flow removal from the list. Note that, as flows
5940  *   are added to the indexed list, the memory the indexed list
5941  *   points to may change as flows are destroyed.
5942  * @param[in] flow_idx
5943  *   Index of flow to destroy.
5944  */
5945 static void
5946 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
5947 		  uint32_t flow_idx)
5948 {
5949 	struct mlx5_priv *priv = dev->data->dev_private;
5950 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5951 	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
5952 					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
5953 
5954 	if (!flow)
5955 		return;
5956 	/*
5957 	 * Update RX queue flags only if port is started, otherwise it is
5958 	 * already clean.
5959 	 */
5960 	if (dev->data->dev_started)
5961 		flow_rxq_flags_trim(dev, flow);
5962 	flow_drv_destroy(dev, flow);
5963 	if (list) {
5964 		rte_spinlock_lock(&priv->flow_list_lock);
5965 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
5966 			     flow_idx, flow, next);
5967 		rte_spinlock_unlock(&priv->flow_list_lock);
5968 	}
5969 	flow_mreg_del_copy_action(dev, flow);
5970 	if (flow->fdir) {
5971 		LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
5972 			if (priv_fdir_flow->rix_flow == flow_idx)
5973 				break;
5974 		}
5975 		if (priv_fdir_flow) {
5976 			LIST_REMOVE(priv_fdir_flow, next);
5977 			mlx5_free(priv_fdir_flow->fdir);
5978 			mlx5_free(priv_fdir_flow);
5979 		}
5980 	}
5981 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
5982 	if (flow->tunnel) {
5983 		struct mlx5_flow_tunnel *tunnel;
5984 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
5985 		RTE_VERIFY(tunnel);
5986 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
5987 			mlx5_flow_tunnel_free(dev, tunnel);
5988 	}
5989 }
5990 
5991 /**
5992  * Destroy all flows.
5993  *
5994  * @param dev
5995  *   Pointer to Ethernet device.
5996  * @param list
5997  *   Pointer to the Indexed flow list.
5998  * @param active
5999  *   If flushing is called actively.
6000  */
6001 void
6002 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
6003 {
6004 	uint32_t num_flushed = 0;
6005 
6006 	while (*list) {
6007 		flow_list_destroy(dev, list, *list);
6008 		num_flushed++;
6009 	}
6010 	if (active) {
6011 		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
6012 			dev->data->port_id, num_flushed);
6013 	}
6014 }
6015 
6016 /**
6017  * Remove all flows.
6018  *
6019  * @param dev
6020  *   Pointer to Ethernet device.
6021  * @param list
6022  *   Pointer to the Indexed flow list.
6023  */
6024 void
6025 mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list)
6026 {
6027 	struct mlx5_priv *priv = dev->data->dev_private;
6028 	struct rte_flow *flow = NULL;
6029 	uint32_t idx;
6030 
6031 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
6032 		      flow, next) {
6033 		flow_drv_remove(dev, flow);
6034 		flow_mreg_stop_copy_action(dev, flow);
6035 	}
6036 	flow_mreg_del_default_copy_action(dev);
6037 	flow_rxq_flags_clear(dev);
6038 }
6039 
6040 /**
6041  * Add all flows.
6042  *
6043  * @param dev
6044  *   Pointer to Ethernet device.
6045  * @param list
6046  *   Pointer to the Indexed flow list.
6047  *
6048  * @return
6049  *   0 on success, a negative errno value otherwise and rte_errno is set.
6050  */
6051 int
6052 mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list)
6053 {
6054 	struct mlx5_priv *priv = dev->data->dev_private;
6055 	struct rte_flow *flow = NULL;
6056 	struct rte_flow_error error;
6057 	uint32_t idx;
6058 	int ret = 0;
6059 
6060 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
6061 	ret = flow_mreg_add_default_copy_action(dev, &error);
6062 	if (ret < 0)
6063 		return -rte_errno;
6064 	/* Apply Flows created by application. */
6065 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
6066 		      flow, next) {
6067 		ret = flow_mreg_start_copy_action(dev, flow);
6068 		if (ret < 0)
6069 			goto error;
6070 		ret = flow_drv_apply(dev, flow, &error);
6071 		if (ret < 0)
6072 			goto error;
6073 		flow_rxq_flags_set(dev, flow);
6074 	}
6075 	return 0;
6076 error:
6077 	ret = rte_errno; /* Save rte_errno before cleanup. */
6078 	mlx5_flow_stop(dev, list);
6079 	rte_errno = ret; /* Restore rte_errno. */
6080 	return -rte_errno;
6081 }
6082 
6083 /**
6084  * Stop all default actions for flows.
6085  *
6086  * @param dev
6087  *   Pointer to Ethernet device.
6088  */
6089 void
6090 mlx5_flow_stop_default(struct rte_eth_dev *dev)
6091 {
6092 	flow_mreg_del_default_copy_action(dev);
6093 	flow_rxq_flags_clear(dev);
6094 }
6095 
6096 /**
6097  * Start all default actions for flows.
6098  *
6099  * @param dev
6100  *   Pointer to Ethernet device.
6101  * @return
6102  *   0 on success, a negative errno value otherwise and rte_errno is set.
6103  */
6104 int
6105 mlx5_flow_start_default(struct rte_eth_dev *dev)
6106 {
6107 	struct rte_flow_error error;
6108 
6109 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
6110 	return flow_mreg_add_default_copy_action(dev, &error);
6111 }
6112 
6113 /**
6114  * Release thread specific flow workspace data (pthread key destructor).
6115  */
6116 static void
6117 flow_release_workspace(void *data)
6118 {
6119 	struct mlx5_flow_workspace *wks = data;
6120 
6121 	if (!wks)
6122 		return;
6123 	free(wks->rss_desc[0].queue);
6124 	free(wks->rss_desc[1].queue);
6125 	free(wks);
6126 }
6127 
6128 /**
6129  * Initialize key of thread specific flow workspace data.
6130  */
6131 static void
6132 flow_alloc_workspace(void)
6133 {
6134 	if (pthread_key_create(&key_workspace, flow_release_workspace))
6135 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
6136 }
6137 
6138 /**
6139  * Get thread specific flow workspace.
6140  *
6141  * @return pointer to thread specific flow workspace data, NULL on error.
6142  */
6143 struct mlx5_flow_workspace*
6144 mlx5_flow_get_thread_workspace(void)
6145 {
6146 	struct mlx5_flow_workspace *data;
6147 
6148 	if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
6149 		DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
6150 		return NULL;
6151 	}
6152 	data = pthread_getspecific(key_workspace);
6153 	if (!data) {
6154 		data = calloc(1, sizeof(*data));
6155 		if (!data) {
6156 			DRV_LOG(ERR, "Failed to allocate flow workspace "
6157 				"memory.");
6158 			return NULL;
6159 		}
6160 		data->rss_desc[0].queue = calloc(1,
6161 				sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
6162 		if (!data->rss_desc[0].queue)
6163 			goto err;
6164 		data->rss_desc[1].queue = calloc(1,
6165 				sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
6166 		if (!data->rss_desc[1].queue)
6167 			goto err;
6168 		data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
6169 		data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
6170 		if (pthread_setspecific(key_workspace, data)) {
6171 			DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6172 			goto err;
6173 		}
6174 	}
6175 	return data;
6176 err:
6177 	if (data->rss_desc[0].queue)
6178 		free(data->rss_desc[0].queue);
6179 	if (data->rss_desc[1].queue)
6180 		free(data->rss_desc[1].queue);
6181 	free(data);
6182 	return NULL;
6183 }
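
/*
 * A minimal sketch of the thread-local pattern above: each flow-inserting
 * thread lazily receives a private workspace, so the rss_desc scratch
 * buffers never require locking.
 *
 * @code
 * struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 *
 * if (wks == NULL)
 *	return -ENOMEM;	// key creation or allocation failed in this thread
 * // wks->rss_desc[] and wks->rssq_num[] are private to the caller now.
 * @endcode
 */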
6184 
6185 /**
6186  * Verify the flow list is empty.
6187  *
6188  * @param dev
6189  *  Pointer to Ethernet device.
6190  *
6191  * @return the number of flows not released.
6192  */
6193 int
6194 mlx5_flow_verify(struct rte_eth_dev *dev)
6195 {
6196 	struct mlx5_priv *priv = dev->data->dev_private;
6197 	struct rte_flow *flow;
6198 	uint32_t idx;
6199 	int ret = 0;
6200 
6201 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
6202 		      flow, next) {
6203 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
6204 			dev->data->port_id, (void *)flow);
6205 		++ret;
6206 	}
6207 	return ret;
6208 }
6209 
6210 /**
6211  * Enable default hairpin egress flow.
6212  *
6213  * @param dev
6214  *   Pointer to Ethernet device.
6215  * @param queue
6216  *   The queue index.
6217  *
6218  * @return
6219  *   0 on success, a negative errno value otherwise and rte_errno is set.
6220  */
6221 int
6222 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
6223 			    uint32_t queue)
6224 {
6225 	struct mlx5_priv *priv = dev->data->dev_private;
6226 	const struct rte_flow_attr attr = {
6227 		.egress = 1,
6228 		.priority = 0,
6229 	};
6230 	struct mlx5_rte_flow_item_tx_queue queue_spec = {
6231 		.queue = queue,
6232 	};
6233 	struct mlx5_rte_flow_item_tx_queue queue_mask = {
6234 		.queue = UINT32_MAX,
6235 	};
6236 	struct rte_flow_item items[] = {
6237 		{
6238 			.type = (enum rte_flow_item_type)
6239 				MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
6240 			.spec = &queue_spec,
6241 			.last = NULL,
6242 			.mask = &queue_mask,
6243 		},
6244 		{
6245 			.type = RTE_FLOW_ITEM_TYPE_END,
6246 		},
6247 	};
6248 	struct rte_flow_action_jump jump = {
6249 		.group = MLX5_HAIRPIN_TX_TABLE,
6250 	};
6251 	struct rte_flow_action actions[2];
6252 	uint32_t flow_idx;
6253 	struct rte_flow_error error;
6254 
6255 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
6256 	actions[0].conf = &jump;
6257 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
6258 	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6259 				&attr, items, actions, false, &error);
6260 	if (!flow_idx) {
6261 		DRV_LOG(DEBUG,
6262 			"Failed to create ctrl flow: rte_errno(%d),"
6263 			" type(%d), message(%s)",
6264 			rte_errno, error.type,
6265 			error.message ? error.message : " (no stated reason)");
6266 		return -rte_errno;
6267 	}
6268 	return 0;
6269 }
6270 
6271 /**
6272  * Enable a control flow configured from the control plane.
6273  *
6274  * @param dev
6275  *   Pointer to Ethernet device.
6276  * @param eth_spec
6277  *   An Ethernet flow spec to apply.
6278  * @param eth_mask
6279  *   An Ethernet flow mask to apply.
6280  * @param vlan_spec
6281  *   A VLAN flow spec to apply.
6282  * @param vlan_mask
6283  *   A VLAN flow mask to apply.
6284  *
6285  * @return
6286  *   0 on success, a negative errno value otherwise and rte_errno is set.
6287  */
6288 int
6289 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
6290 		    struct rte_flow_item_eth *eth_spec,
6291 		    struct rte_flow_item_eth *eth_mask,
6292 		    struct rte_flow_item_vlan *vlan_spec,
6293 		    struct rte_flow_item_vlan *vlan_mask)
6294 {
6295 	struct mlx5_priv *priv = dev->data->dev_private;
6296 	const struct rte_flow_attr attr = {
6297 		.ingress = 1,
6298 		.priority = MLX5_FLOW_PRIO_RSVD,
6299 	};
6300 	struct rte_flow_item items[] = {
6301 		{
6302 			.type = RTE_FLOW_ITEM_TYPE_ETH,
6303 			.spec = eth_spec,
6304 			.last = NULL,
6305 			.mask = eth_mask,
6306 		},
6307 		{
6308 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
6309 					      RTE_FLOW_ITEM_TYPE_END,
6310 			.spec = vlan_spec,
6311 			.last = NULL,
6312 			.mask = vlan_mask,
6313 		},
6314 		{
6315 			.type = RTE_FLOW_ITEM_TYPE_END,
6316 		},
6317 	};
6318 	uint16_t queue[priv->reta_idx_n];
6319 	struct rte_flow_action_rss action_rss = {
6320 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6321 		.level = 0,
6322 		.types = priv->rss_conf.rss_hf,
6323 		.key_len = priv->rss_conf.rss_key_len,
6324 		.queue_num = priv->reta_idx_n,
6325 		.key = priv->rss_conf.rss_key,
6326 		.queue = queue,
6327 	};
6328 	struct rte_flow_action actions[] = {
6329 		{
6330 			.type = RTE_FLOW_ACTION_TYPE_RSS,
6331 			.conf = &action_rss,
6332 		},
6333 		{
6334 			.type = RTE_FLOW_ACTION_TYPE_END,
6335 		},
6336 	};
6337 	uint32_t flow_idx;
6338 	struct rte_flow_error error;
6339 	unsigned int i;
6340 
6341 	if (!priv->reta_idx_n || !priv->rxqs_n) {
6342 		return 0;
6343 	}
6344 	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
6345 		action_rss.types = 0;
6346 	for (i = 0; i != priv->reta_idx_n; ++i)
6347 		queue[i] = (*priv->reta_idx)[i];
6348 	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6349 				&attr, items, actions, false, &error);
6350 	if (!flow_idx)
6351 		return -rte_errno;
6352 	return 0;
6353 }
6354 
6355 /**
6356  * Enable a control flow configured from the control plane.
6357  *
6358  * @param dev
6359  *   Pointer to Ethernet device.
6360  * @param eth_spec
6361  *   An Ethernet flow spec to apply.
6362  * @param eth_mask
6363  *   An Ethernet flow mask to apply.
6364  *
6365  * @return
6366  *   0 on success, a negative errno value otherwise and rte_errno is set.
6367  */
6368 int
6369 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6370 	       struct rte_flow_item_eth *eth_spec,
6371 	       struct rte_flow_item_eth *eth_mask)
6372 {
6373 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6374 }
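
/*
 * Usage sketch (illustrative): this is how the PMD control path would
 * install a rule accepting traffic to a single MAC address; the address
 * value below is an assumption.
 *
 * @code
 * struct rte_flow_item_eth spec = {
 *	.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 * };
 * struct rte_flow_item_eth mask = {
 *	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 * };
 *
 * (void)mlx5_ctrl_flow(dev, &spec, &mask);
 * @endcode
 */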
6375 
6376 /**
6377  * Create a default miss flow rule matching LACP traffic.
6378  *
6379  * @param dev
6380  *   Pointer to Ethernet device.
6383  *
6384  * @return
6385  *   0 on success, a negative errno value otherwise and rte_errno is set.
6386  */
6387 int
6388 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6389 {
6390 	struct mlx5_priv *priv = dev->data->dev_private;
6391 	/*
6392 	 * The LACP matching is done by using only the ether type, since using
6393 	 * a multicast dst MAC causes the kernel to give this flow low priority.
6394 	 */
6395 	static const struct rte_flow_item_eth lacp_spec = {
6396 		.type = RTE_BE16(0x8809),
6397 	};
6398 	static const struct rte_flow_item_eth lacp_mask = {
6399 		.type = 0xffff,
6400 	};
6401 	const struct rte_flow_attr attr = {
6402 		.ingress = 1,
6403 	};
6404 	struct rte_flow_item items[] = {
6405 		{
6406 			.type = RTE_FLOW_ITEM_TYPE_ETH,
6407 			.spec = &lacp_spec,
6408 			.mask = &lacp_mask,
6409 		},
6410 		{
6411 			.type = RTE_FLOW_ITEM_TYPE_END,
6412 		},
6413 	};
6414 	struct rte_flow_action actions[] = {
6415 		{
6416 			.type = (enum rte_flow_action_type)
6417 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6418 		},
6419 		{
6420 			.type = RTE_FLOW_ACTION_TYPE_END,
6421 		},
6422 	};
6423 	struct rte_flow_error error;
6424 	uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6425 				&attr, items, actions, false, &error);
6426 
6427 	if (!flow_idx)
6428 		return -rte_errno;
6429 	return 0;
6430 }
6431 
6432 /**
6433  * Destroy a flow.
6434  *
6435  * @see rte_flow_destroy()
6436  * @see rte_flow_ops
6437  */
6438 int
6439 mlx5_flow_destroy(struct rte_eth_dev *dev,
6440 		  struct rte_flow *flow,
6441 		  struct rte_flow_error *error __rte_unused)
6442 {
6443 	struct mlx5_priv *priv = dev->data->dev_private;
6444 
6445 	flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6446 	return 0;
6447 }
6448 
6449 /**
6450  * Destroy all flows.
6451  *
6452  * @see rte_flow_flush()
6453  * @see rte_flow_ops
6454  */
6455 int
6456 mlx5_flow_flush(struct rte_eth_dev *dev,
6457 		struct rte_flow_error *error __rte_unused)
6458 {
6459 	struct mlx5_priv *priv = dev->data->dev_private;
6460 
6461 	mlx5_flow_list_flush(dev, &priv->flows, false);
6462 	return 0;
6463 }
6464 
6465 /**
6466  * Isolated mode.
6467  *
6468  * @see rte_flow_isolate()
6469  * @see rte_flow_ops
6470  */
6471 int
6472 mlx5_flow_isolate(struct rte_eth_dev *dev,
6473 		  int enable,
6474 		  struct rte_flow_error *error)
6475 {
6476 	struct mlx5_priv *priv = dev->data->dev_private;
6477 
6478 	if (dev->data->dev_started) {
6479 		rte_flow_error_set(error, EBUSY,
6480 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6481 				   NULL,
6482 				   "port must be stopped first");
6483 		return -rte_errno;
6484 	}
6485 	priv->isolated = !!enable;
6486 	if (enable)
6487 		dev->dev_ops = &mlx5_os_dev_ops_isolate;
6488 	else
6489 		dev->dev_ops = &mlx5_os_dev_ops;
6490 
6491 	dev->rx_descriptor_status = mlx5_rx_descriptor_status;
6492 	dev->tx_descriptor_status = mlx5_tx_descriptor_status;
6493 
6494 	return 0;
6495 }
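
/*
 * Usage sketch (application side, port_id assumed): isolated mode must be
 * selected before the port is started, matching the EBUSY check above.
 *
 * @code
 * struct rte_flow_error err;
 *
 * if (rte_flow_isolate(port_id, 1, &err) != 0)	// enter isolated mode
 *	printf("isolate failed: %s\n", err.message);
 * rte_eth_dev_start(port_id);			// only start afterwards
 * @endcode
 */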
6496 
6497 /**
6498  * Query a flow.
6499  *
6500  * @see rte_flow_query()
6501  * @see rte_flow_ops
6502  */
6503 static int
6504 flow_drv_query(struct rte_eth_dev *dev,
6505 	       uint32_t flow_idx,
6506 	       const struct rte_flow_action *actions,
6507 	       void *data,
6508 	       struct rte_flow_error *error)
6509 {
6510 	struct mlx5_priv *priv = dev->data->dev_private;
6511 	const struct mlx5_flow_driver_ops *fops;
6512 	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6513 					       [MLX5_IPOOL_RTE_FLOW],
6514 					       flow_idx);
6515 	enum mlx5_flow_drv_type ftype;
6516 
6517 	if (!flow) {
6518 		return rte_flow_error_set(error, ENOENT,
6519 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6520 			  NULL,
6521 			  "invalid flow handle");
6522 	}
6523 	ftype = flow->drv_type;
6524 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
6525 	fops = flow_get_drv_ops(ftype);
6526 
6527 	return fops->query(dev, flow, actions, data, error);
6528 }
6529 
6530 /**
6531  * Query a flow.
6532  *
6533  * @see rte_flow_query()
6534  * @see rte_flow_ops
6535  */
6536 int
6537 mlx5_flow_query(struct rte_eth_dev *dev,
6538 		struct rte_flow *flow,
6539 		const struct rte_flow_action *actions,
6540 		void *data,
6541 		struct rte_flow_error *error)
6542 {
6543 	int ret;
6544 
6545 	ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
6546 			     error);
6547 	if (ret < 0)
6548 		return ret;
6549 	return 0;
6550 }
6551 
6552 /**
6553  * Convert a flow director filter to a generic flow.
6554  *
6555  * @param dev
6556  *   Pointer to Ethernet device.
6557  * @param fdir_filter
6558  *   Flow director filter to add.
6559  * @param attributes
6560  *   Generic flow parameters structure.
6561  *
6562  * @return
6563  *   0 on success, a negative errno value otherwise and rte_errno is set.
6564  */
6565 static int
6566 flow_fdir_filter_convert(struct rte_eth_dev *dev,
6567 			 const struct rte_eth_fdir_filter *fdir_filter,
6568 			 struct mlx5_fdir *attributes)
6569 {
6570 	struct mlx5_priv *priv = dev->data->dev_private;
6571 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
6572 	const struct rte_eth_fdir_masks *mask =
6573 		&dev->data->dev_conf.fdir_conf.mask;
6574 
6575 	/* Validate queue number. */
6576 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
6577 		DRV_LOG(ERR, "port %u invalid queue number %d",
6578 			dev->data->port_id, fdir_filter->action.rx_queue);
6579 		rte_errno = EINVAL;
6580 		return -rte_errno;
6581 	}
6582 	attributes->attr.ingress = 1;
6583 	attributes->items[0] = (struct rte_flow_item) {
6584 		.type = RTE_FLOW_ITEM_TYPE_ETH,
6585 		.spec = &attributes->l2,
6586 		.mask = &attributes->l2_mask,
6587 	};
6588 	switch (fdir_filter->action.behavior) {
6589 	case RTE_ETH_FDIR_ACCEPT:
6590 		attributes->actions[0] = (struct rte_flow_action){
6591 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
6592 			.conf = &attributes->queue,
6593 		};
6594 		break;
6595 	case RTE_ETH_FDIR_REJECT:
6596 		attributes->actions[0] = (struct rte_flow_action){
6597 			.type = RTE_FLOW_ACTION_TYPE_DROP,
6598 		};
6599 		break;
6600 	default:
6601 		DRV_LOG(ERR, "port %u invalid behavior %d",
6602 			dev->data->port_id,
6603 			fdir_filter->action.behavior);
6604 		rte_errno = ENOTSUP;
6605 		return -rte_errno;
6606 	}
6607 	attributes->queue.index = fdir_filter->action.rx_queue;
6608 	/* Handle L3. */
6609 	switch (fdir_filter->input.flow_type) {
6610 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6611 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6612 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6613 		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
6614 			.src_addr = input->flow.ip4_flow.src_ip,
6615 			.dst_addr = input->flow.ip4_flow.dst_ip,
6616 			.time_to_live = input->flow.ip4_flow.ttl,
6617 			.type_of_service = input->flow.ip4_flow.tos,
6618 		};
6619 		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
6620 			.src_addr = mask->ipv4_mask.src_ip,
6621 			.dst_addr = mask->ipv4_mask.dst_ip,
6622 			.time_to_live = mask->ipv4_mask.ttl,
6623 			.type_of_service = mask->ipv4_mask.tos,
6624 			.next_proto_id = mask->ipv4_mask.proto,
6625 		};
6626 		attributes->items[1] = (struct rte_flow_item){
6627 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
6628 			.spec = &attributes->l3,
6629 			.mask = &attributes->l3_mask,
6630 		};
6631 		break;
6632 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6633 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6634 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6635 		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
6636 			.hop_limits = input->flow.ipv6_flow.hop_limits,
6637 			.proto = input->flow.ipv6_flow.proto,
6638 		};
6639 
6640 		memcpy(attributes->l3.ipv6.hdr.src_addr,
6641 		       input->flow.ipv6_flow.src_ip,
6642 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
6643 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
6644 		       input->flow.ipv6_flow.dst_ip,
6645 		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
6646 		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
6647 		       mask->ipv6_mask.src_ip,
6648 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
6649 		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
6650 		       mask->ipv6_mask.dst_ip,
6651 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
6652 		attributes->items[1] = (struct rte_flow_item){
6653 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
6654 			.spec = &attributes->l3,
6655 			.mask = &attributes->l3_mask,
6656 		};
6657 		break;
6658 	default:
6659 		DRV_LOG(ERR, "port %u invalid flow type %d",
6660 			dev->data->port_id, fdir_filter->input.flow_type);
6661 		rte_errno = ENOTSUP;
6662 		return -rte_errno;
6663 	}
6664 	/* Handle L4. */
6665 	switch (fdir_filter->input.flow_type) {
6666 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6667 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
6668 			.src_port = input->flow.udp4_flow.src_port,
6669 			.dst_port = input->flow.udp4_flow.dst_port,
6670 		};
6671 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
6672 			.src_port = mask->src_port_mask,
6673 			.dst_port = mask->dst_port_mask,
6674 		};
6675 		attributes->items[2] = (struct rte_flow_item){
6676 			.type = RTE_FLOW_ITEM_TYPE_UDP,
6677 			.spec = &attributes->l4,
6678 			.mask = &attributes->l4_mask,
6679 		};
6680 		break;
6681 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6682 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
6683 			.src_port = input->flow.tcp4_flow.src_port,
6684 			.dst_port = input->flow.tcp4_flow.dst_port,
6685 		};
6686 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
6687 			.src_port = mask->src_port_mask,
6688 			.dst_port = mask->dst_port_mask,
6689 		};
6690 		attributes->items[2] = (struct rte_flow_item){
6691 			.type = RTE_FLOW_ITEM_TYPE_TCP,
6692 			.spec = &attributes->l4,
6693 			.mask = &attributes->l4_mask,
6694 		};
6695 		break;
6696 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6697 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
6698 			.src_port = input->flow.udp6_flow.src_port,
6699 			.dst_port = input->flow.udp6_flow.dst_port,
6700 		};
6701 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
6702 			.src_port = mask->src_port_mask,
6703 			.dst_port = mask->dst_port_mask,
6704 		};
6705 		attributes->items[2] = (struct rte_flow_item){
6706 			.type = RTE_FLOW_ITEM_TYPE_UDP,
6707 			.spec = &attributes->l4,
6708 			.mask = &attributes->l4_mask,
6709 		};
6710 		break;
6711 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6712 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
6713 			.src_port = input->flow.tcp6_flow.src_port,
6714 			.dst_port = input->flow.tcp6_flow.dst_port,
6715 		};
6716 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
6717 			.src_port = mask->src_port_mask,
6718 			.dst_port = mask->dst_port_mask,
6719 		};
6720 		attributes->items[2] = (struct rte_flow_item){
6721 			.type = RTE_FLOW_ITEM_TYPE_TCP,
6722 			.spec = &attributes->l4,
6723 			.mask = &attributes->l4_mask,
6724 		};
6725 		break;
6726 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6727 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6728 		break;
6729 	default:
6730 		DRV_LOG(ERR, "port %u invalid flow type %d",
6731 			dev->data->port_id, fdir_filter->input.flow_type);
6732 		rte_errno = ENOTSUP;
6733 		return -rte_errno;
6734 	}
6735 	return 0;
6736 }
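
/*
 * A minimal sketch (illustration only; MLX5_FLOW_USAGE_SKETCH is a
 * hypothetical guard that is never defined) of the legacy filter that
 * flow_fdir_filter_convert() translates: an IPv4/UDP perfect rule steered
 * to Rx queue 3 becomes the item chain ETH / IPV4 / UDP plus one QUEUE
 * action.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static void
fdir_convert_sketch(struct rte_eth_dev *dev)
{
	struct rte_eth_fdir_filter filter = {
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				.ip.src_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
				.ip.dst_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 2)),
				.src_port = rte_cpu_to_be_16(1024),
				.dst_port = rte_cpu_to_be_16(4789),
			},
		},
		.action = {
			.rx_queue = 3,
			.behavior = RTE_ETH_FDIR_ACCEPT,
		},
	};
	struct mlx5_fdir attributes = { .attr = { .group = 0 } };

	if (!flow_fdir_filter_convert(dev, &filter, &attributes)) {
		/*
		 * attributes.items[0..2] now hold ETH / IPV4 / UDP and
		 * attributes.actions[0] is QUEUE with index 3.
		 */
	}
}
#endif /* MLX5_FLOW_USAGE_SKETCH */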
6737 
6738 #define FLOW_FDIR_CMP(f1, f2, fld) \
6739 	memcmp(&(f1)->fld, &(f2)->fld, sizeof((f1)->fld))
6740 
6741 /**
6742  * Compare two FDIR flows. If items and actions are identical, the two flows
6743  * are regarded as the same.
6744  *
6747  * @param f1
6748  *   FDIR flow to compare.
6749  * @param f2
6750  *   FDIR flow to compare.
6751  *
6752  * @return
6753  *   Zero on match, 1 otherwise.
6754  */
6755 static int
6756 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
6757 {
6758 	if (FLOW_FDIR_CMP(f1, f2, attr) ||
6759 	    FLOW_FDIR_CMP(f1, f2, l2) ||
6760 	    FLOW_FDIR_CMP(f1, f2, l2_mask) ||
6761 	    FLOW_FDIR_CMP(f1, f2, l3) ||
6762 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
6763 	    FLOW_FDIR_CMP(f1, f2, l4) ||
6764 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
6765 	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
6766 		return 1;
6767 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
6768 	    FLOW_FDIR_CMP(f1, f2, queue))
6769 		return 1;
6770 	return 0;
6771 }
6772 
6773 /**
6774  * Search device flow list to find out a matched FDIR flow.
6775  *
6776  * @param dev
6777  *   Pointer to Ethernet device.
6778  * @param fdir_flow
6779  *   FDIR flow to lookup.
6780  *
6781  * @return
6782  *   Index of flow if found, 0 otherwise.
6783  */
6784 static uint32_t
6785 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
6786 {
6787 	struct mlx5_priv *priv = dev->data->dev_private;
6788 	uint32_t flow_idx = 0;
6789 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6790 
6791 	MLX5_ASSERT(fdir_flow);
6792 	LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
6793 		if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
6794 			DRV_LOG(DEBUG, "port %u found FDIR flow %u",
6795 				dev->data->port_id, flow_idx);
6796 			flow_idx = priv_fdir_flow->rix_flow;
6797 			break;
6798 		}
6799 	}
6800 	return flow_idx;
6801 }
6802 
6803 /**
6804  * Add new flow director filter and store it in list.
6805  *
6806  * @param dev
6807  *   Pointer to Ethernet device.
6808  * @param fdir_filter
6809  *   Flow director filter to add.
6810  *
6811  * @return
6812  *   0 on success, a negative errno value otherwise and rte_errno is set.
6813  */
6814 static int
6815 flow_fdir_filter_add(struct rte_eth_dev *dev,
6816 		     const struct rte_eth_fdir_filter *fdir_filter)
6817 {
6818 	struct mlx5_priv *priv = dev->data->dev_private;
6819 	struct mlx5_fdir *fdir_flow;
6820 	struct rte_flow *flow;
6821 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6822 	uint32_t flow_idx;
6823 	int ret;
6824 
6825 	fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
6826 				SOCKET_ID_ANY);
6827 	if (!fdir_flow) {
6828 		rte_errno = ENOMEM;
6829 		return -rte_errno;
6830 	}
6831 	ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
6832 	if (ret)
6833 		goto error;
6834 	flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
6835 	if (flow_idx) {
6836 		rte_errno = EEXIST;
6837 		goto error;
6838 	}
6839 	priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
6840 				     sizeof(struct mlx5_fdir_flow),
6841 				     0, SOCKET_ID_ANY);
6842 	if (!priv_fdir_flow) {
6843 		rte_errno = ENOMEM;
6844 		goto error;
6845 	}
6846 	flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
6847 				    fdir_flow->items, fdir_flow->actions, true,
6848 				    NULL);
6849 	flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
6850 	if (!flow)
6851 		goto error;
6852 	flow->fdir = 1;
6853 	priv_fdir_flow->fdir = fdir_flow;
6854 	priv_fdir_flow->rix_flow = flow_idx;
6855 	LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
6856 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
6857 		dev->data->port_id, (void *)flow);
6858 	return 0;
6859 error:
6860 	mlx5_free(priv_fdir_flow);
6861 	mlx5_free(fdir_flow);
6862 	return -rte_errno;
6863 }
6864 
6865 /**
6866  * Delete specific filter.
6867  *
6868  * @param dev
6869  *   Pointer to Ethernet device.
6870  * @param fdir_filter
6871  *   Filter to be deleted.
6872  *
6873  * @return
6874  *   0 on success, a negative errno value otherwise and rte_errno is set.
6875  */
6876 static int
6877 flow_fdir_filter_delete(struct rte_eth_dev *dev,
6878 			const struct rte_eth_fdir_filter *fdir_filter)
6879 {
6880 	struct mlx5_priv *priv = dev->data->dev_private;
6881 	uint32_t flow_idx;
6882 	struct mlx5_fdir fdir_flow = {
6883 		.attr.group = 0,
6884 	};
6885 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6886 	int ret;
6887 
6888 	ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
6889 	if (ret)
6890 		return -rte_errno;
6891 	LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
6892 		/* Find the FDIR flow in the private list. */
6893 		if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
6894 			break;
6895 	}
6896 	if (!priv_fdir_flow)
6897 		return 0;
6898 	LIST_REMOVE(priv_fdir_flow, next);
6899 	flow_idx = priv_fdir_flow->rix_flow;
6900 	flow_list_destroy(dev, &priv->flows, flow_idx);
6901 	mlx5_free(priv_fdir_flow->fdir);
6902 	mlx5_free(priv_fdir_flow);
6903 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
6904 		dev->data->port_id, flow_idx);
6905 	return 0;
6906 }
6907 
6908 /**
6909  * Update queue for specific filter.
6910  *
6911  * @param dev
6912  *   Pointer to Ethernet device.
6913  * @param fdir_filter
6914  *   Filter to be updated.
6915  *
6916  * @return
6917  *   0 on success, a negative errno value otherwise and rte_errno is set.
6918  */
6919 static int
6920 flow_fdir_filter_update(struct rte_eth_dev *dev,
6921 			const struct rte_eth_fdir_filter *fdir_filter)
6922 {
6923 	int ret;
6924 
6925 	ret = flow_fdir_filter_delete(dev, fdir_filter);
6926 	if (ret)
6927 		return ret;
6928 	return flow_fdir_filter_add(dev, fdir_filter);
6929 }
6930 
6931 /**
6932  * Flush all filters.
6933  *
6934  * @param dev
6935  *   Pointer to Ethernet device.
6936  */
6937 static void
6938 flow_fdir_filter_flush(struct rte_eth_dev *dev)
6939 {
6940 	struct mlx5_priv *priv = dev->data->dev_private;
6941 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6942 
6943 	while (!LIST_EMPTY(&priv->fdir_flows)) {
6944 		priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
6945 		LIST_REMOVE(priv_fdir_flow, next);
6946 		flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
6947 		mlx5_free(priv_fdir_flow->fdir);
6948 		mlx5_free(priv_fdir_flow);
6949 	}
6950 }
6951 
6952 /**
6953  * Get flow director information.
6954  *
6955  * @param dev
6956  *   Pointer to Ethernet device.
6957  * @param[out] fdir_info
6958  *   Resulting flow director information.
6959  */
6960 static void
6961 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
6962 {
6963 	struct rte_eth_fdir_masks *mask =
6964 		&dev->data->dev_conf.fdir_conf.mask;
6965 
6966 	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
6967 	fdir_info->guarant_spc = 0;
6968 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
6969 	fdir_info->max_flexpayload = 0;
6970 	fdir_info->flow_types_mask[0] = 0;
6971 	fdir_info->flex_payload_unit = 0;
6972 	fdir_info->max_flex_payload_segment_num = 0;
6973 	fdir_info->flex_payload_limit = 0;
6974 	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
6975 }
6976 
6977 /**
6978  * Deal with flow director operations.
6979  *
6980  * @param dev
6981  *   Pointer to Ethernet device.
6982  * @param filter_op
6983  *   Operation to perform.
6984  * @param arg
6985  *   Pointer to operation-specific structure.
6986  *
6987  * @return
6988  *   0 on success, a negative errno value otherwise and rte_errno is set.
6989  */
6990 static int
6991 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
6992 		    void *arg)
6993 {
6994 	enum rte_fdir_mode fdir_mode =
6995 		dev->data->dev_conf.fdir_conf.mode;
6996 
6997 	if (filter_op == RTE_ETH_FILTER_NOP)
6998 		return 0;
6999 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
7000 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
7001 		DRV_LOG(ERR, "port %u flow director mode %d not supported",
7002 			dev->data->port_id, fdir_mode);
7003 		rte_errno = EINVAL;
7004 		return -rte_errno;
7005 	}
7006 	switch (filter_op) {
7007 	case RTE_ETH_FILTER_ADD:
7008 		return flow_fdir_filter_add(dev, arg);
7009 	case RTE_ETH_FILTER_UPDATE:
7010 		return flow_fdir_filter_update(dev, arg);
7011 	case RTE_ETH_FILTER_DELETE:
7012 		return flow_fdir_filter_delete(dev, arg);
7013 	case RTE_ETH_FILTER_FLUSH:
7014 		flow_fdir_filter_flush(dev);
7015 		break;
7016 	case RTE_ETH_FILTER_INFO:
7017 		flow_fdir_info_get(dev, arg);
7018 		break;
7019 	default:
7020 		DRV_LOG(DEBUG, "port %u unknown operation %u",
7021 			dev->data->port_id, filter_op);
7022 		rte_errno = EINVAL;
7023 		return -rte_errno;
7024 	}
7025 	return 0;
7026 }
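
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined):
 * applications reach flow_fdir_ctrl_func() through the legacy ethdev filter
 * API, which mlx5_dev_filter_ctrl() below dispatches by filter type.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static int
fdir_add_sketch(uint16_t port_id, struct rte_eth_fdir_filter *filter)
{
	/* Ends up in flow_fdir_filter_add() on this PMD. */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, filter);
}
#endif /* MLX5_FLOW_USAGE_SKETCH */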
7027 
7028 /**
7029  * Manage filter operations.
7030  *
7031  * @param dev
7032  *   Pointer to Ethernet device structure.
7033  * @param filter_type
7034  *   Filter type.
7035  * @param filter_op
7036  *   Operation to perform.
7037  * @param arg
7038  *   Pointer to operation-specific structure.
7039  *
7040  * @return
7041  *   0 on success, a negative errno value otherwise and rte_errno is set.
7042  */
7043 int
7044 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
7045 		     enum rte_filter_type filter_type,
7046 		     enum rte_filter_op filter_op,
7047 		     void *arg)
7048 {
7049 	switch (filter_type) {
7050 	case RTE_ETH_FILTER_GENERIC:
7051 		if (filter_op != RTE_ETH_FILTER_GET) {
7052 			rte_errno = EINVAL;
7053 			return -rte_errno;
7054 		}
7055 		*(const void **)arg = &mlx5_flow_ops;
7056 		return 0;
7057 	case RTE_ETH_FILTER_FDIR:
7058 		return flow_fdir_ctrl_func(dev, filter_op, arg);
7059 	default:
7060 		DRV_LOG(ERR, "port %u filter type (%d) not supported",
7061 			dev->data->port_id, filter_type);
7062 		rte_errno = ENOTSUP;
7063 		return -rte_errno;
7064 	}
7065 	return 0;
7066 }
7067 
7068 /**
7069  * Create the needed meter and suffix tables.
7070  *
7071  * @param[in] dev
7072  *   Pointer to Ethernet device.
7073  * @param[in] fm
7074  *   Pointer to the flow meter.
7075  *
7076  * @return
7077  *   Pointer to table set on success, NULL otherwise.
7078  */
7079 struct mlx5_meter_domains_infos *
7080 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
7081 			  const struct mlx5_flow_meter *fm)
7082 {
7083 	const struct mlx5_flow_driver_ops *fops;
7084 
7085 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7086 	return fops->create_mtr_tbls(dev, fm);
7087 }
7088 
7089 /**
7090  * Destroy the meter table set.
7091  *
7092  * @param[in] dev
7093  *   Pointer to Ethernet device.
7094  * @param[in] tbl
7095  *   Pointer to the meter table set.
7096  *
7097  * @return
7098  *   0 on success.
7099  */
7100 int
7101 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
7102 			   struct mlx5_meter_domains_infos *tbls)
7103 {
7104 	const struct mlx5_flow_driver_ops *fops;
7105 
7106 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7107 	return fops->destroy_mtr_tbls(dev, tbls);
7108 }
7109 
7110 /**
7111  * Create policer rules.
7112  *
7113  * @param[in] dev
7114  *   Pointer to Ethernet device.
7115  * @param[in] fm
7116  *   Pointer to flow meter structure.
7117  * @param[in] attr
7118  *   Pointer to flow attributes.
7119  *
7120  * @return
7121  *   0 on success, -1 otherwise.
7122  */
7123 int
7124 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
7125 			       struct mlx5_flow_meter *fm,
7126 			       const struct rte_flow_attr *attr)
7127 {
7128 	const struct mlx5_flow_driver_ops *fops;
7129 
7130 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7131 	return fops->create_policer_rules(dev, fm, attr);
7132 }
7133 
7134 /**
7135  * Destroy policer rules.
7136  *
7137  * @param[in] fm
7138  *   Pointer to flow meter structure.
7139  * @param[in] attr
7140  *   Pointer to flow attributes.
7141  *
7142  * @return
7143  *   0 on success, -1 otherwise.
7144  */
7145 int
7146 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
7147 				struct mlx5_flow_meter *fm,
7148 				const struct rte_flow_attr *attr)
7149 {
7150 	const struct mlx5_flow_driver_ops *fops;
7151 
7152 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7153 	return fops->destroy_policer_rules(dev, fm, attr);
7154 }
7155 
7156 /**
7157  * Allocate a counter.
7158  *
7159  * @param[in] dev
7160  *   Pointer to Ethernet device structure.
7161  *
7162  * @return
7163  *   Index of the allocated counter on success, 0 otherwise.
7164  */
7165 uint32_t
7166 mlx5_counter_alloc(struct rte_eth_dev *dev)
7167 {
7168 	const struct mlx5_flow_driver_ops *fops;
7169 	struct rte_flow_attr attr = { .transfer = 0 };
7170 
7171 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7172 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7173 		return fops->counter_alloc(dev);
7174 	}
7175 	DRV_LOG(ERR,
7176 		"port %u counter allocate is not supported.",
7177 		 dev->data->port_id);
7178 	return 0;
7179 }
7180 
7181 /**
7182  * Free a counter.
7183  *
7184  * @param[in] dev
7185  *   Pointer to Ethernet device structure.
7186  * @param[in] cnt
7187  *   Index of the counter to be freed.
7188  */
7189 void
7190 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
7191 {
7192 	const struct mlx5_flow_driver_ops *fops;
7193 	struct rte_flow_attr attr = { .transfer = 0 };
7194 
7195 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7196 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7197 		fops->counter_free(dev, cnt);
7198 		return;
7199 	}
7200 	DRV_LOG(ERR,
7201 		"port %u counter free is not supported.",
7202 		 dev->data->port_id);
7203 }
7204 
7205 /**
7206  * Query counter statistics.
7207  *
7208  * @param[in] dev
7209  *   Pointer to Ethernet device structure.
7210  * @param[in] cnt
7211  *   Index of the counter to query.
7212  * @param[in] clear
7213  *   Set to clear counter statistics.
7214  * @param[out] pkts
7215  *   Where to store the number of packets the counter has matched.
7216  * @param[out] bytes
7217  *   Where to store the number of bytes the counter has matched.
7218  *
7219  * @return
7220  *   0 on success, a negative errno value otherwise.
7221  */
7222 int
7223 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
7224 		   bool clear, uint64_t *pkts, uint64_t *bytes)
7225 {
7226 	const struct mlx5_flow_driver_ops *fops;
7227 	struct rte_flow_attr attr = { .transfer = 0 };
7228 
7229 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7230 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7231 		return fops->counter_query(dev, cnt, clear, pkts, bytes);
7232 	}
7233 	DRV_LOG(ERR,
7234 		"port %u counter query is not supported.",
7235 		 dev->data->port_id);
7236 	return -ENOTSUP;
7237 }
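
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined)
 * of the allocate/query/free counter round trip wrapped above. All three
 * calls require the DV flow engine.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static void
counter_roundtrip_sketch(struct rte_eth_dev *dev)
{
	uint64_t pkts = 0;
	uint64_t bytes = 0;
	uint32_t cnt = mlx5_counter_alloc(dev);

	if (!cnt)
		return; /* DV not enabled or no counter available. */
	/* clear == true resets the counter after reading it. */
	if (!mlx5_counter_query(dev, cnt, true, &pkts, &bytes))
		DRV_LOG(DEBUG, "counter %u: %lu packets / %lu bytes", cnt,
			(unsigned long)pkts, (unsigned long)bytes);
	mlx5_counter_free(dev, cnt);
}
#endif /* MLX5_FLOW_USAGE_SKETCH */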
7238 
7239 /**
7240  * Allocate new memory for the counter values, wrapped by all the needed
7241  * management structures.
7242  *
7243  * @param[in] sh
7244  *   Pointer to mlx5_dev_ctx_shared object.
7245  *
7246  * @return
7247  *   0 on success, a negative errno value otherwise.
7248  */
7249 static int
7250 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
7251 {
7252 	struct mlx5_devx_mkey_attr mkey_attr;
7253 	struct mlx5_counter_stats_mem_mng *mem_mng;
7254 	volatile struct flow_counter_stats *raw_data;
7255 	int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
7256 	int size = (sizeof(struct flow_counter_stats) *
7257 			MLX5_COUNTERS_PER_POOL +
7258 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
7259 			sizeof(struct mlx5_counter_stats_mem_mng);
7260 	size_t pgsize = rte_mem_page_size();
7261 	uint8_t *mem;
7262 	int i;
7263 
7264 	if (pgsize == (size_t)-1) {
7265 		DRV_LOG(ERR, "Failed to get mem page size");
7266 		rte_errno = ENOMEM;
7267 		return -ENOMEM;
7268 	}
7269 	mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
7270 	if (!mem) {
7271 		rte_errno = ENOMEM;
7272 		return -ENOMEM;
7273 	}
7274 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
7275 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
7276 	mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
7277 						 IBV_ACCESS_LOCAL_WRITE);
7278 	if (!mem_mng->umem) {
7279 		rte_errno = errno;
7280 		mlx5_free(mem);
7281 		return -rte_errno;
7282 	}
7283 	mkey_attr.addr = (uintptr_t)mem;
7284 	mkey_attr.size = size;
7285 	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
7286 	mkey_attr.pd = sh->pdn;
7287 	mkey_attr.log_entity_size = 0;
7288 	mkey_attr.pg_access = 0;
7289 	mkey_attr.klm_array = NULL;
7290 	mkey_attr.klm_num = 0;
7291 	mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
7292 	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
7293 	if (!mem_mng->dm) {
7294 		mlx5_glue->devx_umem_dereg(mem_mng->umem);
7295 		rte_errno = errno;
7296 		mlx5_free(mem);
7297 		return -rte_errno;
7298 	}
7299 	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
7300 	raw_data = (volatile struct flow_counter_stats *)mem;
7301 	for (i = 0; i < raws_n; ++i) {
7302 		mem_mng->raws[i].mem_mng = mem_mng;
7303 		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
7304 	}
7305 	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
7306 		LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
7307 				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
7308 				 next);
7309 	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
7310 	sh->cmng.mem_mng = mem_mng;
7311 	return 0;
7312 }
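
/*
 * Resulting layout of the single allocation above (derived from the
 * function body, not an external contract):
 *
 *   |<----------- raw counter data ----------->|<--- raws[] --->|<- mng ->|
 *    raws_n * MLX5_COUNTERS_PER_POOL stat slots  raws_n raw       one
 *                                                descriptors      mem_mng
 *
 * Each raws[i].data points at offset i * MLX5_COUNTERS_PER_POOL inside the
 * raw data area; only the raw data region is registered as the DevX umem.
 */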
7313 
7314 /**
7315  * Set the statistic memory to the new counter pool.
7316  *
7317  * @param[in] sh
7318  *   Pointer to mlx5_dev_ctx_shared object.
7319  * @param[in] pool
7320  *   Pointer to the pool to set the statistic memory.
7321  *
7322  * @return
7323  *   0 on success, a negative errno value otherwise.
7324  */
7325 static int
7326 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
7327 			       struct mlx5_flow_counter_pool *pool)
7328 {
7329 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7330 	/* Resize the statistics memory once it is used up. */
7331 	if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
7332 	    mlx5_flow_create_counter_stat_mem_mng(sh)) {
7333 		DRV_LOG(ERR, "Cannot resize counter stat mem.");
7334 		return -1;
7335 	}
7336 	rte_spinlock_lock(&pool->sl);
7337 	pool->raw = cmng->mem_mng->raws + pool->index %
7338 		    MLX5_CNT_CONTAINER_RESIZE;
7339 	rte_spinlock_unlock(&pool->sl);
7340 	pool->raw_hw = NULL;
7341 	return 0;
7342 }
7343 
7344 #define MLX5_POOL_QUERY_FREQ_US 1000000
7345 
7346 /**
7347  * Set the periodic procedure for triggering asynchronous batch queries for all
7348  * the counter pools.
7349  *
7350  * @param[in] sh
7351  *   Pointer to mlx5_dev_ctx_shared object.
7352  */
7353 void
7354 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
7355 {
7356 	uint32_t pools_n, us;
7357 
7358 	pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
7359 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
7360 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
7361 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
7362 		sh->cmng.query_thread_on = 0;
7363 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
7364 	} else {
7365 		sh->cmng.query_thread_on = 1;
7366 	}
7367 }
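
/*
 * Pacing example (arithmetic only): with 4 valid pools the alarm fires
 * every 1000000 / 4 = 250000 us. Since each alarm invocation queries a
 * single pool round-robin, every pool is still visited about once per
 * MLX5_POOL_QUERY_FREQ_US regardless of the pool count.
 */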
7368 
7369 /**
7370  * The periodic procedure for triggering asynchronous batch queries for all the
7371  * counter pools. This function is expected to be called from the host thread.
7372  *
7373  * @param[in] arg
7374  *   The parameter for the alarm process.
7375  */
7376 void
7377 mlx5_flow_query_alarm(void *arg)
7378 {
7379 	struct mlx5_dev_ctx_shared *sh = arg;
7380 	int ret;
7381 	uint16_t pool_index = sh->cmng.pool_index;
7382 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7383 	struct mlx5_flow_counter_pool *pool;
7384 	uint16_t n_valid;
7385 
7386 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
7387 		goto set_alarm;
7388 	rte_spinlock_lock(&cmng->pool_update_sl);
7389 	pool = cmng->pools[pool_index];
7390 	n_valid = cmng->n_valid;
7391 	rte_spinlock_unlock(&cmng->pool_update_sl);
7392 	/* Set the statistics memory to the newly created pool. */
7393 	if (!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))
7394 		goto set_alarm;
7395 	if (pool->raw_hw)
7396 		/* There is a pool query in progress. */
7397 		goto set_alarm;
7398 	pool->raw_hw =
7399 		LIST_FIRST(&sh->cmng.free_stat_raws);
7400 	if (!pool->raw_hw)
7401 		/* No free counter statistics raw memory. */
7402 		goto set_alarm;
7403 	/*
7404 	 * Identify more efficiently the counters released between the query
7405 	 * trigger and the query handling. A counter released in this gap
7406 	 * period must wait for a new query round, since the newly arrived
7407 	 * packets would not be taken into account.
7408 	 */
7409 	pool->query_gen++;
7410 	ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
7411 					       MLX5_COUNTERS_PER_POOL,
7412 					       NULL, NULL,
7413 					       pool->raw_hw->mem_mng->dm->id,
7414 					       (void *)(uintptr_t)
7415 					       pool->raw_hw->data,
7416 					       sh->devx_comp,
7417 					       (uint64_t)(uintptr_t)pool);
7418 	if (ret) {
7419 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
7420 			" %d", pool->min_dcs->id);
7421 		pool->raw_hw = NULL;
7422 		goto set_alarm;
7423 	}
7424 	LIST_REMOVE(pool->raw_hw, next);
7425 	sh->cmng.pending_queries++;
7426 	pool_index++;
7427 	if (pool_index >= n_valid)
7428 		pool_index = 0;
7429 set_alarm:
7430 	sh->cmng.pool_index = pool_index;
7431 	mlx5_set_query_alarm(sh);
7432 }
7433 
7434 /**
7435  * Check the counter pool for newly aged flows and trigger the event callback.
7436  *
7437  * @param[in] sh
7438  *   Pointer to mlx5_dev_ctx_shared object.
7439  * @param[in] pool
7440  *   Pointer to the current counter pool.
7441  */
7442 static void
7443 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
7444 		   struct mlx5_flow_counter_pool *pool)
7445 {
7446 	struct mlx5_priv *priv;
7447 	struct mlx5_flow_counter *cnt;
7448 	struct mlx5_age_info *age_info;
7449 	struct mlx5_age_param *age_param;
7450 	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
7451 	struct mlx5_counter_stats_raw *prev = pool->raw;
7452 	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
7453 	const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
7454 	uint16_t expected = AGE_CANDIDATE;
7455 	uint32_t i;
7456 
7457 	pool->time_of_last_age_check = curr_time;
7458 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
7459 		cnt = MLX5_POOL_GET_CNT(pool, i);
7460 		age_param = MLX5_CNT_TO_AGE(cnt);
7461 		if (__atomic_load_n(&age_param->state,
7462 				    __ATOMIC_RELAXED) != AGE_CANDIDATE)
7463 			continue;
7464 		if (cur->data[i].hits != prev->data[i].hits) {
7465 			__atomic_store_n(&age_param->sec_since_last_hit, 0,
7466 					 __ATOMIC_RELAXED);
7467 			continue;
7468 		}
7469 		if (__atomic_add_fetch(&age_param->sec_since_last_hit,
7470 				       time_delta,
7471 				       __ATOMIC_RELAXED) <= age_param->timeout)
7472 			continue;
7473 		/*
7474 		 * Hold the lock first; otherwise, if the counter is
7475 		 * released between the AGE_TMOUT state change and the
7476 		 * tailq operation, the release procedure may delete a
7477 		 * non-existent tailq node.
7478 		 */
7479 		priv = rte_eth_devices[age_param->port_id].data->dev_private;
7480 		age_info = GET_PORT_AGE_INFO(priv);
7481 		rte_spinlock_lock(&age_info->aged_sl);
7482 		if (__atomic_compare_exchange_n(&age_param->state, &expected,
7483 						AGE_TMOUT, false,
7484 						__ATOMIC_RELAXED,
7485 						__ATOMIC_RELAXED)) {
7486 			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
7487 			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
7488 		}
7489 		rte_spinlock_unlock(&age_info->aged_sl);
7490 	}
7491 	for (i = 0; i < sh->max_port; i++) {
7492 		age_info = &sh->port[i].age_info;
7493 		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
7494 			continue;
7495 		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
7496 			rte_eth_dev_callback_process
7497 				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
7498 				RTE_ETH_EVENT_FLOW_AGED, NULL);
7499 		age_info->flags = 0;
7500 	}
7501 }
7502 
7503 /**
7504  * Handler for the HW response carrying the ready values of an asynchronous
7505  * batch query. This function is expected to be called from the host thread.
7506  *
7507  * @param[in] sh
7508  *   The pointer to the shared device context.
7509  * @param[in] async_id
7510  *   The Devx async ID.
7511  * @param[in] status
7512  *   The status of the completion.
7513  */
7514 void
7515 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
7516 				  uint64_t async_id, int status)
7517 {
7518 	struct mlx5_flow_counter_pool *pool =
7519 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
7520 	struct mlx5_counter_stats_raw *raw_to_free;
7521 	uint8_t query_gen = pool->query_gen ^ 1;
7522 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7523 	enum mlx5_counter_type cnt_type =
7524 		pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
7525 				MLX5_COUNTER_TYPE_ORIGIN;
7526 
7527 	if (unlikely(status)) {
7528 		raw_to_free = pool->raw_hw;
7529 	} else {
7530 		raw_to_free = pool->raw;
7531 		if (pool->is_aged)
7532 			mlx5_flow_aging_check(sh, pool);
7533 		rte_spinlock_lock(&pool->sl);
7534 		pool->raw = pool->raw_hw;
7535 		rte_spinlock_unlock(&pool->sl);
7536 		/* Be sure the new raw counters data is updated in memory. */
7537 		rte_io_wmb();
7538 		if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
7539 			rte_spinlock_lock(&cmng->csl[cnt_type]);
7540 			TAILQ_CONCAT(&cmng->counters[cnt_type],
7541 				     &pool->counters[query_gen], next);
7542 			rte_spinlock_unlock(&cmng->csl[cnt_type]);
7543 		}
7544 	}
7545 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
7546 	pool->raw_hw = NULL;
7547 	sh->cmng.pending_queries--;
7548 }
7549 
7550 static const struct mlx5_flow_tbl_data_entry  *
7551 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
7552 {
7553 	struct mlx5_priv *priv = dev->data->dev_private;
7554 	struct mlx5_dev_ctx_shared *sh = priv->sh;
7555 	struct mlx5_hlist_entry *he;
7556 	union tunnel_offload_mark mbits = { .val = mark };
7557 	union mlx5_flow_tbl_key table_key = {
7558 		{
7559 			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
7560 			.reserved = 0,
7561 			.domain = !!mbits.transfer,
7562 			.direction = 0,
7563 		}
7564 	};
7565 	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
7566 	return he ?
7567 	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
7568 }
7569 
7570 static uint32_t
7571 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
7572 				const struct mlx5_flow_tunnel *tunnel,
7573 				uint32_t group, uint32_t *table,
7574 				struct rte_flow_error *error)
7575 {
7576 	struct mlx5_priv *priv = dev->data->dev_private;
7577 	struct mlx5_hlist_entry *he;
7578 	struct tunnel_tbl_entry *tte;
7579 	union tunnel_tbl_key key = {
7580 		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
7581 		.group = group
7582 	};
7583 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7584 	struct mlx5_hlist *group_hash;
7585 
7586 	group_hash = tunnel ? tunnel->groups : thub->groups;
7587 	he = mlx5_hlist_lookup(group_hash, key.val);
7588 	if (!he) {
7589 		tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
7590 				  sizeof(*tte), 0,
7591 				  SOCKET_ID_ANY);
7592 		if (!tte)
7593 			goto err;
7594 		tte->hash.key = key.val;
7595 		mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7596 				  &tte->flow_table);
7597 		if (tte->flow_table >= MLX5_MAX_TABLES) {
7598 			DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the maximum limit.",
7599 				tte->flow_table);
7600 			mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7601 					tte->flow_table);
7602 			goto err;
7603 		} else if (!tte->flow_table) {
7604 			goto err;
7605 		}
7606 		tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
7607 		mlx5_hlist_insert(group_hash, &tte->hash);
7608 	} else {
7609 		tte = container_of(he, typeof(*tte), hash);
7610 	}
7611 	*table = tte->flow_table;
7612 	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
7613 		dev->data->port_id, key.tunnel_id, group, *table);
7614 	return 0;
7615 
7616 err:
7617 	if (tte)
7618 		mlx5_free(tte);
7619 	return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7620 				  NULL, "tunnel group index not supported");
7621 }
7622 
7623 static int
7624 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
7625 		    struct flow_grp_info grp_info, struct rte_flow_error *error)
7626 {
7627 	if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
7628 		if (group == UINT32_MAX)
7629 			return rte_flow_error_set
7630 						(error, EINVAL,
7631 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7632 						 NULL,
7633 						 "group index not supported");
7634 		*table = group + 1;
7635 	} else {
7636 		*table = group;
7637 	}
7638 	DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
7639 	return 0;
7640 }
7641 
7642 /**
7643  * Translate the rte_flow group index to HW table value.
7644  *
7645  * If tunnel offload is disabled, all group ids are converted to flow
7646  * table ids using the standard method.
7647  * If tunnel offload is enabled, a group id can be converted using either
7648  * the standard or the tunnel conversion method. The method selection
7649  * depends on the flags in the `grp_info` parameter:
7650  * - Internal (grp_info.external == 0) groups are converted with the
7651  *   standard method.
7652  * - Group ids in a JUMP action are converted with the tunnel method.
7653  * - The conversion of a group id in the rule attributes depends on the
7654  *   rule type and the group id value:
7655  *   ** a non-zero group attribute is converted with the tunnel method
7656  *   ** a zero group attribute in a non-tunnel rule is converted with the
7657  *      standard method - there is only one root table
7658  *   ** a zero group attribute in a tunnel steering rule is converted with
7659  *      the standard method - single root table
7660  *   ** a zero group attribute in a tunnel matching rule is a special OvS
7661  *      case: that value is used for portability reasons. Such a group
7662  *      id is converted with the tunnel conversion method.
7663  *
7664  * @param[in] dev
7665  *   Port device
7666  * @param[in] tunnel
7667  *   PMD tunnel offload object
7668  * @param[in] group
7669  *   rte_flow group index value.
7670  * @param[out] table
7671  *   HW table value.
7672  * @param[in] grp_info
7673  *   flags used for conversion
7674  * @param[out] error
7675  *   Pointer to error structure.
7676  *
7677  * @return
7678  *   0 on success, a negative errno value otherwise and rte_errno is set.
7679  */
7680 int
7681 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
7682 			 const struct mlx5_flow_tunnel *tunnel,
7683 			 uint32_t group, uint32_t *table,
7684 			 struct flow_grp_info grp_info,
7685 			 struct rte_flow_error *error)
7686 {
7687 	int ret;
7688 	bool standard_translation;
7689 
7690 	if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
7691 		group *= MLX5_FLOW_TABLE_FACTOR;
7692 	if (is_tunnel_offload_active(dev)) {
7693 		standard_translation = !grp_info.external ||
7694 					grp_info.std_tbl_fix;
7695 	} else {
7696 		standard_translation = true;
7697 	}
7698 	DRV_LOG(DEBUG,
7699 		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
7700 		dev->data->port_id, group, grp_info.transfer,
7701 		grp_info.external, grp_info.fdb_def_rule,
7702 		standard_translation ? "STANDARD" : "TUNNEL");
7703 	if (standard_translation)
7704 		ret = flow_group_to_table(dev->data->port_id, group, table,
7705 					  grp_info, error);
7706 	else
7707 		ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
7708 						      table, error);
7709 
7710 	return ret;
7711 }
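
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined)
 * of the standard translation path above: an external FDB group 3 is first
 * scaled by MLX5_FLOW_TABLE_FACTOR and then shifted by one for the FDB
 * default rule.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static void
group_to_table_sketch(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	uint32_t table = 0;
	struct flow_grp_info grp_info = {
		.external = 1,
		.transfer = 1,
		.fdb_def_rule = 1,
		.std_tbl_fix = 1, /* Keep the standard method. */
	};

	/* table becomes 3 * MLX5_FLOW_TABLE_FACTOR + 1 on success. */
	mlx5_flow_group_to_table(dev, NULL, 3, &table, grp_info, error);
}
#endif /* MLX5_FLOW_USAGE_SKETCH */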
7712 
7713 /**
7714  * Discover availability of metadata reg_c's.
7715  *
7716  * Iteratively use test flows to check availability.
7717  *
7718  * @param[in] dev
7719  *   Pointer to the Ethernet device structure.
7720  *
7721  * @return
7722  *   0 on success, a negative errno value otherwise and rte_errno is set.
7723  */
7724 int
7725 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
7726 {
7727 	struct mlx5_priv *priv = dev->data->dev_private;
7728 	struct mlx5_dev_config *config = &priv->config;
7729 	enum modify_reg idx;
7730 	int n = 0;
7731 
7732 	/* reg_c[0] and reg_c[1] are reserved. */
7733 	config->flow_mreg_c[n++] = REG_C_0;
7734 	config->flow_mreg_c[n++] = REG_C_1;
7735 	/* Discover availability of other reg_c's. */
7736 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
7737 		struct rte_flow_attr attr = {
7738 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
7739 			.priority = MLX5_FLOW_PRIO_RSVD,
7740 			.ingress = 1,
7741 		};
7742 		struct rte_flow_item items[] = {
7743 			[0] = {
7744 				.type = RTE_FLOW_ITEM_TYPE_END,
7745 			},
7746 		};
7747 		struct rte_flow_action actions[] = {
7748 			[0] = {
7749 				.type = (enum rte_flow_action_type)
7750 					MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
7751 				.conf = &(struct mlx5_flow_action_copy_mreg){
7752 					.src = REG_C_1,
7753 					.dst = idx,
7754 				},
7755 			},
7756 			[1] = {
7757 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
7758 				.conf = &(struct rte_flow_action_jump){
7759 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
7760 				},
7761 			},
7762 			[2] = {
7763 				.type = RTE_FLOW_ACTION_TYPE_END,
7764 			},
7765 		};
7766 		uint32_t flow_idx;
7767 		struct rte_flow *flow;
7768 		struct rte_flow_error error;
7769 
7770 		if (!config->dv_flow_en)
7771 			break;
7772 		/* Create internal flow, validation skips copy action. */
7773 		flow_idx = flow_list_create(dev, NULL, &attr, items,
7774 					    actions, false, &error);
7775 		flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
7776 				      flow_idx);
7777 		if (!flow)
7778 			continue;
7779 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
7780 			config->flow_mreg_c[n++] = idx;
7781 		flow_list_destroy(dev, NULL, flow_idx);
7782 	}
7783 	for (; n < MLX5_MREG_C_NUM; ++n)
7784 		config->flow_mreg_c[n] = REG_NON;
7785 	return 0;
7786 }
7787 
7788 /**
7789  * Dump raw HW flow data to a file.
7790  *
7791  * @param[in] dev
7792  *    The pointer to Ethernet device.
7793  * @param[in] file
7794  *   A pointer to a file for output.
7795  * @param[out] error
7796  *   Perform verbose error reporting if not NULL. PMDs initialize this
7797  *   structure in case of error only.
7798  * @return
7799  *   0 on success, a negative value otherwise.
7800  */
7801 int
7802 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
7803 		   FILE *file,
7804 		   struct rte_flow_error *error __rte_unused)
7805 {
7806 	struct mlx5_priv *priv = dev->data->dev_private;
7807 	struct mlx5_dev_ctx_shared *sh = priv->sh;
7808 
7809 	if (!priv->config.dv_flow_en) {
7810 		if (fputs("device dv flow disabled\n", file) <= 0)
7811 			return -errno;
7812 		return -ENOTSUP;
7813 	}
7814 	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
7815 				       sh->tx_domain, file);
7816 }
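
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined):
 * the dump above is reached through the generic rte_flow API.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static int
flow_dump_sketch(uint16_t port_id, const char *path)
{
	struct rte_flow_error error;
	FILE *file = fopen(path, "w");
	int ret;

	if (!file)
		return -errno;
	ret = rte_flow_dev_dump(port_id, file, &error);
	fclose(file);
	return ret;
}
#endif /* MLX5_FLOW_USAGE_SKETCH */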
7817 
7818 /**
7819  * Get aged-out flows.
7820  *
7821  * @param[in] dev
7822  *   Pointer to the Ethernet device structure.
7823  * @param[in] contexts
7824  *   The address of an array of pointers to the aged-out flow contexts.
7825  * @param[in] nb_contexts
7826  *   The length of the context array.
7827  * @param[out] error
7828  *   Perform verbose error reporting if not NULL. Initialized in case of
7829  *   error only.
7830  *
7831  * @return
7832  *   The number of aged-out flow contexts reported on success, otherwise
7833  *   a negative errno value.
7834  *   If nb_contexts is 0, the total number of all aged-out contexts is
7835  *   returned; otherwise, the number reported in the context array.
7836  */
7837 int
7838 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
7839 			uint32_t nb_contexts, struct rte_flow_error *error)
7840 {
7841 	const struct mlx5_flow_driver_ops *fops;
7842 	struct rte_flow_attr attr = { .transfer = 0 };
7843 
7844 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7845 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7846 		return fops->get_aged_flows(dev, contexts, nb_contexts,
7847 						    error);
7848 	}
7849 	DRV_LOG(ERR,
7850 		"port %u get aged flows is not supported.",
7851 		 dev->data->port_id);
7852 	return -ENOTSUP;
7853 }
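
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined)
 * of the two-step retrieval pattern served by the handler above: query the
 * count first, then fetch the contexts.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static void
aged_flows_sketch(uint16_t port_id)
{
	struct rte_flow_error error;
	void **contexts;
	int total;
	int n;

	/* nb_contexts == 0 returns the total number of aged-out flows. */
	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (total <= 0)
		return;
	contexts = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * total, 0,
			       SOCKET_ID_ANY);
	if (!contexts)
		return;
	n = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	/* Each of the n contexts is the age action context of one flow. */
	(void)n;
	mlx5_free(contexts);
}
#endif /* MLX5_FLOW_USAGE_SKETCH */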
7854 
7855 /* Wrapper for driver action_validate op callback */
7856 static int
7857 flow_drv_action_validate(struct rte_eth_dev *dev,
7858 			 const struct rte_flow_shared_action_conf *conf,
7859 			 const struct rte_flow_action *action,
7860 			 const struct mlx5_flow_driver_ops *fops,
7861 			 struct rte_flow_error *error)
7862 {
7863 	static const char err_msg[] = "shared action validation unsupported";
7864 
7865 	if (!fops->action_validate) {
7866 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7867 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7868 				   NULL, err_msg);
7869 		return -rte_errno;
7870 	}
7871 	return fops->action_validate(dev, conf, action, error);
7872 }
7873 
7874 /**
7875  * Destroys the shared action by handle.
7876  *
7877  * @param dev
7878  *   Pointer to Ethernet device structure.
7879  * @param[in] action
7880  *   Handle for the shared action to be destroyed.
7881  * @param[out] error
7882  *   Perform verbose error reporting if not NULL. PMDs initialize this
7883  *   structure in case of error only.
7884  *
7885  * @return
7886  *   0 on success, a negative errno value otherwise and rte_errno is set.
7887  *
7888  * @note: wrapper for driver action_destroy op callback.
7889  */
7890 static int
7891 mlx5_shared_action_destroy(struct rte_eth_dev *dev,
7892 			   struct rte_flow_shared_action *action,
7893 			   struct rte_flow_error *error)
7894 {
7895 	static const char err_msg[] = "shared action destruction unsupported";
7896 	struct rte_flow_attr attr = { .transfer = 0 };
7897 	const struct mlx5_flow_driver_ops *fops =
7898 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7899 
7900 	if (!fops->action_destroy) {
7901 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7902 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7903 				   NULL, err_msg);
7904 		return -rte_errno;
7905 	}
7906 	return fops->action_destroy(dev, action, error);
7907 }
7908 
7909 /* Wrapper for driver action_update op callback */
7910 static int
7911 flow_drv_action_update(struct rte_eth_dev *dev,
7912 		       struct rte_flow_shared_action *action,
7913 		       const void *action_conf,
7914 		       const struct mlx5_flow_driver_ops *fops,
7915 		       struct rte_flow_error *error)
7916 {
7917 	static const char err_msg[] = "shared action update unsupported";
7918 
7919 	if (!fops->action_update) {
7920 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7921 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7922 				   NULL, err_msg);
7923 		return -rte_errno;
7924 	}
7925 	return fops->action_update(dev, action, action_conf, error);
7926 }
7927 
7928 /**
7929  * Create shared action for reuse in multiple flow rules.
7930  *
7931  * @param dev
7932  *   Pointer to Ethernet device structure.
7933  * @param[in] action
7934  *   Action configuration for shared action creation.
7935  * @param[out] error
7936  *   Perform verbose error reporting if not NULL. PMDs initialize this
7937  *   structure in case of error only.
7938  * @return
7939  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
7940  */
7941 static struct rte_flow_shared_action *
7942 mlx5_shared_action_create(struct rte_eth_dev *dev,
7943 			  const struct rte_flow_shared_action_conf *conf,
7944 			  const struct rte_flow_action *action,
7945 			  struct rte_flow_error *error)
7946 {
7947 	static const char err_msg[] = "shared action creation unsupported";
7948 	struct rte_flow_attr attr = { .transfer = 0 };
7949 	const struct mlx5_flow_driver_ops *fops =
7950 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7951 
7952 	if (flow_drv_action_validate(dev, conf, action, fops, error))
7953 		return NULL;
7954 	if (!fops->action_create) {
7955 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7956 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7957 				   NULL, err_msg);
7958 		return NULL;
7959 	}
7960 	return fops->action_create(dev, conf, action, error);
7961 }
7962 
7963 /**
7964  * Updates in place the shared action configuration pointed to by the
7965  * *shared_action* handle with the configuration provided as the *action*
7966  * argument. The update affects all flow rules reusing the action via its
7967  * handle.
7968  *
7969  * @param dev
7970  *   Pointer to Ethernet device structure.
7971  * @param[in] shared_action
7972  *   Handle for the shared action to be updated.
7973  * @param[in] action
7974  *   Action specification used to modify the action pointed to by the handle.
7975  *   *action* must be of the same type as the action pointed to by the
7976  *   *shared_action* handle argument; otherwise it is considered invalid.
7977  * @param[out] error
7978  *   Perform verbose error reporting if not NULL. PMDs initialize this
7979  *   structure in case of error only.
7980  *
7981  * @return
7982  *   0 on success, a negative errno value otherwise and rte_errno is set.
7983  */
7984 static int
7985 mlx5_shared_action_update(struct rte_eth_dev *dev,
7986 		struct rte_flow_shared_action *shared_action,
7987 		const struct rte_flow_action *action,
7988 		struct rte_flow_error *error)
7989 {
7990 	struct rte_flow_attr attr = { .transfer = 0 };
7991 	const struct mlx5_flow_driver_ops *fops =
7992 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7993 	int ret;
7994 
7995 	switch (shared_action->type) {
7996 	case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
7997 		if (action->type != RTE_FLOW_ACTION_TYPE_RSS) {
7998 			return rte_flow_error_set(error, EINVAL,
7999 						  RTE_FLOW_ERROR_TYPE_ACTION,
8000 						  NULL,
8001 						  "update action type invalid");
8002 		}
8003 		ret = flow_drv_action_validate(dev, NULL, action, fops, error);
8004 		if (ret)
8005 			return ret;
8006 		return flow_drv_action_update(dev, shared_action, action->conf,
8007 					      fops, error);
8008 	default:
8009 		return rte_flow_error_set(error, ENOTSUP,
8010 					  RTE_FLOW_ERROR_TYPE_ACTION,
8011 					  NULL,
8012 					  "action type not supported");
8013 	}
8014 }
8015 
8016 /**
8017  * Query the shared action by handle.
8018  *
8019  * This function allows retrieving action-specific data such as counters.
8020  * Data is gathered by a special action which may be present/referenced in
8021  * more than one flow rule definition.
8022  *
8023  * \see RTE_FLOW_ACTION_TYPE_COUNT
8024  *
8025  * @param dev
8026  *   Pointer to Ethernet device structure.
8027  * @param[in] action
8028  *   Handle for the shared action to query.
8029  * @param[in, out] data
8030  *   Pointer to storage for the associated query data type.
8031  * @param[out] error
8032  *   Perform verbose error reporting if not NULL. PMDs initialize this
8033  *   structure in case of error only.
8034  *
8035  * @return
8036  *   0 on success, a negative errno value otherwise and rte_errno is set.
8037  */
8038 static int
8039 mlx5_shared_action_query(struct rte_eth_dev *dev,
8040 			 const struct rte_flow_shared_action *action,
8041 			 void *data,
8042 			 struct rte_flow_error *error)
8043 {
8044 	(void)dev;
8045 	switch (action->type) {
8046 	case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
8047 		__atomic_load(&action->refcnt, (uint32_t *)data,
8048 			      __ATOMIC_RELAXED);
8049 		return 0;
8050 	default:
8051 		return rte_flow_error_set(error, ENOTSUP,
8052 					  RTE_FLOW_ERROR_TYPE_ACTION,
8053 					  NULL,
8054 					  "action type not supported");
8055 	}
8056 }
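
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined)
 * of the shared-action life cycle these wrappers implement, driven through
 * the generic rte_flow API. For shared RSS, query returns the reference
 * count, as shown in mlx5_shared_action_query() above.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static void
shared_rss_sketch(uint16_t port_id, const struct rte_flow_action_rss *rss)
{
	struct rte_flow_error error;
	struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = rss,
	};
	struct rte_flow_shared_action *handle;
	uint32_t refcnt = 0;

	handle = rte_flow_shared_action_create(port_id, &conf, &action,
					       &error);
	if (!handle)
		return;
	rte_flow_shared_action_query(port_id, handle, &refcnt, &error);
	rte_flow_shared_action_destroy(port_id, handle, &error);
}
#endif /* MLX5_FLOW_USAGE_SKETCH */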
8057 
8058 /**
8059  * Destroy all shared actions.
8060  *
8061  * @param dev
8062  *   Pointer to Ethernet device.
8063  *
8064  * @return
8065  *   0 on success, a negative errno value otherwise and rte_errno is set.
8066  */
8067 int
8068 mlx5_shared_action_flush(struct rte_eth_dev *dev)
8069 {
8070 	struct rte_flow_error error;
8071 	struct mlx5_priv *priv = dev->data->dev_private;
8072 	struct rte_flow_shared_action *action;
8073 	int ret = 0;
8074 
8075 	while (!LIST_EMPTY(&priv->shared_actions)) {
8076 		action = LIST_FIRST(&priv->shared_actions);
8077 		ret = mlx5_shared_action_destroy(dev, action, &error);
		/*
		 * A failed destroy leaves the entry on the list; stop to
		 * avoid looping forever.
		 */
		if (ret)
			break;
8078 	}
8079 	return ret;
8080 }
8081 
8082 static void
8083 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
8084 		      struct mlx5_flow_tunnel *tunnel)
8085 {
8086 	struct mlx5_priv *priv = dev->data->dev_private;
8087 
8088 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
8089 		dev->data->port_id, tunnel->tunnel_id);
8090 	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
8091 	LIST_REMOVE(tunnel, chain);
8092 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
8093 			tunnel->tunnel_id);
8094 	mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
8095 	mlx5_free(tunnel);
8096 }
8097 
8098 static struct mlx5_flow_tunnel *
8099 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
8100 {
8101 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
8102 	struct mlx5_flow_tunnel *tun;
8103 
8104 	LIST_FOREACH(tun, &thub->tunnels, chain) {
8105 		if (tun->tunnel_id == id)
8106 			break;
8107 	}
8108 
8109 	return tun;
8110 }
8111 
8112 static struct mlx5_flow_tunnel *
8113 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
8114 			  const struct rte_flow_tunnel *app_tunnel)
8115 {
8116 	struct mlx5_priv *priv = dev->data->dev_private;
8117 	struct mlx5_flow_tunnel *tunnel;
8118 	uint32_t id;
8119 
8120 	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
8121 			  &id);
8122 	if (id >= MLX5_MAX_TUNNELS) {
8123 		mlx5_ipool_free(priv->sh->ipool
8124 				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
8125 		DRV_LOG(ERR, "Tunnel ID %d exceeds the maximum limit.", id);
8126 		return NULL;
8127 	} else if (!id) {
8128 		return NULL;
8129 	}
8130 	/*
8131 	 * mlx5 flow tunnel is an auxiliary data structure.
8132 	 * It is not part of the IO path, so there is no need to
8133 	 * allocate it from the huge-page pools dedicated to IO.
8134 	 */
8135 	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
8136 			     0, SOCKET_ID_ANY);
8137 	if (!tunnel) {
8138 		mlx5_ipool_free(priv->sh->ipool
8139 				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
8140 		return NULL;
8141 	}
8142 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
8143 	if (!tunnel->groups) {
8144 		mlx5_ipool_free(priv->sh->ipool
8145 				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
8146 		mlx5_free(tunnel);
8147 		return NULL;
8148 	}
8149 	/* Initialize the new PMD tunnel. */
8150 	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
8151 	tunnel->tunnel_id = id;
8152 	tunnel->action.type = (typeof(tunnel->action.type))
8153 			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
8154 	tunnel->action.conf = tunnel;
8155 	tunnel->item.type = (typeof(tunnel->item.type))
8156 			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
8157 	tunnel->item.spec = tunnel;
8158 	tunnel->item.last = NULL;
8159 	tunnel->item.mask = NULL;
8160 
8161 	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
8162 		dev->data->port_id, tunnel->tunnel_id);
8163 
8164 	return tunnel;
8165 }
8166 
8167 static int
8168 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
8169 		     const struct rte_flow_tunnel *app_tunnel,
8170 		     struct mlx5_flow_tunnel **tunnel)
8171 {
8172 	int ret = 0; /* Must start at 0: untouched on successful allocation. */
8173 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
8174 	struct mlx5_flow_tunnel *tun;
8175 
8176 	LIST_FOREACH(tun, &thub->tunnels, chain) {
8177 		if (!memcmp(app_tunnel, &tun->app_tunnel,
8178 			    sizeof(*app_tunnel))) {
8179 			*tunnel = tun;
8180 			ret = 0;
8181 			break;
8182 		}
8183 	}
8184 	if (!tun) {
8185 		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
8186 		if (tun) {
8187 			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
8188 			*tunnel = tun;
8189 		} else {
8190 			ret = -ENOMEM;
8191 		}
8192 	}
8193 	if (tun)
8194 		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
8195 
8196 	return ret;
8197 }
8198 
8199 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
8200 {
8201 	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8202 
8203 	if (!thub)
8204 		return;
8205 	if (!LIST_EMPTY(&thub->tunnels))
8206 		DRV_LOG(WARNING, "port %u tunnels present", port_id);
8207 	mlx5_hlist_destroy(thub->groups, NULL, NULL);
8208 	mlx5_free(thub);
8209 }
8210 
8211 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
8212 {
8213 	int err;
8214 	struct mlx5_flow_tunnel_hub *thub;
8215 
8216 	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
8217 			   0, SOCKET_ID_ANY);
8218 	if (!thub)
8219 		return -ENOMEM;
8220 	LIST_INIT(&thub->tunnels);
8221 	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
8222 	if (!thub->groups) {
8223 		err = -rte_errno;
8224 		goto err;
8225 	}
8226 	sh->tunnel_hub = thub;
8227 
8228 	return 0;
8229 
8230 err:
8231 	/* Only the groups hlist allocation can have failed here. */
8232 	mlx5_free(thub);
8235 	return err;
8236 }
8237 
8238 #ifndef HAVE_MLX5DV_DR
8239 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
8240 #else
8241 #define MLX5_DOMAIN_SYNC_FLOW \
8242 	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
8243 #endif
8244 
8245 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
8246 {
8247 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
8248 	const struct mlx5_flow_driver_ops *fops;
8249 	int ret;
8250 	struct rte_flow_attr attr = { .transfer = 0 };
8251 
8252 	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8253 	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
8254 	if (ret > 0)
8255 		ret = -ret;
8256 	return ret;
8257 }
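
/*
 * A minimal sketch (illustration only; hypothetical guard, never defined):
 * flushing the cached Rx and FDB steering rules to HW with the API above,
 * assuming the MLX5_DOMAIN_BIT_* flags declared for
 * rte_pmd_mlx5_sync_flow() in rte_pmd_mlx5.h.
 */
#ifdef MLX5_FLOW_USAGE_SKETCH
static int
sync_flow_sketch(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id, MLX5_DOMAIN_BIT_NIC_RX |
					       MLX5_DOMAIN_BIT_FDB);
}
#endif /* MLX5_FLOW_USAGE_SKETCH */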
8258