xref: /dpdk/drivers/net/mlx5/mlx5_flow.c (revision b9a87346b05c562dd6005ee025eca67a1a80bea8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <stdbool.h>
10 #include <sys/queue.h>
11 
12 #include <rte_common.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_eal_paging.h>
16 #include <rte_flow.h>
17 #include <rte_cycles.h>
18 #include <rte_flow_driver.h>
19 #include <rte_malloc.h>
20 #include <rte_ip.h>
21 
22 #include <mlx5_glue.h>
23 #include <mlx5_devx_cmds.h>
24 #include <mlx5_prm.h>
25 #include <mlx5_malloc.h>
26 
27 #include "mlx5_defs.h"
28 #include "mlx5.h"
29 #include "mlx5_flow.h"
30 #include "mlx5_flow_os.h"
31 #include "mlx5_rx.h"
32 #include "mlx5_tx.h"
33 #include "mlx5_common_os.h"
34 #include "rte_pmd_mlx5.h"
35 
36 /*
37  * Shared array for quick translation between port_id and vport mask/values
38  * used for HWS rules.
39  */
40 struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
41 
42 struct tunnel_default_miss_ctx {
43 	uint16_t *queue;
44 	__extension__
45 	union {
46 		struct rte_flow_action_rss action_rss;
47 		struct rte_flow_action_queue miss_queue;
48 		struct rte_flow_action_jump miss_jump;
49 		uint8_t raw[0];
50 	};
51 };
52 
53 void
54 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
55 {
56 	struct mlx5_priv *priv = dev->data->dev_private;
57 #ifdef HAVE_MLX5_HWS_SUPPORT
58 	struct rte_flow_error error;
59 #endif
60 
61 	while (!LIST_EMPTY(&priv->indirect_list_head)) {
62 		struct mlx5_indirect_list *e =
63 			LIST_FIRST(&priv->indirect_list_head);
64 
65 		LIST_REMOVE(e, entry);
66 		switch (e->type) {
67 #ifdef HAVE_MLX5_HWS_SUPPORT
68 		case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
69 			mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
70 		break;
71 		case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
72 			mlx5_destroy_legacy_indirect(dev, e);
73 			break;
74 		case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
75 			mlx5_reformat_action_destroy(dev,
76 				(struct rte_flow_action_list_handle *)e, &error);
77 			break;
78 #endif
79 		default:
80 			DRV_LOG(ERR, "invalid indirect list type");
81 			MLX5_ASSERT(false);
82 			break;
83 		}
84 	}
85 }
86 
87 static int
88 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
89 			     struct rte_flow *flow,
90 			     const struct rte_flow_attr *attr,
91 			     const struct rte_flow_action *app_actions,
92 			     uint32_t flow_idx,
93 			     const struct mlx5_flow_tunnel *tunnel,
94 			     struct tunnel_default_miss_ctx *ctx,
95 			     struct rte_flow_error *error);
96 static struct mlx5_flow_tunnel *
97 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
98 static void
99 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
100 static uint32_t
101 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
102 				const struct mlx5_flow_tunnel *tunnel,
103 				uint32_t group, uint32_t *table,
104 				struct rte_flow_error *error);
105 
106 /** Device flow drivers. */
107 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
108 
109 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
110 
111 const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
112 	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
113 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
114 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
115 #endif
116 #ifdef HAVE_MLX5_HWS_SUPPORT
117 	[MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
118 #endif
119 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
120 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
121 };
122 
123 /** Helper macro to build input graph for mlx5_flow_expand_rss(). */
124 #define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
125 	(const int []){ \
126 		__VA_ARGS__, 0, \
127 	}
128 
129 /** Node object of input graph for mlx5_flow_expand_rss(). */
130 struct mlx5_flow_expand_node {
131 	const int *const next;
132 	/**<
133 	 * List of next node indexes. Index 0 is interpreted as a terminator.
134 	 */
135 	const enum rte_flow_item_type type;
136 	/**< Pattern item type of current node. */
137 	uint64_t rss_types;
138 	/**<
139 	 * RSS types bit-field associated with this node
140 	 * (see RTE_ETH_RSS_* definitions).
141 	 */
142 	uint64_t node_flags;
143 	/**<
144 	 *  Bit-fields that define how the node is used in the expansion.
145 	 * (see MLX5_EXPANSION_NODE_* definitions).
146 	 */
147 };
148 
149 /** Keep same format with mlx5_flow_expand_rss to share the buffer for expansion. */
150 struct mlx5_flow_expand_sqn {
151 	uint32_t entries; /** Number of entries */
152 	struct {
153 		struct rte_flow_item *pattern; /**< Expanded pattern array. */
154 		uint32_t priority; /**< Priority offset for each expansion. */
155 	} entry[];
156 };
157 
158 /* Optional expand field. The expansion alg will not go deeper. */
159 #define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
160 
161 /* The node is not added implicitly as expansion to the flow pattern.
162  * If the node type does not match the flow pattern item type, the
163  * expansion alg will go deeper to its next items.
164  * In the current implementation, the list of next nodes indexes can
165  * have up to one node with this flag set and it has to be the last
166  * node index (before the list terminator).
167  */
168 #define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
169 
170 /** Object returned by mlx5_flow_expand_rss(). */
171 struct mlx5_flow_expand_rss {
172 	uint32_t entries;
173 	/**< Number of entries @p patterns and @p priorities. */
174 	struct {
175 		struct rte_flow_item *pattern; /**< Expanded pattern array. */
176 		uint32_t priority; /**< Priority offset for each expansion. */
177 	} entry[];
178 };
179 
180 static void
181 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
182 
183 static const struct mlx5_flow_expand_node *
184 mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
185 		unsigned int item_idx,
186 		const struct mlx5_flow_expand_node graph[],
187 		const struct mlx5_flow_expand_node *node);
188 
189 static __rte_always_inline int
190 mlx5_need_cache_flow(const struct mlx5_priv *priv,
191 		     const struct rte_flow_attr *attr)
192 {
193 	return priv->isolated && priv->sh->config.dv_flow_en == 1 &&
194 		(attr ? !attr->group : true) &&
195 		priv->mode_info.mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY &&
196 		(!priv->sh->config.dv_esw_en || !priv->sh->config.fdb_def_rule);
197 }
198 
199 static bool
200 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
201 {
202 	switch (item->type) {
203 	case RTE_FLOW_ITEM_TYPE_ETH:
204 	case RTE_FLOW_ITEM_TYPE_VLAN:
205 	case RTE_FLOW_ITEM_TYPE_IPV4:
206 	case RTE_FLOW_ITEM_TYPE_IPV6:
207 	case RTE_FLOW_ITEM_TYPE_UDP:
208 	case RTE_FLOW_ITEM_TYPE_TCP:
209 	case RTE_FLOW_ITEM_TYPE_ESP:
210 	case RTE_FLOW_ITEM_TYPE_ICMP:
211 	case RTE_FLOW_ITEM_TYPE_ICMP6:
212 	case RTE_FLOW_ITEM_TYPE_VXLAN:
213 	case RTE_FLOW_ITEM_TYPE_NVGRE:
214 	case RTE_FLOW_ITEM_TYPE_GRE:
215 	case RTE_FLOW_ITEM_TYPE_GENEVE:
216 	case RTE_FLOW_ITEM_TYPE_MPLS:
217 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
218 	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
219 	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
220 	case RTE_FLOW_ITEM_TYPE_GTP:
221 		return true;
222 	default:
223 		break;
224 	}
225 	return false;
226 }
227 
228 /**
229  * Network Service Header (NSH) and its next protocol values
230  * are described in RFC-8393.
231  */
232 static enum rte_flow_item_type
233 mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
234 {
235 	enum rte_flow_item_type type;
236 
237 	switch (proto_mask & proto_spec) {
238 	case 0:
239 		type = RTE_FLOW_ITEM_TYPE_VOID;
240 		break;
241 	case RTE_VXLAN_GPE_TYPE_IPV4:
242 		type = RTE_FLOW_ITEM_TYPE_IPV4;
243 		break;
244 	case RTE_VXLAN_GPE_TYPE_IPV6:
245 		type = RTE_VXLAN_GPE_TYPE_IPV6;
246 		break;
247 	case RTE_VXLAN_GPE_TYPE_ETH:
248 		type = RTE_FLOW_ITEM_TYPE_ETH;
249 		break;
250 	default:
251 		type = RTE_FLOW_ITEM_TYPE_END;
252 	}
253 	return type;
254 }
255 
256 static enum rte_flow_item_type
257 mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
258 {
259 	enum rte_flow_item_type type;
260 
261 	switch (proto_mask & proto_spec) {
262 	case 0:
263 		type = RTE_FLOW_ITEM_TYPE_VOID;
264 		break;
265 	case IPPROTO_UDP:
266 		type = RTE_FLOW_ITEM_TYPE_UDP;
267 		break;
268 	case IPPROTO_TCP:
269 		type = RTE_FLOW_ITEM_TYPE_TCP;
270 		break;
271 	case IPPROTO_IPIP:
272 		type = RTE_FLOW_ITEM_TYPE_IPV4;
273 		break;
274 	case IPPROTO_IPV6:
275 		type = RTE_FLOW_ITEM_TYPE_IPV6;
276 		break;
277 	case IPPROTO_ESP:
278 		type = RTE_FLOW_ITEM_TYPE_ESP;
279 		break;
280 	default:
281 		type = RTE_FLOW_ITEM_TYPE_END;
282 	}
283 	return type;
284 }
285 
286 static enum rte_flow_item_type
287 mlx5_ethertype_to_item_type(rte_be16_t type_spec,
288 			    rte_be16_t type_mask, bool is_tunnel)
289 {
290 	enum rte_flow_item_type type;
291 
292 	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
293 	case 0:
294 		type = RTE_FLOW_ITEM_TYPE_VOID;
295 		break;
296 	case RTE_ETHER_TYPE_TEB:
297 		type = is_tunnel ?
298 		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
299 		break;
300 	case RTE_ETHER_TYPE_VLAN:
301 		type = !is_tunnel ?
302 		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
303 		break;
304 	case RTE_ETHER_TYPE_IPV4:
305 		type = RTE_FLOW_ITEM_TYPE_IPV4;
306 		break;
307 	case RTE_ETHER_TYPE_IPV6:
308 		type = RTE_FLOW_ITEM_TYPE_IPV6;
309 		break;
310 	default:
311 		type = RTE_FLOW_ITEM_TYPE_END;
312 	}
313 	return type;
314 }
315 
316 static enum rte_flow_item_type
317 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
318 {
319 #define MLX5_XSET_ITEM_MASK_SPEC(type, fld)                              \
320 	do {                                                             \
321 		const void *m = item->mask;                              \
322 		const void *s = item->spec;                              \
323 		mask = m ?                                               \
324 			((const struct rte_flow_item_##type *)m)->fld :  \
325 			rte_flow_item_##type##_mask.fld;                 \
326 		spec = ((const struct rte_flow_item_##type *)s)->fld;    \
327 	} while (0)
328 
329 	enum rte_flow_item_type ret;
330 	uint16_t spec, mask;
331 
332 	if (item == NULL || item->spec == NULL)
333 		return RTE_FLOW_ITEM_TYPE_VOID;
334 	switch (item->type) {
335 	case RTE_FLOW_ITEM_TYPE_ETH:
336 		MLX5_XSET_ITEM_MASK_SPEC(eth, hdr.ether_type);
337 		if (!mask)
338 			return RTE_FLOW_ITEM_TYPE_VOID;
339 		ret = mlx5_ethertype_to_item_type(spec, mask, false);
340 		break;
341 	case RTE_FLOW_ITEM_TYPE_VLAN:
342 		MLX5_XSET_ITEM_MASK_SPEC(vlan, hdr.eth_proto);
343 		if (!mask)
344 			return RTE_FLOW_ITEM_TYPE_VOID;
345 		ret = mlx5_ethertype_to_item_type(spec, mask, false);
346 		break;
347 	case RTE_FLOW_ITEM_TYPE_IPV4:
348 		MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
349 		if (!mask)
350 			return RTE_FLOW_ITEM_TYPE_VOID;
351 		ret = mlx5_inet_proto_to_item_type(spec, mask);
352 		break;
353 	case RTE_FLOW_ITEM_TYPE_IPV6:
354 		MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
355 		if (!mask)
356 			return RTE_FLOW_ITEM_TYPE_VOID;
357 		ret = mlx5_inet_proto_to_item_type(spec, mask);
358 		break;
359 	case RTE_FLOW_ITEM_TYPE_GENEVE:
360 		MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
361 		ret = mlx5_ethertype_to_item_type(spec, mask, true);
362 		break;
363 	case RTE_FLOW_ITEM_TYPE_GRE:
364 		MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
365 		ret = mlx5_ethertype_to_item_type(spec, mask, true);
366 		break;
367 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
368 		MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, hdr.proto);
369 		ret = mlx5_nsh_proto_to_item_type(spec, mask);
370 		break;
371 	default:
372 		ret = RTE_FLOW_ITEM_TYPE_VOID;
373 		break;
374 	}
375 	return ret;
376 #undef MLX5_XSET_ITEM_MASK_SPEC
377 }
378 
379 static const int *
380 mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
381 		const int *next_node)
382 {
383 	const struct mlx5_flow_expand_node *node = NULL;
384 	const int *next = next_node;
385 
386 	while (next && *next) {
387 		/*
388 		 * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
389 		 * flag set, because they were not found in the flow pattern.
390 		 */
391 		node = &graph[*next];
392 		if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
393 			break;
394 		next = node->next;
395 	}
396 	return next;
397 }
398 
399 #define MLX5_RSS_EXP_ELT_N 32
400 
401 /**
402  * Expand RSS flows into several possible flows according to the RSS hash
403  * fields requested and the driver capabilities.
404  *
405  * @param[out] buf
406  *   Buffer to store the result expansion.
407  * @param[in] size
408  *   Buffer size in bytes. If 0, @p buf can be NULL.
409  * @param[in] pattern
410  *   User flow pattern.
411  * @param[in] types
412  *   RSS types to expand (see RTE_ETH_RSS_* definitions).
413  * @param[in] graph
414  *   Input graph to expand @p pattern according to @p types.
415  * @param[in] graph_root_index
416  *   Index of root node in @p graph, typically 0.
417  *
418  * @return
419  *   A positive value representing the size of @p buf in bytes regardless of
420  *   @p size on success, a negative errno value otherwise and rte_errno is
421  *   set, the following errors are defined:
422  *
423  *   -E2BIG: graph-depth @p graph is too deep.
424  *   -EINVAL: @p size has not enough space for expanded pattern.
425  */
426 static int
427 mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
428 		     const struct rte_flow_item *pattern, uint64_t types,
429 		     const struct mlx5_flow_expand_node graph[],
430 		     int graph_root_index)
431 {
432 	const struct rte_flow_item *item;
433 	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
434 	const int *next_node;
435 	const int *stack[MLX5_RSS_EXP_ELT_N];
436 	int stack_pos = 0;
437 	struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
438 	unsigned int i, item_idx, last_expand_item_idx = 0;
439 	size_t lsize;
440 	size_t user_pattern_size = 0;
441 	void *addr = NULL;
442 	const struct mlx5_flow_expand_node *next = NULL;
443 	struct rte_flow_item missed_item;
444 	int missed = 0;
445 	int elt = 0;
446 	const struct rte_flow_item *last_expand_item = NULL;
447 
448 	memset(&missed_item, 0, sizeof(missed_item));
449 	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
450 		MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
451 	if (lsize > size)
452 		return -EINVAL;
453 	buf->entry[0].priority = 0;
454 	buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
455 	buf->entries = 0;
456 	addr = buf->entry[0].pattern;
457 	for (item = pattern, item_idx = 0;
458 			item->type != RTE_FLOW_ITEM_TYPE_END;
459 			item++, item_idx++) {
460 		if (!mlx5_flow_is_rss_expandable_item(item)) {
461 			user_pattern_size += sizeof(*item);
462 			continue;
463 		}
464 		last_expand_item = item;
465 		last_expand_item_idx = item_idx;
466 		i = 0;
467 		while (node->next && node->next[i]) {
468 			next = &graph[node->next[i]];
469 			if (next->type == item->type)
470 				break;
471 			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
472 				node = next;
473 				i = 0;
474 			} else {
475 				++i;
476 			}
477 		}
478 		if (next)
479 			node = next;
480 		user_pattern_size += sizeof(*item);
481 	}
482 	user_pattern_size += sizeof(*item); /* Handle END item. */
483 	lsize += user_pattern_size;
484 	if (lsize > size)
485 		return -EINVAL;
486 	/* Copy the user pattern in the first entry of the buffer. */
487 	rte_memcpy(addr, pattern, user_pattern_size);
488 	addr = (void *)(((uintptr_t)addr) + user_pattern_size);
489 	buf->entries = 1;
490 	/* Start expanding. */
491 	memset(flow_items, 0, sizeof(flow_items));
492 	user_pattern_size -= sizeof(*item);
493 	/*
494 	 * Check if the last valid item has spec set, need complete pattern,
495 	 * and the pattern can be used for expansion.
496 	 */
497 	missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
498 	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
499 		/* Item type END indicates expansion is not required. */
500 		return lsize;
501 	}
502 	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
503 		next = NULL;
504 		missed = 1;
505 		i = 0;
506 		while (node->next && node->next[i]) {
507 			next = &graph[node->next[i]];
508 			if (next->type == missed_item.type) {
509 				flow_items[0].type = missed_item.type;
510 				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
511 				break;
512 			}
513 			if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
514 				node = next;
515 				i = 0;
516 			} else {
517 				++i;
518 			}
519 			next = NULL;
520 		}
521 	}
522 	if (next && missed) {
523 		elt = 2; /* missed item + item end. */
524 		node = next;
525 		lsize += elt * sizeof(*item) + user_pattern_size;
526 		if (lsize > size)
527 			return -EINVAL;
528 		if (node->rss_types & types) {
529 			buf->entry[buf->entries].priority = 1;
530 			buf->entry[buf->entries].pattern = addr;
531 			buf->entries++;
532 			rte_memcpy(addr, buf->entry[0].pattern,
533 				   user_pattern_size);
534 			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
535 			rte_memcpy(addr, flow_items, elt * sizeof(*item));
536 			addr = (void *)(((uintptr_t)addr) +
537 					elt * sizeof(*item));
538 		}
539 	} else if (last_expand_item != NULL) {
540 		node = mlx5_flow_expand_rss_adjust_node(pattern,
541 				last_expand_item_idx, graph, node);
542 	}
543 	memset(flow_items, 0, sizeof(flow_items));
544 	next_node = mlx5_flow_expand_rss_skip_explicit(graph,
545 			node->next);
546 	stack[stack_pos] = next_node;
547 	node = next_node ? &graph[*next_node] : NULL;
548 	while (node) {
549 		flow_items[stack_pos].type = node->type;
550 		if (node->rss_types & types) {
551 			size_t n;
552 			/*
553 			 * compute the number of items to copy from the
554 			 * expansion and copy it.
555 			 * When the stack_pos is 0, there are 1 element in it,
556 			 * plus the addition END item.
557 			 */
558 			elt = stack_pos + 2;
559 			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
560 			lsize += elt * sizeof(*item) + user_pattern_size;
561 			if (lsize > size)
562 				return -EINVAL;
563 			n = elt * sizeof(*item);
564 			MLX5_ASSERT((buf->entries) < MLX5_RSS_EXP_ELT_N);
565 			buf->entry[buf->entries].priority =
566 				stack_pos + 1 + missed;
567 			buf->entry[buf->entries].pattern = addr;
568 			buf->entries++;
569 			rte_memcpy(addr, buf->entry[0].pattern,
570 				   user_pattern_size);
571 			addr = (void *)(((uintptr_t)addr) +
572 					user_pattern_size);
573 			rte_memcpy(addr, &missed_item,
574 				   missed * sizeof(*item));
575 			addr = (void *)(((uintptr_t)addr) +
576 				missed * sizeof(*item));
577 			rte_memcpy(addr, flow_items, n);
578 			addr = (void *)(((uintptr_t)addr) + n);
579 		}
580 		/* Go deeper. */
581 		if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
582 				node->next) {
583 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
584 					node->next);
585 			if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
586 				rte_errno = E2BIG;
587 				return -rte_errno;
588 			}
589 			stack[stack_pos] = next_node;
590 		} else if (*(next_node + 1)) {
591 			/* Follow up with the next possibility. */
592 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
593 					++next_node);
594 		} else if (!stack_pos) {
595 			/*
596 			 * Completing the traverse over the different paths.
597 			 * The next_node is advanced to the terminator.
598 			 */
599 			++next_node;
600 		} else {
601 			/* Move to the next path. */
602 			while (stack_pos) {
603 				next_node = stack[--stack_pos];
604 				next_node++;
605 				if (*next_node)
606 					break;
607 			}
608 			next_node = mlx5_flow_expand_rss_skip_explicit(graph,
609 					next_node);
610 			stack[stack_pos] = next_node;
611 		}
612 		node = next_node && *next_node ? &graph[*next_node] : NULL;
613 	};
614 	return lsize;
615 }
616 
617 /**
618  * Expand SQN flows into several possible flows according to the Tx queue
619  * number
620  *
621  * @param[in] buf
622  *   Buffer to store the result expansion.
623  * @param[in] size
624  *   Buffer size in bytes. If 0, @p buf can be NULL.
625  * @param[in] pattern
626  *   User flow pattern.
627  * @param[in] sq_specs
628  *   Buffer to store sq spec.
629  *
630  * @return
631  *   0 for success and negative value for failure
632  *
633  */
634 static int
635 mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size,
636 		     const struct rte_flow_item *pattern,
637 		     struct mlx5_rte_flow_item_sq *sq_specs)
638 {
639 	const struct rte_flow_item *item;
640 	bool port_representor = false;
641 	size_t user_pattern_size = 0;
642 	struct rte_eth_dev *dev;
643 	struct mlx5_priv *priv;
644 	void *addr = NULL;
645 	uint16_t port_id;
646 	size_t lsize;
647 	int elt = 2;
648 	uint16_t i;
649 
650 	buf->entries = 0;
651 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
652 		if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
653 			const struct rte_flow_item_ethdev *pid_v = item->spec;
654 
655 			if (!pid_v)
656 				return 0;
657 			port_id = pid_v->port_id;
658 			port_representor = true;
659 		}
660 		user_pattern_size += sizeof(*item);
661 	}
662 	if (!port_representor)
663 		return 0;
664 	dev = &rte_eth_devices[port_id];
665 	priv = dev->data->dev_private;
666 	buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n];
667 	lsize = offsetof(struct mlx5_flow_expand_sqn, entry) +
668 		sizeof(buf->entry[0]) * priv->txqs_n;
669 	if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size)
670 		return -EINVAL;
671 	addr = buf->entry[0].pattern;
672 	for (i = 0; i != priv->txqs_n; ++i) {
673 		struct rte_flow_item pattern_add[] = {
674 			{
675 				.type = (enum rte_flow_item_type)
676 					MLX5_RTE_FLOW_ITEM_TYPE_SQ,
677 				.spec = &sq_specs[i],
678 			},
679 			{
680 				.type = RTE_FLOW_ITEM_TYPE_END,
681 			},
682 		};
683 		struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
684 
685 		if (txq == NULL)
686 			return -EINVAL;
687 		buf->entry[i].pattern = addr;
688 		sq_specs[i].queue = mlx5_txq_get_sqn(txq);
689 		mlx5_txq_release(dev, i);
690 		rte_memcpy(addr, pattern, user_pattern_size);
691 		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
692 		rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt);
693 		addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt);
694 		buf->entries++;
695 	}
696 	return 0;
697 }
698 
699 enum mlx5_expansion {
700 	MLX5_EXPANSION_ROOT,
701 	MLX5_EXPANSION_ROOT_OUTER,
702 	MLX5_EXPANSION_OUTER_ETH,
703 	MLX5_EXPANSION_OUTER_VLAN,
704 	MLX5_EXPANSION_OUTER_IPV4,
705 	MLX5_EXPANSION_OUTER_IPV4_UDP,
706 	MLX5_EXPANSION_OUTER_IPV4_TCP,
707 	MLX5_EXPANSION_OUTER_IPV4_ESP,
708 	MLX5_EXPANSION_OUTER_IPV4_ICMP,
709 	MLX5_EXPANSION_OUTER_IPV6,
710 	MLX5_EXPANSION_OUTER_IPV6_UDP,
711 	MLX5_EXPANSION_OUTER_IPV6_TCP,
712 	MLX5_EXPANSION_OUTER_IPV6_ESP,
713 	MLX5_EXPANSION_OUTER_IPV6_ICMP6,
714 	MLX5_EXPANSION_VXLAN,
715 	MLX5_EXPANSION_STD_VXLAN,
716 	MLX5_EXPANSION_L3_VXLAN,
717 	MLX5_EXPANSION_VXLAN_GPE,
718 	MLX5_EXPANSION_GRE,
719 	MLX5_EXPANSION_NVGRE,
720 	MLX5_EXPANSION_GRE_KEY,
721 	MLX5_EXPANSION_MPLS,
722 	MLX5_EXPANSION_ETH,
723 	MLX5_EXPANSION_VLAN,
724 	MLX5_EXPANSION_IPV4,
725 	MLX5_EXPANSION_IPV4_UDP,
726 	MLX5_EXPANSION_IPV4_TCP,
727 	MLX5_EXPANSION_IPV4_ESP,
728 	MLX5_EXPANSION_IPV4_ICMP,
729 	MLX5_EXPANSION_IPV6,
730 	MLX5_EXPANSION_IPV6_UDP,
731 	MLX5_EXPANSION_IPV6_TCP,
732 	MLX5_EXPANSION_IPV6_ESP,
733 	MLX5_EXPANSION_IPV6_ICMP6,
734 	MLX5_EXPANSION_IPV6_FRAG_EXT,
735 	MLX5_EXPANSION_GTP,
736 	MLX5_EXPANSION_GENEVE,
737 };
738 
739 /** Supported expansion of items. */
740 static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
741 	[MLX5_EXPANSION_ROOT] = {
742 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
743 						  MLX5_EXPANSION_IPV4,
744 						  MLX5_EXPANSION_IPV6),
745 		.type = RTE_FLOW_ITEM_TYPE_END,
746 	},
747 	[MLX5_EXPANSION_ROOT_OUTER] = {
748 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
749 						  MLX5_EXPANSION_OUTER_IPV4,
750 						  MLX5_EXPANSION_OUTER_IPV6),
751 		.type = RTE_FLOW_ITEM_TYPE_END,
752 	},
753 	[MLX5_EXPANSION_OUTER_ETH] = {
754 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
755 		.type = RTE_FLOW_ITEM_TYPE_ETH,
756 		.rss_types = 0,
757 	},
758 	[MLX5_EXPANSION_OUTER_VLAN] = {
759 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
760 						  MLX5_EXPANSION_OUTER_IPV6),
761 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
762 		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
763 	},
764 	[MLX5_EXPANSION_OUTER_IPV4] = {
765 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
766 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
767 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
768 			 MLX5_EXPANSION_OUTER_IPV4_ESP,
769 			 MLX5_EXPANSION_OUTER_IPV4_ICMP,
770 			 MLX5_EXPANSION_GRE,
771 			 MLX5_EXPANSION_NVGRE,
772 			 MLX5_EXPANSION_IPV4,
773 			 MLX5_EXPANSION_IPV6),
774 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
775 		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
776 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
777 	},
778 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
779 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
780 						  MLX5_EXPANSION_VXLAN_GPE,
781 						  MLX5_EXPANSION_MPLS,
782 						  MLX5_EXPANSION_GENEVE,
783 						  MLX5_EXPANSION_GTP),
784 		.type = RTE_FLOW_ITEM_TYPE_UDP,
785 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
786 	},
787 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
788 		.type = RTE_FLOW_ITEM_TYPE_TCP,
789 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
790 	},
791 	[MLX5_EXPANSION_OUTER_IPV4_ESP] = {
792 		.type = RTE_FLOW_ITEM_TYPE_ESP,
793 		.rss_types = RTE_ETH_RSS_ESP,
794 	},
795 	[MLX5_EXPANSION_OUTER_IPV4_ICMP] = {
796 		.type = RTE_FLOW_ITEM_TYPE_ICMP,
797 	},
798 	[MLX5_EXPANSION_OUTER_IPV6] = {
799 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
800 			(MLX5_EXPANSION_OUTER_IPV6_UDP,
801 			 MLX5_EXPANSION_OUTER_IPV6_TCP,
802 			 MLX5_EXPANSION_OUTER_IPV6_ESP,
803 			 MLX5_EXPANSION_OUTER_IPV6_ICMP6,
804 			 MLX5_EXPANSION_IPV4,
805 			 MLX5_EXPANSION_IPV6,
806 			 MLX5_EXPANSION_GRE,
807 			 MLX5_EXPANSION_NVGRE),
808 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
809 		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
810 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
811 	},
812 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
813 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
814 						  MLX5_EXPANSION_VXLAN_GPE,
815 						  MLX5_EXPANSION_MPLS,
816 						  MLX5_EXPANSION_GENEVE,
817 						  MLX5_EXPANSION_GTP),
818 		.type = RTE_FLOW_ITEM_TYPE_UDP,
819 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
820 	},
821 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
822 		.type = RTE_FLOW_ITEM_TYPE_TCP,
823 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
824 	},
825 	[MLX5_EXPANSION_OUTER_IPV6_ESP] = {
826 		.type = RTE_FLOW_ITEM_TYPE_ESP,
827 		.rss_types = RTE_ETH_RSS_ESP,
828 	},
829 	[MLX5_EXPANSION_OUTER_IPV6_ICMP6] = {
830 		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
831 	},
832 	[MLX5_EXPANSION_VXLAN] = {
833 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
834 						  MLX5_EXPANSION_IPV4,
835 						  MLX5_EXPANSION_IPV6),
836 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
837 	},
838 	[MLX5_EXPANSION_STD_VXLAN] = {
839 			.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
840 					.type = RTE_FLOW_ITEM_TYPE_VXLAN,
841 	},
842 	[MLX5_EXPANSION_L3_VXLAN] = {
843 			.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
844 					MLX5_EXPANSION_IPV6),
845 					.type = RTE_FLOW_ITEM_TYPE_VXLAN,
846 	},
847 	[MLX5_EXPANSION_VXLAN_GPE] = {
848 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
849 						  MLX5_EXPANSION_IPV4,
850 						  MLX5_EXPANSION_IPV6),
851 		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
852 	},
853 	[MLX5_EXPANSION_GRE] = {
854 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
855 						  MLX5_EXPANSION_IPV4,
856 						  MLX5_EXPANSION_IPV6,
857 						  MLX5_EXPANSION_GRE_KEY,
858 						  MLX5_EXPANSION_MPLS),
859 		.type = RTE_FLOW_ITEM_TYPE_GRE,
860 	},
861 	[MLX5_EXPANSION_GRE_KEY] = {
862 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
863 						  MLX5_EXPANSION_IPV6,
864 						  MLX5_EXPANSION_MPLS),
865 		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
866 		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
867 	},
868 	[MLX5_EXPANSION_NVGRE] = {
869 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
870 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
871 	},
872 	[MLX5_EXPANSION_MPLS] = {
873 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
874 						  MLX5_EXPANSION_IPV6,
875 						  MLX5_EXPANSION_ETH),
876 		.type = RTE_FLOW_ITEM_TYPE_MPLS,
877 		.node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
878 	},
879 	[MLX5_EXPANSION_ETH] = {
880 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
881 		.type = RTE_FLOW_ITEM_TYPE_ETH,
882 	},
883 	[MLX5_EXPANSION_VLAN] = {
884 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
885 						  MLX5_EXPANSION_IPV6),
886 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
887 		.node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
888 	},
889 	[MLX5_EXPANSION_IPV4] = {
890 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
891 						  MLX5_EXPANSION_IPV4_TCP,
892 						  MLX5_EXPANSION_IPV4_ESP,
893 						  MLX5_EXPANSION_IPV4_ICMP),
894 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
895 		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
896 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
897 	},
898 	[MLX5_EXPANSION_IPV4_UDP] = {
899 		.type = RTE_FLOW_ITEM_TYPE_UDP,
900 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
901 	},
902 	[MLX5_EXPANSION_IPV4_TCP] = {
903 		.type = RTE_FLOW_ITEM_TYPE_TCP,
904 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
905 	},
906 	[MLX5_EXPANSION_IPV4_ESP] = {
907 		.type = RTE_FLOW_ITEM_TYPE_ESP,
908 		.rss_types = RTE_ETH_RSS_ESP,
909 	},
910 	[MLX5_EXPANSION_IPV4_ICMP] = {
911 		.type = RTE_FLOW_ITEM_TYPE_ICMP,
912 	},
913 	[MLX5_EXPANSION_IPV6] = {
914 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
915 						  MLX5_EXPANSION_IPV6_TCP,
916 						  MLX5_EXPANSION_IPV6_ESP,
917 						  MLX5_EXPANSION_IPV6_ICMP6,
918 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
919 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
920 		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
921 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
922 	},
923 	[MLX5_EXPANSION_IPV6_UDP] = {
924 		.type = RTE_FLOW_ITEM_TYPE_UDP,
925 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
926 	},
927 	[MLX5_EXPANSION_IPV6_TCP] = {
928 		.type = RTE_FLOW_ITEM_TYPE_TCP,
929 		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
930 	},
931 	[MLX5_EXPANSION_IPV6_ESP] = {
932 		.type = RTE_FLOW_ITEM_TYPE_ESP,
933 		.rss_types = RTE_ETH_RSS_ESP,
934 	},
935 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
936 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
937 	},
938 	[MLX5_EXPANSION_IPV6_ICMP6] = {
939 		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
940 	},
941 	[MLX5_EXPANSION_GTP] = {
942 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
943 						  MLX5_EXPANSION_IPV6),
944 		.type = RTE_FLOW_ITEM_TYPE_GTP,
945 	},
946 	[MLX5_EXPANSION_GENEVE] = {
947 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
948 						  MLX5_EXPANSION_IPV4,
949 						  MLX5_EXPANSION_IPV6),
950 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
951 	},
952 };
953 
954 static struct rte_flow_action_handle *
955 mlx5_action_handle_create(struct rte_eth_dev *dev,
956 			  const struct rte_flow_indir_action_conf *conf,
957 			  const struct rte_flow_action *action,
958 			  struct rte_flow_error *error);
959 static int mlx5_action_handle_destroy
960 				(struct rte_eth_dev *dev,
961 				 struct rte_flow_action_handle *handle,
962 				 struct rte_flow_error *error);
963 static int mlx5_action_handle_update
964 				(struct rte_eth_dev *dev,
965 				 struct rte_flow_action_handle *handle,
966 				 const void *update,
967 				 struct rte_flow_error *error);
968 static int mlx5_action_handle_query
969 				(struct rte_eth_dev *dev,
970 				 const struct rte_flow_action_handle *handle,
971 				 void *data,
972 				 struct rte_flow_error *error);
973 static int
974 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
975 		    struct rte_flow_tunnel *app_tunnel,
976 		    struct rte_flow_action **actions,
977 		    uint32_t *num_of_actions,
978 		    struct rte_flow_error *error);
979 static int
980 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
981 		       struct rte_flow_tunnel *app_tunnel,
982 		       struct rte_flow_item **items,
983 		       uint32_t *num_of_items,
984 		       struct rte_flow_error *error);
985 static int
986 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
987 			      struct rte_flow_item *pmd_items,
988 			      uint32_t num_items, struct rte_flow_error *err);
989 static int
990 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
991 				struct rte_flow_action *pmd_actions,
992 				uint32_t num_actions,
993 				struct rte_flow_error *err);
994 static int
995 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
996 				  struct rte_mbuf *m,
997 				  struct rte_flow_restore_info *info,
998 				  struct rte_flow_error *err);
999 static struct rte_flow_item_flex_handle *
1000 mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
1001 			   const struct rte_flow_item_flex_conf *conf,
1002 			   struct rte_flow_error *error);
1003 static int
1004 mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
1005 			    const struct rte_flow_item_flex_handle *handle,
1006 			    struct rte_flow_error *error);
1007 static int
1008 mlx5_flow_info_get(struct rte_eth_dev *dev,
1009 		   struct rte_flow_port_info *port_info,
1010 		   struct rte_flow_queue_info *queue_info,
1011 		   struct rte_flow_error *error);
1012 static int
1013 mlx5_flow_port_configure(struct rte_eth_dev *dev,
1014 			 const struct rte_flow_port_attr *port_attr,
1015 			 uint16_t nb_queue,
1016 			 const struct rte_flow_queue_attr *queue_attr[],
1017 			 struct rte_flow_error *err);
1018 
1019 static struct rte_flow_pattern_template *
1020 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
1021 		const struct rte_flow_pattern_template_attr *attr,
1022 		const struct rte_flow_item items[],
1023 		struct rte_flow_error *error);
1024 
1025 static int
1026 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
1027 				   struct rte_flow_pattern_template *template,
1028 				   struct rte_flow_error *error);
1029 static struct rte_flow_actions_template *
1030 mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
1031 			const struct rte_flow_actions_template_attr *attr,
1032 			const struct rte_flow_action actions[],
1033 			const struct rte_flow_action masks[],
1034 			struct rte_flow_error *error);
1035 static int
1036 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
1037 				   struct rte_flow_actions_template *template,
1038 				   struct rte_flow_error *error);
1039 
1040 static struct rte_flow_template_table *
1041 mlx5_flow_table_create(struct rte_eth_dev *dev,
1042 		       const struct rte_flow_template_table_attr *attr,
1043 		       struct rte_flow_pattern_template *item_templates[],
1044 		       uint8_t nb_item_templates,
1045 		       struct rte_flow_actions_template *action_templates[],
1046 		       uint8_t nb_action_templates,
1047 		       struct rte_flow_error *error);
1048 static int
1049 mlx5_flow_table_destroy(struct rte_eth_dev *dev,
1050 			struct rte_flow_template_table *table,
1051 			struct rte_flow_error *error);
1052 static int
1053 mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,
1054 				 uint32_t group_id,
1055 				 const struct rte_flow_group_attr *attr,
1056 				 const struct rte_flow_action actions[],
1057 				 struct rte_flow_error *error);
1058 
1059 static int
1060 mlx5_action_handle_query_update(struct rte_eth_dev *dev,
1061 				struct rte_flow_action_handle *handle,
1062 				const void *update, void *query,
1063 				enum rte_flow_query_update_mode qu_mode,
1064 				struct rte_flow_error *error);
1065 
1066 static struct rte_flow_action_list_handle *
1067 mlx5_action_list_handle_create(struct rte_eth_dev *dev,
1068 			       const struct rte_flow_indir_action_conf *conf,
1069 			       const struct rte_flow_action *actions,
1070 			       struct rte_flow_error *error);
1071 
1072 static int
1073 mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
1074 				struct rte_flow_action_list_handle *handle,
1075 				struct rte_flow_error *error);
1076 
1077 static int
1078 mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
1079 					  const
1080 					  struct rte_flow_action_list_handle *handle,
1081 					  const void **update, void **query,
1082 					  enum rte_flow_query_update_mode mode,
1083 					  struct rte_flow_error *error);
1084 
1085 static int
1086 mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
1087 			  const struct rte_flow_template_table *table,
1088 			  const struct rte_flow_item pattern[],
1089 			  uint8_t pattern_template_index,
1090 			  uint32_t *hash, struct rte_flow_error *error);
1091 static int
1092 mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
1093 			  const struct rte_flow_item pattern[],
1094 			  enum rte_flow_encap_hash_field dest_field,
1095 			  uint8_t *hash,
1096 			  struct rte_flow_error *error);
1097 
1098 static int
1099 mlx5_template_table_resize(struct rte_eth_dev *dev,
1100 			   struct rte_flow_template_table *table,
1101 			   uint32_t nb_rules, struct rte_flow_error *error);
1102 static int
1103 mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
1104 			       const struct rte_flow_op_attr *attr,
1105 			       struct rte_flow *rule, void *user_data,
1106 			       struct rte_flow_error *error);
1107 static int
1108 mlx5_table_resize_complete(struct rte_eth_dev *dev,
1109 			   struct rte_flow_template_table *table,
1110 			   struct rte_flow_error *error);
1111 
1112 static const struct rte_flow_ops mlx5_flow_ops = {
1113 	.validate = mlx5_flow_validate,
1114 	.create = mlx5_flow_create,
1115 	.destroy = mlx5_flow_destroy,
1116 	.flush = mlx5_flow_flush,
1117 	.isolate = mlx5_flow_isolate,
1118 	.query = mlx5_flow_query,
1119 	.dev_dump = mlx5_flow_dev_dump,
1120 	.get_q_aged_flows = mlx5_flow_get_q_aged_flows,
1121 	.get_aged_flows = mlx5_flow_get_aged_flows,
1122 	.action_handle_create = mlx5_action_handle_create,
1123 	.action_handle_destroy = mlx5_action_handle_destroy,
1124 	.action_handle_update = mlx5_action_handle_update,
1125 	.action_handle_query = mlx5_action_handle_query,
1126 	.action_handle_query_update = mlx5_action_handle_query_update,
1127 	.action_list_handle_create = mlx5_action_list_handle_create,
1128 	.action_list_handle_destroy = mlx5_action_list_handle_destroy,
1129 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
1130 	.tunnel_match = mlx5_flow_tunnel_match,
1131 	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
1132 	.tunnel_item_release = mlx5_flow_tunnel_item_release,
1133 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
1134 	.flex_item_create = mlx5_flow_flex_item_create,
1135 	.flex_item_release = mlx5_flow_flex_item_release,
1136 	.info_get = mlx5_flow_info_get,
1137 	.pick_transfer_proxy = mlx5_flow_pick_transfer_proxy,
1138 	.configure = mlx5_flow_port_configure,
1139 	.pattern_template_create = mlx5_flow_pattern_template_create,
1140 	.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
1141 	.actions_template_create = mlx5_flow_actions_template_create,
1142 	.actions_template_destroy = mlx5_flow_actions_template_destroy,
1143 	.template_table_create = mlx5_flow_table_create,
1144 	.template_table_destroy = mlx5_flow_table_destroy,
1145 	.group_set_miss_actions = mlx5_flow_group_set_miss_actions,
1146 	.action_list_handle_query_update =
1147 		mlx5_flow_action_list_handle_query_update,
1148 	.flow_calc_table_hash = mlx5_flow_calc_table_hash,
1149 	.flow_calc_encap_hash = mlx5_flow_calc_encap_hash,
1150 	.flow_template_table_resize = mlx5_template_table_resize,
1151 	.flow_update_resized = mlx5_flow_async_update_resized,
1152 	.flow_template_table_resize_complete = mlx5_table_resize_complete,
1153 };
1154 
1155 /* Tunnel information. */
1156 struct mlx5_flow_tunnel_info {
1157 	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
1158 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
1159 };
1160 
1161 static struct mlx5_flow_tunnel_info tunnels_info[] = {
1162 	{
1163 		.tunnel = MLX5_FLOW_LAYER_VXLAN,
1164 		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
1165 	},
1166 	{
1167 		.tunnel = MLX5_FLOW_LAYER_GENEVE,
1168 		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
1169 	},
1170 	{
1171 		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
1172 		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
1173 	},
1174 	{
1175 		.tunnel = MLX5_FLOW_LAYER_GRE,
1176 		.ptype = RTE_PTYPE_TUNNEL_GRE,
1177 	},
1178 	{
1179 		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
1180 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
1181 	},
1182 	{
1183 		.tunnel = MLX5_FLOW_LAYER_MPLS,
1184 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
1185 	},
1186 	{
1187 		.tunnel = MLX5_FLOW_LAYER_NVGRE,
1188 		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
1189 	},
1190 	{
1191 		.tunnel = MLX5_FLOW_LAYER_IPIP,
1192 		.ptype = RTE_PTYPE_TUNNEL_IP,
1193 	},
1194 	{
1195 		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
1196 		.ptype = RTE_PTYPE_TUNNEL_IP,
1197 	},
1198 	{
1199 		.tunnel = MLX5_FLOW_LAYER_GTP,
1200 		.ptype = RTE_PTYPE_TUNNEL_GTPU,
1201 	},
1202 };
1203 
1204 
1205 
1206 /**
1207  * Translate tag ID to register.
1208  *
1209  * @param[in] dev
1210  *   Pointer to the Ethernet device structure.
1211  * @param[in] feature
1212  *   The feature that request the register.
1213  * @param[in] id
1214  *   The request register ID.
1215  * @param[out] error
1216  *   Error description in case of any.
1217  *
1218  * @return
1219  *   The request register on success, a negative errno
1220  *   value otherwise and rte_errno is set.
1221  */
1222 int
1223 mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
1224 		     enum mlx5_feature_name feature,
1225 		     uint32_t id,
1226 		     struct rte_flow_error *error)
1227 {
1228 	struct mlx5_priv *priv = dev->data->dev_private;
1229 	struct mlx5_sh_config *config = &priv->sh->config;
1230 	struct mlx5_dev_registers *reg = &priv->sh->registers;
1231 	enum modify_reg start_reg;
1232 	bool skip_mtr_reg = false;
1233 
1234 	switch (feature) {
1235 	case MLX5_HAIRPIN_RX:
1236 		return REG_B;
1237 	case MLX5_HAIRPIN_TX:
1238 		return REG_A;
1239 	case MLX5_METADATA_RX:
1240 		switch (config->dv_xmeta_en) {
1241 		case MLX5_XMETA_MODE_LEGACY:
1242 			return REG_B;
1243 		case MLX5_XMETA_MODE_META16:
1244 			return REG_C_0;
1245 		case MLX5_XMETA_MODE_META32:
1246 			return REG_C_1;
1247 		case MLX5_XMETA_MODE_META32_HWS:
1248 			return REG_C_1;
1249 		}
1250 		break;
1251 	case MLX5_METADATA_TX:
1252 		if (config->dv_flow_en == 2 && config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
1253 			return REG_C_1;
1254 		} else {
1255 			return REG_A;
1256 		}
1257 	case MLX5_METADATA_FDB:
1258 		switch (config->dv_xmeta_en) {
1259 		case MLX5_XMETA_MODE_LEGACY:
1260 			return REG_NON;
1261 		case MLX5_XMETA_MODE_META16:
1262 			return REG_C_0;
1263 		case MLX5_XMETA_MODE_META32:
1264 			return REG_C_1;
1265 		case MLX5_XMETA_MODE_META32_HWS:
1266 			return REG_C_1;
1267 		}
1268 		break;
1269 	case MLX5_FLOW_MARK:
1270 		switch (config->dv_xmeta_en) {
1271 		case MLX5_XMETA_MODE_LEGACY:
1272 		case MLX5_XMETA_MODE_META32_HWS:
1273 			return REG_NON;
1274 		case MLX5_XMETA_MODE_META16:
1275 			return REG_C_1;
1276 		case MLX5_XMETA_MODE_META32:
1277 			return REG_C_0;
1278 		}
1279 		break;
1280 	case MLX5_MTR_ID:
1281 		/*
1282 		 * If meter color and meter id share one register, flow match
1283 		 * should use the meter color register for match.
1284 		 */
1285 		if (priv->mtr_reg_share)
1286 			return reg->aso_reg;
1287 		else
1288 			return reg->aso_reg != REG_C_2 ? REG_C_2 :
1289 			       REG_C_3;
1290 	case MLX5_MTR_COLOR:
1291 	case MLX5_ASO_FLOW_HIT:
1292 	case MLX5_ASO_CONNTRACK:
1293 	case MLX5_SAMPLE_ID:
1294 		/* All features use the same REG_C. */
1295 		MLX5_ASSERT(reg->aso_reg != REG_NON);
1296 		return reg->aso_reg;
1297 	case MLX5_COPY_MARK:
1298 		/*
1299 		 * Metadata COPY_MARK register using is in meter suffix sub
1300 		 * flow while with meter. It's safe to share the same register.
1301 		 */
1302 		return reg->aso_reg != REG_C_2 ? REG_C_2 : REG_C_3;
1303 	case MLX5_APP_TAG:
1304 		/*
1305 		 * If meter is enable, it will engage the register for color
1306 		 * match and flow match. If meter color match is not using the
1307 		 * REG_C_2, need to skip the REG_C_x be used by meter color
1308 		 * match.
1309 		 * If meter is disable, free to use all available registers.
1310 		 */
1311 		start_reg = reg->aso_reg != REG_C_2 ? REG_C_2 :
1312 			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
1313 		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
1314 		if (id > (uint32_t)(REG_C_7 - start_reg))
1315 			return rte_flow_error_set(error, EINVAL,
1316 						  RTE_FLOW_ERROR_TYPE_ITEM,
1317 						  NULL, "invalid tag id");
1318 		if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
1319 			return rte_flow_error_set(error, ENOTSUP,
1320 						  RTE_FLOW_ERROR_TYPE_ITEM,
1321 						  NULL, "unsupported tag id");
1322 		/*
1323 		 * This case means meter is using the REG_C_x great than 2.
1324 		 * Take care not to conflict with meter color REG_C_x.
1325 		 * If the available index REG_C_y >= REG_C_x, skip the
1326 		 * color register.
1327 		 */
1328 		if (skip_mtr_reg && priv->sh->flow_mreg_c
1329 		    [id + start_reg - REG_C_0] >= reg->aso_reg) {
1330 			if (id >= (uint32_t)(REG_C_7 - start_reg))
1331 				return rte_flow_error_set(error, EINVAL,
1332 						       RTE_FLOW_ERROR_TYPE_ITEM,
1333 							NULL, "invalid tag id");
1334 			if (priv->sh->flow_mreg_c
1335 			    [id + 1 + start_reg - REG_C_0] != REG_NON)
1336 				return priv->sh->flow_mreg_c
1337 					       [id + 1 + start_reg - REG_C_0];
1338 			return rte_flow_error_set(error, ENOTSUP,
1339 						  RTE_FLOW_ERROR_TYPE_ITEM,
1340 						  NULL, "unsupported tag id");
1341 		}
1342 		return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
1343 	}
1344 	MLX5_ASSERT(false);
1345 	return rte_flow_error_set(error, EINVAL,
1346 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1347 				  NULL, "invalid feature name");
1348 }
1349 
1350 /**
1351  * Check extensive flow metadata register support.
1352  *
1353  * @param dev
1354  *   Pointer to rte_eth_dev structure.
1355  *
1356  * @return
1357  *   True if device supports extensive flow metadata register, otherwise false.
1358  */
1359 bool
1360 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
1361 {
1362 	struct mlx5_priv *priv = dev->data->dev_private;
1363 
1364 	/*
1365 	 * Having available reg_c can be regarded inclusively as supporting
1366 	 * extensive flow metadata register, which could mean,
1367 	 * - metadata register copy action by modify header.
1368 	 * - 16 modify header actions is supported.
1369 	 * - reg_c's are preserved across different domain (FDB and NIC) on
1370 	 *   packet loopback by flow lookup miss.
1371 	 */
1372 	return priv->sh->flow_mreg_c[2] != REG_NON;
1373 }
1374 
1375 /**
1376  * Get the lowest priority.
1377  *
1378  * @param[in] dev
1379  *   Pointer to the Ethernet device structure.
1380  * @param[in] attributes
1381  *   Pointer to device flow rule attributes.
1382  *
1383  * @return
1384  *   The value of lowest priority of flow.
1385  */
1386 uint32_t
1387 mlx5_get_lowest_priority(struct rte_eth_dev *dev,
1388 			  const struct rte_flow_attr *attr)
1389 {
1390 	struct mlx5_priv *priv = dev->data->dev_private;
1391 
1392 	if (!attr->group && !(attr->transfer && priv->fdb_def_rule))
1393 		return priv->sh->flow_max_priority - 2;
1394 	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
1395 }
1396 
1397 /**
1398  * Calculate matcher priority of the flow.
1399  *
1400  * @param[in] dev
1401  *   Pointer to the Ethernet device structure.
1402  * @param[in] attr
1403  *   Pointer to device flow rule attributes.
1404  * @param[in] subpriority
1405  *   The priority based on the items.
1406  * @param[in] external
1407  *   Flow is user flow.
1408  * @return
1409  *   The matcher priority of the flow.
1410  */
1411 uint16_t
1412 mlx5_get_matcher_priority(struct rte_eth_dev *dev,
1413 			  const struct rte_flow_attr *attr,
1414 			  uint32_t subpriority, bool external)
1415 {
1416 	uint16_t priority = (uint16_t)attr->priority;
1417 	struct mlx5_priv *priv = dev->data->dev_private;
1418 
1419 	/* NIC root rules */
1420 	if (!attr->group && !attr->transfer) {
1421 		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1422 			priority = priv->sh->flow_max_priority - 1;
1423 		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
1424 	/* FDB root rules */
1425 	} else if (attr->transfer && (!external || !priv->fdb_def_rule) &&
1426 		   attr->group == 0 &&
1427 		   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
1428 		return (priv->sh->flow_max_priority - 1) * 3;
1429 	}
1430 	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1431 		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
1432 	return priority * 3 + subpriority;
1433 }
1434 
1435 /**
1436  * Verify the @p item specifications (spec, last, mask) are compatible with the
1437  * NIC capabilities.
1438  *
1439  * @param[in] item
1440  *   Item specification.
1441  * @param[in] mask
1442  *   @p item->mask or flow default bit-masks.
1443  * @param[in] nic_mask
1444  *   Bit-masks covering supported fields by the NIC to compare with user mask.
1445  * @param[in] size
1446  *   Bit-masks size in bytes.
1447  * @param[in] range_accepted
1448  *   True if range of values is accepted for specific fields, false otherwise.
1449  * @param[out] error
1450  *   Pointer to error structure.
1451  *
1452  * @return
1453  *   0 on success, a negative errno value otherwise and rte_errno is set.
1454  */
1455 int
1456 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
1457 			  const uint8_t *mask,
1458 			  const uint8_t *nic_mask,
1459 			  unsigned int size,
1460 			  bool range_accepted,
1461 			  struct rte_flow_error *error)
1462 {
1463 	unsigned int i;
1464 
1465 	MLX5_ASSERT(nic_mask);
1466 	for (i = 0; i < size; ++i)
1467 		if ((nic_mask[i] | mask[i]) != nic_mask[i])
1468 			return rte_flow_error_set(error, ENOTSUP,
1469 						  RTE_FLOW_ERROR_TYPE_ITEM,
1470 						  item,
1471 						  "mask enables non supported"
1472 						  " bits");
1473 	if (!item->spec && (item->mask || item->last))
1474 		return rte_flow_error_set(error, EINVAL,
1475 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1476 					  "mask/last without a spec is not"
1477 					  " supported");
1478 	if (item->spec && item->last && !range_accepted) {
1479 		uint8_t spec[size];
1480 		uint8_t last[size];
1481 		unsigned int i;
1482 		int ret;
1483 
1484 		for (i = 0; i < size; ++i) {
1485 			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
1486 			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
1487 		}
1488 		ret = memcmp(spec, last, size);
1489 		if (ret != 0)
1490 			return rte_flow_error_set(error, EINVAL,
1491 						  RTE_FLOW_ERROR_TYPE_ITEM,
1492 						  item,
1493 						  "range is not valid");
1494 	}
1495 	return 0;
1496 }
1497 
1498 /**
1499  * Adjust the hash fields according to the @p flow information.
1500  *
1501  * @param[in] dev_flow.
1502  *   Pointer to the mlx5_flow.
1503  * @param[in] tunnel
1504  *   1 when the hash field is for a tunnel item.
1505  * @param[in] layer_types
1506  *   RTE_ETH_RSS_* types.
1507  * @param[in] hash_fields
1508  *   Item hash fields.
1509  *
1510  * @return
1511  *   The hash fields that should be used.
1512  */
1513 uint64_t
1514 mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
1515 			    int tunnel __rte_unused, uint64_t layer_types,
1516 			    uint64_t hash_fields)
1517 {
1518 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1519 	int rss_request_inner = rss_desc->level >= 2;
1520 
1521 	/* Check RSS hash level for tunnel. */
1522 	if (tunnel && rss_request_inner)
1523 		hash_fields |= IBV_RX_HASH_INNER;
1524 	else if (tunnel || rss_request_inner)
1525 		return 0;
1526 #endif
1527 	/* Check if requested layer matches RSS hash fields. */
1528 	if (!(rss_desc->types & layer_types))
1529 		return 0;
1530 	return hash_fields;
1531 }
1532 
1533 /**
1534  * Lookup and set the ptype in the data Rx part.  A single Ptype can be used,
1535  * if several tunnel rules are used on this queue, the tunnel ptype will be
1536  * cleared.
1537  *
1538  * @param rxq_ctrl
1539  *   Rx queue to update.
1540  */
1541 static void
1542 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
1543 {
1544 	unsigned int i;
1545 	uint32_t tunnel_ptype = 0;
1546 
1547 	/* Look up for the ptype to use. */
1548 	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
1549 		if (!rxq_ctrl->flow_tunnels_n[i])
1550 			continue;
1551 		if (!tunnel_ptype) {
1552 			tunnel_ptype = tunnels_info[i].ptype;
1553 		} else {
1554 			tunnel_ptype = 0;
1555 			break;
1556 		}
1557 	}
1558 	rxq_ctrl->rxq.tunnel = tunnel_ptype;
1559 }
1560 
1561 /**
1562  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
1563  * flow.
1564  *
1565  * @param[in] dev
1566  *   Pointer to the Ethernet device structure.
1567  * @param[in] dev_handle
1568  *   Pointer to device flow handle structure.
1569  */
1570 void
1571 flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
1572 		       struct mlx5_flow_handle *dev_handle)
1573 {
1574 	struct mlx5_priv *priv = dev->data->dev_private;
1575 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
1576 	struct mlx5_ind_table_obj *ind_tbl = NULL;
1577 	unsigned int i;
1578 
1579 	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1580 		struct mlx5_hrxq *hrxq;
1581 
1582 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1583 			      dev_handle->rix_hrxq);
1584 		if (hrxq)
1585 			ind_tbl = hrxq->ind_table;
1586 	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
1587 		struct mlx5_shared_action_rss *shared_rss;
1588 
1589 		shared_rss = mlx5_ipool_get
1590 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
1591 			 dev_handle->rix_srss);
1592 		if (shared_rss)
1593 			ind_tbl = shared_rss->ind_tbl;
1594 	}
1595 	if (!ind_tbl)
1596 		return;
1597 	for (i = 0; i != ind_tbl->queues_n; ++i) {
1598 		int idx = ind_tbl->queues[i];
1599 		struct mlx5_rxq_ctrl *rxq_ctrl;
1600 
1601 		if (mlx5_is_external_rxq(dev, idx))
1602 			continue;
1603 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
1604 		MLX5_ASSERT(rxq_ctrl != NULL);
1605 		if (rxq_ctrl == NULL)
1606 			continue;
1607 		/*
1608 		 * To support metadata register copy on Tx loopback,
1609 		 * this must be always enabled (metadata may arive
1610 		 * from other port - not from local flows only.
1611 		 */
1612 		if (tunnel) {
1613 			unsigned int j;
1614 
1615 			/* Increase the counter matching the flow. */
1616 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
1617 				if ((tunnels_info[j].tunnel &
1618 				     dev_handle->layers) ==
1619 				    tunnels_info[j].tunnel) {
1620 					rxq_ctrl->flow_tunnels_n[j]++;
1621 					break;
1622 				}
1623 			}
1624 			flow_rxq_tunnel_ptype_update(rxq_ctrl);
1625 		}
1626 	}
1627 }
1628 
1629 static void
1630 flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
1631 {
1632 	struct mlx5_priv *priv = dev->data->dev_private;
1633 	struct mlx5_rxq_ctrl *rxq_ctrl;
1634 	uint16_t port_id;
1635 
1636 	if (priv->sh->shared_mark_enabled)
1637 		return;
1638 	if (priv->master || priv->representor) {
1639 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
1640 			struct mlx5_priv *opriv =
1641 				rte_eth_devices[port_id].data->dev_private;
1642 
1643 			if (!opriv ||
1644 			    opriv->sh != priv->sh ||
1645 			    opriv->domain_id != priv->domain_id ||
1646 			    opriv->mark_enabled)
1647 				continue;
1648 			LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) {
1649 				rxq_ctrl->rxq.mark = 1;
1650 			}
1651 			opriv->mark_enabled = 1;
1652 		}
1653 	} else {
1654 		LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1655 			rxq_ctrl->rxq.mark = 1;
1656 		}
1657 		priv->mark_enabled = 1;
1658 	}
1659 	priv->sh->shared_mark_enabled = 1;
1660 }
1661 
1662 /**
1663  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
1664  *
1665  * @param[in] dev
1666  *   Pointer to the Ethernet device structure.
1667  * @param[in] flow
1668  *   Pointer to flow structure.
1669  */
1670 static void
1671 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
1672 {
1673 	struct mlx5_priv *priv = dev->data->dev_private;
1674 	uint32_t handle_idx;
1675 	struct mlx5_flow_handle *dev_handle;
1676 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1677 
1678 	MLX5_ASSERT(wks);
1679 	if (wks->mark)
1680 		flow_rxq_mark_flag_set(dev);
1681 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1682 		       handle_idx, dev_handle, next)
1683 		flow_drv_rxq_flags_set(dev, dev_handle);
1684 }
1685 
1686 /**
1687  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
1688  * device flow if no other flow uses it with the same kind of request.
1689  *
1690  * @param dev
1691  *   Pointer to Ethernet device.
1692  * @param[in] dev_handle
1693  *   Pointer to the device flow handle structure.
1694  */
1695 static void
1696 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
1697 			struct mlx5_flow_handle *dev_handle)
1698 {
1699 	struct mlx5_priv *priv = dev->data->dev_private;
1700 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
1701 	struct mlx5_ind_table_obj *ind_tbl = NULL;
1702 	unsigned int i;
1703 
1704 	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1705 		struct mlx5_hrxq *hrxq;
1706 
1707 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1708 			      dev_handle->rix_hrxq);
1709 		if (hrxq)
1710 			ind_tbl = hrxq->ind_table;
1711 	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
1712 		struct mlx5_shared_action_rss *shared_rss;
1713 
1714 		shared_rss = mlx5_ipool_get
1715 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
1716 			 dev_handle->rix_srss);
1717 		if (shared_rss)
1718 			ind_tbl = shared_rss->ind_tbl;
1719 	}
1720 	if (!ind_tbl)
1721 		return;
1722 	MLX5_ASSERT(dev->data->dev_started);
1723 	for (i = 0; i != ind_tbl->queues_n; ++i) {
1724 		int idx = ind_tbl->queues[i];
1725 		struct mlx5_rxq_ctrl *rxq_ctrl;
1726 
1727 		if (mlx5_is_external_rxq(dev, idx))
1728 			continue;
1729 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
1730 		MLX5_ASSERT(rxq_ctrl != NULL);
1731 		if (rxq_ctrl == NULL)
1732 			continue;
1733 		if (tunnel) {
1734 			unsigned int j;
1735 
1736 			/* Decrease the counter matching the flow. */
1737 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
1738 				if ((tunnels_info[j].tunnel &
1739 				     dev_handle->layers) ==
1740 				    tunnels_info[j].tunnel) {
1741 					rxq_ctrl->flow_tunnels_n[j]--;
1742 					break;
1743 				}
1744 			}
1745 			flow_rxq_tunnel_ptype_update(rxq_ctrl);
1746 		}
1747 	}
1748 }
1749 
1750 /**
1751  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
1752  * @p flow if no other flow uses it with the same kind of request.
1753  *
1754  * @param dev
1755  *   Pointer to Ethernet device.
1756  * @param[in] flow
1757  *   Pointer to the flow.
1758  */
1759 static void
1760 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
1761 {
1762 	struct mlx5_priv *priv = dev->data->dev_private;
1763 	uint32_t handle_idx;
1764 	struct mlx5_flow_handle *dev_handle;
1765 
1766 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1767 		       handle_idx, dev_handle, next)
1768 		flow_drv_rxq_flags_trim(dev, dev_handle);
1769 }
1770 
1771 /**
1772  * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
1773  *
1774  * @param dev
1775  *   Pointer to Ethernet device.
1776  */
1777 static void
1778 flow_rxq_flags_clear(struct rte_eth_dev *dev)
1779 {
1780 	struct mlx5_priv *priv = dev->data->dev_private;
1781 	unsigned int i;
1782 
1783 	for (i = 0; i != priv->rxqs_n; ++i) {
1784 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
1785 		unsigned int j;
1786 
1787 		if (rxq == NULL || rxq->ctrl == NULL)
1788 			continue;
1789 		rxq->ctrl->rxq.mark = 0;
1790 		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
1791 			rxq->ctrl->flow_tunnels_n[j] = 0;
1792 		rxq->ctrl->rxq.tunnel = 0;
1793 	}
1794 	priv->mark_enabled = 0;
1795 	priv->sh->shared_mark_enabled = 0;
1796 }
1797 
1798 static uint64_t mlx5_restore_info_dynflag;
1799 
1800 int
1801 mlx5_flow_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
1802 {
1803 	struct mlx5_priv *priv = dev->data->dev_private;
1804 	uint64_t supported = 0;
1805 
1806 	if (!is_tunnel_offload_active(dev)) {
1807 		supported |= RTE_ETH_RX_METADATA_USER_FLAG;
1808 		supported |= RTE_ETH_RX_METADATA_USER_MARK;
1809 		if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0) {
1810 			DRV_LOG(DEBUG,
1811 				"tunnel offload was not activated, consider setting dv_xmeta_en=%d",
1812 				MLX5_XMETA_MODE_MISS_INFO);
1813 		}
1814 	} else {
1815 		supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
1816 		if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
1817 				mlx5_restore_info_dynflag == 0)
1818 			mlx5_restore_info_dynflag = rte_flow_restore_info_dynflag();
1819 	}
1820 
1821 	if (((*features & supported) & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0)
1822 		priv->tunnel_enabled = 1;
1823 	else
1824 		priv->tunnel_enabled = 0;
1825 
1826 	*features &= supported;
1827 	return 0;
1828 }
1829 
1830 /**
1831  * Set the Rx queue dynamic metadata (mask and offset) for a flow
1832  *
1833  * @param[in] dev
1834  *   Pointer to the Ethernet device structure.
1835  */
1836 void
1837 mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev)
1838 {
1839 	struct mlx5_priv *priv = dev->data->dev_private;
1840 	uint64_t mark_flag = RTE_MBUF_F_RX_FDIR_ID;
1841 	unsigned int i;
1842 
1843 	if (priv->tunnel_enabled)
1844 		mark_flag |= mlx5_restore_info_dynflag;
1845 
1846 	for (i = 0; i != priv->rxqs_n; ++i) {
1847 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
1848 		struct mlx5_rxq_data *data;
1849 
1850 		if (rxq == NULL || rxq->ctrl == NULL)
1851 			continue;
1852 		data = &rxq->ctrl->rxq;
1853 		if (!rte_flow_dynf_metadata_avail()) {
1854 			data->dynf_meta = 0;
1855 			data->flow_meta_mask = 0;
1856 			data->flow_meta_offset = -1;
1857 			data->flow_meta_port_mask = 0;
1858 		} else {
1859 			data->dynf_meta = 1;
1860 			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
1861 			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
1862 			data->flow_meta_port_mask = priv->sh->dv_meta_mask;
1863 		}
1864 		data->mark_flag = mark_flag;
1865 	}
1866 }
1867 
1868 /*
1869  * return a pointer to the desired action in the list of actions.
1870  *
1871  * @param[in] actions
1872  *   The list of actions to search the action in.
1873  * @param[in] action
1874  *   The action to find.
1875  *
1876  * @return
1877  *   Pointer to the action in the list, if found. NULL otherwise.
1878  */
1879 const struct rte_flow_action *
1880 mlx5_flow_find_action(const struct rte_flow_action *actions,
1881 		      enum rte_flow_action_type action)
1882 {
1883 	if (actions == NULL)
1884 		return NULL;
1885 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
1886 		if (actions->type == action)
1887 			return actions;
1888 	return NULL;
1889 }
1890 
1891 /*
1892  * Validate the flag action.
1893  *
1894  * @param[in] action_flags
1895  *   Bit-fields that holds the actions detected until now.
1896  * @param[in] attr
1897  *   Attributes of flow that includes this action.
1898  * @param[out] error
1899  *   Pointer to error structure.
1900  *
1901  * @return
1902  *   0 on success, a negative errno value otherwise and rte_errno is set.
1903  */
1904 int
1905 mlx5_flow_validate_action_flag(uint64_t action_flags,
1906 			       const struct rte_flow_attr *attr,
1907 			       struct rte_flow_error *error)
1908 {
1909 	if (action_flags & MLX5_FLOW_ACTION_MARK)
1910 		return rte_flow_error_set(error, EINVAL,
1911 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1912 					  "can't mark and flag in same flow");
1913 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
1914 		return rte_flow_error_set(error, EINVAL,
1915 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1916 					  "can't have 2 flag"
1917 					  " actions in same flow");
1918 	if (attr->egress)
1919 		return rte_flow_error_set(error, ENOTSUP,
1920 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1921 					  "flag action not supported for "
1922 					  "egress");
1923 	return 0;
1924 }
1925 
1926 /*
1927  * Validate the mark action.
1928  *
1929  * @param[in] action
1930  *   Pointer to the queue action.
1931  * @param[in] action_flags
1932  *   Bit-fields that holds the actions detected until now.
1933  * @param[in] attr
1934  *   Attributes of flow that includes this action.
1935  * @param[out] error
1936  *   Pointer to error structure.
1937  *
1938  * @return
1939  *   0 on success, a negative errno value otherwise and rte_errno is set.
1940  */
1941 int
1942 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
1943 			       uint64_t action_flags,
1944 			       const struct rte_flow_attr *attr,
1945 			       struct rte_flow_error *error)
1946 {
1947 	const struct rte_flow_action_mark *mark = action->conf;
1948 
1949 	if (!mark)
1950 		return rte_flow_error_set(error, EINVAL,
1951 					  RTE_FLOW_ERROR_TYPE_ACTION,
1952 					  action,
1953 					  "configuration cannot be null");
1954 	if (mark->id >= MLX5_FLOW_MARK_MAX)
1955 		return rte_flow_error_set(error, EINVAL,
1956 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1957 					  &mark->id,
1958 					  "mark id must in 0 <= id < "
1959 					  RTE_STR(MLX5_FLOW_MARK_MAX));
1960 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
1961 		return rte_flow_error_set(error, EINVAL,
1962 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1963 					  "can't flag and mark in same flow");
1964 	if (action_flags & MLX5_FLOW_ACTION_MARK)
1965 		return rte_flow_error_set(error, EINVAL,
1966 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1967 					  "can't have 2 mark actions in same"
1968 					  " flow");
1969 	if (attr->egress)
1970 		return rte_flow_error_set(error, ENOTSUP,
1971 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1972 					  "mark action not supported for "
1973 					  "egress");
1974 	return 0;
1975 }
1976 
1977 /*
1978  * Validate the drop action.
1979  *
1980  * @param[in] dev
1981  *   Pointer to the Ethernet device structure.
1982  * @param[in] is_root
1983  *   True if flow is validated for root table. False otherwise.
1984  * @param[in] attr
1985  *   Attributes of flow that includes this action.
1986  * @param[out] error
1987  *   Pointer to error structure.
1988  *
1989  * @return
1990  *   0 on success, a negative errno value otherwise and rte_errno is set.
1991  */
1992 int
1993 mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
1994 			       bool is_root,
1995 			       const struct rte_flow_attr *attr,
1996 			       struct rte_flow_error *error)
1997 {
1998 	struct mlx5_priv *priv = dev->data->dev_private;
1999 
2000 	if (priv->sh->config.dv_flow_en == 0 && attr->egress)
2001 		return rte_flow_error_set(error, ENOTSUP,
2002 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2003 					  "drop action not supported for "
2004 					  "egress");
2005 	if (priv->sh->config.dv_flow_en == 1 && is_root && (attr->egress || attr->transfer) &&
2006 	    !priv->sh->dr_root_drop_action_en) {
2007 		return rte_flow_error_set(error, ENOTSUP,
2008 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2009 					  "drop action not supported for "
2010 					  "egress and transfer on group 0");
2011 	}
2012 	return 0;
2013 }
2014 
2015 /*
2016  * Validate the queue action.
2017  *
2018  * @param[in] action
2019  *   Pointer to the queue action.
2020  * @param[in] action_flags
2021  *   Bit-fields that holds the actions detected until now.
2022  * @param[in] dev
2023  *   Pointer to the Ethernet device structure.
2024  * @param[in] attr
2025  *   Attributes of flow that includes this action.
2026  * @param[out] error
2027  *   Pointer to error structure.
2028  *
2029  * @return
2030  *   0 on success, a negative errno value otherwise and rte_errno is set.
2031  */
2032 int
2033 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
2034 				uint64_t action_flags,
2035 				struct rte_eth_dev *dev,
2036 				const struct rte_flow_attr *attr,
2037 				struct rte_flow_error *error)
2038 {
2039 	struct mlx5_priv *priv = dev->data->dev_private;
2040 	const struct rte_flow_action_queue *queue = action->conf;
2041 
2042 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2043 		return rte_flow_error_set(error, EINVAL,
2044 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2045 					  "can't have 2 fate actions in"
2046 					  " same flow");
2047 	if (attr->egress)
2048 		return rte_flow_error_set(error, ENOTSUP,
2049 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2050 					  "queue action not supported for egress.");
2051 	if (mlx5_is_external_rxq(dev, queue->index))
2052 		return 0;
2053 	if (!priv->rxqs_n)
2054 		return rte_flow_error_set(error, EINVAL,
2055 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2056 					  NULL, "No Rx queues configured");
2057 	if (queue->index >= priv->rxqs_n)
2058 		return rte_flow_error_set(error, EINVAL,
2059 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2060 					  &queue->index,
2061 					  "queue index out of range");
2062 	if (mlx5_rxq_get(dev, queue->index) == NULL)
2063 		return rte_flow_error_set(error, EINVAL,
2064 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2065 					  &queue->index,
2066 					  "queue is not configured");
2067 	return 0;
2068 }
2069 
2070 /**
2071  * Validate queue numbers for device RSS.
2072  *
2073  * @param[in] dev
2074  *   Configured device.
2075  * @param[in] queues
2076  *   Array of queue numbers.
2077  * @param[in] queues_n
2078  *   Size of the @p queues array.
2079  * @param[out] error
2080  *   On error, filled with a textual error description.
2081  * @param[out] queue_idx
2082  *   On error, filled with an offending queue index in @p queues array.
2083  *
2084  * @return
2085  *   0 on success, a negative errno code on error.
2086  */
2087 static int
2088 mlx5_validate_rss_queues(struct rte_eth_dev *dev,
2089 			 const uint16_t *queues, uint32_t queues_n,
2090 			 const char **error, uint32_t *queue_idx)
2091 {
2092 	const struct mlx5_priv *priv = dev->data->dev_private;
2093 	bool is_hairpin = false;
2094 	bool is_ext_rss = false;
2095 	uint32_t i;
2096 
2097 	for (i = 0; i != queues_n; ++i) {
2098 		struct mlx5_rxq_ctrl *rxq_ctrl;
2099 
2100 		if (mlx5_is_external_rxq(dev, queues[0])) {
2101 			is_ext_rss = true;
2102 			continue;
2103 		}
2104 		if (is_ext_rss) {
2105 			*error = "Combining external and regular RSS queues is not supported";
2106 			*queue_idx = i;
2107 			return -ENOTSUP;
2108 		}
2109 		if (queues[i] >= priv->rxqs_n) {
2110 			*error = "queue index out of range";
2111 			*queue_idx = i;
2112 			return -EINVAL;
2113 		}
2114 		rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
2115 		if (rxq_ctrl == NULL) {
2116 			*error =  "queue is not configured";
2117 			*queue_idx = i;
2118 			return -EINVAL;
2119 		}
2120 		if (i == 0 && rxq_ctrl->is_hairpin)
2121 			is_hairpin = true;
2122 		if (is_hairpin != rxq_ctrl->is_hairpin) {
2123 			*error = "combining hairpin and regular RSS queues is not supported";
2124 			*queue_idx = i;
2125 			return -ENOTSUP;
2126 		}
2127 	}
2128 	return 0;
2129 }
2130 
2131 /*
2132  * Validate the rss action.
2133  *
2134  * @param[in] dev
2135  *   Pointer to the Ethernet device structure.
2136  * @param[in] action
2137  *   Pointer to the queue action.
2138  * @param[out] error
2139  *   Pointer to error structure.
2140  *
2141  * @return
2142  *   0 on success, a negative errno value otherwise and rte_errno is set.
2143  */
2144 int
2145 mlx5_validate_action_rss(struct rte_eth_dev *dev,
2146 			 const struct rte_flow_action *action,
2147 			 struct rte_flow_error *error)
2148 {
2149 	struct mlx5_priv *priv = dev->data->dev_private;
2150 	const struct rte_flow_action_rss *rss = action->conf;
2151 	int ret;
2152 	const char *message;
2153 	uint32_t queue_idx;
2154 
2155 	if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
2156 		DRV_LOG(WARNING, "port %u symmetric RSS supported with SORT",
2157 			dev->data->port_id);
2158 	} else if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
2159 		   rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
2160 		return rte_flow_error_set(error, ENOTSUP,
2161 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2162 					  &rss->func,
2163 					  "RSS hash function not supported");
2164 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2165 	if (rss->level > 2)
2166 #else
2167 	if (rss->level > 1)
2168 #endif
2169 		return rte_flow_error_set(error, ENOTSUP,
2170 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2171 					  &rss->level,
2172 					  "tunnel RSS is not supported");
2173 	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
2174 	if (rss->key_len == 0 && rss->key != NULL)
2175 		return rte_flow_error_set(error, ENOTSUP,
2176 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2177 					  &rss->key_len,
2178 					  "RSS hash key length 0");
2179 	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
2180 		return rte_flow_error_set(error, ENOTSUP,
2181 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2182 					  &rss->key_len,
2183 					  "RSS hash key too small");
2184 	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
2185 		return rte_flow_error_set(error, ENOTSUP,
2186 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2187 					  &rss->key_len,
2188 					  "RSS hash key too large");
2189 	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
2190 		return rte_flow_error_set(error, ENOTSUP,
2191 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2192 					  &rss->queue_num,
2193 					  "number of queues too large");
2194 	if (rss->types & MLX5_RSS_HF_MASK)
2195 		return rte_flow_error_set(error, ENOTSUP,
2196 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2197 					  &rss->types,
2198 					  "some RSS protocols are not"
2199 					  " supported");
2200 	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
2201 	    !(rss->types & RTE_ETH_RSS_IP))
2202 		return rte_flow_error_set(error, EINVAL,
2203 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2204 					  "L3 partial RSS requested but L3 RSS"
2205 					  " type not specified");
2206 	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
2207 	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
2208 		return rte_flow_error_set(error, EINVAL,
2209 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2210 					  "L4 partial RSS requested but L4 RSS"
2211 					  " type not specified");
2212 	if (!priv->rxqs_n && priv->ext_rxqs == NULL)
2213 		return rte_flow_error_set(error, EINVAL,
2214 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2215 					  NULL, "No Rx queues configured");
2216 	if (!rss->queue_num)
2217 		return rte_flow_error_set(error, EINVAL,
2218 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2219 					  NULL, "No queues configured");
2220 	ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
2221 				       &message, &queue_idx);
2222 	if (ret != 0) {
2223 		return rte_flow_error_set(error, -ret,
2224 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2225 					  &rss->queue[queue_idx], message);
2226 	}
2227 	return 0;
2228 }
2229 
2230 /*
2231  * Validate the rss action.
2232  *
2233  * @param[in] action
2234  *   Pointer to the queue action.
2235  * @param[in] action_flags
2236  *   Bit-fields that holds the actions detected until now.
2237  * @param[in] dev
2238  *   Pointer to the Ethernet device structure.
2239  * @param[in] attr
2240  *   Attributes of flow that includes this action.
2241  * @param[in] item_flags
2242  *   Items that were detected.
2243  * @param[out] error
2244  *   Pointer to error structure.
2245  *
2246  * @return
2247  *   0 on success, a negative errno value otherwise and rte_errno is set.
2248  */
2249 int
2250 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
2251 			      uint64_t action_flags,
2252 			      struct rte_eth_dev *dev,
2253 			      const struct rte_flow_attr *attr,
2254 			      uint64_t item_flags,
2255 			      struct rte_flow_error *error)
2256 {
2257 	const struct rte_flow_action_rss *rss = action->conf;
2258 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2259 	int ret;
2260 
2261 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2262 		return rte_flow_error_set(error, EINVAL,
2263 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2264 					  "can't have 2 fate actions"
2265 					  " in same flow");
2266 	ret = mlx5_validate_action_rss(dev, action, error);
2267 	if (ret)
2268 		return ret;
2269 	if (attr->egress)
2270 		return rte_flow_error_set(error, ENOTSUP,
2271 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2272 					  "rss action not supported for "
2273 					  "egress");
2274 	if (rss->level > 1 && !tunnel)
2275 		return rte_flow_error_set(error, EINVAL,
2276 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2277 					  "inner RSS is not supported for "
2278 					  "non-tunnel flows");
2279 	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
2280 	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
2281 		return rte_flow_error_set(error, EINVAL,
2282 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2283 					  "RSS on eCPRI is not supported now");
2284 	}
2285 	if ((item_flags & MLX5_FLOW_LAYER_MPLS) &&
2286 	    !(item_flags &
2287 	      (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) &&
2288 	    rss->level > 1)
2289 		return rte_flow_error_set(error, EINVAL,
2290 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2291 					  "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern");
2292 	return 0;
2293 }
2294 
2295 /*
2296  * Validate the default miss action.
2297  *
2298  * @param[in] action_flags
2299  *   Bit-fields that holds the actions detected until now.
2300  * @param[out] error
2301  *   Pointer to error structure.
2302  *
2303  * @return
2304  *   0 on success, a negative errno value otherwise and rte_errno is set.
2305  */
2306 int
2307 mlx5_flow_validate_action_default_miss(uint64_t action_flags,
2308 				const struct rte_flow_attr *attr,
2309 				struct rte_flow_error *error)
2310 {
2311 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
2312 		return rte_flow_error_set(error, EINVAL,
2313 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2314 					  "can't have 2 fate actions in"
2315 					  " same flow");
2316 	if (attr->egress)
2317 		return rte_flow_error_set(error, ENOTSUP,
2318 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2319 					  "default miss action not supported "
2320 					  "for egress");
2321 	if (attr->group)
2322 		return rte_flow_error_set(error, ENOTSUP,
2323 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2324 					  "only group 0 is supported");
2325 	if (attr->transfer)
2326 		return rte_flow_error_set(error, ENOTSUP,
2327 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2328 					  NULL, "transfer is not supported");
2329 	return 0;
2330 }
2331 
2332 /*
2333  * Validate the count action.
2334  *
2335  * @param[in] dev
2336  *   Pointer to the Ethernet device structure.
2337  * @param[in] attr
2338  *   Attributes of flow that includes this action.
2339  * @param[out] error
2340  *   Pointer to error structure.
2341  *
2342  * @return
2343  *   0 on success, a negative errno value otherwise and rte_errno is set.
2344  */
2345 int
2346 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
2347 				const struct rte_flow_attr *attr,
2348 				struct rte_flow_error *error)
2349 {
2350 	if (attr->egress)
2351 		return rte_flow_error_set(error, ENOTSUP,
2352 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2353 					  "count action not supported for "
2354 					  "egress");
2355 	return 0;
2356 }
2357 
2358 /*
2359  * Validate the ASO CT action.
2360  *
2361  * @param[in] dev
2362  *   Pointer to the Ethernet device structure.
2363  * @param[in] conntrack
2364  *   Pointer to the CT action profile.
2365  * @param[out] error
2366  *   Pointer to error structure.
2367  *
2368  * @return
2369  *   0 on success, a negative errno value otherwise and rte_errno is set.
2370  */
2371 int
2372 mlx5_validate_action_ct(struct rte_eth_dev *dev,
2373 			const struct rte_flow_action_conntrack *conntrack,
2374 			struct rte_flow_error *error)
2375 {
2376 	RTE_SET_USED(dev);
2377 
2378 	if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT)
2379 		return rte_flow_error_set(error, EINVAL,
2380 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2381 					  "Invalid CT state");
2382 	if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST)
2383 		return rte_flow_error_set(error, EINVAL,
2384 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2385 					  "Invalid last TCP packet flag");
2386 	return 0;
2387 }
2388 
2389 /**
2390  * Validate the level value for modify field action.
2391  *
2392  * @param[in] data
2393  *   Pointer to the rte_flow_field_data structure either src or dst.
2394  * @param[out] error
2395  *   Pointer to error structure.
2396  *
2397  * @return
2398  *   0 on success, a negative errno value otherwise and rte_errno is set.
2399  */
2400 int
2401 flow_validate_modify_field_level(const struct rte_flow_field_data *data,
2402 				 struct rte_flow_error *error)
2403 {
2404 	if (data->level == 0 || data->field == RTE_FLOW_FIELD_FLEX_ITEM)
2405 		return 0;
2406 	if (data->field != RTE_FLOW_FIELD_TAG &&
2407 	    data->field != (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
2408 		if (data->level > 1)
2409 			return rte_flow_error_set(error, ENOTSUP,
2410 						  RTE_FLOW_ERROR_TYPE_ACTION,
2411 						  NULL,
2412 						  "inner header fields modification is not supported");
2413 		return 0;
2414 	}
2415 	if (data->tag_index != 0)
2416 		return rte_flow_error_set(error, EINVAL,
2417 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418 					  "tag array can be provided using 'level' or 'tag_index' fields, not both");
2419 	/*
2420 	 * The tag array for RTE_FLOW_FIELD_TAG type is provided using
2421 	 * 'tag_index' field. In old API, it was provided using 'level' field
2422 	 * and it is still supported for backwards compatibility.
2423 	 */
2424 	DRV_LOG(DEBUG, "tag array provided in 'level' field instead of 'tag_index' field.");
2425 	return 0;
2426 }
2427 
2428 /**
2429  * Validate ICMP6 item.
2430  *
2431  * @param[in] item
2432  *   Item specification.
2433  * @param[in] item_flags
2434  *   Bit-fields that holds the items detected until now.
2435  * @param[in] ext_vlan_sup
2436  *   Whether extended VLAN features are supported or not.
2437  * @param[out] error
2438  *   Pointer to error structure.
2439  *
2440  * @return
2441  *   0 on success, a negative errno value otherwise and rte_errno is set.
2442  */
2443 int
2444 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
2445 			       uint64_t item_flags,
2446 			       uint8_t target_protocol,
2447 			       struct rte_flow_error *error)
2448 {
2449 	const struct rte_flow_item_icmp6 *mask = item->mask;
2450 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2451 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2452 				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2453 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2454 				      MLX5_FLOW_LAYER_OUTER_L4;
2455 	int ret;
2456 
2457 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
2458 		return rte_flow_error_set(error, EINVAL,
2459 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2460 					  "protocol filtering not compatible"
2461 					  " with ICMP6 layer");
2462 	if (!(item_flags & l3m))
2463 		return rte_flow_error_set(error, EINVAL,
2464 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2465 					  "IPv6 is mandatory to filter on"
2466 					  " ICMP6");
2467 	if (item_flags & l4m)
2468 		return rte_flow_error_set(error, EINVAL,
2469 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2470 					  "multiple L4 layers not supported");
2471 	if (!mask)
2472 		mask = &rte_flow_item_icmp6_mask;
2473 	ret = mlx5_flow_item_acceptable
2474 		(item, (const uint8_t *)mask,
2475 		 (const uint8_t *)&rte_flow_item_icmp6_mask,
2476 		 sizeof(struct rte_flow_item_icmp6),
2477 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2478 	if (ret < 0)
2479 		return ret;
2480 	return 0;
2481 }
2482 
2483 /**
2484  * Validate ICMP6 echo request/reply item.
2485  *
2486  * @param[in] item
2487  *   Item specification.
2488  * @param[in] item_flags
2489  *   Bit-fields that holds the items detected until now.
2490  * @param[in] ext_vlan_sup
2491  *   Whether extended VLAN features are supported or not.
2492  * @param[out] error
2493  *   Pointer to error structure.
2494  *
2495  * @return
2496  *   0 on success, a negative errno value otherwise and rte_errno is set.
2497  */
2498 int
2499 mlx5_flow_validate_item_icmp6_echo(const struct rte_flow_item *item,
2500 				   uint64_t item_flags,
2501 				   uint8_t target_protocol,
2502 				   struct rte_flow_error *error)
2503 {
2504 	const struct rte_flow_item_icmp6_echo *mask = item->mask;
2505 	const struct rte_flow_item_icmp6_echo nic_mask = {
2506 		.hdr.base.type = 0xff,
2507 		.hdr.base.code = 0xff,
2508 		.hdr.identifier = RTE_BE16(0xffff),
2509 		.hdr.sequence = RTE_BE16(0xffff),
2510 	};
2511 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2512 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2513 				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2514 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2515 				      MLX5_FLOW_LAYER_OUTER_L4;
2516 	int ret;
2517 
2518 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
2519 		return rte_flow_error_set(error, EINVAL,
2520 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2521 					  "protocol filtering not compatible"
2522 					  " with ICMP6 layer");
2523 	if (!(item_flags & l3m))
2524 		return rte_flow_error_set(error, EINVAL,
2525 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2526 					  "IPv6 is mandatory to filter on"
2527 					  " ICMP6");
2528 	if (item_flags & l4m)
2529 		return rte_flow_error_set(error, EINVAL,
2530 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2531 					  "multiple L4 layers not supported");
2532 	if (!mask)
2533 		mask = &nic_mask;
2534 	ret = mlx5_flow_item_acceptable
2535 		(item, (const uint8_t *)mask,
2536 		 (const uint8_t *)&nic_mask,
2537 		 sizeof(struct rte_flow_item_icmp6_echo),
2538 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2539 	if (ret < 0)
2540 		return ret;
2541 	return 0;
2542 }
2543 
2544 /**
2545  * Validate ICMP item.
2546  *
2547  * @param[in] item
2548  *   Item specification.
2549  * @param[in] item_flags
2550  *   Bit-fields that holds the items detected until now.
2551  * @param[out] error
2552  *   Pointer to error structure.
2553  *
2554  * @return
2555  *   0 on success, a negative errno value otherwise and rte_errno is set.
2556  */
2557 int
2558 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
2559 			     uint64_t item_flags,
2560 			     uint8_t target_protocol,
2561 			     struct rte_flow_error *error)
2562 {
2563 	const struct rte_flow_item_icmp *mask = item->mask;
2564 	const struct rte_flow_item_icmp nic_mask = {
2565 		.hdr.icmp_type = 0xff,
2566 		.hdr.icmp_code = 0xff,
2567 		.hdr.icmp_ident = RTE_BE16(0xffff),
2568 		.hdr.icmp_seq_nb = RTE_BE16(0xffff),
2569 	};
2570 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2571 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2572 				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2573 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2574 				      MLX5_FLOW_LAYER_OUTER_L4;
2575 	int ret;
2576 
2577 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
2578 		return rte_flow_error_set(error, EINVAL,
2579 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2580 					  "protocol filtering not compatible"
2581 					  " with ICMP layer");
2582 	if (!(item_flags & l3m))
2583 		return rte_flow_error_set(error, EINVAL,
2584 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2585 					  "IPv4 is mandatory to filter"
2586 					  " on ICMP");
2587 	if (item_flags & l4m)
2588 		return rte_flow_error_set(error, EINVAL,
2589 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2590 					  "multiple L4 layers not supported");
2591 	if (!mask)
2592 		mask = &nic_mask;
2593 	ret = mlx5_flow_item_acceptable
2594 		(item, (const uint8_t *)mask,
2595 		 (const uint8_t *)&nic_mask,
2596 		 sizeof(struct rte_flow_item_icmp),
2597 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2598 	if (ret < 0)
2599 		return ret;
2600 	return 0;
2601 }
2602 
2603 /**
2604  * Validate Ethernet item.
2605  *
2606  * @param[in] item
2607  *   Item specification.
2608  * @param[in] item_flags
2609  *   Bit-fields that holds the items detected until now.
2610  * @param[out] error
2611  *   Pointer to error structure.
2612  *
2613  * @return
2614  *   0 on success, a negative errno value otherwise and rte_errno is set.
2615  */
2616 int
2617 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
2618 			    uint64_t item_flags, bool ext_vlan_sup,
2619 			    struct rte_flow_error *error)
2620 {
2621 	const struct rte_flow_item_eth *mask = item->mask;
2622 	const struct rte_flow_item_eth nic_mask = {
2623 		.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2624 		.hdr.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2625 		.hdr.ether_type = RTE_BE16(0xffff),
2626 		.has_vlan = ext_vlan_sup ? 1 : 0,
2627 	};
2628 	int ret;
2629 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2630 	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
2631 				       MLX5_FLOW_LAYER_OUTER_L2;
2632 
2633 	if (item_flags & ethm)
2634 		return rte_flow_error_set(error, ENOTSUP,
2635 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2636 					  "multiple L2 layers not supported");
2637 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
2638 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
2639 		return rte_flow_error_set(error, EINVAL,
2640 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2641 					  "L2 layer should not follow "
2642 					  "L3 layers");
2643 	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
2644 	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
2645 		return rte_flow_error_set(error, EINVAL,
2646 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2647 					  "L2 layer should not follow VLAN");
2648 	if (item_flags & MLX5_FLOW_LAYER_GTP)
2649 		return rte_flow_error_set(error, EINVAL,
2650 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2651 					  "L2 layer should not follow GTP");
2652 	if (!mask)
2653 		mask = &rte_flow_item_eth_mask;
2654 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2655 					(const uint8_t *)&nic_mask,
2656 					sizeof(struct rte_flow_item_eth),
2657 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2658 	return ret;
2659 }
2660 
2661 /**
2662  * Validate VLAN item.
2663  *
2664  * @param[in] item
2665  *   Item specification.
2666  * @param[in] item_flags
2667  *   Bit-fields that holds the items detected until now.
2668  * @param[in] dev
2669  *   Ethernet device flow is being created on.
2670  * @param[out] error
2671  *   Pointer to error structure.
2672  *
2673  * @return
2674  *   0 on success, a negative errno value otherwise and rte_errno is set.
2675  */
2676 int
2677 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
2678 			     uint64_t item_flags,
2679 			     struct rte_eth_dev *dev,
2680 			     struct rte_flow_error *error)
2681 {
2682 	const struct rte_flow_item_vlan *spec = item->spec;
2683 	const struct rte_flow_item_vlan *mask = item->mask;
2684 	const struct rte_flow_item_vlan nic_mask = {
2685 		.hdr.vlan_tci = RTE_BE16(UINT16_MAX),
2686 		.hdr.eth_proto = RTE_BE16(UINT16_MAX),
2687 	};
2688 	uint16_t vlan_tag = 0;
2689 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2690 	int ret;
2691 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2692 					MLX5_FLOW_LAYER_INNER_L4) :
2693 				       (MLX5_FLOW_LAYER_OUTER_L3 |
2694 					MLX5_FLOW_LAYER_OUTER_L4);
2695 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2696 					MLX5_FLOW_LAYER_OUTER_VLAN;
2697 
2698 	if (item_flags & vlanm)
2699 		return rte_flow_error_set(error, EINVAL,
2700 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2701 					  "multiple VLAN layers not supported");
2702 	else if ((item_flags & l34m) != 0)
2703 		return rte_flow_error_set(error, EINVAL,
2704 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2705 					  "VLAN cannot follow L3/L4 layer");
2706 	if (!mask)
2707 		mask = &rte_flow_item_vlan_mask;
2708 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2709 					(const uint8_t *)&nic_mask,
2710 					sizeof(struct rte_flow_item_vlan),
2711 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2712 	if (ret)
2713 		return ret;
2714 	if (!tunnel && mask->hdr.vlan_tci != RTE_BE16(0x0fff)) {
2715 		struct mlx5_priv *priv = dev->data->dev_private;
2716 
2717 		if (priv->vmwa_context) {
2718 			/*
2719 			 * Non-NULL context means we have a virtual machine
2720 			 * and SR-IOV enabled, we have to create VLAN interface
2721 			 * to make hypervisor to setup E-Switch vport
2722 			 * context correctly. We avoid creating the multiple
2723 			 * VLAN interfaces, so we cannot support VLAN tag mask.
2724 			 */
2725 			return rte_flow_error_set(error, EINVAL,
2726 						  RTE_FLOW_ERROR_TYPE_ITEM,
2727 						  item,
2728 						  "VLAN tag mask is not"
2729 						  " supported in virtual"
2730 						  " environment");
2731 		}
2732 	}
2733 	if (spec) {
2734 		vlan_tag = spec->hdr.vlan_tci;
2735 		vlan_tag &= mask->hdr.vlan_tci;
2736 	}
2737 	/*
2738 	 * From verbs perspective an empty VLAN is equivalent
2739 	 * to a packet without VLAN layer.
2740 	 */
2741 	if (!vlan_tag)
2742 		return rte_flow_error_set(error, EINVAL,
2743 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2744 					  item->spec,
2745 					  "VLAN cannot be empty");
2746 	return 0;
2747 }
2748 
2749 /**
2750  * Validate IPV4 item.
2751  *
2752  * @param[in] item
2753  *   Item specification.
2754  * @param[in] item_flags
2755  *   Bit-fields that holds the items detected until now.
2756  * @param[in] last_item
2757  *   Previous validated item in the pattern items.
2758  * @param[in] ether_type
2759  *   Type in the ethernet layer header (including dot1q).
2760  * @param[in] acc_mask
2761  *   Acceptable mask, if NULL default internal default mask
2762  *   will be used to check whether item fields are supported.
2763  * @param[in] range_accepted
2764  *   True if range of values is accepted for specific fields, false otherwise.
2765  * @param[out] error
2766  *   Pointer to error structure.
2767  *
2768  * @return
2769  *   0 on success, a negative errno value otherwise and rte_errno is set.
2770  */
2771 int
2772 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2773 			     uint64_t item_flags,
2774 			     uint64_t last_item,
2775 			     uint16_t ether_type,
2776 			     const struct rte_flow_item_ipv4 *acc_mask,
2777 			     bool range_accepted,
2778 			     struct rte_flow_error *error)
2779 {
2780 	const struct rte_flow_item_ipv4 *mask = item->mask;
2781 	const struct rte_flow_item_ipv4 *spec = item->spec;
2782 	const struct rte_flow_item_ipv4 nic_mask = {
2783 		.hdr = {
2784 			.src_addr = RTE_BE32(0xffffffff),
2785 			.dst_addr = RTE_BE32(0xffffffff),
2786 			.type_of_service = 0xff,
2787 			.next_proto_id = 0xff,
2788 		},
2789 	};
2790 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2791 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2792 				      MLX5_FLOW_LAYER_OUTER_L3;
2793 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2794 				      MLX5_FLOW_LAYER_OUTER_L4;
2795 	int ret;
2796 	uint8_t next_proto = 0xFF;
2797 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2798 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2799 				  MLX5_FLOW_LAYER_INNER_VLAN);
2800 
2801 	if ((last_item & l2_vlan) && ether_type &&
2802 	    ether_type != RTE_ETHER_TYPE_IPV4)
2803 		return rte_flow_error_set(error, EINVAL,
2804 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2805 					  "IPv4 cannot follow L2/VLAN layer "
2806 					  "which ether type is not IPv4");
2807 	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2808 		if (mask && spec)
2809 			next_proto = mask->hdr.next_proto_id &
2810 				     spec->hdr.next_proto_id;
2811 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2812 			return rte_flow_error_set(error, EINVAL,
2813 						  RTE_FLOW_ERROR_TYPE_ITEM,
2814 						  item,
2815 						  "multiple tunnel "
2816 						  "not supported");
2817 	}
2818 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2819 		return rte_flow_error_set(error, EINVAL,
2820 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2821 					  "wrong tunnel type - IPv6 specified "
2822 					  "but IPv4 item provided");
2823 	if (item_flags & l3m)
2824 		return rte_flow_error_set(error, ENOTSUP,
2825 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2826 					  "multiple L3 layers not supported");
2827 	else if (item_flags & l4m)
2828 		return rte_flow_error_set(error, EINVAL,
2829 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2830 					  "L3 cannot follow an L4 layer.");
2831 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2832 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2833 		return rte_flow_error_set(error, EINVAL,
2834 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2835 					  "L3 cannot follow an NVGRE layer.");
2836 	if (!mask)
2837 		mask = &rte_flow_item_ipv4_mask;
2838 	else if (mask->hdr.next_proto_id != 0 &&
2839 		 mask->hdr.next_proto_id != 0xff)
2840 		return rte_flow_error_set(error, EINVAL,
2841 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2842 					  "partial mask is not supported"
2843 					  " for protocol");
2844 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2845 					acc_mask ? (const uint8_t *)acc_mask
2846 						 : (const uint8_t *)&nic_mask,
2847 					sizeof(struct rte_flow_item_ipv4),
2848 					range_accepted, error);
2849 	if (ret < 0)
2850 		return ret;
2851 	return 0;
2852 }
2853 
2854 /**
2855  * Validate IPV6 item.
2856  *
2857  * @param[in] item
2858  *   Item specification.
2859  * @param[in] item_flags
2860  *   Bit-fields that holds the items detected until now.
2861  * @param[in] last_item
2862  *   Previous validated item in the pattern items.
2863  * @param[in] ether_type
2864  *   Type in the ethernet layer header (including dot1q).
2865  * @param[in] acc_mask
2866  *   Acceptable mask, if NULL default internal default mask
2867  *   will be used to check whether item fields are supported.
2868  * @param[out] error
2869  *   Pointer to error structure.
2870  *
2871  * @return
2872  *   0 on success, a negative errno value otherwise and rte_errno is set.
2873  */
2874 int
2875 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2876 			     uint64_t item_flags,
2877 			     uint64_t last_item,
2878 			     uint16_t ether_type,
2879 			     const struct rte_flow_item_ipv6 *acc_mask,
2880 			     struct rte_flow_error *error)
2881 {
2882 	const struct rte_flow_item_ipv6 *mask = item->mask;
2883 	const struct rte_flow_item_ipv6 *spec = item->spec;
2884 	const struct rte_flow_item_ipv6 nic_mask = {
2885 		.hdr = {
2886 			.src_addr =
2887 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2888 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2889 			.dst_addr =
2890 				"\xff\xff\xff\xff\xff\xff\xff\xff"
2891 				"\xff\xff\xff\xff\xff\xff\xff\xff",
2892 			.vtc_flow = RTE_BE32(0xffffffff),
2893 			.proto = 0xff,
2894 		},
2895 	};
2896 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2897 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2898 				      MLX5_FLOW_LAYER_OUTER_L3;
2899 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2900 				      MLX5_FLOW_LAYER_OUTER_L4;
2901 	int ret;
2902 	uint8_t next_proto = 0xFF;
2903 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2904 				  MLX5_FLOW_LAYER_OUTER_VLAN |
2905 				  MLX5_FLOW_LAYER_INNER_VLAN);
2906 
2907 	if ((last_item & l2_vlan) && ether_type &&
2908 	    ether_type != RTE_ETHER_TYPE_IPV6)
2909 		return rte_flow_error_set(error, EINVAL,
2910 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2911 					  "IPv6 cannot follow L2/VLAN layer "
2912 					  "which ether type is not IPv6");
2913 	if (mask && mask->hdr.proto == UINT8_MAX && spec)
2914 		next_proto = spec->hdr.proto;
2915 	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2916 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2917 			return rte_flow_error_set(error, EINVAL,
2918 						  RTE_FLOW_ERROR_TYPE_ITEM,
2919 						  item,
2920 						  "multiple tunnel "
2921 						  "not supported");
2922 	}
2923 	if (next_proto == IPPROTO_HOPOPTS  ||
2924 	    next_proto == IPPROTO_ROUTING  ||
2925 	    next_proto == IPPROTO_FRAGMENT ||
2926 	    next_proto == IPPROTO_ESP	   ||
2927 	    next_proto == IPPROTO_AH	   ||
2928 	    next_proto == IPPROTO_DSTOPTS)
2929 		return rte_flow_error_set(error, EINVAL,
2930 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2931 					  "IPv6 proto (next header) should "
2932 					  "not be set as extension header");
2933 	if (item_flags & MLX5_FLOW_LAYER_IPIP)
2934 		return rte_flow_error_set(error, EINVAL,
2935 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2936 					  "wrong tunnel type - IPv4 specified "
2937 					  "but IPv6 item provided");
2938 	if (item_flags & l3m)
2939 		return rte_flow_error_set(error, ENOTSUP,
2940 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2941 					  "multiple L3 layers not supported");
2942 	else if (item_flags & l4m)
2943 		return rte_flow_error_set(error, EINVAL,
2944 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2945 					  "L3 cannot follow an L4 layer.");
2946 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2947 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2948 		return rte_flow_error_set(error, EINVAL,
2949 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2950 					  "L3 cannot follow an NVGRE layer.");
2951 	if (!mask)
2952 		mask = &rte_flow_item_ipv6_mask;
2953 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2954 					acc_mask ? (const uint8_t *)acc_mask
2955 						 : (const uint8_t *)&nic_mask,
2956 					sizeof(struct rte_flow_item_ipv6),
2957 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2958 	if (ret < 0)
2959 		return ret;
2960 	return 0;
2961 }
2962 
2963 /**
2964  * Validate UDP item.
2965  *
2966  * @param[in] item
2967  *   Item specification.
2968  * @param[in] item_flags
2969  *   Bit-fields that holds the items detected until now.
2970  * @param[in] target_protocol
2971  *   The next protocol in the previous item.
2972  * @param[in] flow_mask
2973  *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2974  * @param[out] error
2975  *   Pointer to error structure.
2976  *
2977  * @return
2978  *   0 on success, a negative errno value otherwise and rte_errno is set.
2979  */
2980 int
2981 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2982 			    uint64_t item_flags,
2983 			    uint8_t target_protocol,
2984 			    struct rte_flow_error *error)
2985 {
2986 	const struct rte_flow_item_udp *mask = item->mask;
2987 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2988 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2989 				      MLX5_FLOW_LAYER_OUTER_L3;
2990 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2991 				      MLX5_FLOW_LAYER_OUTER_L4;
2992 	int ret;
2993 
2994 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2995 		return rte_flow_error_set(error, EINVAL,
2996 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2997 					  "protocol filtering not compatible"
2998 					  " with UDP layer");
2999 	if (!(item_flags & l3m))
3000 		return rte_flow_error_set(error, EINVAL,
3001 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3002 					  "L3 is mandatory to filter on L4");
3003 	if (item_flags & l4m)
3004 		return rte_flow_error_set(error, EINVAL,
3005 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3006 					  "multiple L4 layers not supported");
3007 	if (!mask)
3008 		mask = &rte_flow_item_udp_mask;
3009 	ret = mlx5_flow_item_acceptable
3010 		(item, (const uint8_t *)mask,
3011 		 (const uint8_t *)&rte_flow_item_udp_mask,
3012 		 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
3013 		 error);
3014 	if (ret < 0)
3015 		return ret;
3016 	return 0;
3017 }
3018 
3019 /**
3020  * Validate TCP item.
3021  *
3022  * @param[in] item
3023  *   Item specification.
3024  * @param[in] item_flags
3025  *   Bit-fields that holds the items detected until now.
3026  * @param[in] target_protocol
3027  *   The next protocol in the previous item.
3028  * @param[out] error
3029  *   Pointer to error structure.
3030  *
3031  * @return
3032  *   0 on success, a negative errno value otherwise and rte_errno is set.
3033  */
3034 int
3035 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
3036 			    uint64_t item_flags,
3037 			    uint8_t target_protocol,
3038 			    const struct rte_flow_item_tcp *flow_mask,
3039 			    struct rte_flow_error *error)
3040 {
3041 	const struct rte_flow_item_tcp *mask = item->mask;
3042 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3043 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
3044 				      MLX5_FLOW_LAYER_OUTER_L3;
3045 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
3046 				      MLX5_FLOW_LAYER_OUTER_L4;
3047 	int ret;
3048 
3049 	MLX5_ASSERT(flow_mask);
3050 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
3051 		return rte_flow_error_set(error, EINVAL,
3052 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3053 					  "protocol filtering not compatible"
3054 					  " with TCP layer");
3055 	if (!(item_flags & l3m))
3056 		return rte_flow_error_set(error, EINVAL,
3057 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3058 					  "L3 is mandatory to filter on L4");
3059 	if (item_flags & l4m)
3060 		return rte_flow_error_set(error, EINVAL,
3061 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3062 					  "multiple L4 layers not supported");
3063 	if (!mask)
3064 		mask = &rte_flow_item_tcp_mask;
3065 	ret = mlx5_flow_item_acceptable
3066 		(item, (const uint8_t *)mask,
3067 		 (const uint8_t *)flow_mask,
3068 		 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
3069 		 error);
3070 	if (ret < 0)
3071 		return ret;
3072 	return 0;
3073 }
3074 
3075 /**
3076  * Validate VXLAN item.
3077  *
3078  * @param[in] dev
3079  *   Pointer to the Ethernet device structure.
3080  * @param[in] udp_dport
3081  *   UDP destination port
3082  * @param[in] item
3083  *   Item specification.
3084  * @param[in] item_flags
3085  *   Bit-fields that holds the items detected until now.
3086  * @param root
3087  *   Whether action is on root table.
3088  * @param[out] error
3089  *   Pointer to error structure.
3090  *
3091  * @return
3092  *   0 on success, a negative errno value otherwise and rte_errno is set.
3093  */
3094 int
3095 mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
3096 			      uint16_t udp_dport,
3097 			      const struct rte_flow_item *item,
3098 			      uint64_t item_flags,
3099 			      bool root,
3100 			      struct rte_flow_error *error)
3101 {
3102 	const struct rte_flow_item_vxlan *spec = item->spec;
3103 	const struct rte_flow_item_vxlan *mask = item->mask;
3104 	int ret;
3105 	struct mlx5_priv *priv = dev->data->dev_private;
3106 	union vni {
3107 		uint32_t vlan_id;
3108 		uint8_t vni[4];
3109 	} id = { .vlan_id = 0, };
3110 	const struct rte_flow_item_vxlan nic_mask = {
3111 		.hdr.vni = "\xff\xff\xff",
3112 		.hdr.rsvd1 = 0xff,
3113 	};
3114 	const struct rte_flow_item_vxlan *valid_mask;
3115 
3116 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3117 		return rte_flow_error_set(error, ENOTSUP,
3118 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3119 					  "multiple tunnel layers not"
3120 					  " supported");
3121 	valid_mask = &rte_flow_item_vxlan_mask;
3122 	/*
3123 	 * Verify only UDPv4 is present as defined in
3124 	 * https://tools.ietf.org/html/rfc7348
3125 	 */
3126 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
3127 		return rte_flow_error_set(error, EINVAL,
3128 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3129 					  "no outer UDP layer found");
3130 	if (!mask)
3131 		mask = &rte_flow_item_vxlan_mask;
3132 
3133 	if (priv->sh->steering_format_version !=
3134 	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
3135 	    !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
3136 		/* non-root table */
3137 		if (!root && priv->sh->misc5_cap)
3138 			valid_mask = &nic_mask;
3139 		/* Group zero in NIC domain */
3140 		if (!root && priv->sh->tunnel_header_0_1)
3141 			valid_mask = &nic_mask;
3142 	}
3143 	ret = mlx5_flow_item_acceptable
3144 		(item, (const uint8_t *)mask,
3145 		 (const uint8_t *)valid_mask,
3146 		 sizeof(struct rte_flow_item_vxlan),
3147 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3148 	if (ret < 0)
3149 		return ret;
3150 	if (spec) {
3151 		memcpy(&id.vni[1], spec->hdr.vni, 3);
3152 		memcpy(&id.vni[1], mask->hdr.vni, 3);
3153 	}
3154 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
3155 		return rte_flow_error_set(error, ENOTSUP,
3156 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3157 					  "VXLAN tunnel must be fully defined");
3158 	return 0;
3159 }
3160 
3161 /**
3162  * Validate VXLAN_GPE item.
3163  *
3164  * @param[in] item
3165  *   Item specification.
3166  * @param[in] item_flags
3167  *   Bit-fields that holds the items detected until now.
3168  * @param[in] priv
3169  *   Pointer to the private data structure.
3170  * @param[in] target_protocol
3171  *   The next protocol in the previous item.
3172  * @param[out] error
3173  *   Pointer to error structure.
3174  *
3175  * @return
3176  *   0 on success, a negative errno value otherwise and rte_errno is set.
3177  */
3178 int
3179 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
3180 				  uint64_t item_flags,
3181 				  struct rte_eth_dev *dev,
3182 				  struct rte_flow_error *error)
3183 {
3184 	struct mlx5_priv *priv = dev->data->dev_private;
3185 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
3186 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
3187 	int ret;
3188 	union vni {
3189 		uint32_t vlan_id;
3190 		uint8_t vni[4];
3191 	} id = { .vlan_id = 0, };
3192 
3193 	struct rte_flow_item_vxlan_gpe nic_mask = {
3194 		.vni = "\xff\xff\xff",
3195 		.protocol = 0xff,
3196 		.flags = 0xff,
3197 	};
3198 
3199 	if (!priv->sh->config.l3_vxlan_en)
3200 		return rte_flow_error_set(error, ENOTSUP,
3201 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3202 					  "L3 VXLAN is not enabled by device"
3203 					  " parameter and/or not configured in"
3204 					  " firmware");
3205 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3206 		return rte_flow_error_set(error, ENOTSUP,
3207 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3208 					  "multiple tunnel layers not"
3209 					  " supported");
3210 	/*
3211 	 * Verify only UDPv4 is present as defined in
3212 	 * https://tools.ietf.org/html/rfc7348
3213 	 */
3214 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
3215 		return rte_flow_error_set(error, EINVAL,
3216 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3217 					  "no outer UDP layer found");
3218 	if (!mask)
3219 		mask = &rte_flow_item_vxlan_gpe_mask;
3220 	if (priv->sh->misc5_cap && priv->sh->tunnel_header_0_1) {
3221 		nic_mask.rsvd0[0] = 0xff;
3222 		nic_mask.rsvd0[1] = 0xff;
3223 		nic_mask.rsvd1 = 0xff;
3224 	}
3225 	ret = mlx5_flow_item_acceptable
3226 		(item, (const uint8_t *)mask,
3227 		 (const uint8_t *)&nic_mask,
3228 		 sizeof(struct rte_flow_item_vxlan_gpe),
3229 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3230 	if (ret < 0)
3231 		return ret;
3232 	if (spec) {
3233 		memcpy(&id.vni[1], spec->hdr.vni, 3);
3234 		memcpy(&id.vni[1], mask->hdr.vni, 3);
3235 	}
3236 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
3237 		return rte_flow_error_set(error, ENOTSUP,
3238 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3239 					  "VXLAN-GPE tunnel must be fully"
3240 					  " defined");
3241 	return 0;
3242 }
3243 /**
3244  * Validate GRE Key item.
3245  *
3246  * @param[in] item
3247  *   Item specification.
3248  * @param[in] item_flags
3249  *   Bit flags to mark detected items.
3250  * @param[in] gre_item
3251  *   Pointer to gre_item
3252  * @param[out] error
3253  *   Pointer to error structure.
3254  *
3255  * @return
3256  *   0 on success, a negative errno value otherwise and rte_errno is set.
3257  */
3258 int
3259 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
3260 				uint64_t item_flags,
3261 				const struct rte_flow_item *gre_item,
3262 				struct rte_flow_error *error)
3263 {
3264 	const rte_be32_t *mask = item->mask;
3265 	int ret = 0;
3266 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3267 	const struct rte_flow_item_gre *gre_spec;
3268 	const struct rte_flow_item_gre *gre_mask;
3269 
3270 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
3271 		return rte_flow_error_set(error, ENOTSUP,
3272 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3273 					  "Multiple GRE key not support");
3274 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
3275 		return rte_flow_error_set(error, ENOTSUP,
3276 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3277 					  "No preceding GRE header");
3278 	if (item_flags & MLX5_FLOW_LAYER_INNER)
3279 		return rte_flow_error_set(error, ENOTSUP,
3280 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3281 					  "GRE key following a wrong item");
3282 	gre_mask = gre_item->mask;
3283 	if (!gre_mask)
3284 		gre_mask = &rte_flow_item_gre_mask;
3285 	gre_spec = gre_item->spec;
3286 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
3287 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
3288 		return rte_flow_error_set(error, EINVAL,
3289 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3290 					  "Key bit must be on");
3291 
3292 	if (!mask)
3293 		mask = &gre_key_default_mask;
3294 	ret = mlx5_flow_item_acceptable
3295 		(item, (const uint8_t *)mask,
3296 		 (const uint8_t *)&gre_key_default_mask,
3297 		 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3298 	return ret;
3299 }
3300 
3301 /**
3302  * Validate GRE optional item.
3303  *
3304  * @param[in] dev
3305  *   Pointer to the Ethernet device structure.
3306  * @param[in] item
3307  *   Item specification.
3308  * @param[in] item_flags
3309  *   Bit flags to mark detected items.
3310  * @param[in] attr
3311  *   Flow rule attributes.
3312  * @param[in] gre_item
3313  *   Pointer to gre_item
3314  * @param[out] error
3315  *   Pointer to error structure.
3316  *
3317  * @return
3318  *   0 on success, a negative errno value otherwise and rte_errno is set.
3319  */
3320 int
3321 mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
3322 				   const struct rte_flow_item *item,
3323 				   uint64_t item_flags,
3324 				   const struct rte_flow_attr *attr,
3325 				   const struct rte_flow_item *gre_item,
3326 				   struct rte_flow_error *error)
3327 {
3328 	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
3329 	const struct rte_flow_item_gre *gre_mask = gre_item->mask;
3330 	const struct rte_flow_item_gre_opt *spec = item->spec;
3331 	const struct rte_flow_item_gre_opt *mask = item->mask;
3332 	struct mlx5_priv *priv = dev->data->dev_private;
3333 	int ret = 0;
3334 	struct rte_flow_item_gre_opt nic_mask = {
3335 		.checksum_rsvd = {
3336 			.checksum = RTE_BE16(UINT16_MAX),
3337 			.reserved1 = 0x0,
3338 		},
3339 		.key = {
3340 			.key = RTE_BE32(UINT32_MAX),
3341 		},
3342 		.sequence = {
3343 			.sequence = RTE_BE32(UINT32_MAX),
3344 		},
3345 	};
3346 
3347 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
3348 		return rte_flow_error_set(error, ENOTSUP,
3349 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3350 					  "No preceding GRE header");
3351 	if (item_flags & MLX5_FLOW_LAYER_INNER)
3352 		return rte_flow_error_set(error, ENOTSUP,
3353 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3354 					  "GRE option following a wrong item");
3355 	if (!spec || !mask)
3356 		return rte_flow_error_set(error, EINVAL,
3357 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3358 					  "At least one field gre_option(checksum/key/sequence) must be specified");
3359 	if (!gre_mask)
3360 		gre_mask = &rte_flow_item_gre_mask;
3361 	if (mask->checksum_rsvd.checksum)
3362 		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) &&
3363 				 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000)))
3364 			return rte_flow_error_set(error, EINVAL,
3365 						  RTE_FLOW_ERROR_TYPE_ITEM,
3366 						  item,
3367 						  "Checksum bit must be on");
3368 	if (mask->key.key)
3369 		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
3370 				 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
3371 			return rte_flow_error_set(error, EINVAL,
3372 						  RTE_FLOW_ERROR_TYPE_ITEM,
3373 						  item, "Key bit must be on");
3374 	if (mask->sequence.sequence)
3375 		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) &&
3376 				 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000)))
3377 			return rte_flow_error_set(error, EINVAL,
3378 						  RTE_FLOW_ERROR_TYPE_ITEM,
3379 						  item,
3380 						  "Sequence bit must be on");
3381 	if (mask->checksum_rsvd.checksum || mask->sequence.sequence) {
3382 		if (priv->sh->steering_format_version ==
3383 		    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
3384 		    ((attr->group || (attr->transfer && priv->fdb_def_rule)) &&
3385 		     !priv->sh->misc5_cap) ||
3386 		    (!(priv->sh->tunnel_header_0_1 &&
3387 		       priv->sh->tunnel_header_2_3) &&
3388 		    !attr->group && (!attr->transfer || !priv->fdb_def_rule)))
3389 			return rte_flow_error_set(error, EINVAL,
3390 						  RTE_FLOW_ERROR_TYPE_ITEM,
3391 						  item,
3392 						  "Checksum/Sequence not supported");
3393 	}
3394 	ret = mlx5_flow_item_acceptable
3395 		(item, (const uint8_t *)mask,
3396 		 (const uint8_t *)&nic_mask,
3397 		 sizeof(struct rte_flow_item_gre_opt),
3398 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3399 	return ret;
3400 }
3401 
3402 /**
3403  * Validate GRE item.
3404  *
3405  * @param[in] item
3406  *   Item specification.
3407  * @param[in] item_flags
3408  *   Bit flags to mark detected items.
3409  * @param[in] target_protocol
3410  *   The next protocol in the previous item.
3411  * @param[out] error
3412  *   Pointer to error structure.
3413  *
3414  * @return
3415  *   0 on success, a negative errno value otherwise and rte_errno is set.
3416  */
3417 int
3418 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
3419 			    uint64_t item_flags,
3420 			    uint8_t target_protocol,
3421 			    struct rte_flow_error *error)
3422 {
3423 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
3424 	const struct rte_flow_item_gre *mask = item->mask;
3425 	int ret;
3426 	const struct rte_flow_item_gre nic_mask = {
3427 		.c_rsvd0_ver = RTE_BE16(0xB000),
3428 		.protocol = RTE_BE16(UINT16_MAX),
3429 	};
3430 
3431 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
3432 		return rte_flow_error_set(error, EINVAL,
3433 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3434 					  "protocol filtering not compatible"
3435 					  " with this GRE layer");
3436 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3437 		return rte_flow_error_set(error, ENOTSUP,
3438 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3439 					  "multiple tunnel layers not"
3440 					  " supported");
3441 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3442 		return rte_flow_error_set(error, ENOTSUP,
3443 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3444 					  "L3 Layer is missing");
3445 	if (!mask)
3446 		mask = &rte_flow_item_gre_mask;
3447 	ret = mlx5_flow_item_acceptable
3448 		(item, (const uint8_t *)mask,
3449 		 (const uint8_t *)&nic_mask,
3450 		 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
3451 		 error);
3452 	if (ret < 0)
3453 		return ret;
3454 #ifndef HAVE_MLX5DV_DR
3455 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
3456 	if (spec && (spec->protocol & mask->protocol))
3457 		return rte_flow_error_set(error, ENOTSUP,
3458 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3459 					  "without MPLS support the"
3460 					  " specification cannot be used for"
3461 					  " filtering");
3462 #endif
3463 #endif
3464 	return 0;
3465 }
3466 
3467 /**
3468  * Validate Geneve item.
3469  *
3470  * @param[in] item
3471  *   Item specification.
3472  * @param[in] itemFlags
3473  *   Bit-fields that holds the items detected until now.
3474  * @param[in] enPriv
3475  *   Pointer to the private data structure.
3476  * @param[out] error
3477  *   Pointer to error structure.
3478  *
3479  * @return
3480  *   0 on success, a negative errno value otherwise and rte_errno is set.
3481  */
3482 
3483 int
3484 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
3485 			       uint64_t item_flags,
3486 			       struct rte_eth_dev *dev,
3487 			       struct rte_flow_error *error)
3488 {
3489 	struct mlx5_priv *priv = dev->data->dev_private;
3490 	const struct rte_flow_item_geneve *spec = item->spec;
3491 	const struct rte_flow_item_geneve *mask = item->mask;
3492 	int ret;
3493 	uint16_t gbhdr;
3494 	uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
3495 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
3496 	const struct rte_flow_item_geneve nic_mask = {
3497 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
3498 		.vni = "\xff\xff\xff",
3499 		.protocol = RTE_BE16(UINT16_MAX),
3500 	};
3501 
3502 	if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
3503 		return rte_flow_error_set(error, ENOTSUP,
3504 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3505 					  "L3 Geneve is not enabled by device"
3506 					  " parameter and/or not configured in"
3507 					  " firmware");
3508 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3509 		return rte_flow_error_set(error, ENOTSUP,
3510 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3511 					  "multiple tunnel layers not"
3512 					  " supported");
3513 	/*
3514 	 * Verify only UDPv4 is present as defined in
3515 	 * https://tools.ietf.org/html/rfc7348
3516 	 */
3517 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
3518 		return rte_flow_error_set(error, EINVAL,
3519 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3520 					  "no outer UDP layer found");
3521 	if (!mask)
3522 		mask = &rte_flow_item_geneve_mask;
3523 	ret = mlx5_flow_item_acceptable
3524 				  (item, (const uint8_t *)mask,
3525 				   (const uint8_t *)&nic_mask,
3526 				   sizeof(struct rte_flow_item_geneve),
3527 				   MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3528 	if (ret)
3529 		return ret;
3530 	if (spec) {
3531 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
3532 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
3533 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
3534 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
3535 			return rte_flow_error_set(error, ENOTSUP,
3536 						  RTE_FLOW_ERROR_TYPE_ITEM,
3537 						  item,
3538 						  "Geneve protocol unsupported"
3539 						  " fields are being used");
3540 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
3541 			return rte_flow_error_set
3542 					(error, ENOTSUP,
3543 					 RTE_FLOW_ERROR_TYPE_ITEM,
3544 					 item,
3545 					 "Unsupported Geneve options length");
3546 	}
3547 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
3548 		return rte_flow_error_set
3549 				    (error, ENOTSUP,
3550 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
3551 				     "Geneve tunnel must be fully defined");
3552 	return 0;
3553 }
3554 
3555 /**
3556  * Validate Geneve TLV option item.
3557  *
3558  * @param[in] item
3559  *   Item specification.
3560  * @param[in] last_item
3561  *   Previous validated item in the pattern items.
3562  * @param[in] geneve_item
3563  *   Previous GENEVE item specification.
3564  * @param[in] dev
3565  *   Pointer to the rte_eth_dev structure.
3566  * @param[out] error
3567  *   Pointer to error structure.
3568  *
3569  * @return
3570  *   0 on success, a negative errno value otherwise and rte_errno is set.
3571  */
3572 int
3573 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
3574 				   uint64_t last_item,
3575 				   const struct rte_flow_item *geneve_item,
3576 				   struct rte_eth_dev *dev,
3577 				   struct rte_flow_error *error)
3578 {
3579 	struct mlx5_priv *priv = dev->data->dev_private;
3580 	struct mlx5_dev_ctx_shared *sh = priv->sh;
3581 	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
3582 	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
3583 	uint8_t data_max_supported =
3584 			hca_attr->max_geneve_tlv_option_data_len * 4;
3585 	const struct rte_flow_item_geneve *geneve_spec;
3586 	const struct rte_flow_item_geneve *geneve_mask;
3587 	const struct rte_flow_item_geneve_opt *spec = item->spec;
3588 	const struct rte_flow_item_geneve_opt *mask = item->mask;
3589 	unsigned int i;
3590 	unsigned int data_len;
3591 	uint8_t tlv_option_len;
3592 	uint16_t optlen_m, optlen_v;
3593 	const struct rte_flow_item_geneve_opt full_mask = {
3594 		.option_class = RTE_BE16(0xffff),
3595 		.option_type = 0xff,
3596 		.option_len = 0x1f,
3597 	};
3598 
3599 	if (!mask)
3600 		mask = &rte_flow_item_geneve_opt_mask;
3601 	if (!spec)
3602 		return rte_flow_error_set
3603 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3604 			"Geneve TLV opt class/type/length must be specified");
3605 	if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
3606 		return rte_flow_error_set
3607 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3608 			"Geneve TLV opt length exceeds the limit (31)");
3609 	/* Check if class type and length masks are full. */
3610 	if (full_mask.option_class != mask->option_class ||
3611 	    full_mask.option_type != mask->option_type ||
3612 	    full_mask.option_len != (mask->option_len & full_mask.option_len))
3613 		return rte_flow_error_set
3614 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3615 			"Geneve TLV opt class/type/length masks must be full");
3616 	/* Check if length is supported */
3617 	if ((uint32_t)spec->option_len >
3618 			hca_attr->max_geneve_tlv_option_data_len)
3619 		return rte_flow_error_set
3620 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3621 			"Geneve TLV opt length not supported");
3622 	if (hca_attr->max_geneve_tlv_options > 1)
3623 		DRV_LOG(DEBUG,
3624 			"max_geneve_tlv_options supports more than 1 option");
3625 	/* Check GENEVE item preceding. */
3626 	if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
3627 		return rte_flow_error_set
3628 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3629 			"Geneve opt item must be preceded with Geneve item");
3630 	geneve_spec = geneve_item->spec;
3631 	geneve_mask = geneve_item->mask ? geneve_item->mask :
3632 					  &rte_flow_item_geneve_mask;
3633 	/* Check if GENEVE TLV option size doesn't exceed option length */
3634 	if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
3635 			    geneve_spec->ver_opt_len_o_c_rsvd0)) {
3636 		tlv_option_len = spec->option_len & mask->option_len;
3637 		optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
3638 		optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
3639 		optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
3640 		optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
3641 		if ((optlen_v & optlen_m) <= tlv_option_len)
3642 			return rte_flow_error_set
3643 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3644 				 "GENEVE TLV option length exceeds optlen");
3645 	}
3646 	/* Check if length is 0 or data is 0. */
3647 	if (spec->data == NULL || spec->option_len == 0)
3648 		return rte_flow_error_set
3649 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3650 			"Geneve TLV opt with zero data/length not supported");
3651 	/* Check not all data & mask are 0. */
3652 	data_len = spec->option_len * 4;
3653 	if (mask->data == NULL) {
3654 		for (i = 0; i < data_len; i++)
3655 			if (spec->data[i])
3656 				break;
3657 		if (i == data_len)
3658 			return rte_flow_error_set(error, ENOTSUP,
3659 				RTE_FLOW_ERROR_TYPE_ITEM, item,
3660 				"Can't match on Geneve option data 0");
3661 	} else {
3662 		for (i = 0; i < data_len; i++)
3663 			if (spec->data[i] & mask->data[i])
3664 				break;
3665 		if (i == data_len)
3666 			return rte_flow_error_set(error, ENOTSUP,
3667 				RTE_FLOW_ERROR_TYPE_ITEM, item,
3668 				"Can't match on Geneve option data and mask 0");
3669 		/* Check data mask supported. */
3670 		for (i = data_max_supported; i < data_len ; i++)
3671 			if (mask->data[i])
3672 				return rte_flow_error_set(error, ENOTSUP,
3673 					RTE_FLOW_ERROR_TYPE_ITEM, item,
3674 					"Data mask is of unsupported size");
3675 	}
3676 	/* Check GENEVE option is supported in NIC. */
3677 	if (!hca_attr->geneve_tlv_opt)
3678 		return rte_flow_error_set
3679 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
3680 			"Geneve TLV opt not supported");
3681 	/* Check if we already have geneve option with different type/class. */
3682 	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
3683 	geneve_opt_resource = sh->geneve_tlv_option_resource;
3684 	if (geneve_opt_resource != NULL)
3685 		if (geneve_opt_resource->option_class != spec->option_class ||
3686 		    geneve_opt_resource->option_type != spec->option_type ||
3687 		    geneve_opt_resource->length != spec->option_len) {
3688 			rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
3689 			return rte_flow_error_set(error, ENOTSUP,
3690 				RTE_FLOW_ERROR_TYPE_ITEM, item,
3691 				"Only one Geneve TLV option supported");
3692 		}
3693 	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
3694 	return 0;
3695 }
3696 
3697 /**
3698  * Validate MPLS item.
3699  *
3700  * @param[in] dev
3701  *   Pointer to the rte_eth_dev structure.
3702  * @param[in] item
3703  *   Item specification.
3704  * @param[in] item_flags
3705  *   Bit-fields that holds the items detected until now.
3706  * @param[in] prev_layer
3707  *   The protocol layer indicated in previous item.
3708  * @param[out] error
3709  *   Pointer to error structure.
3710  *
3711  * @return
3712  *   0 on success, a negative errno value otherwise and rte_errno is set.
3713  */
3714 int
3715 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
3716 			     const struct rte_flow_item *item __rte_unused,
3717 			     uint64_t item_flags __rte_unused,
3718 			     uint64_t prev_layer __rte_unused,
3719 			     struct rte_flow_error *error)
3720 {
3721 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
3722 	const struct rte_flow_item_mpls *mask = item->mask;
3723 	struct mlx5_priv *priv = dev->data->dev_private;
3724 	int ret;
3725 
3726 	if (!priv->sh->dev_cap.mpls_en)
3727 		return rte_flow_error_set(error, ENOTSUP,
3728 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3729 					  "MPLS not supported or"
3730 					  " disabled in firmware"
3731 					  " configuration.");
3732 	/* MPLS over UDP, GRE is allowed */
3733 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
3734 			    MLX5_FLOW_LAYER_GRE |
3735 			    MLX5_FLOW_LAYER_GRE_KEY)))
3736 		return rte_flow_error_set(error, EINVAL,
3737 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3738 					  "protocol filtering not compatible"
3739 					  " with MPLS layer");
3740 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
3741 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3742 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
3743 		return rte_flow_error_set(error, ENOTSUP,
3744 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3745 					  "multiple tunnel layers not"
3746 					  " supported");
3747 	if (!mask)
3748 		mask = &rte_flow_item_mpls_mask;
3749 	ret = mlx5_flow_item_acceptable
3750 		(item, (const uint8_t *)mask,
3751 		 (const uint8_t *)&rte_flow_item_mpls_mask,
3752 		 sizeof(struct rte_flow_item_mpls),
3753 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3754 	if (ret < 0)
3755 		return ret;
3756 	return 0;
3757 #else
3758 	return rte_flow_error_set(error, ENOTSUP,
3759 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
3760 				  "MPLS is not supported by Verbs, please"
3761 				  " update.");
3762 #endif
3763 }
3764 
3765 /**
3766  * Validate NVGRE item.
3767  *
3768  * @param[in] item
3769  *   Item specification.
3770  * @param[in] item_flags
3771  *   Bit flags to mark detected items.
3772  * @param[in] target_protocol
3773  *   The next protocol in the previous item.
3774  * @param[out] error
3775  *   Pointer to error structure.
3776  *
3777  * @return
3778  *   0 on success, a negative errno value otherwise and rte_errno is set.
3779  */
3780 int
3781 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
3782 			      uint64_t item_flags,
3783 			      uint8_t target_protocol,
3784 			      struct rte_flow_error *error)
3785 {
3786 	const struct rte_flow_item_nvgre *mask = item->mask;
3787 	int ret;
3788 
3789 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
3790 		return rte_flow_error_set(error, EINVAL,
3791 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3792 					  "protocol filtering not compatible"
3793 					  " with this GRE layer");
3794 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3795 		return rte_flow_error_set(error, ENOTSUP,
3796 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3797 					  "multiple tunnel layers not"
3798 					  " supported");
3799 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
3800 		return rte_flow_error_set(error, ENOTSUP,
3801 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3802 					  "L3 Layer is missing");
3803 	if (!mask)
3804 		mask = &rte_flow_item_nvgre_mask;
3805 	ret = mlx5_flow_item_acceptable
3806 		(item, (const uint8_t *)mask,
3807 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
3808 		 sizeof(struct rte_flow_item_nvgre),
3809 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3810 	if (ret < 0)
3811 		return ret;
3812 	return 0;
3813 }
3814 
3815 /**
3816  * Validate eCPRI item.
3817  *
3818  * @param[in] item
3819  *   Item specification.
3820  * @param[in] item_flags
3821  *   Bit-fields that holds the items detected until now.
3822  * @param[in] last_item
3823  *   Previous validated item in the pattern items.
3824  * @param[in] ether_type
3825  *   Type in the ethernet layer header (including dot1q).
3826  * @param[in] acc_mask
3827  *   Acceptable mask, if NULL default internal default mask
3828  *   will be used to check whether item fields are supported.
3829  * @param[out] error
3830  *   Pointer to error structure.
3831  *
3832  * @return
3833  *   0 on success, a negative errno value otherwise and rte_errno is set.
3834  */
3835 int
3836 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
3837 			      uint64_t item_flags,
3838 			      uint64_t last_item,
3839 			      uint16_t ether_type,
3840 			      const struct rte_flow_item_ecpri *acc_mask,
3841 			      struct rte_flow_error *error)
3842 {
3843 	const struct rte_flow_item_ecpri *mask = item->mask;
3844 	const struct rte_flow_item_ecpri nic_mask = {
3845 		.hdr = {
3846 			.common = {
3847 				.u32 =
3848 				RTE_BE32(((const struct rte_ecpri_common_hdr) {
3849 					.type = 0xFF,
3850 					}).u32),
3851 			},
3852 			.dummy[0] = 0xFFFFFFFF,
3853 		},
3854 	};
3855 	const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
3856 					MLX5_FLOW_LAYER_OUTER_VLAN);
3857 	struct rte_flow_item_ecpri mask_lo;
3858 
3859 	if (!(last_item & outer_l2_vlan) &&
3860 	    last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
3861 		return rte_flow_error_set(error, EINVAL,
3862 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3863 					  "eCPRI can only follow L2/VLAN layer or UDP layer");
3864 	if ((last_item & outer_l2_vlan) && ether_type &&
3865 	    ether_type != RTE_ETHER_TYPE_ECPRI)
3866 		return rte_flow_error_set(error, EINVAL,
3867 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3868 					  "eCPRI cannot follow L2/VLAN layer which ether type is not 0xAEFE");
3869 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3870 		return rte_flow_error_set(error, EINVAL,
3871 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3872 					  "eCPRI with tunnel is not supported right now");
3873 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
3874 		return rte_flow_error_set(error, ENOTSUP,
3875 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3876 					  "multiple L3 layers not supported");
3877 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
3878 		return rte_flow_error_set(error, EINVAL,
3879 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3880 					  "eCPRI cannot coexist with a TCP layer");
3881 	/* In specification, eCPRI could be over UDP layer. */
3882 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
3883 		return rte_flow_error_set(error, EINVAL,
3884 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3885 					  "eCPRI over UDP layer is not yet supported right now");
3886 	/* Mask for type field in common header could be zero. */
3887 	if (!mask)
3888 		mask = &rte_flow_item_ecpri_mask;
3889 	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
3890 	/* Input mask is in big-endian format. */
3891 	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
3892 		return rte_flow_error_set(error, EINVAL,
3893 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3894 					  "partial mask is not supported for protocol");
3895 	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
3896 		return rte_flow_error_set(error, EINVAL,
3897 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3898 					  "message header mask must be after a type mask");
3899 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
3900 					 acc_mask ? (const uint8_t *)acc_mask
3901 						  : (const uint8_t *)&nic_mask,
3902 					 sizeof(struct rte_flow_item_ecpri),
3903 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3904 }
3905 
3906 /**
3907  * Validate the NSH item.
3908  *
3909  * @param[in] dev
3910  *   Pointer to Ethernet device on which flow rule is being created on.
3911  * @param[out] error
3912  *   Pointer to error structure.
3913  *
3914  * @return
3915  *   0 on success, a negative errno value otherwise and rte_errno is set.
3916  */
3917 int
3918 mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
3919 			    const struct rte_flow_item *item,
3920 			    struct rte_flow_error *error)
3921 {
3922 	struct mlx5_priv *priv = dev->data->dev_private;
3923 
3924 	if (item->mask) {
3925 		return rte_flow_error_set(error, ENOTSUP,
3926 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3927 					  "NSH fields matching is not supported");
3928 	}
3929 
3930 	if (!priv->sh->config.dv_flow_en) {
3931 		return rte_flow_error_set(error, ENOTSUP,
3932 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3933 					  NULL, "NSH support requires DV flow interface");
3934 	}
3935 
3936 	if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_vxlan_gpe_nsh) {
3937 		return rte_flow_error_set(error, ENOTSUP,
3938 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
3939 					  "Current FW does not support matching on NSH");
3940 	}
3941 
3942 	return 0;
3943 }
3944 
3945 static int
3946 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
3947 		   const struct rte_flow_attr *attr __rte_unused,
3948 		   const struct rte_flow_item items[] __rte_unused,
3949 		   const struct rte_flow_action actions[] __rte_unused,
3950 		   bool external __rte_unused,
3951 		   int hairpin __rte_unused,
3952 		   struct rte_flow_error *error)
3953 {
3954 	return rte_flow_error_set(error, ENOTSUP,
3955 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3956 }
3957 
3958 static struct mlx5_flow *
3959 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
3960 		  const struct rte_flow_attr *attr __rte_unused,
3961 		  const struct rte_flow_item items[] __rte_unused,
3962 		  const struct rte_flow_action actions[] __rte_unused,
3963 		  struct rte_flow_error *error)
3964 {
3965 	rte_flow_error_set(error, ENOTSUP,
3966 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3967 	return NULL;
3968 }
3969 
3970 static int
3971 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
3972 		    struct mlx5_flow *dev_flow __rte_unused,
3973 		    const struct rte_flow_attr *attr __rte_unused,
3974 		    const struct rte_flow_item items[] __rte_unused,
3975 		    const struct rte_flow_action actions[] __rte_unused,
3976 		    struct rte_flow_error *error)
3977 {
3978 	return rte_flow_error_set(error, ENOTSUP,
3979 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3980 }
3981 
3982 static int
3983 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3984 		struct rte_flow *flow __rte_unused,
3985 		struct rte_flow_error *error)
3986 {
3987 	return rte_flow_error_set(error, ENOTSUP,
3988 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3989 }
3990 
3991 static void
3992 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3993 		 struct rte_flow *flow __rte_unused)
3994 {
3995 }
3996 
3997 static void
3998 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3999 		  struct rte_flow *flow __rte_unused)
4000 {
4001 }
4002 
4003 static int
4004 flow_null_query(struct rte_eth_dev *dev __rte_unused,
4005 		struct rte_flow *flow __rte_unused,
4006 		const struct rte_flow_action *actions __rte_unused,
4007 		void *data __rte_unused,
4008 		struct rte_flow_error *error)
4009 {
4010 	return rte_flow_error_set(error, ENOTSUP,
4011 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
4012 }
4013 
4014 static int
4015 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
4016 		      uint32_t domains __rte_unused,
4017 		      uint32_t flags __rte_unused)
4018 {
4019 	return 0;
4020 }
4021 
4022 int
4023 flow_null_get_aged_flows(struct rte_eth_dev *dev,
4024 		    void **context __rte_unused,
4025 		    uint32_t nb_contexts __rte_unused,
4026 		    struct rte_flow_error *error __rte_unused)
4027 {
4028 	DRV_LOG(ERR, "port %u get aged flows is not supported.",
4029 		dev->data->port_id);
4030 	return -ENOTSUP;
4031 }
4032 
4033 uint32_t
4034 flow_null_counter_allocate(struct rte_eth_dev *dev)
4035 {
4036 	DRV_LOG(ERR, "port %u counter allocate is not supported.",
4037 		dev->data->port_id);
4038 	return 0;
4039 }
4040 
4041 void
4042 flow_null_counter_free(struct rte_eth_dev *dev,
4043 			uint32_t counter __rte_unused)
4044 {
4045 	DRV_LOG(ERR, "port %u counter free is not supported.",
4046 		 dev->data->port_id);
4047 }
4048 
4049 int
4050 flow_null_counter_query(struct rte_eth_dev *dev,
4051 			uint32_t counter __rte_unused,
4052 			bool clear __rte_unused,
4053 			uint64_t *pkts __rte_unused,
4054 			uint64_t *bytes __rte_unused,
4055 			void **action __rte_unused)
4056 {
4057 	DRV_LOG(ERR, "port %u counter query is not supported.",
4058 		 dev->data->port_id);
4059 	return -ENOTSUP;
4060 }
4061 
4062 /* Void driver to protect from null pointer reference. */
4063 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
4064 	.validate = flow_null_validate,
4065 	.prepare = flow_null_prepare,
4066 	.translate = flow_null_translate,
4067 	.apply = flow_null_apply,
4068 	.remove = flow_null_remove,
4069 	.destroy = flow_null_destroy,
4070 	.query = flow_null_query,
4071 	.sync_domain = flow_null_sync_domain,
4072 	.get_aged_flows = flow_null_get_aged_flows,
4073 	.counter_alloc = flow_null_counter_allocate,
4074 	.counter_free = flow_null_counter_free,
4075 	.counter_query = flow_null_counter_query
4076 };
4077 
4078 /**
4079  * Select flow driver type according to flow attributes and device
4080  * configuration.
4081  *
4082  * @param[in] dev
4083  *   Pointer to the dev structure.
4084  * @param[in] attr
4085  *   Pointer to the flow attributes.
4086  *
4087  * @return
4088  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
4089  */
4090 static enum mlx5_flow_drv_type
4091 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
4092 {
4093 	struct mlx5_priv *priv = dev->data->dev_private;
4094 	/* The OS can determine first a specific flow type (DV, VERBS) */
4095 	enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
4096 
4097 	if (type != MLX5_FLOW_TYPE_MAX)
4098 		return type;
4099 	/*
4100 	 * Currently when dv_flow_en == 2, only HW steering engine is
4101 	 * supported. New engines can also be chosen here if ready.
4102 	 */
4103 	if (priv->sh->config.dv_flow_en == 2)
4104 		return MLX5_FLOW_TYPE_HW;
4105 	if (!attr)
4106 		return MLX5_FLOW_TYPE_MIN;
4107 	/* If no OS specific type - continue with DV/VERBS selection */
4108 	if (attr->transfer && priv->sh->config.dv_esw_en)
4109 		type = MLX5_FLOW_TYPE_DV;
4110 	if (!attr->transfer)
4111 		type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
4112 						     MLX5_FLOW_TYPE_VERBS;
4113 	return type;
4114 }
4115 
4116 #define flow_get_drv_ops(type) flow_drv_ops[type]
4117 
4118 /**
4119  * Flow driver validation API. This abstracts calling driver specific functions.
4120  * The type of flow driver is determined according to flow attributes.
4121  *
4122  * @param[in] dev
4123  *   Pointer to the dev structure.
4124  * @param[in] attr
4125  *   Pointer to the flow attributes.
4126  * @param[in] items
4127  *   Pointer to the list of items.
4128  * @param[in] actions
4129  *   Pointer to the list of actions.
4130  * @param[in] external
4131  *   This flow rule is created by request external to PMD.
4132  * @param[in] hairpin
4133  *   Number of hairpin TX actions, 0 means classic flow.
4134  * @param[out] error
4135  *   Pointer to the error structure.
4136  *
4137  * @return
4138  *   0 on success, a negative errno value otherwise and rte_errno is set.
4139  */
4140 static inline int
4141 flow_drv_validate(struct rte_eth_dev *dev,
4142 		  const struct rte_flow_attr *attr,
4143 		  const struct rte_flow_item items[],
4144 		  const struct rte_flow_action actions[],
4145 		  bool external, int hairpin, struct rte_flow_error *error)
4146 {
4147 	const struct mlx5_flow_driver_ops *fops;
4148 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
4149 
4150 	fops = flow_get_drv_ops(type);
4151 	return fops->validate(dev, attr, items, actions, external,
4152 			      hairpin, error);
4153 }
4154 
4155 /**
4156  * Flow driver preparation API. This abstracts calling driver specific
4157  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
4158  * calculates the size of memory required for device flow, allocates the memory,
4159  * initializes the device flow and returns the pointer.
4160  *
4161  * @note
4162  *   This function initializes device flow structure such as dv or verbs in
4163  *   struct mlx5_flow. However, it is caller's responsibility to initialize the
4164  *   rest. For example, adding returning device flow to flow->dev_flow list and
4165  *   setting backward reference to the flow should be done out of this function.
4166  *   layers field is not filled either.
4167  *
4168  * @param[in] dev
4169  *   Pointer to the dev structure.
4170  * @param[in] attr
4171  *   Pointer to the flow attributes.
4172  * @param[in] items
4173  *   Pointer to the list of items.
4174  * @param[in] actions
4175  *   Pointer to the list of actions.
4176  * @param[in] flow_idx
4177  *   This memory pool index to the flow.
4178  * @param[out] error
4179  *   Pointer to the error structure.
4180  *
4181  * @return
4182  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
4183  */
4184 static inline struct mlx5_flow *
4185 flow_drv_prepare(struct rte_eth_dev *dev,
4186 		 const struct rte_flow *flow,
4187 		 const struct rte_flow_attr *attr,
4188 		 const struct rte_flow_item items[],
4189 		 const struct rte_flow_action actions[],
4190 		 uint32_t flow_idx,
4191 		 struct rte_flow_error *error)
4192 {
4193 	const struct mlx5_flow_driver_ops *fops;
4194 	enum mlx5_flow_drv_type type = flow->drv_type;
4195 	struct mlx5_flow *mlx5_flow = NULL;
4196 
4197 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
4198 	fops = flow_get_drv_ops(type);
4199 	mlx5_flow = fops->prepare(dev, attr, items, actions, error);
4200 	if (mlx5_flow)
4201 		mlx5_flow->flow_idx = flow_idx;
4202 	return mlx5_flow;
4203 }
4204 
4205 /**
4206  * Flow driver translation API. This abstracts calling driver specific
4207  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
4208  * translates a generic flow into a driver flow. flow_drv_prepare() must
4209  * precede.
4210  *
4211  * @note
4212  *   dev_flow->layers could be filled as a result of parsing during translation
4213  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
4214  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
4215  *   flow->actions could be overwritten even though all the expanded dev_flows
4216  *   have the same actions.
4217  *
4218  * @param[in] dev
4219  *   Pointer to the rte dev structure.
4220  * @param[in, out] dev_flow
4221  *   Pointer to the mlx5 flow.
4222  * @param[in] attr
4223  *   Pointer to the flow attributes.
4224  * @param[in] items
4225  *   Pointer to the list of items.
4226  * @param[in] actions
4227  *   Pointer to the list of actions.
4228  * @param[out] error
4229  *   Pointer to the error structure.
4230  *
4231  * @return
4232  *   0 on success, a negative errno value otherwise and rte_errno is set.
4233  */
4234 static inline int
4235 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
4236 		   const struct rte_flow_attr *attr,
4237 		   const struct rte_flow_item items[],
4238 		   const struct rte_flow_action actions[],
4239 		   struct rte_flow_error *error)
4240 {
4241 	const struct mlx5_flow_driver_ops *fops;
4242 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
4243 
4244 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
4245 	fops = flow_get_drv_ops(type);
4246 	return fops->translate(dev, dev_flow, attr, items, actions, error);
4247 }
4248 
4249 /**
4250  * Flow driver apply API. This abstracts calling driver specific functions.
4251  * Parent flow (rte_flow) should have driver type (drv_type). It applies
4252  * translated driver flows on to device. flow_drv_translate() must precede.
4253  *
4254  * @param[in] dev
4255  *   Pointer to Ethernet device structure.
4256  * @param[in, out] flow
4257  *   Pointer to flow structure.
4258  * @param[out] error
4259  *   Pointer to error structure.
4260  *
4261  * @return
4262  *   0 on success, a negative errno value otherwise and rte_errno is set.
4263  */
4264 static inline int
4265 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4266 	       struct rte_flow_error *error)
4267 {
4268 	const struct mlx5_flow_driver_ops *fops;
4269 	enum mlx5_flow_drv_type type = flow->drv_type;
4270 
4271 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
4272 	fops = flow_get_drv_ops(type);
4273 	return fops->apply(dev, flow, error);
4274 }
4275 
4276 /**
4277  * Flow driver destroy API. This abstracts calling driver specific functions.
4278  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
4279  * on device and releases resources of the flow.
4280  *
4281  * @param[in] dev
4282  *   Pointer to Ethernet device.
4283  * @param[in, out] flow
4284  *   Pointer to flow structure.
4285  */
4286 static inline void
4287 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4288 {
4289 	const struct mlx5_flow_driver_ops *fops;
4290 	enum mlx5_flow_drv_type type = flow->drv_type;
4291 
4292 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
4293 	fops = flow_get_drv_ops(type);
4294 	fops->destroy(dev, flow);
4295 }
4296 
4297 /**
4298  * Flow driver find RSS policy tbl API. This abstracts calling driver
4299  * specific functions. Parent flow (rte_flow) should have driver
4300  * type (drv_type). It will find the RSS policy table that has the rss_desc.
4301  *
4302  * @param[in] dev
4303  *   Pointer to Ethernet device.
4304  * @param[in, out] flow
4305  *   Pointer to flow structure.
4306  * @param[in] policy
4307  *   Pointer to meter policy table.
4308  * @param[in] rss_desc
4309  *   Pointer to rss_desc
4310  */
4311 static struct mlx5_flow_meter_sub_policy *
4312 flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
4313 		struct rte_flow *flow,
4314 		struct mlx5_flow_meter_policy *policy,
4315 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
4316 {
4317 	const struct mlx5_flow_driver_ops *fops;
4318 	enum mlx5_flow_drv_type type = flow->drv_type;
4319 
4320 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
4321 	fops = flow_get_drv_ops(type);
4322 	return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
4323 }
4324 
4325 /**
4326  * Flow driver color tag rule API. This abstracts calling driver
4327  * specific functions. Parent flow (rte_flow) should have driver
4328  * type (drv_type). It will create the color tag rules in hierarchy meter.
4329  *
4330  * @param[in] dev
4331  *   Pointer to Ethernet device.
4332  * @param[in, out] flow
4333  *   Pointer to flow structure.
4334  * @param[in] fm
4335  *   Pointer to flow meter structure.
4336  * @param[in] src_port
4337  *   The src port this extra rule should use.
4338  * @param[in] item
4339  *   The src port id match item.
4340  * @param[out] error
4341  *   Pointer to error structure.
4342  */
4343 static int
4344 flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev,
4345 		struct rte_flow *flow,
4346 		struct mlx5_flow_meter_info *fm,
4347 		int32_t src_port,
4348 		const struct rte_flow_item *item,
4349 		struct rte_flow_error *error)
4350 {
4351 	const struct mlx5_flow_driver_ops *fops;
4352 	enum mlx5_flow_drv_type type = flow->drv_type;
4353 
4354 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
4355 	fops = flow_get_drv_ops(type);
4356 	return fops->meter_hierarchy_rule_create(dev, fm,
4357 						src_port, item, error);
4358 }
4359 
4360 /**
4361  * Get RSS action from the action list.
4362  *
4363  * @param[in] dev
4364  *   Pointer to Ethernet device.
4365  * @param[in] actions
4366  *   Pointer to the list of actions.
4367  * @param[in] flow
4368  *   Parent flow structure pointer.
4369  *
4370  * @return
4371  *   Pointer to the RSS action if exist, else return NULL.
4372  */
4373 static const struct rte_flow_action_rss*
4374 flow_get_rss_action(struct rte_eth_dev *dev,
4375 		    const struct rte_flow_action actions[])
4376 {
4377 	struct mlx5_priv *priv = dev->data->dev_private;
4378 	const struct rte_flow_action_rss *rss = NULL;
4379 	struct mlx5_meter_policy_action_container *acg;
4380 	struct mlx5_meter_policy_action_container *acy;
4381 
4382 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4383 		switch (actions->type) {
4384 		case RTE_FLOW_ACTION_TYPE_RSS:
4385 			rss = actions->conf;
4386 			break;
4387 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
4388 		{
4389 			const struct rte_flow_action_sample *sample =
4390 								actions->conf;
4391 			const struct rte_flow_action *act = sample->actions;
4392 			for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
4393 				if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
4394 					rss = act->conf;
4395 			break;
4396 		}
4397 		case RTE_FLOW_ACTION_TYPE_METER:
4398 		{
4399 			uint32_t mtr_idx;
4400 			struct mlx5_flow_meter_info *fm;
4401 			struct mlx5_flow_meter_policy *policy;
4402 			const struct rte_flow_action_meter *mtr = actions->conf;
4403 
4404 			fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx);
4405 			if (fm && !fm->def_policy) {
4406 				policy = mlx5_flow_meter_policy_find(dev,
4407 						fm->policy_id, NULL);
4408 				MLX5_ASSERT(policy);
4409 				if (policy->is_hierarchy) {
4410 					policy =
4411 				mlx5_flow_meter_hierarchy_get_final_policy(dev,
4412 									policy);
4413 					if (!policy)
4414 						return NULL;
4415 				}
4416 				if (policy->is_rss) {
4417 					acg =
4418 					&policy->act_cnt[RTE_COLOR_GREEN];
4419 					acy =
4420 					&policy->act_cnt[RTE_COLOR_YELLOW];
4421 					if (acg->fate_action ==
4422 					    MLX5_FLOW_FATE_SHARED_RSS)
4423 						rss = acg->rss->conf;
4424 					else if (acy->fate_action ==
4425 						 MLX5_FLOW_FATE_SHARED_RSS)
4426 						rss = acy->rss->conf;
4427 				}
4428 			}
4429 			break;
4430 		}
4431 		default:
4432 			break;
4433 		}
4434 	}
4435 	return rss;
4436 }
4437 
4438 /**
4439  * Get ASO age action by index.
4440  *
4441  * @param[in] dev
4442  *   Pointer to the Ethernet device structure.
4443  * @param[in] age_idx
4444  *   Index to the ASO age action.
4445  *
4446  * @return
4447  *   The specified ASO age action.
4448  */
4449 struct mlx5_aso_age_action*
4450 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
4451 {
4452 	uint16_t pool_idx = age_idx & UINT16_MAX;
4453 	uint16_t offset = (age_idx >> 16) & UINT16_MAX;
4454 	struct mlx5_priv *priv = dev->data->dev_private;
4455 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
4456 	struct mlx5_aso_age_pool *pool;
4457 
4458 	rte_rwlock_read_lock(&mng->resize_rwl);
4459 	pool = mng->pools[pool_idx];
4460 	rte_rwlock_read_unlock(&mng->resize_rwl);
4461 	return &pool->actions[offset - 1];
4462 }
4463 
4464 /* maps indirect action to translated direct in some actions array */
4465 struct mlx5_translated_action_handle {
4466 	struct rte_flow_action_handle *action; /**< Indirect action handle. */
4467 	int index; /**< Index in related array of rte_flow_action. */
4468 };
4469 
4470 /**
4471  * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to related
4472  * direct action if translation possible.
4473  * This functionality used to run same execution path for both direct and
4474  * indirect actions on flow create. All necessary preparations for indirect
4475  * action handling should be performed on *handle* actions list returned
4476  * from this call.
4477  *
4478  * @param[in] dev
4479  *   Pointer to Ethernet device.
4480  * @param[in] actions
4481  *   List of actions to translate.
4482  * @param[out] handle
4483  *   List to store translated indirect action object handles.
4484  * @param[in, out] indir_n
4485  *   Size of *handle* array. On return should be updated with number of
4486  *   indirect actions retrieved from the *actions* list.
4487  * @param[out] translated_actions
4488  *   List of actions where all indirect actions were translated to direct
4489  *   if possible. NULL if no translation took place.
4490  * @param[out] error
4491  *   Pointer to the error structure.
4492  *
4493  * @return
4494  *   0 on success, a negative errno value otherwise and rte_errno is set.
4495  */
4496 static int
4497 flow_action_handles_translate(struct rte_eth_dev *dev,
4498 			      const struct rte_flow_action actions[],
4499 			      struct mlx5_translated_action_handle *handle,
4500 			      int *indir_n,
4501 			      struct rte_flow_action **translated_actions,
4502 			      struct rte_flow_error *error)
4503 {
4504 	struct mlx5_priv *priv = dev->data->dev_private;
4505 	struct rte_flow_action *translated = NULL;
4506 	size_t actions_size;
4507 	int n;
4508 	int copied_n = 0;
4509 	struct mlx5_translated_action_handle *handle_end = NULL;
4510 
4511 	for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
4512 		if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT)
4513 			continue;
4514 		if (copied_n == *indir_n) {
4515 			return rte_flow_error_set
4516 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
4517 				 NULL, "too many shared actions");
4518 		}
4519 		rte_memcpy(&handle[copied_n].action, &actions[n].conf,
4520 			   sizeof(actions[n].conf));
4521 		handle[copied_n].index = n;
4522 		copied_n++;
4523 	}
4524 	n++;
4525 	*indir_n = copied_n;
4526 	if (!copied_n)
4527 		return 0;
4528 	actions_size = sizeof(struct rte_flow_action) * n;
4529 	translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
4530 	if (!translated) {
4531 		rte_errno = ENOMEM;
4532 		return -ENOMEM;
4533 	}
4534 	memcpy(translated, actions, actions_size);
4535 	for (handle_end = handle + copied_n; handle < handle_end; handle++) {
4536 		struct mlx5_shared_action_rss *shared_rss;
4537 		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
4538 		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
4539 		uint32_t idx = act_idx &
4540 			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
4541 
4542 		switch (type) {
4543 		case MLX5_INDIRECT_ACTION_TYPE_RSS:
4544 			shared_rss = mlx5_ipool_get
4545 			  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
4546 			translated[handle->index].type =
4547 				RTE_FLOW_ACTION_TYPE_RSS;
4548 			translated[handle->index].conf =
4549 				&shared_rss->origin;
4550 			break;
4551 		case MLX5_INDIRECT_ACTION_TYPE_COUNT:
4552 			translated[handle->index].type =
4553 						(enum rte_flow_action_type)
4554 						MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
4555 			translated[handle->index].conf = (void *)(uintptr_t)idx;
4556 			break;
4557 		case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
4558 			translated[handle->index].type =
4559 						(enum rte_flow_action_type)
4560 						MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK;
4561 			translated[handle->index].conf = (void *)(uintptr_t)idx;
4562 			break;
4563 		case MLX5_INDIRECT_ACTION_TYPE_AGE:
4564 			if (priv->sh->flow_hit_aso_en) {
4565 				translated[handle->index].type =
4566 					(enum rte_flow_action_type)
4567 					MLX5_RTE_FLOW_ACTION_TYPE_AGE;
4568 				translated[handle->index].conf =
4569 							 (void *)(uintptr_t)idx;
4570 				break;
4571 			}
4572 			/* Fall-through */
4573 		case MLX5_INDIRECT_ACTION_TYPE_CT:
4574 			if (priv->sh->ct_aso_en) {
4575 				translated[handle->index].type =
4576 					RTE_FLOW_ACTION_TYPE_CONNTRACK;
4577 				translated[handle->index].conf =
4578 							 (void *)(uintptr_t)idx;
4579 				break;
4580 			}
4581 			/* Fall-through */
4582 		default:
4583 			mlx5_free(translated);
4584 			return rte_flow_error_set
4585 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
4586 				 NULL, "invalid indirect action type");
4587 		}
4588 	}
4589 	*translated_actions = translated;
4590 	return 0;
4591 }
4592 
4593 /**
4594  * Get Shared RSS action from the action list.
4595  *
4596  * @param[in] dev
4597  *   Pointer to Ethernet device.
4598  * @param[in] shared
4599  *   Pointer to the list of actions.
4600  * @param[in] shared_n
4601  *   Actions list length.
4602  *
4603  * @return
4604  *   The MLX5 RSS action ID if exists, otherwise return 0.
4605  */
4606 static uint32_t
4607 flow_get_shared_rss_action(struct rte_eth_dev *dev,
4608 			   struct mlx5_translated_action_handle *handle,
4609 			   int shared_n)
4610 {
4611 	struct mlx5_translated_action_handle *handle_end;
4612 	struct mlx5_priv *priv = dev->data->dev_private;
4613 	struct mlx5_shared_action_rss *shared_rss;
4614 
4615 
4616 	for (handle_end = handle + shared_n; handle < handle_end; handle++) {
4617 		uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
4618 		uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
4619 		uint32_t idx = act_idx &
4620 			       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
4621 		switch (type) {
4622 		case MLX5_INDIRECT_ACTION_TYPE_RSS:
4623 			shared_rss = mlx5_ipool_get
4624 				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
4625 									   idx);
4626 			rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
4627 					   rte_memory_order_relaxed);
4628 			return idx;
4629 		default:
4630 			break;
4631 		}
4632 	}
4633 	return 0;
4634 }
4635 
4636 static unsigned int
4637 find_graph_root(uint32_t rss_level)
4638 {
4639 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
4640 			       MLX5_EXPANSION_ROOT_OUTER;
4641 }
4642 
4643 /**
4644  *  Get layer flags from the prefix flow.
4645  *
4646  *  Some flows may be split to several subflows, the prefix subflow gets the
4647  *  match items and the suffix sub flow gets the actions.
4648  *  Some actions need the user defined match item flags to get the detail for
4649  *  the action.
4650  *  This function helps the suffix flow to get the item layer flags from prefix
4651  *  subflow.
4652  *
4653  * @param[in] dev_flow
4654  *   Pointer the created prefix subflow.
4655  *
4656  * @return
4657  *   The layers get from prefix subflow.
4658  */
4659 static inline uint64_t
4660 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
4661 {
4662 	uint64_t layers = 0;
4663 
4664 	/*
4665 	 * Layers bits could be localization, but usually the compiler will
4666 	 * help to do the optimization work for source code.
4667 	 * If no decap actions, use the layers directly.
4668 	 */
4669 	if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
4670 		return dev_flow->handle->layers;
4671 	/* Convert L3 layers with decap action. */
4672 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
4673 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4674 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
4675 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4676 	/* Convert L4 layers with decap action.  */
4677 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
4678 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
4679 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
4680 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
4681 	return layers;
4682 }
4683 
4684 /**
4685  * Get metadata split action information.
4686  *
4687  * @param[in] actions
4688  *   Pointer to the list of actions.
4689  * @param[out] qrss
4690  *   Pointer to the return pointer.
4691  * @param[out] qrss_type
4692  *   Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned
4693  *   if no QUEUE/RSS is found.
4694  * @param[out] encap_idx
4695  *   Pointer to the index of the encap action if exists, otherwise the last
4696  *   action index.
4697  *
4698  * @return
4699  *   Total number of actions.
4700  */
4701 static int
4702 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
4703 				       const struct rte_flow_action **qrss,
4704 				       int *encap_idx)
4705 {
4706 	const struct rte_flow_action_raw_encap *raw_encap;
4707 	int actions_n = 0;
4708 	int raw_decap_idx = -1;
4709 
4710 	*encap_idx = -1;
4711 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4712 		switch (actions->type) {
4713 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4714 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4715 			*encap_idx = actions_n;
4716 			break;
4717 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4718 			raw_decap_idx = actions_n;
4719 			break;
4720 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4721 			raw_encap = actions->conf;
4722 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4723 				*encap_idx = raw_decap_idx != -1 ?
4724 						      raw_decap_idx : actions_n;
4725 			break;
4726 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4727 		case RTE_FLOW_ACTION_TYPE_RSS:
4728 			*qrss = actions;
4729 			break;
4730 		default:
4731 			break;
4732 		}
4733 		actions_n++;
4734 	}
4735 	if (*encap_idx == -1)
4736 		*encap_idx = actions_n;
4737 	/* Count RTE_FLOW_ACTION_TYPE_END. */
4738 	return actions_n + 1;
4739 }
4740 
4741 /**
4742  * Check if the action will change packet.
4743  *
4744  * @param dev
4745  *   Pointer to Ethernet device.
4746  * @param[in] type
4747  *   action type.
4748  *
4749  * @return
4750  *   true if action will change packet, false otherwise.
4751  */
4752 static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
4753 					  enum rte_flow_action_type type)
4754 {
4755 	struct mlx5_priv *priv = dev->data->dev_private;
4756 
4757 	switch (type) {
4758 	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4759 	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4760 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4761 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4762 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4763 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4764 	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4765 	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4766 	case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4767 	case RTE_FLOW_ACTION_TYPE_SET_TTL:
4768 	case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4769 	case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4770 	case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4771 	case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4772 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
4773 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
4774 	case RTE_FLOW_ACTION_TYPE_SET_META:
4775 	case RTE_FLOW_ACTION_TYPE_SET_TAG:
4776 	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4777 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4778 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4779 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4780 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4781 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4782 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4783 	case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4784 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4785 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4786 	case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
4787 		return true;
4788 	case RTE_FLOW_ACTION_TYPE_FLAG:
4789 	case RTE_FLOW_ACTION_TYPE_MARK:
4790 		if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4791 		    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS)
4792 			return true;
4793 		else
4794 			return false;
4795 	default:
4796 		return false;
4797 	}
4798 }
4799 
4800 /**
4801  * Check meter action from the action list.
4802  *
4803  * @param dev
4804  *   Pointer to Ethernet device.
4805  * @param[in] actions
4806  *   Pointer to the list of actions.
4807  * @param[out] has_mtr
4808  *   Pointer to the meter exist flag.
4809  * @param[out] has_modify
4810  *   Pointer to the flag showing there's packet change action.
4811  * @param[out] meter_id
4812  *   Pointer to the meter id.
4813  *
4814  * @return
4815  *   Total number of actions.
4816  */
4817 static int
4818 flow_check_meter_action(struct rte_eth_dev *dev,
4819 			const struct rte_flow_action actions[],
4820 			bool *has_mtr, bool *has_modify, uint32_t *meter_id)
4821 {
4822 	const struct rte_flow_action_meter *mtr = NULL;
4823 	int actions_n = 0;
4824 
4825 	MLX5_ASSERT(has_mtr);
4826 	*has_mtr = false;
4827 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4828 		switch (actions->type) {
4829 		case RTE_FLOW_ACTION_TYPE_METER:
4830 			mtr = actions->conf;
4831 			*meter_id = mtr->mtr_id;
4832 			*has_mtr = true;
4833 			break;
4834 		default:
4835 			break;
4836 		}
4837 		if (!*has_mtr)
4838 			*has_modify |= flow_check_modify_action_type(dev,
4839 								actions->type);
4840 		actions_n++;
4841 	}
4842 	/* Count RTE_FLOW_ACTION_TYPE_END. */
4843 	return actions_n + 1;
4844 }
4845 
4846 /**
4847  * Check if the flow should be split due to hairpin.
4848  * The reason for the split is that in current HW we can't
4849  * support encap and push-vlan on Rx, so if a flow contains
4850  * these actions we move it to Tx.
4851  *
4852  * @param dev
4853  *   Pointer to Ethernet device.
4854  * @param[in] attr
4855  *   Flow rule attributes.
4856  * @param[in] actions
4857  *   Associated actions (list terminated by the END action).
4858  *
4859  * @return
4860  *   > 0 the number of actions and the flow should be split,
4861  *   0 when no split required.
4862  */
4863 static int
4864 flow_check_hairpin_split(struct rte_eth_dev *dev,
4865 			 const struct rte_flow_attr *attr,
4866 			 const struct rte_flow_action actions[])
4867 {
4868 	int queue_action = 0;
4869 	int action_n = 0;
4870 	int split = 0;
4871 	int push_vlan = 0;
4872 	const struct rte_flow_action_queue *queue;
4873 	const struct rte_flow_action_rss *rss;
4874 	const struct rte_flow_action_raw_encap *raw_encap;
4875 	const struct rte_eth_hairpin_conf *conf;
4876 
4877 	if (!attr->ingress)
4878 		return 0;
4879 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4880 		if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
4881 			push_vlan = 1;
4882 		switch (actions->type) {
4883 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4884 			queue = actions->conf;
4885 			if (queue == NULL)
4886 				return 0;
4887 			conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
4888 			if (conf == NULL || conf->tx_explicit != 0)
4889 				return 0;
4890 			queue_action = 1;
4891 			action_n++;
4892 			break;
4893 		case RTE_FLOW_ACTION_TYPE_RSS:
4894 			rss = actions->conf;
4895 			if (rss == NULL || rss->queue_num == 0)
4896 				return 0;
4897 			conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
4898 			if (conf == NULL || conf->tx_explicit != 0)
4899 				return 0;
4900 			queue_action = 1;
4901 			action_n++;
4902 			break;
4903 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4904 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4905 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4906 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4907 			split++;
4908 			action_n++;
4909 			break;
4910 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4911 			if (push_vlan)
4912 				split++;
4913 			action_n++;
4914 			break;
4915 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4916 			raw_encap = actions->conf;
4917 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4918 				split++;
4919 			action_n++;
4920 			break;
4921 		default:
4922 			action_n++;
4923 			break;
4924 		}
4925 	}
4926 	if (split && queue_action)
4927 		return action_n;
4928 	return 0;
4929 }
4930 
4931 /* Declare flow create/destroy prototype in advance. */
4932 static uint32_t
4933 flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
4934 		 const struct rte_flow_attr *attr,
4935 		 const struct rte_flow_item items[],
4936 		 const struct rte_flow_action actions[],
4937 		 bool external, struct rte_flow_error *error);
4938 
4939 static void
4940 flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
4941 		  uint32_t flow_idx);
4942 
4943 int
4944 flow_dv_mreg_match_cb(void *tool_ctx __rte_unused,
4945 		      struct mlx5_list_entry *entry, void *cb_ctx)
4946 {
4947 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4948 	struct mlx5_flow_mreg_copy_resource *mcp_res =
4949 			       container_of(entry, typeof(*mcp_res), hlist_ent);
4950 
4951 	return mcp_res->mark_id != *(uint32_t *)(ctx->data);
4952 }
4953 
4954 struct mlx5_list_entry *
4955 flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
4956 {
4957 	struct rte_eth_dev *dev = tool_ctx;
4958 	struct mlx5_priv *priv = dev->data->dev_private;
4959 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4960 	struct mlx5_flow_mreg_copy_resource *mcp_res;
4961 	struct rte_flow_error *error = ctx->error;
4962 	uint32_t idx = 0;
4963 	int ret;
4964 	uint32_t mark_id = *(uint32_t *)(ctx->data);
4965 	struct rte_flow_attr attr = {
4966 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4967 		.ingress = 1,
4968 	};
4969 	struct mlx5_rte_flow_item_tag tag_spec = {
4970 		.data = mark_id,
4971 	};
4972 	struct rte_flow_item items[] = {
4973 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
4974 	};
4975 	struct rte_flow_action_mark ftag = {
4976 		.id = mark_id,
4977 	};
4978 	struct mlx5_flow_action_copy_mreg cp_mreg = {
4979 		.dst = REG_B,
4980 		.src = REG_NON,
4981 	};
4982 	struct rte_flow_action_jump jump = {
4983 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
4984 	};
4985 	struct rte_flow_action actions[] = {
4986 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
4987 	};
4988 
4989 	/* Fill the register fields in the flow. */
4990 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
4991 	if (ret < 0)
4992 		return NULL;
4993 	tag_spec.id = ret;
4994 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4995 	if (ret < 0)
4996 		return NULL;
4997 	cp_mreg.src = ret;
4998 	/* Provide the full width of FLAG specific value. */
4999 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
5000 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
5001 	/* Build a new flow. */
5002 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
5003 		items[0] = (struct rte_flow_item){
5004 			.type = (enum rte_flow_item_type)
5005 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5006 			.spec = &tag_spec,
5007 		};
5008 		items[1] = (struct rte_flow_item){
5009 			.type = RTE_FLOW_ITEM_TYPE_END,
5010 		};
5011 		actions[0] = (struct rte_flow_action){
5012 			.type = (enum rte_flow_action_type)
5013 				MLX5_RTE_FLOW_ACTION_TYPE_MARK,
5014 			.conf = &ftag,
5015 		};
5016 		actions[1] = (struct rte_flow_action){
5017 			.type = (enum rte_flow_action_type)
5018 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5019 			.conf = &cp_mreg,
5020 		};
5021 		actions[2] = (struct rte_flow_action){
5022 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5023 			.conf = &jump,
5024 		};
5025 		actions[3] = (struct rte_flow_action){
5026 			.type = RTE_FLOW_ACTION_TYPE_END,
5027 		};
5028 	} else {
5029 		/* Default rule, wildcard match. */
5030 		attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
5031 		items[0] = (struct rte_flow_item){
5032 			.type = RTE_FLOW_ITEM_TYPE_END,
5033 		};
5034 		actions[0] = (struct rte_flow_action){
5035 			.type = (enum rte_flow_action_type)
5036 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5037 			.conf = &cp_mreg,
5038 		};
5039 		actions[1] = (struct rte_flow_action){
5040 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5041 			.conf = &jump,
5042 		};
5043 		actions[2] = (struct rte_flow_action){
5044 			.type = RTE_FLOW_ACTION_TYPE_END,
5045 		};
5046 	}
5047 	/* Build a new entry. */
5048 	mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
5049 	if (!mcp_res) {
5050 		rte_errno = ENOMEM;
5051 		return NULL;
5052 	}
5053 	mcp_res->idx = idx;
5054 	mcp_res->mark_id = mark_id;
5055 	/*
5056 	 * The copy Flows are not included in any list. There
5057 	 * ones are referenced from other Flows and can not
5058 	 * be applied, removed, deleted in arbitrary order
5059 	 * by list traversing.
5060 	 */
5061 	mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
5062 					&attr, items, actions, false, error);
5063 	if (!mcp_res->rix_flow) {
5064 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
5065 		return NULL;
5066 	}
5067 	return &mcp_res->hlist_ent;
5068 }
5069 
5070 struct mlx5_list_entry *
5071 flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5072 		      void *cb_ctx __rte_unused)
5073 {
5074 	struct rte_eth_dev *dev = tool_ctx;
5075 	struct mlx5_priv *priv = dev->data->dev_private;
5076 	struct mlx5_flow_mreg_copy_resource *mcp_res;
5077 	uint32_t idx = 0;
5078 
5079 	mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
5080 	if (!mcp_res) {
5081 		rte_errno = ENOMEM;
5082 		return NULL;
5083 	}
5084 	memcpy(mcp_res, oentry, sizeof(*mcp_res));
5085 	mcp_res->idx = idx;
5086 	return &mcp_res->hlist_ent;
5087 }
5088 
5089 void
5090 flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5091 {
5092 	struct mlx5_flow_mreg_copy_resource *mcp_res =
5093 			       container_of(entry, typeof(*mcp_res), hlist_ent);
5094 	struct rte_eth_dev *dev = tool_ctx;
5095 	struct mlx5_priv *priv = dev->data->dev_private;
5096 
5097 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
5098 }
5099 
5100 /**
5101  * Add a flow of copying flow metadata registers in RX_CP_TBL.
5102  *
5103  * As mark_id is unique, if there's already a registered flow for the mark_id,
5104  * return by increasing the reference counter of the resource. Otherwise, create
5105  * the resource (mcp_res) and flow.
5106  *
5107  * Flow looks like,
5108  *   - If ingress port is ANY and reg_c[1] is mark_id,
5109  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
5110  *
5111  * For default flow (zero mark_id), flow is like,
5112  *   - If ingress port is ANY,
5113  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
5114  *
5115  * @param dev
5116  *   Pointer to Ethernet device.
5117  * @param mark_id
5118  *   ID of MARK action, zero means default flow for META.
5119  * @param[out] error
5120  *   Perform verbose error reporting if not NULL.
5121  *
5122  * @return
5123  *   Associated resource on success, NULL otherwise and rte_errno is set.
5124  */
5125 static struct mlx5_flow_mreg_copy_resource *
5126 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
5127 			  struct rte_flow_error *error)
5128 {
5129 	struct mlx5_priv *priv = dev->data->dev_private;
5130 	struct mlx5_list_entry *entry;
5131 	struct mlx5_flow_cb_ctx ctx = {
5132 		.dev = dev,
5133 		.error = error,
5134 		.data = &mark_id,
5135 	};
5136 
5137 	/* Check if already registered. */
5138 	MLX5_ASSERT(priv->mreg_cp_tbl);
5139 	entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
5140 	if (!entry)
5141 		return NULL;
5142 	return container_of(entry, struct mlx5_flow_mreg_copy_resource,
5143 			    hlist_ent);
5144 }
5145 
5146 void
5147 flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5148 {
5149 	struct mlx5_flow_mreg_copy_resource *mcp_res =
5150 			       container_of(entry, typeof(*mcp_res), hlist_ent);
5151 	struct rte_eth_dev *dev = tool_ctx;
5152 	struct mlx5_priv *priv = dev->data->dev_private;
5153 
5154 	MLX5_ASSERT(mcp_res->rix_flow);
5155 	flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
5156 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
5157 }
5158 
5159 /**
5160  * Release flow in RX_CP_TBL.
5161  *
5162  * @param dev
5163  *   Pointer to Ethernet device.
5164  * @flow
5165  *   Parent flow for wich copying is provided.
5166  */
5167 static void
5168 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
5169 			  struct rte_flow *flow)
5170 {
5171 	struct mlx5_flow_mreg_copy_resource *mcp_res;
5172 	struct mlx5_priv *priv = dev->data->dev_private;
5173 
5174 	if (!flow->rix_mreg_copy)
5175 		return;
5176 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
5177 				 flow->rix_mreg_copy);
5178 	if (!mcp_res || !priv->mreg_cp_tbl)
5179 		return;
5180 	MLX5_ASSERT(mcp_res->rix_flow);
5181 	mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
5182 	flow->rix_mreg_copy = 0;
5183 }
5184 
5185 /**
5186  * Remove the default copy action from RX_CP_TBL.
5187  *
5188  * This functions is called in the mlx5_dev_start(). No thread safe
5189  * is guaranteed.
5190  *
5191  * @param dev
5192  *   Pointer to Ethernet device.
5193  */
5194 static void
5195 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
5196 {
5197 	struct mlx5_list_entry *entry;
5198 	struct mlx5_priv *priv = dev->data->dev_private;
5199 	struct mlx5_flow_cb_ctx ctx;
5200 	uint32_t mark_id;
5201 
5202 	/* Check if default flow is registered. */
5203 	if (!priv->mreg_cp_tbl)
5204 		return;
5205 	mark_id = MLX5_DEFAULT_COPY_ID;
5206 	ctx.data = &mark_id;
5207 	entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
5208 	if (!entry)
5209 		return;
5210 	mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
5211 }
5212 
5213 /**
5214  * Add the default copy action in RX_CP_TBL.
5215  *
5216  * This functions is called in the mlx5_dev_start(). No thread safe
5217  * is guaranteed.
5218  *
5219  * @param dev
5220  *   Pointer to Ethernet device.
5221  * @param[out] error
5222  *   Perform verbose error reporting if not NULL.
5223  *
5224  * @return
5225  *   0 for success, negative value otherwise and rte_errno is set.
5226  */
5227 static int
5228 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
5229 				  struct rte_flow_error *error)
5230 {
5231 	struct mlx5_priv *priv = dev->data->dev_private;
5232 	struct mlx5_flow_mreg_copy_resource *mcp_res;
5233 	struct mlx5_flow_cb_ctx ctx;
5234 	uint32_t mark_id;
5235 
5236 	/* Check whether extensive metadata feature is engaged. */
5237 	if (!priv->sh->config.dv_flow_en ||
5238 	    priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
5239 	    !mlx5_flow_ext_mreg_supported(dev) ||
5240 	    !priv->sh->dv_regc0_mask)
5241 		return 0;
5242 	/*
5243 	 * Add default mreg copy flow may be called multiple time, but
5244 	 * only be called once in stop. Avoid register it twice.
5245 	 */
5246 	mark_id = MLX5_DEFAULT_COPY_ID;
5247 	ctx.data = &mark_id;
5248 	if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
5249 		return 0;
5250 	mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
5251 	if (!mcp_res)
5252 		return -rte_errno;
5253 	return 0;
5254 }
5255 
5256 /**
5257  * Add a flow of copying flow metadata registers in RX_CP_TBL.
5258  *
5259  * All the flow having Q/RSS action should be split by
5260  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
5261  * performs the following,
5262  *   - CQE->flow_tag := reg_c[1] (MARK)
5263  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
5264  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
5265  * but there should be a flow per each MARK ID set by MARK action.
5266  *
5267  * For the aforementioned reason, if there's a MARK action in flow's action
5268  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
5269  * the MARK ID to CQE's flow_tag like,
5270  *   - If reg_c[1] is mark_id,
5271  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
5272  *
5273  * For SET_META action which stores value in reg_c[0], as the destination is
5274  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
5275  * MARK ID means the default flow. The default flow looks like,
5276  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
5277  *
5278  * @param dev
5279  *   Pointer to Ethernet device.
5280  * @param flow
5281  *   Pointer to flow structure.
5282  * @param[in] actions
5283  *   Pointer to the list of actions.
5284  * @param[out] error
5285  *   Perform verbose error reporting if not NULL.
5286  *
5287  * @return
5288  *   0 on success, negative value otherwise and rte_errno is set.
5289  */
5290 static int
5291 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
5292 			    struct rte_flow *flow,
5293 			    const struct rte_flow_action *actions,
5294 			    struct rte_flow_error *error)
5295 {
5296 	struct mlx5_priv *priv = dev->data->dev_private;
5297 	struct mlx5_sh_config *config = &priv->sh->config;
5298 	struct mlx5_flow_mreg_copy_resource *mcp_res;
5299 	const struct rte_flow_action_mark *mark;
5300 
5301 	/* Check whether extensive metadata feature is engaged. */
5302 	if (!config->dv_flow_en ||
5303 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
5304 	    !mlx5_flow_ext_mreg_supported(dev) ||
5305 	    !priv->sh->dv_regc0_mask)
5306 		return 0;
5307 	/* Find MARK action. */
5308 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5309 		switch (actions->type) {
5310 		case RTE_FLOW_ACTION_TYPE_FLAG:
5311 			mcp_res = flow_mreg_add_copy_action
5312 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
5313 			if (!mcp_res)
5314 				return -rte_errno;
5315 			flow->rix_mreg_copy = mcp_res->idx;
5316 			return 0;
5317 		case RTE_FLOW_ACTION_TYPE_MARK:
5318 			mark = (const struct rte_flow_action_mark *)
5319 				actions->conf;
5320 			mcp_res =
5321 				flow_mreg_add_copy_action(dev, mark->id, error);
5322 			if (!mcp_res)
5323 				return -rte_errno;
5324 			flow->rix_mreg_copy = mcp_res->idx;
5325 			return 0;
5326 		default:
5327 			break;
5328 		}
5329 	}
5330 	return 0;
5331 }
5332 
5333 #define MLX5_MAX_SPLIT_ACTIONS 24
5334 #define MLX5_MAX_SPLIT_ITEMS 24
5335 
5336 /**
5337  * Split the hairpin flow.
5338  * Since HW can't support encap and push-vlan on Rx, we move these
5339  * actions to Tx.
5340  * If the count action is after the encap then we also
5341  * move the count action. in this case the count will also measure
5342  * the outer bytes.
5343  *
5344  * @param dev
5345  *   Pointer to Ethernet device.
5346  * @param[in] actions
5347  *   Associated actions (list terminated by the END action).
5348  * @param[out] actions_rx
5349  *   Rx flow actions.
5350  * @param[out] actions_tx
5351  *   Tx flow actions..
5352  * @param[out] pattern_tx
5353  *   The pattern items for the Tx flow.
5354  * @param[out] flow_id
5355  *   The flow ID connected to this flow.
5356  *
5357  * @return
5358  *   0 on success.
5359  */
5360 static int
5361 flow_hairpin_split(struct rte_eth_dev *dev,
5362 		   const struct rte_flow_action actions[],
5363 		   struct rte_flow_action actions_rx[],
5364 		   struct rte_flow_action actions_tx[],
5365 		   struct rte_flow_item pattern_tx[],
5366 		   uint32_t flow_id)
5367 {
5368 	const struct rte_flow_action_raw_encap *raw_encap;
5369 	const struct rte_flow_action_raw_decap *raw_decap;
5370 	struct mlx5_rte_flow_action_set_tag *set_tag;
5371 	struct rte_flow_action *tag_action;
5372 	struct mlx5_rte_flow_item_tag *tag_item;
5373 	struct rte_flow_item *item;
5374 	char *addr;
5375 	int push_vlan = 0;
5376 	int encap = 0;
5377 
5378 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5379 		if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
5380 			push_vlan = 1;
5381 		switch (actions->type) {
5382 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5383 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5384 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5385 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5386 			rte_memcpy(actions_tx, actions,
5387 			       sizeof(struct rte_flow_action));
5388 			actions_tx++;
5389 			break;
5390 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5391 			if (push_vlan) {
5392 				rte_memcpy(actions_tx, actions,
5393 					   sizeof(struct rte_flow_action));
5394 				actions_tx++;
5395 			} else {
5396 				rte_memcpy(actions_rx, actions,
5397 					   sizeof(struct rte_flow_action));
5398 				actions_rx++;
5399 			}
5400 			break;
5401 		case RTE_FLOW_ACTION_TYPE_COUNT:
5402 		case RTE_FLOW_ACTION_TYPE_AGE:
5403 			if (encap) {
5404 				rte_memcpy(actions_tx, actions,
5405 					   sizeof(struct rte_flow_action));
5406 				actions_tx++;
5407 			} else {
5408 				rte_memcpy(actions_rx, actions,
5409 					   sizeof(struct rte_flow_action));
5410 				actions_rx++;
5411 			}
5412 			break;
5413 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5414 			raw_encap = actions->conf;
5415 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
5416 				memcpy(actions_tx, actions,
5417 				       sizeof(struct rte_flow_action));
5418 				actions_tx++;
5419 				encap = 1;
5420 			} else {
5421 				rte_memcpy(actions_rx, actions,
5422 					   sizeof(struct rte_flow_action));
5423 				actions_rx++;
5424 			}
5425 			break;
5426 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5427 			raw_decap = actions->conf;
5428 			if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
5429 				memcpy(actions_tx, actions,
5430 				       sizeof(struct rte_flow_action));
5431 				actions_tx++;
5432 			} else {
5433 				rte_memcpy(actions_rx, actions,
5434 					   sizeof(struct rte_flow_action));
5435 				actions_rx++;
5436 			}
5437 			break;
5438 		default:
5439 			rte_memcpy(actions_rx, actions,
5440 				   sizeof(struct rte_flow_action));
5441 			actions_rx++;
5442 			break;
5443 		}
5444 	}
5445 	/* Add set meta action and end action for the Rx flow. */
5446 	tag_action = actions_rx;
5447 	tag_action->type = (enum rte_flow_action_type)
5448 			   MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5449 	actions_rx++;
5450 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
5451 	actions_rx++;
5452 	set_tag = (void *)actions_rx;
5453 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
5454 		.id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL),
5455 		.data = flow_id,
5456 	};
5457 	MLX5_ASSERT(set_tag->id > REG_NON);
5458 	tag_action->conf = set_tag;
5459 	/* Create Tx item list. */
5460 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
5461 	addr = (void *)&pattern_tx[2];
5462 	item = pattern_tx;
5463 	item->type = (enum rte_flow_item_type)
5464 		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
5465 	tag_item = (void *)addr;
5466 	tag_item->data = flow_id;
5467 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
5468 	MLX5_ASSERT(set_tag->id > REG_NON);
5469 	item->spec = tag_item;
5470 	addr += sizeof(struct mlx5_rte_flow_item_tag);
5471 	tag_item = (void *)addr;
5472 	tag_item->data = UINT32_MAX;
5473 	tag_item->id = UINT16_MAX;
5474 	item->mask = tag_item;
5475 	item->last = NULL;
5476 	item++;
5477 	item->type = RTE_FLOW_ITEM_TYPE_END;
5478 	return 0;
5479 }
5480 
5481 /**
5482  * The last stage of splitting chain, just creates the subflow
5483  * without any modification.
5484  *
5485  * @param[in] dev
5486  *   Pointer to Ethernet device.
5487  * @param[in] flow
5488  *   Parent flow structure pointer.
5489  * @param[in, out] sub_flow
5490  *   Pointer to return the created subflow, may be NULL.
5491  * @param[in] attr
5492  *   Flow rule attributes.
5493  * @param[in] items
5494  *   Pattern specification (list terminated by the END pattern item).
5495  * @param[in] actions
5496  *   Associated actions (list terminated by the END action).
5497  * @param[in] flow_split_info
5498  *   Pointer to flow split info structure.
5499  * @param[out] error
5500  *   Perform verbose error reporting if not NULL.
5501  * @return
5502  *   0 on success, negative value otherwise
5503  */
5504 static int
5505 flow_create_split_inner(struct rte_eth_dev *dev,
5506 			struct rte_flow *flow,
5507 			struct mlx5_flow **sub_flow,
5508 			const struct rte_flow_attr *attr,
5509 			const struct rte_flow_item items[],
5510 			const struct rte_flow_action actions[],
5511 			struct mlx5_flow_split_info *flow_split_info,
5512 			struct rte_flow_error *error)
5513 {
5514 	struct mlx5_flow *dev_flow;
5515 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5516 
5517 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
5518 				    flow_split_info->flow_idx, error);
5519 	if (!dev_flow)
5520 		return -rte_errno;
5521 	dev_flow->flow = flow;
5522 	dev_flow->external = flow_split_info->external;
5523 	dev_flow->skip_scale = flow_split_info->skip_scale;
5524 	/* Subflow object was created, we must include one in the list. */
5525 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5526 		      dev_flow->handle, next);
5527 	/*
5528 	 * If dev_flow is as one of the suffix flow, some actions in suffix
5529 	 * flow may need some user defined item layer flags, and pass the
5530 	 * Metadata rxq mark flag to suffix flow as well.
5531 	 */
5532 	if (flow_split_info->prefix_layers)
5533 		dev_flow->handle->layers = flow_split_info->prefix_layers;
5534 	if (flow_split_info->prefix_mark) {
5535 		MLX5_ASSERT(wks);
5536 		wks->mark = 1;
5537 	}
5538 	if (sub_flow)
5539 		*sub_flow = dev_flow;
5540 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5541 	dev_flow->dv.table_id = flow_split_info->table_id;
5542 #endif
5543 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
5544 }
5545 
5546 /**
5547  * Get the sub policy of a meter.
5548  *
5549  * @param[in] dev
5550  *   Pointer to Ethernet device.
5551  * @param[in] flow
5552  *   Parent flow structure pointer.
5553  * @param wks
5554  *   Pointer to thread flow work space.
5555  * @param[in] attr
5556  *   Flow rule attributes.
5557  * @param[in] items
5558  *   Pattern specification (list terminated by the END pattern item).
5559  * @param[out] error
5560  *   Perform verbose error reporting if not NULL.
5561  *
5562  * @return
5563  *   Pointer to the meter sub policy, NULL otherwise and rte_errno is set.
5564  */
5565 static struct mlx5_flow_meter_sub_policy *
5566 get_meter_sub_policy(struct rte_eth_dev *dev,
5567 		     struct rte_flow *flow,
5568 		     struct mlx5_flow_workspace *wks,
5569 		     const struct rte_flow_attr *attr,
5570 		     const struct rte_flow_item items[],
5571 		     struct rte_flow_error *error)
5572 {
5573 	struct mlx5_flow_meter_policy *policy;
5574 	struct mlx5_flow_meter_policy *final_policy;
5575 	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
5576 
5577 	policy = wks->policy;
5578 	final_policy = policy->is_hierarchy ? wks->final_policy : policy;
5579 	if (final_policy->is_rss || final_policy->is_queue) {
5580 		struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
5581 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
5582 		uint32_t i;
5583 
5584 		/*
5585 		 * This is a tmp dev_flow,
5586 		 * no need to register any matcher for it in translate.
5587 		 */
5588 		wks->skip_matcher_reg = 1;
5589 		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
5590 			struct mlx5_flow dev_flow = {0};
5591 			struct mlx5_flow_handle dev_handle = { {0} };
5592 			uint8_t fate = final_policy->act_cnt[i].fate_action;
5593 
5594 			if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
5595 				const struct rte_flow_action_rss *rss_act =
5596 					final_policy->act_cnt[i].rss->conf;
5597 				struct rte_flow_action rss_actions[2] = {
5598 					[0] = {
5599 					.type = RTE_FLOW_ACTION_TYPE_RSS,
5600 					.conf = rss_act,
5601 					},
5602 					[1] = {
5603 					.type = RTE_FLOW_ACTION_TYPE_END,
5604 					.conf = NULL,
5605 					}
5606 				};
5607 
5608 				dev_flow.handle = &dev_handle;
5609 				dev_flow.ingress = attr->ingress;
5610 				dev_flow.flow = flow;
5611 				dev_flow.external = 0;
5612 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5613 				dev_flow.dv.transfer = attr->transfer;
5614 #endif
5615 				/**
5616 				 * Translate RSS action to get rss hash fields.
5617 				 */
5618 				if (flow_drv_translate(dev, &dev_flow, attr,
5619 						items, rss_actions, error))
5620 					goto exit;
5621 				rss_desc_v[i] = wks->rss_desc;
5622 				rss_desc_v[i].symmetric_hash_function =
5623 						dev_flow.symmetric_hash_function;
5624 				rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
5625 				rss_desc_v[i].hash_fields =
5626 						dev_flow.hash_fields;
5627 				rss_desc_v[i].queue_num =
5628 						rss_desc_v[i].hash_fields ?
5629 						rss_desc_v[i].queue_num : 1;
5630 				rss_desc_v[i].tunnel =
5631 						!!(dev_flow.handle->layers &
5632 						   MLX5_FLOW_LAYER_TUNNEL);
5633 				/* Use the RSS queues in the containers. */
5634 				rss_desc_v[i].queue =
5635 					(uint16_t *)(uintptr_t)rss_act->queue;
5636 				rss_desc[i] = &rss_desc_v[i];
5637 			} else if (fate == MLX5_FLOW_FATE_QUEUE) {
5638 				/* This is queue action. */
5639 				rss_desc_v[i] = wks->rss_desc;
5640 				rss_desc_v[i].key_len = 0;
5641 				rss_desc_v[i].hash_fields = 0;
5642 				rss_desc_v[i].queue =
5643 					&final_policy->act_cnt[i].queue;
5644 				rss_desc_v[i].queue_num = 1;
5645 				rss_desc[i] = &rss_desc_v[i];
5646 			} else {
5647 				rss_desc[i] = NULL;
5648 			}
5649 		}
5650 		sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
5651 						flow, policy, rss_desc);
5652 	} else {
5653 		enum mlx5_meter_domain mtr_domain =
5654 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
5655 				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
5656 						MLX5_MTR_DOMAIN_INGRESS);
5657 		sub_policy = policy->sub_policys[mtr_domain][0];
5658 	}
5659 	if (!sub_policy)
5660 		rte_flow_error_set(error, EINVAL,
5661 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5662 				   "Failed to get meter sub-policy.");
5663 exit:
5664 	return sub_policy;
5665 }
5666 
5667 /**
5668  * Split the meter flow.
5669  *
5670  * As meter flow will split to three sub flow, other than meter
5671  * action, the other actions make sense to only meter accepts
5672  * the packet. If it need to be dropped, no other additional
5673  * actions should be take.
5674  *
5675  * One kind of special action which decapsulates the L3 tunnel
5676  * header will be in the prefix sub flow, as not to take the
5677  * L3 tunnel header into account.
5678  *
5679  * @param[in] dev
5680  *   Pointer to Ethernet device.
5681  * @param[in] flow
5682  *   Parent flow structure pointer.
5683  * @param wks
5684  *   Pointer to thread flow work space.
5685  * @param[in] attr
5686  *   Flow rule attributes.
5687  * @param[in] items
5688  *   Pattern specification (list terminated by the END pattern item).
5689  * @param[out] sfx_items
5690  *   Suffix flow match items (list terminated by the END pattern item).
5691  * @param[in] actions
5692  *   Associated actions (list terminated by the END action).
5693  * @param[out] actions_sfx
5694  *   Suffix flow actions.
5695  * @param[out] actions_pre
5696  *   Prefix flow actions.
5697  * @param[out] mtr_flow_id
5698  *   Pointer to meter flow id.
5699  * @param[out] error
5700  *   Perform verbose error reporting if not NULL.
5701  *
5702  * @return
5703  *   0 on success, a negative errno value otherwise and rte_errno is set.
5704  */
5705 static int
5706 flow_meter_split_prep(struct rte_eth_dev *dev,
5707 		      struct rte_flow *flow,
5708 		      struct mlx5_flow_workspace *wks,
5709 		      const struct rte_flow_attr *attr,
5710 		      const struct rte_flow_item items[],
5711 		      struct rte_flow_item sfx_items[],
5712 		      const struct rte_flow_action actions[],
5713 		      struct rte_flow_action actions_sfx[],
5714 		      struct rte_flow_action actions_pre[],
5715 		      uint32_t *mtr_flow_id,
5716 		      struct rte_flow_error *error)
5717 {
5718 	struct mlx5_priv *priv = dev->data->dev_private;
5719 	struct mlx5_flow_meter_info *fm = wks->fm;
5720 	struct rte_flow_action *tag_action = NULL;
5721 	struct rte_flow_item *tag_item;
5722 	struct mlx5_rte_flow_action_set_tag *set_tag;
5723 	const struct rte_flow_action_raw_encap *raw_encap;
5724 	const struct rte_flow_action_raw_decap *raw_decap;
5725 	struct mlx5_rte_flow_item_tag *tag_item_spec;
5726 	struct mlx5_rte_flow_item_tag *tag_item_mask;
5727 	uint32_t tag_id = 0;
5728 	bool vlan_actions;
5729 	struct rte_flow_item *orig_sfx_items = sfx_items;
5730 	const struct rte_flow_item *orig_items = items;
5731 	struct rte_flow_action *hw_mtr_action;
5732 	struct rte_flow_action *action_pre_head = NULL;
5733 	uint16_t flow_src_port = priv->representor_id;
5734 	bool mtr_first;
5735 	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
5736 	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
5737 				MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
5738 	uint32_t flow_id = 0;
5739 	uint32_t flow_id_reversed = 0;
5740 	uint8_t flow_id_bits = 0;
5741 	bool after_meter = false;
5742 	int shift;
5743 
5744 	/* Prepare the suffix subflow items. */
5745 	tag_item = sfx_items++;
5746 	tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG;
5747 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5748 		int item_type = items->type;
5749 
5750 		switch (item_type) {
5751 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
5752 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
5753 		case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
5754 			if (mlx5_flow_get_item_vport_id(dev, items, &flow_src_port, NULL, error))
5755 				return -rte_errno;
5756 			if (!fm->def_policy && wks->policy->hierarchy_match_port &&
5757 			    flow_src_port != priv->representor_id) {
5758 				if (flow_drv_mtr_hierarchy_rule_create(dev,
5759 								flow, fm,
5760 								flow_src_port,
5761 								items,
5762 								error))
5763 					return -rte_errno;
5764 			}
5765 			memcpy(sfx_items, items, sizeof(*sfx_items));
5766 			sfx_items++;
5767 			break;
5768 		case RTE_FLOW_ITEM_TYPE_VLAN:
5769 			/*
5770 			 * Copy VLAN items in case VLAN actions are performed.
5771 			 * If there are no VLAN actions, these items will be VOID.
5772 			 */
5773 			memcpy(sfx_items, items, sizeof(*sfx_items));
5774 			sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
5775 			sfx_items++;
5776 			break;
5777 		default:
5778 			break;
5779 		}
5780 	}
5781 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
5782 	sfx_items++;
5783 	mtr_first = priv->sh->meter_aso_en &&
5784 		(attr->egress || (attr->transfer && flow_src_port != UINT16_MAX));
5785 	/* For ASO meter, meter must be before tag in TX direction. */
5786 	if (mtr_first) {
5787 		action_pre_head = actions_pre++;
5788 		/* Leave space for tag action. */
5789 		tag_action = actions_pre++;
5790 	}
5791 	/* Prepare the actions for prefix and suffix flow. */
5792 	vlan_actions = false;
5793 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5794 		struct rte_flow_action *action_cur = NULL;
5795 
5796 		switch (actions->type) {
5797 		case RTE_FLOW_ACTION_TYPE_METER:
5798 			if (mtr_first) {
5799 				action_cur = action_pre_head;
5800 			} else {
5801 				/* Leave space for tag action. */
5802 				tag_action = actions_pre++;
5803 				action_cur = actions_pre++;
5804 			}
5805 			after_meter = true;
5806 			break;
5807 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5808 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5809 			action_cur = actions_pre++;
5810 			break;
5811 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5812 			raw_encap = actions->conf;
5813 			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
5814 				action_cur = actions_pre++;
5815 			break;
5816 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5817 			raw_decap = actions->conf;
5818 			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
5819 				action_cur = actions_pre++;
5820 			break;
5821 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5822 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5823 			vlan_actions = true;
5824 			break;
5825 		case RTE_FLOW_ACTION_TYPE_COUNT:
5826 			if (fm->def_policy)
5827 				action_cur = after_meter ?
5828 						actions_sfx++ : actions_pre++;
5829 			break;
5830 		default:
5831 			break;
5832 		}
5833 		if (!action_cur)
5834 			action_cur = (fm->def_policy) ?
5835 					actions_sfx++ : actions_pre++;
5836 		memcpy(action_cur, actions, sizeof(struct rte_flow_action));
5837 	}
5838 	/* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */
5839 	if (!vlan_actions) {
5840 		struct rte_flow_item *it = orig_sfx_items;
5841 
5842 		for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++)
5843 			if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
5844 				it->type = RTE_FLOW_ITEM_TYPE_VOID;
5845 	}
5846 	/* Add end action to the actions. */
5847 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
5848 	if (priv->sh->meter_aso_en) {
5849 		/**
5850 		 * For ASO meter, need to add an extra jump action explicitly,
5851 		 * to jump from meter to policer table.
5852 		 */
5853 		struct mlx5_flow_meter_sub_policy *sub_policy;
5854 		struct mlx5_flow_tbl_data_entry *tbl_data;
5855 
5856 		if (!fm->def_policy) {
5857 			sub_policy = get_meter_sub_policy(dev, flow, wks,
5858 							  attr, orig_items,
5859 							  error);
5860 			if (!sub_policy)
5861 				return -rte_errno;
5862 		} else {
5863 			enum mlx5_meter_domain mtr_domain =
5864 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
5865 				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
5866 						MLX5_MTR_DOMAIN_INGRESS);
5867 
5868 			sub_policy =
5869 			&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
5870 		}
5871 		tbl_data = container_of(sub_policy->tbl_rsc,
5872 					struct mlx5_flow_tbl_data_entry, tbl);
5873 		hw_mtr_action = actions_pre++;
5874 		hw_mtr_action->type = (enum rte_flow_action_type)
5875 				      MLX5_RTE_FLOW_ACTION_TYPE_JUMP;
5876 		hw_mtr_action->conf = tbl_data->jump.action;
5877 	}
5878 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
5879 	actions_pre++;
5880 	if (!tag_action)
5881 		return rte_flow_error_set(error, ENOMEM,
5882 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5883 					  NULL, "No tag action space.");
5884 	if (!mtr_flow_id) {
5885 		tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
5886 		goto exit;
5887 	}
5888 	/* Only default-policy Meter creates mtr flow id. */
5889 	if (fm->def_policy) {
5890 		mlx5_ipool_malloc(fm->flow_ipool, &tag_id);
5891 		if (!tag_id)
5892 			return rte_flow_error_set(error, ENOMEM,
5893 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5894 					"Failed to allocate meter flow id.");
5895 		flow_id = tag_id - 1;
5896 		flow_id_bits = (!flow_id) ? 1 :
5897 				(MLX5_REG_BITS - rte_clz32(flow_id));
5898 		if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) >
5899 		    mtr_reg_bits) {
5900 			mlx5_ipool_free(fm->flow_ipool, tag_id);
5901 			return rte_flow_error_set(error, EINVAL,
5902 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5903 					"Meter flow id exceeds max limit.");
5904 		}
5905 		if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits)
5906 			priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits;
5907 	}
5908 	/* Build tag actions and items for meter_id/meter flow_id. */
5909 	set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
5910 	tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
5911 	tag_item_mask = tag_item_spec + 1;
5912 	/* Both flow_id and meter_id share the same register. */
5913 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
5914 		.id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
5915 							    0, error),
5916 		.offset = mtr_id_offset,
5917 		.length = mtr_reg_bits,
5918 		.data = flow->meter,
5919 	};
5920 	/*
5921 	 * The color Reg bits used by flow_id are growing from
5922 	 * msb to lsb, so must do bit reverse for flow_id val in RegC.
5923 	 */
5924 	for (shift = 0; shift < flow_id_bits; shift++)
5925 		flow_id_reversed = (flow_id_reversed << 1) |
5926 				((flow_id >> shift) & 0x1);
5927 	set_tag->data |=
5928 		flow_id_reversed << (mtr_reg_bits - flow_id_bits);
5929 	tag_item_spec->id = set_tag->id;
5930 	tag_item_spec->data = set_tag->data << mtr_id_offset;
5931 	tag_item_mask->data = UINT32_MAX << mtr_id_offset;
5932 	tag_action->type = (enum rte_flow_action_type)
5933 				MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5934 	tag_action->conf = set_tag;
5935 	tag_item->spec = tag_item_spec;
5936 	tag_item->last = NULL;
5937 	tag_item->mask = tag_item_mask;
5938 exit:
5939 	if (mtr_flow_id)
5940 		*mtr_flow_id = tag_id;
5941 	return 0;
5942 }
5943 
5944 /**
5945  * Split action list having QUEUE/RSS for metadata register copy.
5946  *
5947  * Once Q/RSS action is detected in user's action list, the flow action
5948  * should be split in order to copy metadata registers, which will happen in
5949  * RX_CP_TBL like,
5950  *   - CQE->flow_tag := reg_c[1] (MARK)
5951  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
5952  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
5953  * This is because the last action of each flow must be a terminal action
5954  * (QUEUE, RSS or DROP).
5955  *
5956  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
5957  * stored and kept in the mlx5_flow structure per each sub_flow.
5958  *
5959  * The Q/RSS action is replaced with,
5960  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
5961  * And the following JUMP action is added at the end,
5962  *   - JUMP, to RX_CP_TBL.
5963  *
5964  * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by
5965  * flow_create_split_metadata() routine. The flow will look like,
5966  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
5967  *
5968  * @param dev
5969  *   Pointer to Ethernet device.
5970  * @param[out] split_actions
5971  *   Pointer to store split actions to jump to CP_TBL.
5972  * @param[in] actions
5973  *   Pointer to the list of original flow actions.
5974  * @param[in] qrss
5975  *   Pointer to the Q/RSS action.
5976  * @param[in] actions_n
5977  *   Number of original actions.
5978  * @param[in] mtr_sfx
5979  *   Check if it is in meter suffix table.
5980  * @param[out] error
5981  *   Perform verbose error reporting if not NULL.
5982  *
5983  * @return
5984  *   non-zero unique flow_id on success, otherwise 0 and
5985  *   error/rte_error are set.
5986  */
5987 static uint32_t
5988 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
5989 			  struct rte_flow_action *split_actions,
5990 			  const struct rte_flow_action *actions,
5991 			  const struct rte_flow_action *qrss,
5992 			  int actions_n, int mtr_sfx,
5993 			  struct rte_flow_error *error)
5994 {
5995 	struct mlx5_priv *priv = dev->data->dev_private;
5996 	struct mlx5_rte_flow_action_set_tag *set_tag;
5997 	struct rte_flow_action_jump *jump;
5998 	const int qrss_idx = qrss - actions;
5999 	uint32_t flow_id = 0;
6000 	int ret = 0;
6001 
6002 	/*
6003 	 * Given actions will be split
6004 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
6005 	 * - Add jump to mreg CP_TBL.
6006 	 * As a result, there will be one more action.
6007 	 */
6008 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
6009 	/* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
6010 	++actions_n;
6011 	set_tag = (void *)(split_actions + actions_n);
6012 	/*
6013 	 * If we are not the meter suffix flow, add the tag action.
6014 	 * Since meter suffix flow already has the tag added.
6015 	 */
6016 	if (!mtr_sfx) {
6017 		/*
6018 		 * Allocate the new subflow ID. This one is unique within
6019 		 * device and not shared with representors. Otherwise,
6020 		 * we would have to resolve multi-thread access synch
6021 		 * issue. Each flow on the shared device is appended
6022 		 * with source vport identifier, so the resulting
6023 		 * flows will be unique in the shared (by master and
6024 		 * representors) domain even if they have coinciding
6025 		 * IDs.
6026 		 */
6027 		mlx5_ipool_malloc(priv->sh->ipool
6028 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
6029 		if (!flow_id)
6030 			return rte_flow_error_set(error, ENOMEM,
6031 						  RTE_FLOW_ERROR_TYPE_ACTION,
6032 						  NULL, "can't allocate id "
6033 						  "for split Q/RSS subflow");
6034 		/* Internal SET_TAG action to set flow ID. */
6035 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
6036 			.data = flow_id,
6037 		};
6038 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
6039 		if (ret < 0)
6040 			return ret;
6041 		set_tag->id = ret;
6042 		/* Construct new actions array. */
6043 		/* Replace QUEUE/RSS action. */
6044 		split_actions[qrss_idx] = (struct rte_flow_action){
6045 			.type = (enum rte_flow_action_type)
6046 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
6047 			.conf = set_tag,
6048 		};
6049 	} else {
6050 		/*
6051 		 * If we are the suffix flow of meter, tag already exist.
6052 		 * Set the QUEUE/RSS action to void.
6053 		 */
6054 		split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
6055 	}
6056 	/* JUMP action to jump to mreg copy table (CP_TBL). */
6057 	jump = (void *)(set_tag + 1);
6058 	*jump = (struct rte_flow_action_jump){
6059 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
6060 	};
6061 	split_actions[actions_n - 2] = (struct rte_flow_action){
6062 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
6063 		.conf = jump,
6064 	};
6065 	split_actions[actions_n - 1] = (struct rte_flow_action){
6066 		.type = RTE_FLOW_ACTION_TYPE_END,
6067 	};
6068 	return flow_id;
6069 }
6070 
6071 /**
6072  * Extend the given action list for Tx metadata copy.
6073  *
6074  * Copy the given action list to the ext_actions and add flow metadata register
6075  * copy action in order to copy reg_a set by WQE to reg_c[0].
6076  *
6077  * @param[out] ext_actions
6078  *   Pointer to the extended action list.
6079  * @param[in] actions
6080  *   Pointer to the list of actions.
6081  * @param[in] actions_n
6082  *   Number of actions in the list.
6083  * @param[out] error
6084  *   Perform verbose error reporting if not NULL.
6085  * @param[in] encap_idx
6086  *   The encap action index.
6087  *
6088  * @return
6089  *   0 on success, negative value otherwise
6090  */
6091 static int
6092 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
6093 		       struct rte_flow_action *ext_actions,
6094 		       const struct rte_flow_action *actions,
6095 		       int actions_n, struct rte_flow_error *error,
6096 		       int encap_idx)
6097 {
6098 	struct mlx5_flow_action_copy_mreg *cp_mreg =
6099 		(struct mlx5_flow_action_copy_mreg *)
6100 			(ext_actions + actions_n + 1);
6101 	int ret;
6102 
6103 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
6104 	if (ret < 0)
6105 		return ret;
6106 	cp_mreg->dst = ret;
6107 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
6108 	if (ret < 0)
6109 		return ret;
6110 	cp_mreg->src = ret;
6111 	if (encap_idx != 0)
6112 		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
6113 	if (encap_idx == actions_n - 1) {
6114 		ext_actions[actions_n - 1] = (struct rte_flow_action){
6115 			.type = (enum rte_flow_action_type)
6116 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
6117 			.conf = cp_mreg,
6118 		};
6119 		ext_actions[actions_n] = (struct rte_flow_action){
6120 			.type = RTE_FLOW_ACTION_TYPE_END,
6121 		};
6122 	} else {
6123 		ext_actions[encap_idx] = (struct rte_flow_action){
6124 			.type = (enum rte_flow_action_type)
6125 				MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
6126 			.conf = cp_mreg,
6127 		};
6128 		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
6129 				sizeof(*ext_actions) * (actions_n - encap_idx));
6130 	}
6131 	return 0;
6132 }
6133 
6134 /**
6135  * Check the match action from the action list.
6136  *
6137  * @param[in] actions
6138  *   Pointer to the list of actions.
6139  * @param[in] attr
6140  *   Flow rule attributes.
6141  * @param[in] action
6142  *   The action to be check if exist.
6143  * @param[out] match_action_pos
6144  *   Pointer to the position of the matched action if exists, otherwise is -1.
6145  * @param[out] qrss_action_pos
6146  *   Pointer to the position of the Queue/RSS action if exists, otherwise is -1.
6147  * @param[out] modify_after_mirror
6148  *   Pointer to the flag of modify action after FDB mirroring.
6149  *
6150  * @return
6151  *   > 0 the total number of actions.
6152  *   0 if not found match action in action list.
6153  */
6154 static int
6155 flow_check_match_action(const struct rte_flow_action actions[],
6156 			const struct rte_flow_attr *attr,
6157 			enum rte_flow_action_type action,
6158 			int *match_action_pos, int *qrss_action_pos,
6159 			int *modify_after_mirror)
6160 {
6161 	const struct rte_flow_action_sample *sample;
6162 	const struct rte_flow_action_raw_decap *decap;
6163 	const struct rte_flow_action *action_cur = NULL;
6164 	int actions_n = 0;
6165 	uint32_t ratio = 0;
6166 	int sub_type = 0;
6167 	int flag = 0;
6168 	int fdb_mirror = 0;
6169 
6170 	*match_action_pos = -1;
6171 	*qrss_action_pos = -1;
6172 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6173 		if (actions->type == action) {
6174 			flag = 1;
6175 			*match_action_pos = actions_n;
6176 		}
6177 		switch (actions->type) {
6178 		case RTE_FLOW_ACTION_TYPE_QUEUE:
6179 		case RTE_FLOW_ACTION_TYPE_RSS:
6180 			*qrss_action_pos = actions_n;
6181 			break;
6182 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
6183 			sample = actions->conf;
6184 			ratio = sample->ratio;
6185 			sub_type = ((const struct rte_flow_action *)
6186 					(sample->actions))->type;
6187 			if (ratio == 1 && attr->transfer &&
6188 			    sub_type != RTE_FLOW_ACTION_TYPE_END)
6189 				fdb_mirror = 1;
6190 			break;
6191 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6192 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6193 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6194 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6195 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6196 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6197 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6198 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6199 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6200 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
6201 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6202 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6203 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6204 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6205 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6206 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6207 		case RTE_FLOW_ACTION_TYPE_FLAG:
6208 		case RTE_FLOW_ACTION_TYPE_MARK:
6209 		case RTE_FLOW_ACTION_TYPE_SET_META:
6210 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
6211 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6212 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6213 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6214 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6215 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6216 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6217 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6218 		case RTE_FLOW_ACTION_TYPE_METER:
6219 			if (fdb_mirror)
6220 				*modify_after_mirror = 1;
6221 			break;
6222 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6223 			decap = actions->conf;
6224 			action_cur = actions;
6225 			while ((++action_cur)->type == RTE_FLOW_ACTION_TYPE_VOID)
6226 				;
6227 			if (action_cur->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6228 				const struct rte_flow_action_raw_encap *encap =
6229 								action_cur->conf;
6230 				if (decap->size <=
6231 					MLX5_ENCAPSULATION_DECISION_SIZE &&
6232 				    encap->size >
6233 					MLX5_ENCAPSULATION_DECISION_SIZE)
6234 					/* L3 encap. */
6235 					break;
6236 			}
6237 			if (fdb_mirror)
6238 				*modify_after_mirror = 1;
6239 			break;
6240 		default:
6241 			break;
6242 		}
6243 		actions_n++;
6244 	}
6245 	if (flag && fdb_mirror && !*modify_after_mirror) {
6246 		/* FDB mirroring uses the destination array to implement
6247 		 * instead of FLOW_SAMPLER object.
6248 		 */
6249 		if (sub_type != RTE_FLOW_ACTION_TYPE_END)
6250 			flag = 0;
6251 	}
6252 	/* Count RTE_FLOW_ACTION_TYPE_END. */
6253 	return flag ? actions_n + 1 : 0;
6254 }
6255 
6256 #define SAMPLE_SUFFIX_ITEM 3
6257 
6258 /**
6259  * Split the sample flow.
6260  *
6261  * As sample flow will split to two sub flow, sample flow with
6262  * sample action, the other actions will move to new suffix flow.
6263  *
6264  * Also add unique tag id with tag action in the sample flow,
6265  * the same tag id will be as match in the suffix flow.
6266  *
6267  * @param dev
6268  *   Pointer to Ethernet device.
6269  * @param[in] add_tag
6270  *   Add extra tag action flag.
6271  * @param[out] sfx_items
6272  *   Suffix flow match items (list terminated by the END pattern item).
6273  * @param[in] actions
6274  *   Associated actions (list terminated by the END action).
6275  * @param[out] actions_sfx
6276  *   Suffix flow actions.
6277  * @param[out] actions_pre
6278  *   Prefix flow actions.
6279  * @param[in] actions_n
6280  *  The total number of actions.
6281  * @param[in] sample_action_pos
6282  *   The sample action position.
6283  * @param[in] qrss_action_pos
6284  *   The Queue/RSS action position.
6285  * @param[in] jump_table
6286  *   Add extra jump action flag.
6287  * @param[out] error
6288  *   Perform verbose error reporting if not NULL.
6289  *
6290  * @return
6291  *   0 on success, or unique flow_id, a negative errno value
6292  *   otherwise and rte_errno is set.
6293  */
6294 static int
6295 flow_sample_split_prep(struct rte_eth_dev *dev,
6296 		       int add_tag,
6297 		       const struct rte_flow_item items[],
6298 		       struct rte_flow_item sfx_items[],
6299 		       const struct rte_flow_action actions[],
6300 		       struct rte_flow_action actions_sfx[],
6301 		       struct rte_flow_action actions_pre[],
6302 		       int actions_n,
6303 		       int sample_action_pos,
6304 		       int qrss_action_pos,
6305 		       int jump_table,
6306 		       struct rte_flow_error *error)
6307 {
6308 	struct mlx5_priv *priv = dev->data->dev_private;
6309 	struct mlx5_rte_flow_action_set_tag *set_tag;
6310 	struct mlx5_rte_flow_item_tag *tag_spec;
6311 	struct mlx5_rte_flow_item_tag *tag_mask;
6312 	struct rte_flow_action_jump *jump_action;
6313 	uint32_t tag_id = 0;
6314 	int append_index = 0;
6315 	int set_tag_idx = -1;
6316 	int index;
6317 	int ret;
6318 
6319 	if (sample_action_pos < 0)
6320 		return rte_flow_error_set(error, EINVAL,
6321 					  RTE_FLOW_ERROR_TYPE_ACTION,
6322 					  NULL, "invalid position of sample "
6323 					  "action in list");
6324 	/* Prepare the actions for prefix and suffix flow. */
6325 	if (add_tag) {
6326 		/* Update the new added tag action index preceding
6327 		 * the PUSH_VLAN or ENCAP action.
6328 		 */
6329 		const struct rte_flow_action_raw_encap *raw_encap;
6330 		const struct rte_flow_action *action = actions;
6331 		int encap_idx;
6332 		int action_idx = 0;
6333 		int raw_decap_idx = -1;
6334 		int push_vlan_idx = -1;
6335 		for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
6336 			switch (action->type) {
6337 			case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6338 				raw_decap_idx = action_idx;
6339 				break;
6340 			case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6341 				raw_encap = action->conf;
6342 				if (raw_encap->size >
6343 					MLX5_ENCAPSULATION_DECISION_SIZE) {
6344 					encap_idx = raw_decap_idx != -1 ?
6345 						    raw_decap_idx : action_idx;
6346 					if (encap_idx < sample_action_pos &&
6347 					    push_vlan_idx == -1)
6348 						set_tag_idx = encap_idx;
6349 				}
6350 				break;
6351 			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6352 			case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6353 				encap_idx = action_idx;
6354 				if (encap_idx < sample_action_pos &&
6355 				    push_vlan_idx == -1)
6356 					set_tag_idx = encap_idx;
6357 				break;
6358 			case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6359 			case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6360 				if (action_idx < sample_action_pos &&
6361 				    push_vlan_idx == -1) {
6362 					set_tag_idx = action_idx;
6363 					push_vlan_idx = action_idx;
6364 				}
6365 				break;
6366 			default:
6367 				break;
6368 			}
6369 			action_idx++;
6370 		}
6371 	}
6372 	/* Prepare the actions for prefix and suffix flow. */
6373 	if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
6374 		index = qrss_action_pos;
6375 		/* Put the preceding the Queue/RSS action into prefix flow. */
6376 		if (index != 0)
6377 			memcpy(actions_pre, actions,
6378 			       sizeof(struct rte_flow_action) * index);
6379 		/* Put others preceding the sample action into prefix flow. */
6380 		if (sample_action_pos > index + 1)
6381 			memcpy(actions_pre + index, actions + index + 1,
6382 			       sizeof(struct rte_flow_action) *
6383 			       (sample_action_pos - index - 1));
6384 		index = sample_action_pos - 1;
6385 		/* Put Queue/RSS action into Suffix flow. */
6386 		memcpy(actions_sfx, actions + qrss_action_pos,
6387 		       sizeof(struct rte_flow_action));
6388 		actions_sfx++;
6389 	} else if (add_tag && set_tag_idx >= 0) {
6390 		if (set_tag_idx > 0)
6391 			memcpy(actions_pre, actions,
6392 			       sizeof(struct rte_flow_action) * set_tag_idx);
6393 		memcpy(actions_pre + set_tag_idx + 1, actions + set_tag_idx,
6394 		       sizeof(struct rte_flow_action) *
6395 		       (sample_action_pos - set_tag_idx));
6396 		index = sample_action_pos;
6397 	} else {
6398 		index = sample_action_pos;
6399 		if (index != 0)
6400 			memcpy(actions_pre, actions,
6401 			       sizeof(struct rte_flow_action) * index);
6402 	}
6403 	/* For CX5, add an extra tag action for NIC-RX and E-Switch ingress.
6404 	 * For CX6DX and above, metadata registers Cx preserve their value,
6405 	 * add an extra tag action for NIC-RX and E-Switch Domain.
6406 	 */
6407 	if (add_tag) {
6408 		/* Prepare the prefix tag action. */
6409 		append_index++;
6410 		set_tag = (void *)(actions_pre + actions_n + append_index);
6411 		/* Trust VF/SF on CX5 not supported meter so that the reserved
6412 		 * metadata regC is REG_NON, back to use application tag
6413 		 * index 0.
6414 		 */
6415 		if (unlikely(priv->sh->registers.aso_reg == REG_NON))
6416 			ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
6417 		else
6418 			ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
6419 		if (ret < 0)
6420 			return ret;
6421 		mlx5_ipool_malloc(priv->sh->ipool
6422 				  [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
6423 		*set_tag = (struct mlx5_rte_flow_action_set_tag) {
6424 			.id = ret,
6425 			.data = tag_id,
6426 		};
6427 		/* Prepare the suffix subflow items. */
6428 		tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
6429 		tag_spec->data = tag_id;
6430 		tag_spec->id = set_tag->id;
6431 		tag_mask = tag_spec + 1;
6432 		tag_mask->data = UINT32_MAX;
6433 		for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6434 			if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID ||
6435 			    items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR ||
6436 			    items->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) {
6437 				memcpy(sfx_items, items, sizeof(*sfx_items));
6438 				sfx_items++;
6439 				break;
6440 			}
6441 		}
6442 		sfx_items[0] = (struct rte_flow_item){
6443 			.type = (enum rte_flow_item_type)
6444 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
6445 			.spec = tag_spec,
6446 			.last = NULL,
6447 			.mask = tag_mask,
6448 		};
6449 		sfx_items[1] = (struct rte_flow_item){
6450 			.type = (enum rte_flow_item_type)
6451 				RTE_FLOW_ITEM_TYPE_END,
6452 		};
6453 		/* Prepare the tag action in prefix subflow. */
6454 		set_tag_idx = (set_tag_idx == -1) ? index : set_tag_idx;
6455 		actions_pre[set_tag_idx] =
6456 			(struct rte_flow_action){
6457 			.type = (enum rte_flow_action_type)
6458 				MLX5_RTE_FLOW_ACTION_TYPE_TAG,
6459 			.conf = set_tag,
6460 		};
6461 		/* Update next sample position due to add one tag action */
6462 		index += 1;
6463 	}
6464 	/* Copy the sample action into prefix flow. */
6465 	memcpy(actions_pre + index, actions + sample_action_pos,
6466 	       sizeof(struct rte_flow_action));
6467 	index += 1;
6468 	/* For the modify action after the sample action in E-Switch mirroring,
6469 	 * Add the extra jump action in prefix subflow and jump into the next
6470 	 * table, then do the modify action in the new table.
6471 	 */
6472 	if (jump_table) {
6473 		/* Prepare the prefix jump action. */
6474 		append_index++;
6475 		jump_action = (void *)(actions_pre + actions_n + append_index);
6476 		jump_action->group = jump_table;
6477 		actions_pre[index++] =
6478 			(struct rte_flow_action){
6479 			.type = (enum rte_flow_action_type)
6480 				RTE_FLOW_ACTION_TYPE_JUMP,
6481 			.conf = jump_action,
6482 		};
6483 	}
6484 	actions_pre[index] = (struct rte_flow_action){
6485 		.type = (enum rte_flow_action_type)
6486 			RTE_FLOW_ACTION_TYPE_END,
6487 	};
6488 	/* Put the actions after sample into Suffix flow. */
6489 	memcpy(actions_sfx, actions + sample_action_pos + 1,
6490 	       sizeof(struct rte_flow_action) *
6491 	       (actions_n - sample_action_pos - 1));
6492 	return tag_id;
6493 }
6494 
6495 /**
6496  * The splitting for metadata feature.
6497  *
6498  * - Q/RSS action on NIC Rx should be split in order to pass by
6499  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
6500  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
6501  *
6502  * - All the actions on NIC Tx should have a mreg copy action to
6503  *   copy reg_a from WQE to reg_c[0].
6504  *
6505  * @param dev
6506  *   Pointer to Ethernet device.
6507  * @param[in] flow
6508  *   Parent flow structure pointer.
6509  * @param[in] attr
6510  *   Flow rule attributes.
6511  * @param[in] items
6512  *   Pattern specification (list terminated by the END pattern item).
6513  * @param[in] actions
6514  *   Associated actions (list terminated by the END action).
6515  * @param[in] flow_split_info
6516  *   Pointer to flow split info structure.
6517  * @param[out] error
6518  *   Perform verbose error reporting if not NULL.
6519  * @return
6520  *   0 on success, negative value otherwise
6521  */
6522 static int
6523 flow_create_split_metadata(struct rte_eth_dev *dev,
6524 			   struct rte_flow *flow,
6525 			   const struct rte_flow_attr *attr,
6526 			   const struct rte_flow_item items[],
6527 			   const struct rte_flow_action actions[],
6528 			   struct mlx5_flow_split_info *flow_split_info,
6529 			   struct rte_flow_error *error)
6530 {
6531 	struct mlx5_priv *priv = dev->data->dev_private;
6532 	struct mlx5_sh_config *config = &priv->sh->config;
6533 	const struct rte_flow_action *qrss = NULL;
6534 	struct rte_flow_action *ext_actions = NULL;
6535 	struct mlx5_flow *dev_flow = NULL;
6536 	uint32_t qrss_id = 0;
6537 	int mtr_sfx = 0;
6538 	size_t act_size;
6539 	int actions_n;
6540 	int encap_idx;
6541 	int ret;
6542 
6543 	/* Check whether extensive metadata feature is engaged. */
6544 	if (!config->dv_flow_en ||
6545 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
6546 	    !mlx5_flow_ext_mreg_supported(dev))
6547 		return flow_create_split_inner(dev, flow, NULL, attr, items,
6548 					       actions, flow_split_info, error);
6549 	actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
6550 							   &encap_idx);
6551 	if (qrss) {
6552 		/* Exclude hairpin flows from splitting. */
6553 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
6554 			const struct rte_flow_action_queue *queue;
6555 
6556 			queue = qrss->conf;
6557 			if (mlx5_rxq_is_hairpin(dev, queue->index))
6558 				qrss = NULL;
6559 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
6560 			const struct rte_flow_action_rss *rss;
6561 
6562 			rss = qrss->conf;
6563 			if (mlx5_rxq_is_hairpin(dev, rss->queue[0]))
6564 				qrss = NULL;
6565 		}
6566 	}
6567 	if (qrss) {
6568 		/* Check if it is in meter suffix table. */
6569 		mtr_sfx = attr->group ==
6570 			  ((attr->transfer && priv->fdb_def_rule) ?
6571 			  (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
6572 			  MLX5_FLOW_TABLE_LEVEL_METER);
6573 		/*
6574 		 * Q/RSS action on NIC Rx should be split in order to pass by
6575 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
6576 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
6577 		 */
6578 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
6579 			   sizeof(struct rte_flow_action_set_tag) +
6580 			   sizeof(struct rte_flow_action_jump);
6581 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
6582 					  SOCKET_ID_ANY);
6583 		if (!ext_actions)
6584 			return rte_flow_error_set(error, ENOMEM,
6585 						  RTE_FLOW_ERROR_TYPE_ACTION,
6586 						  NULL, "no memory to split "
6587 						  "metadata flow");
6588 		/*
6589 		 * Create the new actions list with removed Q/RSS action
6590 		 * and appended set tag and jump to register copy table
6591 		 * (RX_CP_TBL). We should preallocate unique tag ID here
6592 		 * in advance, because it is needed for set tag action.
6593 		 */
6594 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
6595 						    qrss, actions_n,
6596 						    mtr_sfx, error);
6597 		if (!mtr_sfx && !qrss_id) {
6598 			ret = -rte_errno;
6599 			goto exit;
6600 		}
6601 	} else if (attr->egress) {
6602 		/*
6603 		 * All the actions on NIC Tx should have a metadata register
6604 		 * copy action to copy reg_a from WQE to reg_c[meta]
6605 		 */
6606 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
6607 			   sizeof(struct mlx5_flow_action_copy_mreg);
6608 		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
6609 					  SOCKET_ID_ANY);
6610 		if (!ext_actions)
6611 			return rte_flow_error_set(error, ENOMEM,
6612 						  RTE_FLOW_ERROR_TYPE_ACTION,
6613 						  NULL, "no memory to split "
6614 						  "metadata flow");
6615 		/* Create the action list appended with copy register. */
6616 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
6617 					     actions_n, error, encap_idx);
6618 		if (ret < 0)
6619 			goto exit;
6620 	}
6621 	/* Add the unmodified original or prefix subflow. */
6622 	ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
6623 				      items, ext_actions ? ext_actions :
6624 				      actions, flow_split_info, error);
6625 	if (ret < 0)
6626 		goto exit;
6627 	MLX5_ASSERT(dev_flow);
6628 	if (qrss) {
6629 		const struct rte_flow_attr q_attr = {
6630 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
6631 			.ingress = 1,
6632 		};
6633 		/* Internal PMD action to set register. */
6634 		struct mlx5_rte_flow_item_tag q_tag_spec = {
6635 			.data = qrss_id,
6636 			.id = REG_NON,
6637 		};
6638 		struct rte_flow_item q_items[] = {
6639 			{
6640 				.type = (enum rte_flow_item_type)
6641 					MLX5_RTE_FLOW_ITEM_TYPE_TAG,
6642 				.spec = &q_tag_spec,
6643 				.last = NULL,
6644 				.mask = NULL,
6645 			},
6646 			{
6647 				.type = RTE_FLOW_ITEM_TYPE_END,
6648 			},
6649 		};
6650 		struct rte_flow_action q_actions[] = {
6651 			{
6652 				.type = qrss->type,
6653 				.conf = qrss->conf,
6654 			},
6655 			{
6656 				.type = RTE_FLOW_ACTION_TYPE_END,
6657 			},
6658 		};
6659 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
6660 
6661 		/*
6662 		 * Configure the tag item only if there is no meter subflow.
6663 		 * Since tag is already marked in the meter suffix subflow
6664 		 * we can just use the meter suffix items as is.
6665 		 */
6666 		if (qrss_id) {
6667 			/* Not meter subflow. */
6668 			MLX5_ASSERT(!mtr_sfx);
6669 			/*
6670 			 * Put unique id in prefix flow due to it is destroyed
6671 			 * after suffix flow and id will be freed after there
6672 			 * is no actual flows with this id and identifier
6673 			 * reallocation becomes possible (for example, for
6674 			 * other flows in other threads).
6675 			 */
6676 			dev_flow->handle->split_flow_id = qrss_id;
6677 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
6678 						   error);
6679 			if (ret < 0)
6680 				goto exit;
6681 			q_tag_spec.id = ret;
6682 		}
6683 		dev_flow = NULL;
6684 		/* Add suffix subflow to execute Q/RSS. */
6685 		flow_split_info->prefix_layers = layers;
6686 		flow_split_info->prefix_mark = 0;
6687 		flow_split_info->table_id = 0;
6688 		ret = flow_create_split_inner(dev, flow, &dev_flow,
6689 					      &q_attr, mtr_sfx ? items :
6690 					      q_items, q_actions,
6691 					      flow_split_info, error);
6692 		if (ret < 0)
6693 			goto exit;
6694 		/* qrss ID should be freed if failed. */
6695 		qrss_id = 0;
6696 		MLX5_ASSERT(dev_flow);
6697 	}
6698 
6699 exit:
6700 	/*
6701 	 * We do not destroy the partially created sub_flows in case of error.
6702 	 * These ones are included into parent flow list and will be destroyed
6703 	 * by flow_drv_destroy.
6704 	 */
6705 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
6706 			qrss_id);
6707 	mlx5_free(ext_actions);
6708 	return ret;
6709 }
6710 
6711 /**
6712  * Create meter internal drop flow with the original pattern.
6713  *
6714  * @param dev
6715  *   Pointer to Ethernet device.
6716  * @param[in] flow
6717  *   Parent flow structure pointer.
6718  * @param[in] attr
6719  *   Flow rule attributes.
6720  * @param[in] items
6721  *   Pattern specification (list terminated by the END pattern item).
6722  * @param[in] flow_split_info
6723  *   Pointer to flow split info structure.
6724  * @param[in] fm
6725  *   Pointer to flow meter structure.
6726  * @param[out] error
6727  *   Perform verbose error reporting if not NULL.
6728  * @return
6729  *   0 on success, negative value otherwise
6730  */
6731 static uint32_t
6732 flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev,
6733 			struct rte_flow *flow,
6734 			const struct rte_flow_attr *attr,
6735 			const struct rte_flow_item items[],
6736 			struct mlx5_flow_split_info *flow_split_info,
6737 			struct mlx5_flow_meter_info *fm,
6738 			struct rte_flow_error *error)
6739 {
6740 	struct mlx5_flow *dev_flow = NULL;
6741 	struct rte_flow_attr drop_attr = *attr;
6742 	struct rte_flow_action drop_actions[3];
6743 	struct mlx5_flow_split_info drop_split_info = *flow_split_info;
6744 
6745 	MLX5_ASSERT(fm->drop_cnt);
6746 	drop_actions[0].type =
6747 		(enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT;
6748 	drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt;
6749 	drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP;
6750 	drop_actions[1].conf = NULL;
6751 	drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END;
6752 	drop_actions[2].conf = NULL;
6753 	drop_split_info.external = false;
6754 	drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT;
6755 	drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP;
6756 	drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER;
6757 	return flow_create_split_inner(dev, flow, &dev_flow,
6758 				&drop_attr, items, drop_actions,
6759 				&drop_split_info, error);
6760 }
6761 
6762 static int
6763 flow_count_vlan_items(const struct rte_flow_item items[])
6764 {
6765 	int items_n = 0;
6766 
6767 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6768 		if (items->type == RTE_FLOW_ITEM_TYPE_VLAN ||
6769 		    items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
6770 			items_n++;
6771 	}
6772 	return items_n;
6773 }
6774 
6775 /**
6776  * The splitting for meter feature.
6777  *
6778  * - The meter flow will be split to two flows as prefix and
6779  *   suffix flow. The packets make sense only it pass the prefix
6780  *   meter action.
6781  *
6782  * - Reg_C_5 is used for the packet to match betweend prefix and
6783  *   suffix flow.
6784  *
6785  * @param dev
6786  *   Pointer to Ethernet device.
6787  * @param[in] flow
6788  *   Parent flow structure pointer.
6789  * @param[in] attr
6790  *   Flow rule attributes.
6791  * @param[in] items
6792  *   Pattern specification (list terminated by the END pattern item).
6793  * @param[in] actions
6794  *   Associated actions (list terminated by the END action).
6795  * @param[in] flow_split_info
6796  *   Pointer to flow split info structure.
6797  * @param[out] error
6798  *   Perform verbose error reporting if not NULL.
6799  * @return
6800  *   0 on success, negative value otherwise
6801  */
6802 static int
6803 flow_create_split_meter(struct rte_eth_dev *dev,
6804 			struct rte_flow *flow,
6805 			const struct rte_flow_attr *attr,
6806 			const struct rte_flow_item items[],
6807 			const struct rte_flow_action actions[],
6808 			struct mlx5_flow_split_info *flow_split_info,
6809 			struct rte_flow_error *error)
6810 {
6811 	struct mlx5_priv *priv = dev->data->dev_private;
6812 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6813 	struct rte_flow_action *sfx_actions = NULL;
6814 	struct rte_flow_action *pre_actions = NULL;
6815 	struct rte_flow_item *sfx_items = NULL;
6816 	struct mlx5_flow *dev_flow = NULL;
6817 	struct rte_flow_attr sfx_attr = *attr;
6818 	struct mlx5_flow_meter_info *fm = NULL;
6819 	uint8_t skip_scale_restore;
6820 	bool has_mtr = false;
6821 	bool has_modify = false;
6822 	bool set_mtr_reg = true;
6823 	bool is_mtr_hierarchy = false;
6824 	uint32_t meter_id = 0;
6825 	uint32_t mtr_idx = 0;
6826 	uint32_t mtr_flow_id = 0;
6827 	size_t act_size;
6828 	size_t item_size;
6829 	int actions_n = 0;
6830 	int vlan_items_n = 0;
6831 	int ret = 0;
6832 
6833 	if (priv->mtr_en)
6834 		actions_n = flow_check_meter_action(dev, actions, &has_mtr,
6835 						    &has_modify, &meter_id);
6836 	if (has_mtr) {
6837 		if (flow->meter) {
6838 			fm = flow_dv_meter_find_by_idx(priv, flow->meter);
6839 			if (!fm)
6840 				return rte_flow_error_set(error, EINVAL,
6841 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6842 						NULL, "Meter not found.");
6843 		} else {
6844 			fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx);
6845 			if (!fm)
6846 				return rte_flow_error_set(error, EINVAL,
6847 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6848 						NULL, "Meter not found.");
6849 			ret = mlx5_flow_meter_attach(priv, fm,
6850 						     &sfx_attr, error);
6851 			if (ret)
6852 				return -rte_errno;
6853 			flow->meter = mtr_idx;
6854 		}
6855 		MLX5_ASSERT(wks);
6856 		wks->fm = fm;
6857 		if (!fm->def_policy) {
6858 			wks->policy = mlx5_flow_meter_policy_find(dev,
6859 								  fm->policy_id,
6860 								  NULL);
6861 			MLX5_ASSERT(wks->policy);
6862 			if (wks->policy->mark)
6863 				wks->mark = 1;
6864 			if (wks->policy->is_hierarchy) {
6865 				wks->final_policy =
6866 				mlx5_flow_meter_hierarchy_get_final_policy(dev,
6867 								wks->policy);
6868 				if (!wks->final_policy)
6869 					return rte_flow_error_set(error,
6870 					EINVAL,
6871 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6872 				"Failed to find terminal policy of hierarchy.");
6873 				is_mtr_hierarchy = true;
6874 			}
6875 		}
6876 		/*
6877 		 * If it isn't default-policy Meter, and
6878 		 * 1. Not meter hierarchy and there's no action in flow to change
6879 		 *    packet (modify/encap/decap etc.), OR
6880 		 * 2. No drop count needed for this meter.
6881 		 * Then no need to use regC to save meter id anymore.
6882 		 */
6883 		if (!fm->def_policy && ((!has_modify && !is_mtr_hierarchy) || !fm->drop_cnt))
6884 			set_mtr_reg = false;
6885 		/* Prefix actions: meter, decap, encap, tag, jump, end, cnt. */
6886 #define METER_PREFIX_ACTION 7
6887 		act_size = (sizeof(struct rte_flow_action) *
6888 			    (actions_n + METER_PREFIX_ACTION)) +
6889 			   sizeof(struct mlx5_rte_flow_action_set_tag);
6890 		/* Flow can have multiple VLAN items. Account for them in suffix items. */
6891 		vlan_items_n = flow_count_vlan_items(items);
6892 		/* Suffix items: tag, [vlans], port id, end. */
6893 #define METER_SUFFIX_ITEM 3
6894 		item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) +
6895 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
6896 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
6897 					  0, SOCKET_ID_ANY);
6898 		if (!sfx_actions)
6899 			return rte_flow_error_set(error, ENOMEM,
6900 						  RTE_FLOW_ERROR_TYPE_ACTION,
6901 						  NULL, "no memory to split "
6902 						  "meter flow");
6903 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
6904 			     act_size);
6905 		/* There's no suffix flow for meter of non-default policy. */
6906 		if (!fm->def_policy)
6907 			pre_actions = sfx_actions + 1;
6908 		else
6909 			pre_actions = sfx_actions + actions_n;
6910 		ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
6911 					    items, sfx_items, actions,
6912 					    sfx_actions, pre_actions,
6913 					    (set_mtr_reg ? &mtr_flow_id : NULL),
6914 					    error);
6915 		if (ret) {
6916 			ret = -rte_errno;
6917 			goto exit;
6918 		}
6919 		/* Add the prefix subflow. */
6920 		skip_scale_restore = flow_split_info->skip_scale;
6921 		flow_split_info->skip_scale |=
6922 			1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
6923 		ret = flow_create_split_inner(dev, flow, &dev_flow,
6924 					      attr, items, pre_actions,
6925 					      flow_split_info, error);
6926 		flow_split_info->skip_scale = skip_scale_restore;
6927 		if (ret) {
6928 			if (mtr_flow_id)
6929 				mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
6930 			ret = -rte_errno;
6931 			goto exit;
6932 		}
6933 		if (mtr_flow_id) {
6934 			dev_flow->handle->split_flow_id = mtr_flow_id;
6935 			dev_flow->handle->is_meter_flow_id = 1;
6936 		}
6937 		if (!fm->def_policy) {
6938 			if (!set_mtr_reg && fm->drop_cnt)
6939 				ret =
6940 			flow_meter_create_drop_flow_with_org_pattern(dev, flow,
6941 							&sfx_attr, items,
6942 							flow_split_info,
6943 							fm, error);
6944 			goto exit;
6945 		}
6946 		/* Setting the sfx group atrr. */
6947 		sfx_attr.group = sfx_attr.transfer ?
6948 				(MLX5_FLOW_TABLE_LEVEL_METER - 1) :
6949 				 MLX5_FLOW_TABLE_LEVEL_METER;
6950 		flow_split_info->prefix_layers =
6951 				flow_get_prefix_layer_flags(dev_flow);
6952 		flow_split_info->prefix_mark |= wks->mark;
6953 		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
6954 	}
6955 	/* Add the prefix subflow. */
6956 	ret = flow_create_split_metadata(dev, flow,
6957 					 &sfx_attr, sfx_items ?
6958 					 sfx_items : items,
6959 					 sfx_actions ? sfx_actions : actions,
6960 					 flow_split_info, error);
6961 exit:
6962 	if (sfx_actions)
6963 		mlx5_free(sfx_actions);
6964 	return ret;
6965 }
6966 
6967 /**
6968  * The splitting for sample feature.
6969  *
6970  * Once Sample action is detected in the action list, the flow actions should
6971  * be split into prefix sub flow and suffix sub flow.
6972  *
6973  * The original items remain in the prefix sub flow, all actions preceding the
6974  * sample action and the sample action itself will be copied to the prefix
6975  * sub flow, the actions following the sample action will be copied to the
6976  * suffix sub flow, Queue action always be located in the suffix sub flow.
6977  *
6978  * In order to make the packet from prefix sub flow matches with suffix sub
6979  * flow, an extra tag action be added into prefix sub flow, and the suffix sub
6980  * flow uses tag item with the unique flow id.
6981  *
6982  * @param dev
6983  *   Pointer to Ethernet device.
6984  * @param[in] flow
6985  *   Parent flow structure pointer.
6986  * @param[in] attr
6987  *   Flow rule attributes.
6988  * @param[in] items
6989  *   Pattern specification (list terminated by the END pattern item).
6990  * @param[in] actions
6991  *   Associated actions (list terminated by the END action).
6992  * @param[in] flow_split_info
6993  *   Pointer to flow split info structure.
6994  * @param[out] error
6995  *   Perform verbose error reporting if not NULL.
6996  * @return
6997  *   0 on success, negative value otherwise
6998  */
6999 static int
7000 flow_create_split_sample(struct rte_eth_dev *dev,
7001 			 struct rte_flow *flow,
7002 			 const struct rte_flow_attr *attr,
7003 			 const struct rte_flow_item items[],
7004 			 const struct rte_flow_action actions[],
7005 			 struct mlx5_flow_split_info *flow_split_info,
7006 			 struct rte_flow_error *error)
7007 {
7008 	struct mlx5_priv *priv = dev->data->dev_private;
7009 	struct rte_flow_action *sfx_actions = NULL;
7010 	struct rte_flow_action *pre_actions = NULL;
7011 	struct rte_flow_item *sfx_items = NULL;
7012 	struct mlx5_flow *dev_flow = NULL;
7013 	struct rte_flow_attr sfx_attr = *attr;
7014 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7015 	struct mlx5_flow_dv_sample_resource *sample_res;
7016 	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
7017 	struct mlx5_flow_tbl_resource *sfx_tbl;
7018 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7019 #endif
7020 	size_t act_size;
7021 	size_t item_size;
7022 	uint32_t fdb_tx = 0;
7023 	int32_t tag_id = 0;
7024 	int actions_n = 0;
7025 	int sample_action_pos;
7026 	int qrss_action_pos;
7027 	int add_tag = 0;
7028 	int modify_after_mirror = 0;
7029 	uint16_t jump_table = 0;
7030 	const uint32_t next_ft_step = 1;
7031 	int ret = 0;
7032 	struct mlx5_priv *item_port_priv = NULL;
7033 	const struct rte_flow_item *item;
7034 
7035 	if (priv->sampler_en)
7036 		actions_n = flow_check_match_action(actions, attr,
7037 					RTE_FLOW_ACTION_TYPE_SAMPLE,
7038 					&sample_action_pos, &qrss_action_pos,
7039 					&modify_after_mirror);
7040 	if (actions_n) {
7041 		/* The prefix actions must includes sample, tag, end. */
7042 		act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
7043 			   + sizeof(struct mlx5_rte_flow_action_set_tag);
7044 		item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
7045 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
7046 		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
7047 					  item_size), 0, SOCKET_ID_ANY);
7048 		if (!sfx_actions)
7049 			return rte_flow_error_set(error, ENOMEM,
7050 						  RTE_FLOW_ERROR_TYPE_ACTION,
7051 						  NULL, "no memory to split "
7052 						  "sample flow");
7053 		for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
7054 			if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
7055 				const struct rte_flow_item_port_id *spec;
7056 
7057 				spec = (const struct rte_flow_item_port_id *)item->spec;
7058 				if (spec)
7059 					item_port_priv =
7060 						mlx5_port_to_eswitch_info(spec->id, true);
7061 				break;
7062 			} else if (item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) {
7063 				const struct rte_flow_item_ethdev *spec;
7064 
7065 				spec = (const struct rte_flow_item_ethdev *)item->spec;
7066 				if (spec)
7067 					item_port_priv =
7068 						mlx5_port_to_eswitch_info(spec->port_id, true);
7069 				break;
7070 			} else if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
7071 				const struct rte_flow_item_ethdev *spec;
7072 
7073 				spec = (const struct rte_flow_item_ethdev *)item->spec;
7074 				if (spec)
7075 					item_port_priv =
7076 						mlx5_port_to_eswitch_info(spec->port_id, true);
7077 				break;
7078 			}
7079 		}
7080 		/* The representor_id is UINT16_MAX for uplink. */
7081 		fdb_tx = (attr->transfer &&
7082 			  flow_source_vport_representor(priv, item_port_priv));
7083 		/*
7084 		 * When reg_c_preserve is set, metadata registers Cx preserve
7085 		 * their value even through packet duplication.
7086 		 */
7087 		add_tag = (!fdb_tx ||
7088 			   priv->sh->cdev->config.hca_attr.reg_c_preserve);
7089 		if (add_tag)
7090 			sfx_items = (struct rte_flow_item *)((char *)sfx_actions
7091 					+ act_size);
7092 		if (modify_after_mirror)
7093 			jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
7094 				     next_ft_step;
7095 		pre_actions = sfx_actions + actions_n;
7096 		tag_id = flow_sample_split_prep(dev, add_tag, items, sfx_items,
7097 						actions, sfx_actions,
7098 						pre_actions, actions_n,
7099 						sample_action_pos,
7100 						qrss_action_pos, jump_table,
7101 						error);
7102 		if (tag_id < 0 || (add_tag && !tag_id)) {
7103 			ret = -rte_errno;
7104 			goto exit;
7105 		}
7106 		if (modify_after_mirror)
7107 			flow_split_info->skip_scale =
7108 					1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
7109 		/* Add the prefix subflow. */
7110 		ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
7111 					      items, pre_actions,
7112 					      flow_split_info, error);
7113 		if (ret) {
7114 			ret = -rte_errno;
7115 			goto exit;
7116 		}
7117 		dev_flow->handle->split_flow_id = tag_id;
7118 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7119 		if (!modify_after_mirror) {
7120 			/* Set the sfx group attr. */
7121 			sample_res = (struct mlx5_flow_dv_sample_resource *)
7122 						dev_flow->dv.sample_res;
7123 			sfx_tbl = (struct mlx5_flow_tbl_resource *)
7124 						sample_res->normal_path_tbl;
7125 			sfx_tbl_data = container_of(sfx_tbl,
7126 						struct mlx5_flow_tbl_data_entry,
7127 						tbl);
7128 			sfx_attr.group = sfx_attr.transfer ?
7129 			(sfx_tbl_data->level - 1) : sfx_tbl_data->level;
7130 		} else {
7131 			MLX5_ASSERT(attr->transfer);
7132 			sfx_attr.group = jump_table;
7133 		}
7134 		flow_split_info->prefix_layers =
7135 				flow_get_prefix_layer_flags(dev_flow);
7136 		MLX5_ASSERT(wks);
7137 		flow_split_info->prefix_mark |= wks->mark;
7138 		/* Suffix group level already be scaled with factor, set
7139 		 * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
7140 		 * again in translation.
7141 		 */
7142 		flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
7143 #endif
7144 	}
7145 	/* Add the suffix subflow. */
7146 	ret = flow_create_split_meter(dev, flow, &sfx_attr,
7147 				      sfx_items ? sfx_items : items,
7148 				      sfx_actions ? sfx_actions : actions,
7149 				      flow_split_info, error);
7150 exit:
7151 	if (sfx_actions)
7152 		mlx5_free(sfx_actions);
7153 	return ret;
7154 }
7155 
7156 /**
7157  * Split the flow to subflow set. The splitters might be linked
7158  * in the chain, like this:
7159  * flow_create_split_outer() calls:
7160  *   flow_create_split_meter() calls:
7161  *     flow_create_split_metadata(meter_subflow_0) calls:
7162  *       flow_create_split_inner(metadata_subflow_0)
7163  *       flow_create_split_inner(metadata_subflow_1)
7164  *       flow_create_split_inner(metadata_subflow_2)
7165  *     flow_create_split_metadata(meter_subflow_1) calls:
7166  *       flow_create_split_inner(metadata_subflow_0)
7167  *       flow_create_split_inner(metadata_subflow_1)
7168  *       flow_create_split_inner(metadata_subflow_2)
7169  *
7170  * This provide flexible way to add new levels of flow splitting.
7171  * The all of successfully created subflows are included to the
7172  * parent flow dev_flow list.
7173  *
7174  * @param dev
7175  *   Pointer to Ethernet device.
7176  * @param[in] flow
7177  *   Parent flow structure pointer.
7178  * @param[in] attr
7179  *   Flow rule attributes.
7180  * @param[in] items
7181  *   Pattern specification (list terminated by the END pattern item).
7182  * @param[in] actions
7183  *   Associated actions (list terminated by the END action).
7184  * @param[in] flow_split_info
7185  *   Pointer to flow split info structure.
7186  * @param[out] error
7187  *   Perform verbose error reporting if not NULL.
7188  * @return
7189  *   0 on success, negative value otherwise
7190  */
7191 static int
7192 flow_create_split_outer(struct rte_eth_dev *dev,
7193 			struct rte_flow *flow,
7194 			const struct rte_flow_attr *attr,
7195 			const struct rte_flow_item items[],
7196 			const struct rte_flow_action actions[],
7197 			struct mlx5_flow_split_info *flow_split_info,
7198 			struct rte_flow_error *error)
7199 {
7200 	int ret;
7201 
7202 	ret = flow_create_split_sample(dev, flow, attr, items,
7203 				       actions, flow_split_info, error);
7204 	MLX5_ASSERT(ret <= 0);
7205 	return ret;
7206 }
7207 
7208 static inline struct mlx5_flow_tunnel *
7209 flow_tunnel_from_rule(const struct mlx5_flow *flow)
7210 {
7211 	struct mlx5_flow_tunnel *tunnel;
7212 
7213 #pragma GCC diagnostic push
7214 #pragma GCC diagnostic ignored "-Wcast-qual"
7215 	tunnel = (typeof(tunnel))flow->tunnel;
7216 #pragma GCC diagnostic pop
7217 
7218 	return tunnel;
7219 }
7220 
7221 /**
7222  * Create a flow and add it to @p list.
7223  *
7224  * @param dev
7225  *   Pointer to Ethernet device.
7226  * @param list
7227  *   Pointer to a TAILQ flow list. If this parameter NULL,
7228  *   no list insertion occurred, flow is just created,
7229  *   this is caller's responsibility to track the
7230  *   created flow.
7231  * @param[in] attr
7232  *   Flow rule attributes.
7233  * @param[in] items
7234  *   Pattern specification (list terminated by the END pattern item).
7235  * @param[in] actions
7236  *   Associated actions (list terminated by the END action).
7237  * @param[in] external
7238  *   This flow rule is created by request external to PMD.
7239  * @param[out] error
7240  *   Perform verbose error reporting if not NULL.
7241  *
7242  * @return
7243  *   A flow index on success, 0 otherwise and rte_errno is set.
7244  */
7245 static uint32_t
7246 flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
7247 		 const struct rte_flow_attr *attr,
7248 		 const struct rte_flow_item items[],
7249 		 const struct rte_flow_action original_actions[],
7250 		 bool external, struct rte_flow_error *error)
7251 {
7252 	struct mlx5_priv *priv = dev->data->dev_private;
7253 	struct rte_flow *flow = NULL;
7254 	struct mlx5_flow *dev_flow;
7255 	const struct rte_flow_action_rss *rss = NULL;
7256 	struct mlx5_translated_action_handle
7257 		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
7258 	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
7259 	union {
7260 		struct mlx5_flow_expand_rss buf;
7261 		uint8_t buffer[8192];
7262 	} expand_buffer;
7263 	union {
7264 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
7265 		uint8_t buffer[2048];
7266 	} actions_rx;
7267 	union {
7268 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
7269 		uint8_t buffer[2048];
7270 	} actions_hairpin_tx;
7271 	union {
7272 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
7273 		uint8_t buffer[2048];
7274 	} items_tx;
7275 	struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT];
7276 	struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
7277 	struct mlx5_flow_rss_desc *rss_desc;
7278 	const struct rte_flow_action *p_actions_rx;
7279 	uint32_t i;
7280 	uint32_t idx = 0;
7281 	int hairpin_flow;
7282 	struct rte_flow_attr attr_tx = { .priority = 0 };
7283 	const struct rte_flow_action *actions;
7284 	struct rte_flow_action *translated_actions = NULL;
7285 	struct mlx5_flow_tunnel *tunnel;
7286 	struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
7287 	struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
7288 	struct mlx5_flow_split_info flow_split_info = {
7289 		.external = !!external,
7290 		.skip_scale = 0,
7291 		.flow_idx = 0,
7292 		.prefix_mark = 0,
7293 		.prefix_layers = 0,
7294 		.table_id = 0
7295 	};
7296 	int ret;
7297 
7298 	MLX5_ASSERT(wks);
7299 	rss_desc = &wks->rss_desc;
7300 	ret = flow_action_handles_translate(dev, original_actions,
7301 					    indir_actions,
7302 					    &indir_actions_n,
7303 					    &translated_actions, error);
7304 	if (ret < 0) {
7305 		MLX5_ASSERT(translated_actions == NULL);
7306 		return 0;
7307 	}
7308 	actions = translated_actions ? translated_actions : original_actions;
7309 	p_actions_rx = actions;
7310 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
7311 	ret = flow_drv_validate(dev, attr, items, p_actions_rx,
7312 				external, hairpin_flow, error);
7313 	if (ret < 0)
7314 		goto error_before_hairpin_split;
7315 	flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
7316 	if (!flow) {
7317 		rte_errno = ENOMEM;
7318 		goto error_before_hairpin_split;
7319 	}
7320 	if (hairpin_flow > 0) {
7321 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
7322 			rte_errno = EINVAL;
7323 			goto error_before_hairpin_split;
7324 		}
7325 		flow_hairpin_split(dev, actions, actions_rx.actions,
7326 				   actions_hairpin_tx.actions, items_tx.items,
7327 				   idx);
7328 		p_actions_rx = actions_rx.actions;
7329 	}
7330 	flow_split_info.flow_idx = idx;
7331 	flow->drv_type = flow_get_drv_type(dev, attr);
7332 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
7333 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
7334 	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
7335 	/* RSS Action only works on NIC RX domain */
7336 	if (attr->ingress)
7337 		rss = flow_get_rss_action(dev, p_actions_rx);
7338 	if (rss) {
7339 		MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
7340 		rss_desc->symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
7341 		/*
7342 		 * The following information is required by
7343 		 * mlx5_flow_hashfields_adjust() in advance.
7344 		 */
7345 		rss_desc->level = rss->level;
7346 		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
7347 		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
7348 	}
7349 	flow->dev_handles = 0;
7350 	if (rss && rss->types) {
7351 		unsigned int graph_root;
7352 
7353 		graph_root = find_graph_root(rss->level);
7354 		ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
7355 					   items, rss->types,
7356 					   mlx5_support_expansion, graph_root);
7357 		MLX5_ASSERT(ret > 0 &&
7358 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
7359 		if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
7360 			for (i = 0; i < buf->entries; ++i)
7361 				mlx5_dbg__print_pattern(buf->entry[i].pattern);
7362 		}
7363 	} else {
7364 		ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf,
7365 					   sizeof(expand_buffer.buffer),
7366 					   items, sq_specs);
7367 		if (ret) {
7368 			rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
7369 					   NULL, "not enough memory for rte_flow");
7370 			goto error;
7371 		}
7372 		if (buf->entries == 0) {
7373 			buf->entries = 1;
7374 			buf->entry[0].pattern = (void *)(uintptr_t)items;
7375 		}
7376 	}
7377 	rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
7378 						      indir_actions_n);
7379 	for (i = 0; i < buf->entries; ++i) {
7380 		/* Initialize flow split data. */
7381 		flow_split_info.prefix_layers = 0;
7382 		flow_split_info.prefix_mark = 0;
7383 		flow_split_info.skip_scale = 0;
7384 		/*
7385 		 * The splitter may create multiple dev_flows,
7386 		 * depending on configuration. In the simplest
7387 		 * case it just creates unmodified original flow.
7388 		 */
7389 		ret = flow_create_split_outer(dev, flow, attr,
7390 					      buf->entry[i].pattern,
7391 					      p_actions_rx, &flow_split_info,
7392 					      error);
7393 		if (ret < 0)
7394 			goto error;
7395 		if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
7396 			ret = flow_tunnel_add_default_miss(dev, flow, attr,
7397 							   p_actions_rx,
7398 							   idx,
7399 							   wks->flows[0].tunnel,
7400 							   &default_miss_ctx,
7401 							   error);
7402 			if (ret < 0) {
7403 				mlx5_free(default_miss_ctx.queue);
7404 				goto error;
7405 			}
7406 		}
7407 	}
7408 	/* Create the tx flow. */
7409 	if (hairpin_flow) {
7410 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
7411 		attr_tx.ingress = 0;
7412 		attr_tx.egress = 1;
7413 		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
7414 					 actions_hairpin_tx.actions,
7415 					 idx, error);
7416 		if (!dev_flow)
7417 			goto error;
7418 		dev_flow->flow = flow;
7419 		dev_flow->external = 0;
7420 		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
7421 			      dev_flow->handle, next);
7422 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
7423 					 items_tx.items,
7424 					 actions_hairpin_tx.actions, error);
7425 		if (ret < 0)
7426 			goto error;
7427 	}
7428 	/*
7429 	 * Update the metadata register copy table. If extensive
7430 	 * metadata feature is enabled and registers are supported
7431 	 * we might create the extra rte_flow for each unique
7432 	 * MARK/FLAG action ID.
7433 	 *
7434 	 * The table is updated for ingress and transfer flows only, because
7435 	 * the egress Flows belong to the different device and
7436 	 * copy table should be updated in peer NIC Rx domain.
7437 	 */
7438 	if ((attr->ingress || attr->transfer) &&
7439 	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
7440 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
7441 		if (ret)
7442 			goto error;
7443 	}
7444 	/*
7445 	 * If the flow is external (from application) OR device is started,
7446 	 * OR mreg discover, then apply immediately.
7447 	 */
7448 	if (external || dev->data->dev_started ||
7449 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
7450 	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
7451 		ret = flow_drv_apply(dev, flow, error);
7452 		if (ret < 0)
7453 			goto error;
7454 	}
7455 	flow->type = type;
7456 	flow_rxq_flags_set(dev, flow);
7457 	rte_free(translated_actions);
7458 	tunnel = flow_tunnel_from_rule(wks->flows);
7459 	if (tunnel) {
7460 		flow->tunnel = 1;
7461 		flow->tunnel_id = tunnel->tunnel_id;
7462 		rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
7463 		mlx5_free(default_miss_ctx.queue);
7464 	}
7465 	mlx5_flow_pop_thread_workspace();
7466 	return idx;
7467 error:
7468 	MLX5_ASSERT(flow);
7469 	ret = rte_errno; /* Save rte_errno before cleanup. */
7470 	flow_mreg_del_copy_action(dev, flow);
7471 	flow_drv_destroy(dev, flow);
7472 	if (rss_desc->shared_rss)
7473 		rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
7474 			mlx5_ipool_get
7475 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
7476 			rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
7477 	mlx5_ipool_free(priv->flows[type], idx);
7478 	rte_errno = ret; /* Restore rte_errno. */
7479 	ret = rte_errno;
7480 	rte_errno = ret;
7481 error_before_hairpin_split:
7482 	mlx5_flow_pop_thread_workspace();
7483 	rte_free(translated_actions);
7484 	return 0;
7485 }
7486 
7487 /**
7488  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
7489  * incoming packets to table 1.
7490  *
7491  * Other flow rules, requested for group n, will be created in
7492  * e-switch table n+1.
7493  * Jump action to e-switch group n will be created to group n+1.
7494  *
7495  * Used when working in switchdev mode, to utilise advantages of table 1
7496  * and above.
7497  *
7498  * @param dev
7499  *   Pointer to Ethernet device.
7500  *
7501  * @return
7502  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
7503  */
7504 struct rte_flow *
7505 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
7506 {
7507 	const struct rte_flow_attr attr = {
7508 		.group = 0,
7509 		.priority = 0,
7510 		.ingress = 0,
7511 		.egress = 0,
7512 		.transfer = 1,
7513 	};
7514 	const struct rte_flow_item pattern = {
7515 		.type = RTE_FLOW_ITEM_TYPE_END,
7516 	};
7517 	struct rte_flow_action_jump jump = {
7518 		.group = 1,
7519 	};
7520 	const struct rte_flow_action actions[] = {
7521 		{
7522 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
7523 			.conf = &jump,
7524 		},
7525 		{
7526 			.type = RTE_FLOW_ACTION_TYPE_END,
7527 		},
7528 	};
7529 	struct rte_flow_error error;
7530 
7531 	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
7532 						   &attr, &pattern,
7533 						   actions, false, &error);
7534 }
7535 
7536 /**
7537  * Create a dedicated flow rule on e-switch table 1, matches ESW manager
7538  * and sq number, directs all packets to peer vport.
7539  *
7540  * @param dev
7541  *   Pointer to Ethernet device.
7542  * @param sq_num
7543  *   SQ number.
7544  *
7545  * @return
7546  *   Flow ID on success, 0 otherwise and rte_errno is set.
7547  */
7548 uint32_t
7549 mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sq_num)
7550 {
7551 	struct rte_flow_attr attr = {
7552 		.group = 0,
7553 		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
7554 		.ingress = 0,
7555 		.egress = 0,
7556 		.transfer = 1,
7557 	};
7558 	struct rte_flow_item_port_id port_spec = {
7559 		.id = MLX5_PORT_ESW_MGR,
7560 	};
7561 	struct mlx5_rte_flow_item_sq sq_spec = {
7562 		.queue = sq_num,
7563 	};
7564 	struct rte_flow_item pattern[] = {
7565 		{
7566 			.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
7567 			.spec = &port_spec,
7568 		},
7569 		{
7570 			.type = (enum rte_flow_item_type)
7571 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7572 			.spec = &sq_spec,
7573 		},
7574 		{
7575 			.type = RTE_FLOW_ITEM_TYPE_END,
7576 		},
7577 	};
7578 	struct rte_flow_action_jump jump = {
7579 		.group = 1,
7580 	};
7581 	struct rte_flow_action_port_id port = {
7582 		.id = dev->data->port_id,
7583 	};
7584 	struct rte_flow_action actions[] = {
7585 		{
7586 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
7587 			.conf = &jump,
7588 		},
7589 		{
7590 			.type = RTE_FLOW_ACTION_TYPE_END,
7591 		},
7592 	};
7593 	struct rte_flow_error error;
7594 
7595 	/*
7596 	 * Creates group 0, highest priority jump flow.
7597 	 * Matches txq to bypass kernel packets.
7598 	 */
7599 	if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
7600 			     false, &error) == 0)
7601 		return 0;
7602 	/* Create group 1, lowest priority redirect flow for txq. */
7603 	attr.group = 1;
7604 	actions[0].conf = &port;
7605 	actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
7606 	return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
7607 				actions, false, &error);
7608 }
7609 
7610 /**
7611  * Validate a flow supported by the NIC.
7612  *
7613  * @see rte_flow_validate()
7614  * @see rte_flow_ops
7615  */
7616 int
7617 mlx5_flow_validate(struct rte_eth_dev *dev,
7618 		   const struct rte_flow_attr *attr,
7619 		   const struct rte_flow_item items[],
7620 		   const struct rte_flow_action original_actions[],
7621 		   struct rte_flow_error *error)
7622 {
7623 	int hairpin_flow;
7624 	struct mlx5_translated_action_handle
7625 		indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
7626 	int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
7627 	const struct rte_flow_action *actions;
7628 	struct rte_flow_action *translated_actions = NULL;
7629 	int ret = flow_action_handles_translate(dev, original_actions,
7630 						indir_actions,
7631 						&indir_actions_n,
7632 						&translated_actions, error);
7633 
7634 	if (ret)
7635 		return ret;
7636 	actions = translated_actions ? translated_actions : original_actions;
7637 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
7638 	ret = flow_drv_validate(dev, attr, items, actions,
7639 				true, hairpin_flow, error);
7640 	rte_free(translated_actions);
7641 	return ret;
7642 }
7643 
7644 static int
7645 mlx5_flow_cache_flow_info(struct rte_eth_dev *dev,
7646 			  const struct rte_flow_attr *attr,
7647 			  const uint32_t orig_prio,
7648 			  const struct rte_flow_item *items,
7649 			  const struct rte_flow_action *actions,
7650 			  uint32_t flow_idx)
7651 {
7652 	struct mlx5_priv *priv = dev->data->dev_private;
7653 	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
7654 	struct mlx5_dv_flow_info *flow_info, *tmp_info;
7655 	struct rte_flow_error error;
7656 	int len, ret;
7657 
7658 	flow_info = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*flow_info), 0, SOCKET_ID_ANY);
7659 	if (!flow_info) {
7660 		DRV_LOG(ERR, "No enough memory for flow_info caching.");
7661 		return -1;
7662 	}
7663 	flow_info->orig_prio = orig_prio;
7664 	flow_info->attr = *attr;
7665 	/* Standby mode rule awlays saves it in low priority entry. */
7666 	flow_info->flow_idx_low_prio = flow_idx;
7667 
7668 	/* Store matching items. */
7669 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, items, &error);
7670 	if (ret <= 0) {
7671 		DRV_LOG(ERR, "Can't get items length.");
7672 		goto end;
7673 	}
7674 	len = RTE_ALIGN(ret, 16);
7675 	flow_info->items = mlx5_malloc(MLX5_MEM_ZERO, len, 0, SOCKET_ID_ANY);
7676 	if (!flow_info->items) {
7677 		DRV_LOG(ERR, "No enough memory for items caching.");
7678 		goto end;
7679 	}
7680 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, flow_info->items, ret, items, &error);
7681 	if (ret <= 0) {
7682 		DRV_LOG(ERR, "Can't duplicate items.");
7683 		goto end;
7684 	}
7685 
7686 	/* Store flow actions. */
7687 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, &error);
7688 	if (ret <= 0) {
7689 		DRV_LOG(ERR, "Can't get actions length.");
7690 		goto end;
7691 	}
7692 	len = RTE_ALIGN(ret, 16);
7693 	flow_info->actions = mlx5_malloc(MLX5_MEM_ZERO, len, 0, SOCKET_ID_ANY);
7694 	if (!flow_info->actions) {
7695 		DRV_LOG(ERR, "No enough memory for actions caching.");
7696 		goto end;
7697 	}
7698 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, flow_info->actions, ret, actions, &error);
7699 	if (ret <= 0) {
7700 		DRV_LOG(ERR, "Can't duplicate actions.");
7701 		goto end;
7702 	}
7703 
7704 	/* Insert to the list end. */
7705 	if (LIST_EMPTY(&mode_info->hot_upgrade)) {
7706 		LIST_INSERT_HEAD(&mode_info->hot_upgrade, flow_info,  next);
7707 	} else {
7708 		tmp_info = LIST_FIRST(&mode_info->hot_upgrade);
7709 		while (LIST_NEXT(tmp_info, next))
7710 			tmp_info = LIST_NEXT(tmp_info, next);
7711 		LIST_INSERT_AFTER(tmp_info, flow_info, next);
7712 	}
7713 	return 0;
7714 end:
7715 	if (flow_info->items)
7716 		mlx5_free(flow_info->items);
7717 	if (flow_info->actions)
7718 		mlx5_free(flow_info->actions);
7719 	mlx5_free(flow_info);
7720 	return -1;
7721 }
7722 
7723 static int
7724 mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
7725 {
7726 	struct mlx5_priv *priv = dev->data->dev_private;
7727 	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
7728 	struct mlx5_dv_flow_info *flow_info;
7729 	struct rte_flow_attr attr;
7730 	struct rte_flow_error error;
7731 	struct rte_flow *high, *low;
7732 
7733 	flow_info = LIST_FIRST(&mode_info->hot_upgrade);
7734 	while (flow_info) {
7735 		/* DUP flow may have the same priority. */
7736 		if (flow_info->orig_prio != flow_info->attr.priority) {
7737 			attr = flow_info->attr;
7738 			if (orig_prio)
7739 				attr.priority = flow_info->orig_prio;
7740 			flow_info->flow_idx_high_prio = flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
7741 					&attr, flow_info->items, flow_info->actions,
7742 					true, &error);
7743 			if (!flow_info->flow_idx_high_prio) {
7744 				DRV_LOG(ERR, "Priority toggle failed internally.");
7745 				goto err;
7746 			}
7747 		}
7748 		flow_info = LIST_NEXT(flow_info, next);
7749 	}
7750 	/* Delete the low priority rules and swap the flow handle. */
7751 	flow_info = LIST_FIRST(&mode_info->hot_upgrade);
7752 	while (flow_info) {
7753 		MLX5_ASSERT(flow_info->flow_idx_low_prio);
7754 		if (flow_info->orig_prio != flow_info->attr.priority) {
7755 			high = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
7756 					flow_info->flow_idx_high_prio);
7757 			low = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
7758 					flow_info->flow_idx_low_prio);
7759 			if (high && low) {
7760 				RTE_SWAP(*low, *high);
7761 				flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
7762 						  flow_info->flow_idx_low_prio);
7763 				flow_info->flow_idx_high_prio = 0;
7764 			}
7765 		}
7766 		flow_info = LIST_NEXT(flow_info, next);
7767 	}
7768 	return 0;
7769 err:
7770 	/* Destroy preceding successful high priority rules. */
7771 	flow_info = LIST_FIRST(&mode_info->hot_upgrade);
7772 	while (flow_info) {
7773 		if (flow_info->orig_prio != flow_info->attr.priority) {
7774 			if (flow_info->flow_idx_high_prio)
7775 				flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
7776 						  flow_info->flow_idx_high_prio);
7777 			else
7778 				break;
7779 			flow_info->flow_idx_high_prio = 0;
7780 		}
7781 		flow_info = LIST_NEXT(flow_info, next);
7782 	}
7783 	return -1;
7784 }
7785 
7786 /**
7787  * Set the mode of the flow engine of a process to active or standby during live migration.
7788  *
7789  * @param[in] mode
7790  *   MLX5 flow engine mode, @see `enum rte_pmd_mlx5_flow_engine_mode`.
7791  * @param[in] flags
7792  *   Flow engine mode specific flags.
7793  *
7794  * @return
7795  *   Negative value on error, positive on success.
7796  */
7797 int
7798 rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags)
7799 {
7800 	struct mlx5_priv *priv;
7801 	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info;
7802 	struct mlx5_dv_flow_info *flow_info, *tmp_info;
7803 	uint16_t port, port_id;
7804 	uint16_t toggle_num = 0;
7805 	struct rte_eth_dev *dev;
7806 	enum rte_pmd_mlx5_flow_engine_mode orig_mode;
7807 	uint32_t orig_flags;
7808 	bool need_toggle = false;
7809 
7810 	/* Check if flags combinations are supported. */
7811 	if (flags && flags != RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS) {
7812 		DRV_LOG(ERR, "Doesn't support such flags %u", flags);
7813 		return -1;
7814 	}
7815 	MLX5_ETH_FOREACH_DEV(port, NULL) {
7816 		dev = &rte_eth_devices[port];
7817 		priv = dev->data->dev_private;
7818 		mode_info = &priv->mode_info;
7819 		/* No mode change. Assume all devices hold the same mode. */
7820 		if (mode_info->mode == mode) {
7821 			DRV_LOG(INFO, "Process flow engine has been in mode %u", mode);
7822 			if (mode_info->mode_flag != flags && !LIST_EMPTY(&mode_info->hot_upgrade)) {
7823 				DRV_LOG(ERR, "Port %u has rule cache with different flag %u\n",
7824 						port, mode_info->mode_flag);
7825 				orig_mode = mode_info->mode;
7826 				orig_flags = mode_info->mode_flag;
7827 				goto err;
7828 			}
7829 			mode_info->mode_flag = flags;
7830 			toggle_num++;
7831 			continue;
7832 		}
7833 		/* Active -> standby. */
7834 		if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY) {
7835 			if (!LIST_EMPTY(&mode_info->hot_upgrade)) {
7836 				DRV_LOG(ERR, "Cached rule existed");
7837 				orig_mode = mode_info->mode;
7838 				orig_flags = mode_info->mode_flag;
7839 				goto err;
7840 			}
7841 			mode_info->mode_flag = flags;
7842 			mode_info->mode = mode;
7843 			toggle_num++;
7844 		/* Standby -> active. */
7845 		} else if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) {
7846 			if (LIST_EMPTY(&mode_info->hot_upgrade)) {
7847 				DRV_LOG(INFO, "No cached rule existed");
7848 			} else {
7849 				if (mlx5_flow_cache_flow_toggle(dev, true)) {
7850 					orig_mode = mode_info->mode;
7851 					orig_flags = mode_info->mode_flag;
7852 					need_toggle = true;
7853 					goto err;
7854 				}
7855 			}
7856 			toggle_num++;
7857 		}
7858 	}
7859 	if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) {
7860 		/* Clear cache flow rules. */
7861 		MLX5_ETH_FOREACH_DEV(port, NULL) {
7862 			priv = rte_eth_devices[port].data->dev_private;
7863 			mode_info = &priv->mode_info;
7864 			flow_info = LIST_FIRST(&mode_info->hot_upgrade);
7865 			while (flow_info) {
7866 				tmp_info = LIST_NEXT(flow_info, next);
7867 				LIST_REMOVE(flow_info, next);
7868 				mlx5_free(flow_info->actions);
7869 				mlx5_free(flow_info->items);
7870 				mlx5_free(flow_info);
7871 				flow_info = tmp_info;
7872 			}
7873 			MLX5_ASSERT(LIST_EMPTY(&mode_info->hot_upgrade));
7874 		}
7875 	}
7876 	return toggle_num;
7877 err:
7878 	/* Rollback all preceding successful ports. */
7879 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
7880 		if (port_id == port)
7881 			break;
7882 		priv = rte_eth_devices[port_id].data->dev_private;
7883 		mode_info = &priv->mode_info;
7884 		if (need_toggle && !LIST_EMPTY(&mode_info->hot_upgrade) &&
7885 		    mlx5_flow_cache_flow_toggle(dev, false))
7886 			return -EPERM;
7887 		mode_info->mode = orig_mode;
7888 		mode_info->mode_flag = orig_flags;
7889 	}
7890 	return -EINVAL;
7891 }
7892 /**
7893  * Create a flow.
7894  *
7895  * @see rte_flow_create()
7896  * @see rte_flow_ops
7897  */
7898 struct rte_flow *
7899 mlx5_flow_create(struct rte_eth_dev *dev,
7900 		 const struct rte_flow_attr *attr,
7901 		 const struct rte_flow_item items[],
7902 		 const struct rte_flow_action actions[],
7903 		 struct rte_flow_error *error)
7904 {
7905 	struct mlx5_priv *priv = dev->data->dev_private;
7906 	struct rte_flow_attr *new_attr = (void *)(uintptr_t)attr;
7907 	uint32_t prio = attr->priority;
7908 	uint32_t flow_idx;
7909 
7910 	if (priv->sh->config.dv_flow_en == 2) {
7911 		rte_flow_error_set(error, ENOTSUP,
7912 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7913 			  NULL,
7914 			  "Flow non-Q creation not supported");
7915 		return NULL;
7916 	}
7917 	/*
7918 	 * If the device is not started yet, it is not allowed to created a
7919 	 * flow from application. PMD default flows and traffic control flows
7920 	 * are not affected.
7921 	 */
7922 	if (unlikely(!dev->data->dev_started)) {
7923 		DRV_LOG(DEBUG, "port %u is not started when "
7924 			"inserting a flow", dev->data->port_id);
7925 		rte_flow_error_set(error, ENODEV,
7926 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7927 				   NULL,
7928 				   "port not started");
7929 		return NULL;
7930 	}
7931 	if (unlikely(mlx5_need_cache_flow(priv, attr))) {
7932 		if (attr->transfer ||
7933 				(attr->ingress && !(priv->mode_info.mode_flag &
7934 				RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS)))
7935 			new_attr->priority += 1;
7936 	}
7937 	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, attr, items, actions, true, error);
7938 	if (!flow_idx)
7939 		return NULL;
7940 	if (unlikely(mlx5_need_cache_flow(priv, attr))) {
7941 		if (mlx5_flow_cache_flow_info(dev, attr, prio, items, actions, flow_idx)) {
7942 			flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
7943 			flow_idx = 0;
7944 		}
7945 	}
7946 	return (void *)(uintptr_t)flow_idx;
7947 }
7948 
7949 /**
7950  * Destroy a flow in a list.
7951  *
7952  * @param dev
7953  *   Pointer to Ethernet device.
7954  * @param[in] flow_idx
7955  *   Index of flow to destroy.
7956  */
7957 static void
7958 flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
7959 		  uint32_t flow_idx)
7960 {
7961 	struct mlx5_priv *priv = dev->data->dev_private;
7962 	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
7963 
7964 	if (!flow)
7965 		return;
7966 	MLX5_ASSERT(flow->type == type);
7967 	/*
7968 	 * Update RX queue flags only if port is started, otherwise it is
7969 	 * already clean.
7970 	 */
7971 	if (dev->data->dev_started)
7972 		flow_rxq_flags_trim(dev, flow);
7973 	flow_drv_destroy(dev, flow);
7974 	if (flow->tunnel) {
7975 		struct mlx5_flow_tunnel *tunnel;
7976 
7977 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
7978 		RTE_VERIFY(tunnel);
7979 		if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
7980 				rte_memory_order_relaxed) - 1))
7981 			mlx5_flow_tunnel_free(dev, tunnel);
7982 	}
7983 	flow_mreg_del_copy_action(dev, flow);
7984 	mlx5_ipool_free(priv->flows[type], flow_idx);
7985 }
7986 
7987 /**
7988  * Destroy all flows.
7989  *
7990  * @param dev
7991  *   Pointer to Ethernet device.
7992  * @param type
7993  *   Flow type to be flushed.
7994  * @param active
7995  *   If flushing is called actively.
7996  */
7997 void
7998 mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
7999 		     bool active)
8000 {
8001 	struct mlx5_priv *priv = dev->data->dev_private;
8002 	uint32_t num_flushed = 0, fidx = 1;
8003 	struct rte_flow *flow;
8004 	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
8005 	struct mlx5_dv_flow_info *flow_info;
8006 
8007 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
8008 	if (priv->sh->config.dv_flow_en == 2 &&
8009 	    type == MLX5_FLOW_TYPE_GEN) {
8010 		flow_hw_q_flow_flush(dev, NULL);
8011 		return;
8012 	}
8013 #endif
8014 
8015 	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
8016 		flow_list_destroy(dev, type, fidx);
8017 		if (unlikely(mlx5_need_cache_flow(priv, NULL) && type == MLX5_FLOW_TYPE_GEN)) {
8018 			flow_info = LIST_FIRST(&mode_info->hot_upgrade);
8019 			while (flow_info) {
8020 				/* Romove the cache flow info. */
8021 				if (flow_info->flow_idx_low_prio == (uint32_t)(uintptr_t)fidx) {
8022 					MLX5_ASSERT(!flow_info->flow_idx_high_prio);
8023 					LIST_REMOVE(flow_info, next);
8024 					mlx5_free(flow_info->items);
8025 					mlx5_free(flow_info->actions);
8026 					mlx5_free(flow_info);
8027 					break;
8028 				}
8029 				flow_info = LIST_NEXT(flow_info, next);
8030 			}
8031 		}
8032 		num_flushed++;
8033 	}
8034 	if (active) {
8035 		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
8036 			dev->data->port_id, num_flushed);
8037 	}
8038 }
8039 
8040 /**
8041  * Stop all default actions for flows.
8042  *
8043  * @param dev
8044  *   Pointer to Ethernet device.
8045  */
8046 void
8047 mlx5_flow_stop_default(struct rte_eth_dev *dev)
8048 {
8049 	flow_mreg_del_default_copy_action(dev);
8050 	flow_rxq_flags_clear(dev);
8051 }
8052 
8053 /**
8054  * Set rxq flag.
8055  *
8056  * @param[in] dev
8057  *   Pointer to the rte_eth_dev structure.
8058  * @param[in] enable
8059  *   Flag to enable or not.
8060  */
8061 void
8062 flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
8063 {
8064 	struct mlx5_priv *priv = dev->data->dev_private;
8065 	unsigned int i;
8066 
8067 	if ((!priv->mark_enabled && !enable) ||
8068 	    (priv->mark_enabled && enable))
8069 		return;
8070 	for (i = 0; i < priv->rxqs_n; ++i) {
8071 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
8072 
8073 		/* With RXQ start/stop feature, RXQ might be stopped. */
8074 		if (!rxq_ctrl)
8075 			continue;
8076 		rxq_ctrl->rxq.mark = enable;
8077 	}
8078 	priv->mark_enabled = enable;
8079 }
8080 
8081 /**
8082  * Start all default actions for flows.
8083  *
8084  * @param dev
8085  *   Pointer to Ethernet device.
8086  * @return
8087  *   0 on success, a negative errno value otherwise and rte_errno is set.
8088  */
8089 int
8090 mlx5_flow_start_default(struct rte_eth_dev *dev)
8091 {
8092 	struct rte_flow_error error;
8093 
8094 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
8095 	return flow_mreg_add_default_copy_action(dev, &error);
8096 }
8097 
8098 /**
8099  * Release key of thread specific flow workspace data.
8100  */
8101 void
8102 flow_release_workspace(void *data)
8103 {
8104 	struct mlx5_flow_workspace *wks = data;
8105 	struct mlx5_flow_workspace *next;
8106 
8107 	while (wks) {
8108 		next = wks->next;
8109 		free(wks);
8110 		wks = next;
8111 	}
8112 }
8113 
8114 /**
8115  * Get thread specific current flow workspace.
8116  *
8117  * @return pointer to thread specific flow workspace data, NULL on error.
8118  */
8119 struct mlx5_flow_workspace*
8120 mlx5_flow_get_thread_workspace(void)
8121 {
8122 	struct mlx5_flow_workspace *data;
8123 
8124 	data = mlx5_flow_os_get_specific_workspace();
8125 	MLX5_ASSERT(data && data->inuse);
8126 	if (!data || !data->inuse)
8127 		DRV_LOG(ERR, "flow workspace not initialized.");
8128 	return data;
8129 }
8130 
8131 /**
8132  * Allocate and init new flow workspace.
8133  *
8134  * @return pointer to flow workspace data, NULL on error.
8135  */
8136 static struct mlx5_flow_workspace*
8137 flow_alloc_thread_workspace(void)
8138 {
8139 	size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
8140 	size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
8141 	struct mlx5_flow_workspace *data = calloc(1, data_size +
8142 						     rss_queue_array_size);
8143 
8144 	if (!data) {
8145 		DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
8146 		return NULL;
8147 	}
8148 	data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
8149 	return data;
8150 }
8151 
8152 /**
8153  * Get new thread specific flow workspace.
8154  *
8155  * If current workspace inuse, create new one and set as current.
8156  *
8157  * @return pointer to thread specific flow workspace data, NULL on error.
8158  */
8159 struct mlx5_flow_workspace*
8160 mlx5_flow_push_thread_workspace(void)
8161 {
8162 	struct mlx5_flow_workspace *curr;
8163 	struct mlx5_flow_workspace *data;
8164 
8165 	curr = mlx5_flow_os_get_specific_workspace();
8166 	if (!curr) {
8167 		data = flow_alloc_thread_workspace();
8168 		if (!data)
8169 			return NULL;
8170 		mlx5_flow_os_workspace_gc_add(data);
8171 	} else if (!curr->inuse) {
8172 		data = curr;
8173 	} else if (curr->next) {
8174 		data = curr->next;
8175 	} else {
8176 		data = flow_alloc_thread_workspace();
8177 		if (!data)
8178 			return NULL;
8179 		curr->next = data;
8180 		data->prev = curr;
8181 	}
8182 	data->inuse = 1;
8183 	data->flow_idx = 0;
8184 	/* Set as current workspace */
8185 	if (mlx5_flow_os_set_specific_workspace(data))
8186 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
8187 	return data;
8188 }
8189 
8190 /**
8191  * Close current thread specific flow workspace.
8192  *
8193  * If previous workspace available, set it as current.
8194  *
8195  * @return pointer to thread specific flow workspace data, NULL on error.
8196  */
8197 void
8198 mlx5_flow_pop_thread_workspace(void)
8199 {
8200 	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
8201 
8202 	if (!data)
8203 		return;
8204 	if (!data->inuse) {
8205 		DRV_LOG(ERR, "Failed to close unused flow workspace.");
8206 		return;
8207 	}
8208 	data->inuse = 0;
8209 	if (!data->prev)
8210 		return;
8211 	if (mlx5_flow_os_set_specific_workspace(data->prev))
8212 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
8213 }
8214 
8215 /**
8216  * Verify the flow list is empty
8217  *
8218  * @param dev
8219  *  Pointer to Ethernet device.
8220  *
8221  * @return the number of flows not released.
8222  */
8223 int
8224 mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused)
8225 {
8226 	struct mlx5_priv *priv = dev->data->dev_private;
8227 	struct rte_flow *flow;
8228 	uint32_t idx = 0;
8229 	int ret = 0, i;
8230 
8231 	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
8232 		MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
8233 			DRV_LOG(DEBUG, "port %u flow %p still referenced",
8234 				dev->data->port_id, (void *)flow);
8235 			ret++;
8236 		}
8237 	}
8238 	return ret;
8239 }
8240 
8241 /**
8242  * Enable default hairpin egress flow.
8243  *
8244  * @param dev
8245  *   Pointer to Ethernet device.
8246  * @param sq_num
8247  *   The SQ hw number.
8248  *
8249  * @return
8250  *   0 on success, a negative errno value otherwise and rte_errno is set.
8251  */
8252 int
8253 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
8254 			    uint32_t sq_num)
8255 {
8256 	const struct rte_flow_attr attr = {
8257 		.egress = 1,
8258 		.priority = 0,
8259 	};
8260 	struct mlx5_rte_flow_item_sq queue_spec = {
8261 		.queue = sq_num,
8262 	};
8263 	struct mlx5_rte_flow_item_sq queue_mask = {
8264 		.queue = UINT32_MAX,
8265 	};
8266 	struct rte_flow_item items[] = {
8267 		{
8268 			.type = (enum rte_flow_item_type)
8269 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8270 			.spec = &queue_spec,
8271 			.last = NULL,
8272 			.mask = &queue_mask,
8273 		},
8274 		{
8275 			.type = RTE_FLOW_ITEM_TYPE_END,
8276 		},
8277 	};
8278 	struct rte_flow_action_jump jump = {
8279 		.group = MLX5_HAIRPIN_TX_TABLE,
8280 	};
8281 	struct rte_flow_action actions[2];
8282 	uint32_t flow_idx;
8283 	struct rte_flow_error error;
8284 
8285 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
8286 	actions[0].conf = &jump;
8287 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
8288 	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
8289 				    &attr, items, actions, false, &error);
8290 	if (!flow_idx) {
8291 		DRV_LOG(DEBUG,
8292 			"Failed to create ctrl flow: rte_errno(%d),"
8293 			" type(%d), message(%s)",
8294 			rte_errno, error.type,
8295 			error.message ? error.message : " (no stated reason)");
8296 		return -rte_errno;
8297 	}
8298 	return 0;
8299 }
8300 
8301 /**
8302  * Enable a control flow configured from the control plane.
8303  *
8304  * @param dev
8305  *   Pointer to Ethernet device.
8306  * @param eth_spec
8307  *   An Ethernet flow spec to apply.
8308  * @param eth_mask
8309  *   An Ethernet flow mask to apply.
8310  * @param vlan_spec
8311  *   A VLAN flow spec to apply.
8312  * @param vlan_mask
8313  *   A VLAN flow mask to apply.
8314  *
8315  * @return
8316  *   0 on success, a negative errno value otherwise and rte_errno is set.
8317  */
8318 int
8319 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
8320 		    struct rte_flow_item_eth *eth_spec,
8321 		    struct rte_flow_item_eth *eth_mask,
8322 		    struct rte_flow_item_vlan *vlan_spec,
8323 		    struct rte_flow_item_vlan *vlan_mask)
8324 {
8325 	struct mlx5_priv *priv = dev->data->dev_private;
8326 	const struct rte_flow_attr attr = {
8327 		.ingress = 1,
8328 		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
8329 	};
8330 	struct rte_flow_item items[] = {
8331 		{
8332 			.type = RTE_FLOW_ITEM_TYPE_ETH,
8333 			.spec = eth_spec,
8334 			.last = NULL,
8335 			.mask = eth_mask,
8336 		},
8337 		{
8338 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
8339 					      RTE_FLOW_ITEM_TYPE_END,
8340 			.spec = vlan_spec,
8341 			.last = NULL,
8342 			.mask = vlan_mask,
8343 		},
8344 		{
8345 			.type = RTE_FLOW_ITEM_TYPE_END,
8346 		},
8347 	};
8348 	uint16_t queue[priv->reta_idx_n];
8349 	struct rte_flow_action_rss action_rss = {
8350 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
8351 		.level = 0,
8352 		.types = priv->rss_conf.rss_hf,
8353 		.key_len = priv->rss_conf.rss_key_len,
8354 		.queue_num = priv->reta_idx_n,
8355 		.key = priv->rss_conf.rss_key,
8356 		.queue = queue,
8357 	};
8358 	struct rte_flow_action actions[] = {
8359 		{
8360 			.type = RTE_FLOW_ACTION_TYPE_RSS,
8361 			.conf = &action_rss,
8362 		},
8363 		{
8364 			.type = RTE_FLOW_ACTION_TYPE_END,
8365 		},
8366 	};
8367 	uint32_t flow_idx;
8368 	struct rte_flow_error error;
8369 	unsigned int i;
8370 
8371 	if (!priv->reta_idx_n || !priv->rxqs_n) {
8372 		return 0;
8373 	}
8374 	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
8375 		action_rss.types = 0;
8376 	for (i = 0; i != priv->reta_idx_n; ++i)
8377 		queue[i] = (*priv->reta_idx)[i];
8378 	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
8379 				    &attr, items, actions, false, &error);
8380 	if (!flow_idx)
8381 		return -rte_errno;
8382 	return 0;
8383 }
8384 
8385 /**
8386  * Enable a flow control configured from the control plane.
8387  *
8388  * @param dev
8389  *   Pointer to Ethernet device.
8390  * @param eth_spec
8391  *   An Ethernet flow spec to apply.
8392  * @param eth_mask
8393  *   An Ethernet flow mask to apply.
8394  *
8395  * @return
8396  *   0 on success, a negative errno value otherwise and rte_errno is set.
8397  */
8398 int
8399 mlx5_ctrl_flow(struct rte_eth_dev *dev,
8400 	       struct rte_flow_item_eth *eth_spec,
8401 	       struct rte_flow_item_eth *eth_mask)
8402 {
8403 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
8404 }
8405 
8406 /**
8407  * Create default miss flow rule matching lacp traffic
8408  *
8409  * @param dev
8410  *   Pointer to Ethernet device.
8411  * @param eth_spec
8412  *   An Ethernet flow spec to apply.
8413  *
8414  * @return
8415  *   0 on success, a negative errno value otherwise and rte_errno is set.
8416  */
8417 int
8418 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
8419 {
8420 	/*
8421 	 * The LACP matching is done by only using ether type since using
8422 	 * a multicast dst mac causes kernel to give low priority to this flow.
8423 	 */
8424 	static const struct rte_flow_item_eth lacp_spec = {
8425 		.hdr.ether_type = RTE_BE16(0x8809),
8426 	};
8427 	static const struct rte_flow_item_eth lacp_mask = {
8428 		.hdr.ether_type = 0xffff,
8429 	};
8430 	const struct rte_flow_attr attr = {
8431 		.ingress = 1,
8432 	};
8433 	struct rte_flow_item items[] = {
8434 		{
8435 			.type = RTE_FLOW_ITEM_TYPE_ETH,
8436 			.spec = &lacp_spec,
8437 			.mask = &lacp_mask,
8438 		},
8439 		{
8440 			.type = RTE_FLOW_ITEM_TYPE_END,
8441 		},
8442 	};
8443 	struct rte_flow_action actions[] = {
8444 		{
8445 			.type = (enum rte_flow_action_type)
8446 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
8447 		},
8448 		{
8449 			.type = RTE_FLOW_ACTION_TYPE_END,
8450 		},
8451 	};
8452 	struct rte_flow_error error;
8453 	uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
8454 					&attr, items, actions,
8455 					false, &error);
8456 
8457 	if (!flow_idx)
8458 		return -rte_errno;
8459 	return 0;
8460 }
8461 
8462 /**
8463  * Destroy a flow.
8464  *
8465  * @see rte_flow_destroy()
8466  * @see rte_flow_ops
8467  */
8468 int
8469 mlx5_flow_destroy(struct rte_eth_dev *dev,
8470 		  struct rte_flow *flow,
8471 		  struct rte_flow_error *error __rte_unused)
8472 {
8473 	struct mlx5_priv *priv = dev->data->dev_private;
8474 	struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
8475 	struct mlx5_dv_flow_info *flow_info;
8476 
8477 	if (priv->sh->config.dv_flow_en == 2)
8478 		return rte_flow_error_set(error, ENOTSUP,
8479 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8480 			  NULL,
8481 			  "Flow non-Q destruction not supported");
8482 	flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
8483 				(uintptr_t)(void *)flow);
8484 	if (unlikely(mlx5_need_cache_flow(priv, NULL))) {
8485 		flow_info = LIST_FIRST(&mode_info->hot_upgrade);
8486 		while (flow_info) {
8487 			/* Romove the cache flow info. */
8488 			if (flow_info->flow_idx_low_prio == (uint32_t)(uintptr_t)flow) {
8489 				MLX5_ASSERT(!flow_info->flow_idx_high_prio);
8490 				LIST_REMOVE(flow_info, next);
8491 				mlx5_free(flow_info->items);
8492 				mlx5_free(flow_info->actions);
8493 				mlx5_free(flow_info);
8494 				break;
8495 			}
8496 			flow_info = LIST_NEXT(flow_info, next);
8497 		}
8498 	}
8499 	return 0;
8500 }
8501 
8502 /**
8503  * Destroy all flows.
8504  *
8505  * @see rte_flow_flush()
8506  * @see rte_flow_ops
8507  */
8508 int
8509 mlx5_flow_flush(struct rte_eth_dev *dev,
8510 		struct rte_flow_error *error __rte_unused)
8511 {
8512 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
8513 	return 0;
8514 }
8515 
8516 /**
8517  * Isolated mode.
8518  *
8519  * @see rte_flow_isolate()
8520  * @see rte_flow_ops
8521  */
8522 int
8523 mlx5_flow_isolate(struct rte_eth_dev *dev,
8524 		  int enable,
8525 		  struct rte_flow_error *error)
8526 {
8527 	struct mlx5_priv *priv = dev->data->dev_private;
8528 
8529 	if (dev->data->dev_started) {
8530 		rte_flow_error_set(error, EBUSY,
8531 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8532 				   NULL,
8533 				   "port must be stopped first");
8534 		return -rte_errno;
8535 	}
8536 	if (!enable && !priv->sh->config.repr_matching)
8537 		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8538 					  "isolated mode cannot be disabled when "
8539 					  "representor matching is disabled");
8540 	priv->isolated = !!enable;
8541 	if (enable)
8542 		dev->dev_ops = &mlx5_dev_ops_isolate;
8543 	else
8544 		dev->dev_ops = &mlx5_dev_ops;
8545 
8546 	dev->rx_descriptor_status = mlx5_rx_descriptor_status;
8547 	dev->tx_descriptor_status = mlx5_tx_descriptor_status;
8548 
8549 	return 0;
8550 }
8551 
8552 /**
8553  * Query a flow.
8554  *
8555  * @see rte_flow_query()
8556  * @see rte_flow_ops
8557  */
8558 static int
8559 flow_drv_query(struct rte_eth_dev *dev,
8560 	       struct rte_flow *eflow,
8561 	       const struct rte_flow_action *actions,
8562 	       void *data,
8563 	       struct rte_flow_error *error)
8564 {
8565 	struct mlx5_priv *priv = dev->data->dev_private;
8566 	const struct mlx5_flow_driver_ops *fops;
8567 	struct rte_flow *flow = NULL;
8568 	enum mlx5_flow_drv_type ftype = MLX5_FLOW_TYPE_MIN;
8569 
8570 	if (priv->sh->config.dv_flow_en == 2) {
8571 #ifdef HAVE_MLX5_HWS_SUPPORT
8572 		flow = eflow;
8573 		ftype = MLX5_FLOW_TYPE_HW;
8574 #endif
8575 	} else {
8576 		flow = (struct rte_flow *)mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
8577 				(uintptr_t)(void *)eflow);
8578 	}
8579 	if (!flow) {
8580 		return rte_flow_error_set(error, ENOENT,
8581 			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8582 			  NULL,
8583 			  "invalid flow handle");
8584 	}
8585 	if (ftype == MLX5_FLOW_TYPE_MIN)
8586 		ftype = flow->drv_type;
8587 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
8588 	fops = flow_get_drv_ops(ftype);
8589 
8590 	return fops->query(dev, flow, actions, data, error);
8591 }
8592 
8593 /**
8594  * Query a flow.
8595  *
8596  * @see rte_flow_query()
8597  * @see rte_flow_ops
8598  */
8599 int
8600 mlx5_flow_query(struct rte_eth_dev *dev,
8601 		struct rte_flow *flow,
8602 		const struct rte_flow_action *actions,
8603 		void *data,
8604 		struct rte_flow_error *error)
8605 {
8606 	int ret;
8607 
8608 	ret = flow_drv_query(dev, flow, actions, data,
8609 			     error);
8610 	if (ret < 0)
8611 		return ret;
8612 	return 0;
8613 }
8614 
8615 /**
8616  * Get rte_flow callbacks.
8617  *
8618  * @param dev
8619  *   Pointer to Ethernet device structure.
8620  * @param ops
8621  *   Pointer to operation-specific structure.
8622  *
8623  * @return 0
8624  */
8625 int
8626 mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
8627 		  const struct rte_flow_ops **ops)
8628 {
8629 	*ops = &mlx5_flow_ops;
8630 	return 0;
8631 }
8632 
8633 /**
8634  * Validate meter policy actions.
8635  * Dispatcher for action type specific validation.
8636  *
8637  * @param[in] dev
8638  *   Pointer to the Ethernet device structure.
8639  * @param[in] action
8640  *   The meter policy action object to validate.
8641  * @param[in] attr
8642  *   Attributes of flow to determine steering domain.
8643  * @param[out] is_rss
8644  *   Is RSS or not.
8645  * @param[out] domain_bitmap
8646  *   Domain bitmap.
8647  * @param[out] is_def_policy
8648  *   Is default policy or not.
8649  * @param[out] error
8650  *   Perform verbose error reporting if not NULL. Initialized in case of
8651  *   error only.
8652  *
8653  * @return
8654  *   0 on success, otherwise negative errno value.
8655  */
8656 int
8657 mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
8658 			const struct rte_flow_action *actions[RTE_COLORS],
8659 			struct rte_flow_attr *attr,
8660 			bool *is_rss,
8661 			uint8_t *domain_bitmap,
8662 			uint8_t *policy_mode,
8663 			struct rte_mtr_error *error)
8664 {
8665 	const struct mlx5_flow_driver_ops *fops;
8666 
8667 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8668 	return fops->validate_mtr_acts(dev, actions, attr, is_rss,
8669 				       domain_bitmap, policy_mode, error);
8670 }
8671 
8672 /**
8673  * Destroy the meter table set.
8674  *
8675  * @param[in] dev
8676  *   Pointer to Ethernet device.
8677  * @param[in] mtr_policy
8678  *   Meter policy struct.
8679  */
8680 void
8681 mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
8682 		      struct mlx5_flow_meter_policy *mtr_policy)
8683 {
8684 	const struct mlx5_flow_driver_ops *fops;
8685 
8686 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8687 	fops->destroy_mtr_acts(dev, mtr_policy);
8688 }
8689 
8690 /**
8691  * Create policy action, lock free,
8692  * (mutex should be acquired by caller).
8693  * Dispatcher for action type specific call.
8694  *
8695  * @param[in] dev
8696  *   Pointer to the Ethernet device structure.
8697  * @param[in] mtr_policy
8698  *   Meter policy struct.
8699  * @param[in] action
8700  *   Action specification used to create meter actions.
8701  * @param[in] attr
8702  *   Flow rule attributes.
8703  * @param[out] error
8704  *   Perform verbose error reporting if not NULL. Initialized in case of
8705  *   error only.
8706  *
8707  * @return
8708  *   0 on success, otherwise negative errno value.
8709  */
8710 int
8711 mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
8712 		      struct mlx5_flow_meter_policy *mtr_policy,
8713 		      const struct rte_flow_action *actions[RTE_COLORS],
8714 		      struct rte_flow_attr *attr,
8715 		      struct rte_mtr_error *error)
8716 {
8717 	const struct mlx5_flow_driver_ops *fops;
8718 
8719 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8720 	return fops->create_mtr_acts(dev, mtr_policy, actions, attr, error);
8721 }
8722 
8723 /**
8724  * Create policy rules, lock free,
8725  * (mutex should be acquired by caller).
8726  * Dispatcher for action type specific call.
8727  *
8728  * @param[in] dev
8729  *   Pointer to the Ethernet device structure.
8730  * @param[in] mtr_policy
8731  *   Meter policy struct.
8732  *
8733  * @return
8734  *   0 on success, -1 otherwise.
8735  */
8736 int
8737 mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
8738 			     struct mlx5_flow_meter_policy *mtr_policy)
8739 {
8740 	const struct mlx5_flow_driver_ops *fops;
8741 
8742 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8743 	return fops->create_policy_rules(dev, mtr_policy);
8744 }
8745 
8746 /**
8747  * Destroy policy rules, lock free,
8748  * (mutex should be acquired by caller).
8749  * Dispatcher for action type specific call.
8750  *
8751  * @param[in] dev
8752  *   Pointer to the Ethernet device structure.
8753  * @param[in] mtr_policy
8754  *   Meter policy struct.
8755  */
8756 void
8757 mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
8758 			     struct mlx5_flow_meter_policy *mtr_policy)
8759 {
8760 	const struct mlx5_flow_driver_ops *fops;
8761 
8762 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8763 	fops->destroy_policy_rules(dev, mtr_policy);
8764 }
8765 
8766 /**
8767  * Destroy the default policy table set.
8768  *
8769  * @param[in] dev
8770  *   Pointer to Ethernet device.
8771  */
8772 void
8773 mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
8774 {
8775 	const struct mlx5_flow_driver_ops *fops;
8776 
8777 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8778 	fops->destroy_def_policy(dev);
8779 }
8780 
8781 /**
8782  * Destroy the default policy table set.
8783  *
8784  * @param[in] dev
8785  *   Pointer to Ethernet device.
8786  *
8787  * @return
8788  *   0 on success, -1 otherwise.
8789  */
8790 int
8791 mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
8792 {
8793 	const struct mlx5_flow_driver_ops *fops;
8794 
8795 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8796 	return fops->create_def_policy(dev);
8797 }
8798 
8799 /**
8800  * Create the needed meter and suffix tables.
8801  *
8802  * @param[in] dev
8803  *   Pointer to Ethernet device.
8804  *
8805  * @return
8806  *   0 on success, -1 otherwise.
8807  */
8808 int
8809 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
8810 			struct mlx5_flow_meter_info *fm,
8811 			uint32_t mtr_idx,
8812 			uint8_t domain_bitmap)
8813 {
8814 	const struct mlx5_flow_driver_ops *fops;
8815 
8816 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8817 	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
8818 }
8819 
8820 /**
8821  * Destroy the meter table set.
8822  *
8823  * @param[in] dev
8824  *   Pointer to Ethernet device.
8825  * @param[in] tbl
8826  *   Pointer to the meter table set.
8827  */
8828 void
8829 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
8830 			   struct mlx5_flow_meter_info *fm)
8831 {
8832 	const struct mlx5_flow_driver_ops *fops;
8833 
8834 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8835 	fops->destroy_mtr_tbls(dev, fm);
8836 }
8837 
8838 /**
8839  * Destroy the global meter drop table.
8840  *
8841  * @param[in] dev
8842  *   Pointer to Ethernet device.
8843  */
8844 void
8845 mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
8846 {
8847 	const struct mlx5_flow_driver_ops *fops;
8848 
8849 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8850 	fops->destroy_mtr_drop_tbls(dev);
8851 }
8852 
8853 /**
8854  * Destroy the sub policy table with RX queue.
8855  *
8856  * @param[in] dev
8857  *   Pointer to Ethernet device.
8858  * @param[in] mtr_policy
8859  *   Pointer to meter policy table.
8860  */
8861 void
8862 mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
8863 		struct mlx5_flow_meter_policy *mtr_policy)
8864 {
8865 	const struct mlx5_flow_driver_ops *fops;
8866 
8867 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8868 	fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
8869 }
8870 
8871 /**
8872  * Allocate the needed aso flow meter id.
8873  *
8874  * @param[in] dev
8875  *   Pointer to Ethernet device.
8876  *
8877  * @return
8878  *   Index to aso flow meter on success, NULL otherwise.
8879  */
8880 uint32_t
8881 mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
8882 {
8883 	const struct mlx5_flow_driver_ops *fops;
8884 
8885 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8886 	return fops->create_meter(dev);
8887 }
8888 
8889 /**
8890  * Free the aso flow meter id.
8891  *
8892  * @param[in] dev
8893  *   Pointer to Ethernet device.
8894  * @param[in] mtr_idx
8895  *  Index to aso flow meter to be free.
8896  *
8897  * @return
8898  *   0 on success.
8899  */
8900 void
8901 mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
8902 {
8903 	const struct mlx5_flow_driver_ops *fops;
8904 
8905 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
8906 	fops->free_meter(dev, mtr_idx);
8907 }
8908 
8909 /**
8910  * Allocate a counter.
8911  *
8912  * @param[in] dev
8913  *   Pointer to Ethernet device structure.
8914  *
8915  * @return
8916  *   Index to allocated counter  on success, 0 otherwise.
8917  */
8918 uint32_t
8919 mlx5_counter_alloc(struct rte_eth_dev *dev)
8920 {
8921 	struct rte_flow_attr attr = { .transfer = 0 };
8922 
8923 	return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_alloc
8924 		(dev);
8925 }
8926 
8927 /**
8928  * Free a counter.
8929  *
8930  * @param[in] dev
8931  *   Pointer to Ethernet device structure.
8932  * @param[in] cnt
8933  *   Index to counter to be free.
8934  */
8935 void
8936 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
8937 {
8938 	struct rte_flow_attr attr = { .transfer = 0 };
8939 
8940 	flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_free(dev, cnt);
8941 }
8942 
8943 /**
8944  * Query counter statistics.
8945  *
8946  * @param[in] dev
8947  *   Pointer to Ethernet device structure.
8948  * @param[in] cnt
8949  *   Index to counter to query.
8950  * @param[in] clear
8951  *   Set to clear counter statistics.
8952  * @param[out] pkts
8953  *   The counter hits packets number to save.
8954  * @param[out] bytes
8955  *   The counter hits bytes number to save.
8956  *
8957  * @return
8958  *   0 on success, a negative errno value otherwise.
8959  */
8960 int
8961 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
8962 		   bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
8963 {
8964 	struct rte_flow_attr attr = { .transfer = 0 };
8965 
8966 	return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->counter_query
8967 		(dev, cnt, clear, pkts, bytes, action);
8968 }
8969 
8970 /**
8971  * Get information about HWS pre-configurable resources.
8972  *
8973  * @param[in] dev
8974  *   Pointer to the rte_eth_dev structure.
8975  * @param[out] port_info
8976  *   Pointer to port information.
8977  * @param[out] queue_info
8978  *   Pointer to queue information.
8979  * @param[out] error
8980  *   Pointer to error structure.
8981  *
8982  * @return
8983  *   0 on success, a negative errno value otherwise and rte_errno is set.
8984  */
8985 static int
8986 mlx5_flow_info_get(struct rte_eth_dev *dev,
8987 		   struct rte_flow_port_info *port_info,
8988 		   struct rte_flow_queue_info *queue_info,
8989 		   struct rte_flow_error *error)
8990 {
8991 	const struct mlx5_flow_driver_ops *fops;
8992 	struct rte_flow_attr attr = {0};
8993 
8994 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
8995 		return rte_flow_error_set(error, ENOTSUP,
8996 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8997 				NULL,
8998 				"info get with incorrect steering mode");
8999 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9000 	return fops->info_get(dev, port_info, queue_info, error);
9001 }
9002 
9003 /**
9004  * Configure port HWS resources.
9005  *
9006  * @param[in] dev
9007  *   Pointer to the rte_eth_dev structure.
9008  * @param[in] port_attr
9009  *   Port configuration attributes.
9010  * @param[in] nb_queue
9011  *   Number of queue.
9012  * @param[in] queue_attr
9013  *   Array that holds attributes for each flow queue.
9014  * @param[out] error
9015  *   Pointer to error structure.
9016  *
9017  * @return
9018  *   0 on success, a negative errno value otherwise and rte_errno is set.
9019  */
9020 static int
9021 mlx5_flow_port_configure(struct rte_eth_dev *dev,
9022 			 const struct rte_flow_port_attr *port_attr,
9023 			 uint16_t nb_queue,
9024 			 const struct rte_flow_queue_attr *queue_attr[],
9025 			 struct rte_flow_error *error)
9026 {
9027 	const struct mlx5_flow_driver_ops *fops;
9028 	struct rte_flow_attr attr = {0};
9029 
9030 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9031 		return rte_flow_error_set(error, ENOTSUP,
9032 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9033 				NULL,
9034 				"port configure with incorrect steering mode");
9035 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9036 	return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
9037 }
9038 
9039 /**
9040  * Validate item template.
9041  *
9042  * @param[in] dev
9043  *   Pointer to the rte_eth_dev structure.
9044  * @param[in] attr
9045  *   Pointer to the item template attributes.
9046  * @param[in] items
9047  *   The template item pattern.
9048  * @param[out] error
9049  *   Pointer to error structure.
9050  *
9051  * @return
9052  *   0 on success, a negative errno value otherwise and rte_errno is set.
9053  */
9054 int
9055 mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
9056 		const struct rte_flow_pattern_template_attr *attr,
9057 		const struct rte_flow_item items[],
9058 		struct rte_flow_error *error)
9059 {
9060 	const struct mlx5_flow_driver_ops *fops;
9061 	struct rte_flow_attr fattr = {0};
9062 
9063 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9064 		rte_flow_error_set(error, ENOTSUP,
9065 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9066 			"pattern validate with incorrect steering mode");
9067 		return -ENOTSUP;
9068 	}
9069 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9070 	return fops->pattern_validate(dev, attr, items, error);
9071 }
9072 
9073 /**
9074  * Create flow item template.
9075  *
9076  * @param[in] dev
9077  *   Pointer to the rte_eth_dev structure.
9078  * @param[in] attr
9079  *   Pointer to the item template attributes.
9080  * @param[in] items
9081  *   The template item pattern.
9082  * @param[out] error
9083  *   Pointer to error structure.
9084  *
9085  * @return
9086  *   0 on success, a negative errno value otherwise and rte_errno is set.
9087  */
9088 static struct rte_flow_pattern_template *
9089 mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
9090 		const struct rte_flow_pattern_template_attr *attr,
9091 		const struct rte_flow_item items[],
9092 		struct rte_flow_error *error)
9093 {
9094 	const struct mlx5_flow_driver_ops *fops;
9095 	struct rte_flow_attr fattr = {0};
9096 
9097 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9098 		rte_flow_error_set(error, ENOTSUP,
9099 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9100 				NULL,
9101 				"pattern create with incorrect steering mode");
9102 		return NULL;
9103 	}
9104 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9105 	return fops->pattern_template_create(dev, attr, items, error);
9106 }
9107 
9108 /**
9109  * Destroy flow item template.
9110  *
9111  * @param[in] dev
9112  *   Pointer to the rte_eth_dev structure.
9113  * @param[in] template
9114  *   Pointer to the item template to be destroyed.
9115  * @param[out] error
9116  *   Pointer to error structure.
9117  *
9118  * @return
9119  *   0 on success, a negative errno value otherwise and rte_errno is set.
9120  */
9121 static int
9122 mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
9123 				   struct rte_flow_pattern_template *template,
9124 				   struct rte_flow_error *error)
9125 {
9126 	const struct mlx5_flow_driver_ops *fops;
9127 	struct rte_flow_attr attr = {0};
9128 
9129 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9130 		return rte_flow_error_set(error, ENOTSUP,
9131 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9132 				NULL,
9133 				"pattern destroy with incorrect steering mode");
9134 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9135 	return fops->pattern_template_destroy(dev, template, error);
9136 }
9137 
9138 /**
9139  * Validate flow actions template.
9140  *
9141  * @param[in] dev
9142  *   Pointer to the rte_eth_dev structure.
9143  * @param[in] attr
9144  *   Pointer to the action template attributes.
9145  * @param[in] actions
9146  *   Associated actions (list terminated by the END action).
9147  * @param[in] masks
9148  *   List of actions that marks which of the action's member is constant.
9149  * @param[out] error
9150  *   Pointer to error structure.
9151  *
9152  * @return
9153  *   0 on success, a negative errno value otherwise and rte_errno is set.
9154  */
9155 int
9156 mlx5_flow_actions_validate(struct rte_eth_dev *dev,
9157 			const struct rte_flow_actions_template_attr *attr,
9158 			const struct rte_flow_action actions[],
9159 			const struct rte_flow_action masks[],
9160 			struct rte_flow_error *error)
9161 {
9162 	const struct mlx5_flow_driver_ops *fops;
9163 	struct rte_flow_attr fattr = {0};
9164 
9165 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9166 		rte_flow_error_set(error, ENOTSUP,
9167 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9168 			"actions validate with incorrect steering mode");
9169 		return -ENOTSUP;
9170 	}
9171 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9172 	return fops->actions_validate(dev, attr, actions, masks, error);
9173 }
9174 
9175 /**
9176  * Create flow item template.
9177  *
9178  * @param[in] dev
9179  *   Pointer to the rte_eth_dev structure.
9180  * @param[in] attr
9181  *   Pointer to the action template attributes.
9182  * @param[in] actions
9183  *   Associated actions (list terminated by the END action).
9184  * @param[in] masks
9185  *   List of actions that marks which of the action's member is constant.
9186  * @param[out] error
9187  *   Pointer to error structure.
9188  *
9189  * @return
9190  *   0 on success, a negative errno value otherwise and rte_errno is set.
9191  */
9192 static struct rte_flow_actions_template *
9193 mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
9194 			const struct rte_flow_actions_template_attr *attr,
9195 			const struct rte_flow_action actions[],
9196 			const struct rte_flow_action masks[],
9197 			struct rte_flow_error *error)
9198 {
9199 	const struct mlx5_flow_driver_ops *fops;
9200 	struct rte_flow_attr fattr = {0};
9201 
9202 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9203 		rte_flow_error_set(error, ENOTSUP,
9204 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9205 				NULL,
9206 				"action create with incorrect steering mode");
9207 		return NULL;
9208 	}
9209 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9210 	return fops->actions_template_create(dev, attr, actions, masks, error);
9211 }
9212 
9213 /**
9214  * Destroy flow action template.
9215  *
9216  * @param[in] dev
9217  *   Pointer to the rte_eth_dev structure.
9218  * @param[in] template
9219  *   Pointer to the action template to be destroyed.
9220  * @param[out] error
9221  *   Pointer to error structure.
9222  *
9223  * @return
9224  *   0 on success, a negative errno value otherwise and rte_errno is set.
9225  */
9226 static int
9227 mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
9228 				   struct rte_flow_actions_template *template,
9229 				   struct rte_flow_error *error)
9230 {
9231 	const struct mlx5_flow_driver_ops *fops;
9232 	struct rte_flow_attr attr = {0};
9233 
9234 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9235 		return rte_flow_error_set(error, ENOTSUP,
9236 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9237 				NULL,
9238 				"action destroy with incorrect steering mode");
9239 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9240 	return fops->actions_template_destroy(dev, template, error);
9241 }
9242 
9243 /**
9244  * Create flow table.
9245  *
9246  * @param[in] dev
9247  *   Pointer to the rte_eth_dev structure.
9248  * @param[in] attr
9249  *   Pointer to the table attributes.
9250  * @param[in] item_templates
9251  *   Item template array to be binded to the table.
9252  * @param[in] nb_item_templates
9253  *   Number of item template.
9254  * @param[in] action_templates
9255  *   Action template array to be binded to the table.
9256  * @param[in] nb_action_templates
9257  *   Number of action template.
9258  * @param[out] error
9259  *   Pointer to error structure.
9260  *
9261  * @return
9262  *    Table on success, NULL otherwise and rte_errno is set.
9263  */
9264 static struct rte_flow_template_table *
9265 mlx5_flow_table_create(struct rte_eth_dev *dev,
9266 		       const struct rte_flow_template_table_attr *attr,
9267 		       struct rte_flow_pattern_template *item_templates[],
9268 		       uint8_t nb_item_templates,
9269 		       struct rte_flow_actions_template *action_templates[],
9270 		       uint8_t nb_action_templates,
9271 		       struct rte_flow_error *error)
9272 {
9273 	const struct mlx5_flow_driver_ops *fops;
9274 	struct rte_flow_attr fattr = {0};
9275 
9276 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
9277 		rte_flow_error_set(error, ENOTSUP,
9278 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9279 				NULL,
9280 				"table create with incorrect steering mode");
9281 		return NULL;
9282 	}
9283 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9284 	return fops->template_table_create(dev,
9285 					   attr,
9286 					   item_templates,
9287 					   nb_item_templates,
9288 					   action_templates,
9289 					   nb_action_templates,
9290 					   error);
9291 }
9292 
9293 /**
9294  * PMD destroy flow table.
9295  *
9296  * @param[in] dev
9297  *   Pointer to the rte_eth_dev structure.
9298  * @param[in] table
9299  *   Pointer to the table to be destroyed.
9300  * @param[out] error
9301  *   Pointer to error structure.
9302  *
9303  * @return
9304  *   0 on success, a negative errno value otherwise and rte_errno is set.
9305  */
9306 static int
9307 mlx5_flow_table_destroy(struct rte_eth_dev *dev,
9308 			struct rte_flow_template_table *table,
9309 			struct rte_flow_error *error)
9310 {
9311 	const struct mlx5_flow_driver_ops *fops;
9312 	struct rte_flow_attr attr = {0};
9313 
9314 	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_HW)
9315 		return rte_flow_error_set(error, ENOTSUP,
9316 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9317 				NULL,
9318 				"table destroy with incorrect steering mode");
9319 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9320 	return fops->template_table_destroy(dev, table, error);
9321 }
9322 
9323 /**
9324  * PMD group set miss actions.
9325  *
9326  * @param[in] dev
9327  *   Pointer to the rte_eth_dev structure.
9328  * @param[in] attr
9329  *   Pointer to group attributes
9330  * @param[in] actions
9331  *   Array of actions
9332  * @param[out] error
9333  *   Pointer to error structure.
9334  *
9335  * @return
9336  *   0 on success, a negative errno value otherwise and rte_errno is set.
9337  */
9338 static int
9339 mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,
9340 				 uint32_t group_id,
9341 				 const struct rte_flow_group_attr *attr,
9342 				 const struct rte_flow_action actions[],
9343 				 struct rte_flow_error *error)
9344 {
9345 	const struct mlx5_flow_driver_ops *fops;
9346 	struct rte_flow_attr fattr = {0};
9347 
9348 	if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW)
9349 		return rte_flow_error_set(error, ENOTSUP,
9350 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9351 				NULL,
9352 				"group set miss actions with incorrect steering mode");
9353 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
9354 	return fops->group_set_miss_actions(dev, group_id, attr, actions, error);
9355 }
9356 
9357 /**
9358  * Allocate a new memory for the counter values wrapped by all the needed
9359  * management.
9360  *
9361  * @param[in] sh
9362  *   Pointer to mlx5_dev_ctx_shared object.
9363  *
9364  * @return
9365  *   0 on success, a negative errno value otherwise.
9366  */
9367 static int
9368 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
9369 {
9370 	struct mlx5_counter_stats_mem_mng *mem_mng;
9371 	volatile struct flow_counter_stats *raw_data;
9372 	int raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES;
9373 	int size = (sizeof(struct flow_counter_stats) *
9374 			MLX5_COUNTERS_PER_POOL +
9375 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
9376 			sizeof(struct mlx5_counter_stats_mem_mng);
9377 	size_t pgsize = rte_mem_page_size();
9378 	uint8_t *mem;
9379 	int ret;
9380 	int i;
9381 
9382 	if (pgsize == (size_t)-1) {
9383 		DRV_LOG(ERR, "Failed to get mem page size");
9384 		rte_errno = ENOMEM;
9385 		return -ENOMEM;
9386 	}
9387 	mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
9388 	if (!mem) {
9389 		rte_errno = ENOMEM;
9390 		return -ENOMEM;
9391 	}
9392 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
9393 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
9394 	ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
9395 					  sh->cdev->pdn, mem, size,
9396 					  &mem_mng->wm);
9397 	if (ret) {
9398 		rte_errno = errno;
9399 		mlx5_free(mem);
9400 		return -rte_errno;
9401 	}
9402 	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
9403 	raw_data = (volatile struct flow_counter_stats *)mem;
9404 	for (i = 0; i < raws_n; ++i) {
9405 		mem_mng->raws[i].mem_mng = mem_mng;
9406 		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
9407 	}
9408 	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
9409 		LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws,
9410 				 mem_mng->raws + MLX5_CNT_MR_ALLOC_BULK + i,
9411 				 next);
9412 	LIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next);
9413 	sh->sws_cmng.mem_mng = mem_mng;
9414 	return 0;
9415 }
9416 
9417 /**
9418  * Set the statistic memory to the new counter pool.
9419  *
9420  * @param[in] sh
9421  *   Pointer to mlx5_dev_ctx_shared object.
9422  * @param[in] pool
9423  *   Pointer to the pool to set the statistic memory.
9424  *
9425  * @return
9426  *   0 on success, a negative errno value otherwise.
9427  */
9428 static int
9429 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
9430 			       struct mlx5_flow_counter_pool *pool)
9431 {
9432 	struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
9433 	/* Resize statistic memory once used out. */
9434 	if (!(pool->index % MLX5_CNT_MR_ALLOC_BULK) &&
9435 	    mlx5_flow_create_counter_stat_mem_mng(sh)) {
9436 		DRV_LOG(ERR, "Cannot resize counter stat mem.");
9437 		return -1;
9438 	}
9439 	rte_spinlock_lock(&pool->sl);
9440 	pool->raw = cmng->mem_mng->raws + pool->index % MLX5_CNT_MR_ALLOC_BULK;
9441 	rte_spinlock_unlock(&pool->sl);
9442 	pool->raw_hw = NULL;
9443 	return 0;
9444 }
9445 
9446 #define MLX5_POOL_QUERY_FREQ_US 1000000
9447 
9448 /**
9449  * Set the periodic procedure for triggering asynchronous batch queries for all
9450  * the counter pools.
9451  *
9452  * @param[in] sh
9453  *   Pointer to mlx5_dev_ctx_shared object.
9454  */
9455 void
9456 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
9457 {
9458 	uint32_t pools_n, us;
9459 
9460 	pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
9461 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
9462 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
9463 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
9464 		sh->sws_cmng.query_thread_on = 0;
9465 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
9466 	} else {
9467 		sh->sws_cmng.query_thread_on = 1;
9468 	}
9469 }
9470 
9471 /**
9472  * The periodic procedure for triggering asynchronous batch queries for all the
9473  * counter pools. This function is probably called by the host thread.
9474  *
9475  * @param[in] arg
9476  *   The parameter for the alarm process.
9477  */
9478 void
9479 mlx5_flow_query_alarm(void *arg)
9480 {
9481 	struct mlx5_dev_ctx_shared *sh = arg;
9482 	struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
9483 	uint16_t pool_index = cmng->pool_index;
9484 	struct mlx5_flow_counter_pool *pool;
9485 	uint16_t n_valid;
9486 	int ret;
9487 
9488 	if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES)
9489 		goto set_alarm;
9490 	rte_spinlock_lock(&cmng->pool_update_sl);
9491 	pool = cmng->pools[pool_index];
9492 	n_valid = cmng->n_valid;
9493 	rte_spinlock_unlock(&cmng->pool_update_sl);
9494 	/* Set the statistic memory to the new created pool. */
9495 	if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
9496 		goto set_alarm;
9497 	if (pool->raw_hw)
9498 		/* There is a pool query in progress. */
9499 		goto set_alarm;
9500 	pool->raw_hw = LIST_FIRST(&cmng->free_stat_raws);
9501 	if (!pool->raw_hw)
9502 		/* No free counter statistics raw memory. */
9503 		goto set_alarm;
9504 	/*
9505 	 * Identify the counters released between query trigger and query
9506 	 * handle more efficiently. The counter released in this gap period
9507 	 * should wait for a new round of query as the new arrived packets
9508 	 * will not be taken into account.
9509 	 */
9510 	pool->query_gen++;
9511 	ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
9512 					       MLX5_COUNTERS_PER_POOL,
9513 					       NULL, NULL,
9514 					       pool->raw_hw->mem_mng->wm.lkey,
9515 					       (void *)(uintptr_t)
9516 					       pool->raw_hw->data,
9517 					       sh->devx_comp,
9518 					       (uint64_t)(uintptr_t)pool);
9519 	if (ret) {
9520 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
9521 			" %d", pool->min_dcs->id);
9522 		pool->raw_hw = NULL;
9523 		goto set_alarm;
9524 	}
9525 	LIST_REMOVE(pool->raw_hw, next);
9526 	cmng->pending_queries++;
9527 	pool_index++;
9528 	if (pool_index >= n_valid)
9529 		pool_index = 0;
9530 set_alarm:
9531 	cmng->pool_index = pool_index;
9532 	mlx5_set_query_alarm(sh);
9533 }
9534 
9535 /**
9536  * Check and callback event for new aged flow in the counter pool
9537  *
9538  * @param[in] sh
9539  *   Pointer to mlx5_dev_ctx_shared object.
9540  * @param[in] pool
9541  *   Pointer to Current counter pool.
9542  */
9543 static void
9544 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
9545 		   struct mlx5_flow_counter_pool *pool)
9546 {
9547 	struct mlx5_priv *priv;
9548 	struct mlx5_flow_counter *cnt;
9549 	struct mlx5_age_info *age_info;
9550 	struct mlx5_age_param *age_param;
9551 	struct mlx5_counter_stats_raw *cur = pool->raw_hw;
9552 	struct mlx5_counter_stats_raw *prev = pool->raw;
9553 	const uint64_t curr_time = MLX5_CURR_TIME_SEC;
9554 	const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
9555 	uint16_t expected = AGE_CANDIDATE;
9556 	uint32_t i;
9557 
9558 	pool->time_of_last_age_check = curr_time;
9559 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
9560 		cnt = MLX5_POOL_GET_CNT(pool, i);
9561 		age_param = MLX5_CNT_TO_AGE(cnt);
9562 		if (rte_atomic_load_explicit(&age_param->state,
9563 				    rte_memory_order_relaxed) != AGE_CANDIDATE)
9564 			continue;
9565 		if (cur->data[i].hits != prev->data[i].hits) {
9566 			rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
9567 					 rte_memory_order_relaxed);
9568 			continue;
9569 		}
9570 		if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
9571 				       time_delta,
9572 				       rte_memory_order_relaxed) + time_delta <= age_param->timeout)
9573 			continue;
9574 		/**
9575 		 * Hold the lock first, or if between the
9576 		 * state AGE_TMOUT and tailq operation the
9577 		 * release happened, the release procedure
9578 		 * may delete a non-existent tailq node.
9579 		 */
9580 		priv = rte_eth_devices[age_param->port_id].data->dev_private;
9581 		age_info = GET_PORT_AGE_INFO(priv);
9582 		rte_spinlock_lock(&age_info->aged_sl);
9583 		if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
9584 						AGE_TMOUT,
9585 						rte_memory_order_relaxed,
9586 						rte_memory_order_relaxed)) {
9587 			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
9588 			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
9589 		}
9590 		rte_spinlock_unlock(&age_info->aged_sl);
9591 	}
9592 	mlx5_age_event_prepare(sh);
9593 }
9594 
9595 /**
9596  * Handler for the HW respond about ready values from an asynchronous batch
9597  * query. This function is probably called by the host thread.
9598  *
9599  * @param[in] sh
9600  *   The pointer to the shared device context.
9601  * @param[in] async_id
9602  *   The Devx async ID.
9603  * @param[in] status
9604  *   The status of the completion.
9605  */
9606 void
9607 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
9608 				  uint64_t async_id, int status)
9609 {
9610 	struct mlx5_flow_counter_pool *pool =
9611 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
9612 	struct mlx5_counter_stats_raw *raw_to_free;
9613 	uint8_t query_gen = pool->query_gen ^ 1;
9614 	struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
9615 	enum mlx5_counter_type cnt_type =
9616 		pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
9617 				MLX5_COUNTER_TYPE_ORIGIN;
9618 
9619 	if (unlikely(status)) {
9620 		raw_to_free = pool->raw_hw;
9621 	} else {
9622 		raw_to_free = pool->raw;
9623 		if (pool->is_aged)
9624 			mlx5_flow_aging_check(sh, pool);
9625 		rte_spinlock_lock(&pool->sl);
9626 		pool->raw = pool->raw_hw;
9627 		rte_spinlock_unlock(&pool->sl);
9628 		/* Be sure the new raw counters data is updated in memory. */
9629 		rte_io_wmb();
9630 		if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
9631 			rte_spinlock_lock(&cmng->csl[cnt_type]);
9632 			TAILQ_CONCAT(&cmng->counters[cnt_type],
9633 				     &pool->counters[query_gen], next);
9634 			rte_spinlock_unlock(&cmng->csl[cnt_type]);
9635 		}
9636 	}
9637 	LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, raw_to_free, next);
9638 	pool->raw_hw = NULL;
9639 	sh->sws_cmng.pending_queries--;
9640 }
9641 
9642 static int
9643 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
9644 		    const struct flow_grp_info *grp_info,
9645 		    struct rte_flow_error *error)
9646 {
9647 	if (grp_info->transfer && grp_info->external &&
9648 	    grp_info->fdb_def_rule) {
9649 		if (group == UINT32_MAX)
9650 			return rte_flow_error_set
9651 						(error, EINVAL,
9652 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
9653 						 NULL,
9654 						 "group index not supported");
9655 		*table = group + 1;
9656 	} else {
9657 		*table = group;
9658 	}
9659 	DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
9660 	return 0;
9661 }
9662 
9663 /**
9664  * Translate the rte_flow group index to HW table value.
9665  *
9666  * If tunnel offload is disabled, all group ids converted to flow table
9667  * id using the standard method.
9668  * If tunnel offload is enabled, group id can be converted using the
9669  * standard or tunnel conversion method. Group conversion method
9670  * selection depends on flags in `grp_info` parameter:
9671  * - Internal (grp_info.external == 0) groups conversion uses the
9672  *   standard method.
9673  * - Group ids in JUMP action converted with the tunnel conversion.
9674  * - Group id in rule attribute conversion depends on a rule type and
9675  *   group id value:
9676  *   ** non zero group attributes converted with the tunnel method
9677  *   ** zero group attribute in non-tunnel rule is converted using the
9678  *      standard method - there's only one root table
9679  *   ** zero group attribute in steer tunnel rule is converted with the
9680  *      standard method - single root table
9681  *   ** zero group attribute in match tunnel rule is a special OvS
9682  *      case: that value is used for portability reasons. That group
9683  *      id is converted with the tunnel conversion method.
9684  *
9685  * @param[in] dev
9686  *   Port device
9687  * @param[in] tunnel
9688  *   PMD tunnel offload object
9689  * @param[in] group
9690  *   rte_flow group index value.
9691  * @param[out] table
9692  *   HW table value.
9693  * @param[in] grp_info
9694  *   flags used for conversion
9695  * @param[out] error
9696  *   Pointer to error structure.
9697  *
9698  * @return
9699  *   0 on success, a negative errno value otherwise and rte_errno is set.
9700  */
9701 int
9702 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
9703 			 const struct mlx5_flow_tunnel *tunnel,
9704 			 uint32_t group, uint32_t *table,
9705 			 const struct flow_grp_info *grp_info,
9706 			 struct rte_flow_error *error)
9707 {
9708 	int ret;
9709 	bool standard_translation;
9710 
9711 	if (!grp_info->skip_scale && grp_info->external &&
9712 	    group < MLX5_MAX_TABLES_EXTERNAL)
9713 		group *= MLX5_FLOW_TABLE_FACTOR;
9714 	if (is_tunnel_offload_active(dev)) {
9715 		standard_translation = !grp_info->external ||
9716 					grp_info->std_tbl_fix;
9717 	} else {
9718 		standard_translation = true;
9719 	}
9720 	DRV_LOG(DEBUG,
9721 		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
9722 		dev->data->port_id, group, grp_info->transfer,
9723 		grp_info->external, grp_info->fdb_def_rule,
9724 		standard_translation ? "STANDARD" : "TUNNEL");
9725 	if (standard_translation)
9726 		ret = flow_group_to_table(dev->data->port_id, group, table,
9727 					  grp_info, error);
9728 	else
9729 		ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
9730 						      table, error);
9731 
9732 	return ret;
9733 }
9734 
9735 /**
9736  * Discover availability of metadata reg_c's.
9737  *
9738  * Iteratively use test flows to check availability.
9739  *
9740  * @param[in] dev
9741  *   Pointer to the Ethernet device structure.
9742  *
9743  * @return
9744  *   0 on success, a negative errno value otherwise and rte_errno is set.
9745  */
9746 int
9747 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
9748 {
9749 	struct mlx5_priv *priv = dev->data->dev_private;
9750 	enum modify_reg idx;
9751 	int n = 0;
9752 
9753 	/* reg_c[0] and reg_c[1] are reserved. */
9754 	priv->sh->flow_mreg_c[n++] = REG_C_0;
9755 	priv->sh->flow_mreg_c[n++] = REG_C_1;
9756 	/* Discover availability of other reg_c's. */
9757 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
9758 		struct rte_flow_attr attr = {
9759 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
9760 			.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
9761 			.ingress = 1,
9762 		};
9763 		struct rte_flow_item items[] = {
9764 			[0] = {
9765 				.type = RTE_FLOW_ITEM_TYPE_END,
9766 			},
9767 		};
9768 		struct rte_flow_action actions[] = {
9769 			[0] = {
9770 				.type = (enum rte_flow_action_type)
9771 					MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
9772 				.conf = &(struct mlx5_flow_action_copy_mreg){
9773 					.src = REG_C_1,
9774 					.dst = idx,
9775 				},
9776 			},
9777 			[1] = {
9778 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
9779 				.conf = &(struct rte_flow_action_jump){
9780 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
9781 				},
9782 			},
9783 			[2] = {
9784 				.type = RTE_FLOW_ACTION_TYPE_END,
9785 			},
9786 		};
9787 		uint32_t flow_idx;
9788 		struct rte_flow *flow;
9789 		struct rte_flow_error error;
9790 
9791 		if (!priv->sh->config.dv_flow_en)
9792 			break;
9793 		/* Create internal flow, validation skips copy action. */
9794 		flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
9795 					items, actions, false, &error);
9796 		flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
9797 				      flow_idx);
9798 		if (!flow)
9799 			continue;
9800 		priv->sh->flow_mreg_c[n++] = idx;
9801 		flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
9802 	}
9803 	for (; n < MLX5_MREG_C_NUM; ++n)
9804 		priv->sh->flow_mreg_c[n] = REG_NON;
9805 	priv->sh->metadata_regc_check_flag = 1;
9806 	return 0;
9807 }
9808 
9809 int
9810 save_dump_file(const uint8_t *data, uint32_t size,
9811 	uint32_t type, uint64_t id, void *arg, FILE *file)
9812 {
9813 	char line[BUF_SIZE];
9814 	uint32_t out = 0;
9815 	uint32_t k;
9816 	uint32_t actions_num;
9817 	struct rte_flow_query_count *count;
9818 
9819 	memset(line, 0, BUF_SIZE);
9820 	switch (type) {
9821 	case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
9822 		actions_num = *(uint32_t *)(arg);
9823 		out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
9824 				type, id, actions_num);
9825 		break;
9826 	case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
9827 		out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
9828 				type, id);
9829 		break;
9830 	case DR_DUMP_REC_TYPE_PMD_COUNTER:
9831 		count = (struct rte_flow_query_count *)arg;
9832 		fprintf(file,
9833 			"%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
9834 			type, id, count->hits, count->bytes);
9835 		return 0;
9836 	default:
9837 		return -1;
9838 	}
9839 
9840 	for (k = 0; k < size; k++) {
9841 		/* Make sure we do not overrun the line buffer length. */
9842 		if (out >= BUF_SIZE - 4) {
9843 			line[out] = '\0';
9844 			break;
9845 		}
9846 		out += snprintf(line + out, BUF_SIZE - out, "%02x",
9847 				(data[k]) & 0xff);
9848 	}
9849 	fprintf(file, "%s\n", line);
9850 	return 0;
9851 }
9852 
9853 int
9854 mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
9855 	struct rte_flow_query_count *count, struct rte_flow_error *error)
9856 {
9857 	struct rte_flow_action action[2];
9858 	enum mlx5_flow_drv_type ftype;
9859 	const struct mlx5_flow_driver_ops *fops;
9860 
9861 	if (!flow) {
9862 		return rte_flow_error_set(error, ENOENT,
9863 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9864 				NULL,
9865 				"invalid flow handle");
9866 	}
9867 	action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
9868 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
9869 	if (flow->counter) {
9870 		memset(count, 0, sizeof(struct rte_flow_query_count));
9871 		ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
9872 		MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
9873 						ftype < MLX5_FLOW_TYPE_MAX);
9874 		fops = flow_get_drv_ops(ftype);
9875 		return fops->query(dev, flow, action, count, error);
9876 	}
9877 	return -1;
9878 }
9879 
9880 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
9881 /**
9882  * Dump flow ipool data to file
9883  *
9884  * @param[in] dev
9885  *   The pointer to Ethernet device.
9886  * @param[in] file
9887  *   A pointer to a file for output.
9888  * @param[out] error
9889  *   Perform verbose error reporting if not NULL. PMDs initialize this
9890  *   structure in case of error only.
9891  * @return
9892  *   0 on success, a negative value otherwise.
9893  */
9894 int
9895 mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
9896 	struct rte_flow *flow, FILE *file,
9897 	struct rte_flow_error *error)
9898 {
9899 	struct mlx5_priv *priv = dev->data->dev_private;
9900 	struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
9901 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
9902 	uint32_t handle_idx;
9903 	struct mlx5_flow_handle *dh;
9904 	struct rte_flow_query_count count;
9905 	uint32_t actions_num;
9906 	const uint8_t *data;
9907 	size_t size;
9908 	uint64_t id;
9909 	uint32_t type;
9910 	void *action = NULL;
9911 
9912 	if (!flow) {
9913 		return rte_flow_error_set(error, ENOENT,
9914 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9915 				NULL,
9916 				"invalid flow handle");
9917 	}
9918 	handle_idx = flow->dev_handles;
9919 	/* query counter */
9920 	if (flow->counter &&
9921 	(!mlx5_counter_query(dev, flow->counter, false,
9922 	&count.hits, &count.bytes, &action)) && action) {
9923 		id = (uint64_t)(uintptr_t)action;
9924 		type = DR_DUMP_REC_TYPE_PMD_COUNTER;
9925 		save_dump_file(NULL, 0, type,
9926 			id, (void *)&count, file);
9927 	}
9928 
9929 	while (handle_idx) {
9930 		dh = mlx5_ipool_get(priv->sh->ipool
9931 				[MLX5_IPOOL_MLX5_FLOW], handle_idx);
9932 		if (!dh)
9933 			continue;
9934 		handle_idx = dh->next.next;
9935 
9936 		/* Get modify_hdr and encap_decap buf from ipools. */
9937 		encap_decap = NULL;
9938 		modify_hdr = dh->dvh.modify_hdr;
9939 
9940 		if (dh->dvh.rix_encap_decap) {
9941 			encap_decap = mlx5_ipool_get(priv->sh->ipool
9942 						[MLX5_IPOOL_DECAP_ENCAP],
9943 						dh->dvh.rix_encap_decap);
9944 		}
9945 		if (modify_hdr) {
9946 			data = (const uint8_t *)modify_hdr->actions;
9947 			size = (size_t)(modify_hdr->actions_num) * 8;
9948 			id = (uint64_t)(uintptr_t)modify_hdr->action;
9949 			actions_num = modify_hdr->actions_num;
9950 			type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
9951 			save_dump_file(data, size, type, id,
9952 						(void *)(&actions_num), file);
9953 		}
9954 		if (encap_decap) {
9955 			data = encap_decap->buf;
9956 			size = encap_decap->size;
9957 			id = (uint64_t)(uintptr_t)encap_decap->action;
9958 			type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
9959 			save_dump_file(data, size, type,
9960 						id, NULL, file);
9961 		}
9962 	}
9963 	return 0;
9964 }
9965 
9966 /**
9967  * Dump all flow's encap_decap/modify_hdr/counter data to file
9968  *
9969  * @param[in] dev
9970  *   The pointer to Ethernet device.
9971  * @param[in] file
9972  *   A pointer to a file for output.
9973  * @param[out] error
9974  *   Perform verbose error reporting if not NULL. PMDs initialize this
9975  *   structure in case of error only.
9976  * @return
9977  *   0 on success, a negative value otherwise.
9978  */
9979 static int
9980 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
9981 	FILE *file, struct rte_flow_error *error __rte_unused)
9982 {
9983 	struct mlx5_priv *priv = dev->data->dev_private;
9984 	struct mlx5_dev_ctx_shared *sh = priv->sh;
9985 	struct mlx5_hlist *h;
9986 	struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
9987 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
9988 	struct rte_flow_query_count count;
9989 	uint32_t actions_num;
9990 	const uint8_t *data;
9991 	size_t size;
9992 	uint64_t id;
9993 	uint32_t type;
9994 	uint32_t i;
9995 	uint32_t j;
9996 	struct mlx5_list_inconst *l_inconst;
9997 	struct mlx5_list_entry *e;
9998 	int lcore_index;
9999 	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
10000 	uint32_t max;
10001 	void *action;
10002 
10003 	/* encap_decap hlist is lcore_share, get global core cache. */
10004 	i = MLX5_LIST_GLOBAL;
10005 	h = sh->encaps_decaps;
10006 	if (h) {
10007 		for (j = 0; j <= h->mask; j++) {
10008 			l_inconst = &h->buckets[j].l;
10009 			if (!l_inconst || !l_inconst->cache[i])
10010 				continue;
10011 
10012 			e = LIST_FIRST(&l_inconst->cache[i]->h);
10013 			while (e) {
10014 				encap_decap =
10015 				(struct mlx5_flow_dv_encap_decap_resource *)e;
10016 				data = encap_decap->buf;
10017 				size = encap_decap->size;
10018 				id = (uint64_t)(uintptr_t)encap_decap->action;
10019 				type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
10020 				save_dump_file(data, size, type,
10021 					id, NULL, file);
10022 				e = LIST_NEXT(e, next);
10023 			}
10024 		}
10025 	}
10026 
10027 	/* get modify_hdr */
10028 	h = sh->modify_cmds;
10029 	if (h) {
10030 		lcore_index = rte_lcore_index(rte_lcore_id());
10031 		if (unlikely(lcore_index == -1)) {
10032 			lcore_index = MLX5_LIST_NLCORE;
10033 			rte_spinlock_lock(&h->l_const.lcore_lock);
10034 		}
10035 		i = lcore_index;
10036 
10037 		if (lcore_index == MLX5_LIST_NLCORE) {
10038 			for (i = 0; i <= (uint32_t)lcore_index; i++) {
10039 				for (j = 0; j <= h->mask; j++) {
10040 					l_inconst = &h->buckets[j].l;
10041 					if (!l_inconst || !l_inconst->cache[i])
10042 						continue;
10043 
10044 					e = LIST_FIRST(&l_inconst->cache[i]->h);
10045 					while (e) {
10046 						modify_hdr =
10047 						(struct mlx5_flow_dv_modify_hdr_resource *)e;
10048 						data = (const uint8_t *)modify_hdr->actions;
10049 						size = (size_t)(modify_hdr->actions_num) * 8;
10050 						actions_num = modify_hdr->actions_num;
10051 						id = (uint64_t)(uintptr_t)modify_hdr->action;
10052 						type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
10053 						save_dump_file(data, size, type, id,
10054 								(void *)(&actions_num), file);
10055 						e = LIST_NEXT(e, next);
10056 					}
10057 				}
10058 			}
10059 		} else {
10060 			for (j = 0; j <= h->mask; j++) {
10061 				l_inconst = &h->buckets[j].l;
10062 				if (!l_inconst || !l_inconst->cache[i])
10063 					continue;
10064 
10065 				e = LIST_FIRST(&l_inconst->cache[i]->h);
10066 				while (e) {
10067 					modify_hdr =
10068 					(struct mlx5_flow_dv_modify_hdr_resource *)e;
10069 					data = (const uint8_t *)modify_hdr->actions;
10070 					size = (size_t)(modify_hdr->actions_num) * 8;
10071 					actions_num = modify_hdr->actions_num;
10072 					id = (uint64_t)(uintptr_t)modify_hdr->action;
10073 					type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
10074 					save_dump_file(data, size, type, id,
10075 							(void *)(&actions_num), file);
10076 					e = LIST_NEXT(e, next);
10077 				}
10078 			}
10079 		}
10080 
10081 		if (unlikely(lcore_index == MLX5_LIST_NLCORE))
10082 			rte_spinlock_unlock(&h->l_const.lcore_lock);
10083 	}
10084 
10085 	/* get counter */
10086 	MLX5_ASSERT(cmng->n_valid <= MLX5_COUNTER_POOLS_MAX_NUM);
10087 	max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
10088 	for (j = 1; j <= max; j++) {
10089 		action = NULL;
10090 		if ((!mlx5_counter_query(dev, j, false, &count.hits,
10091 		&count.bytes, &action)) && action) {
10092 			id = (uint64_t)(uintptr_t)action;
10093 			type = DR_DUMP_REC_TYPE_PMD_COUNTER;
10094 			save_dump_file(NULL, 0, type,
10095 					id, (void *)&count, file);
10096 		}
10097 	}
10098 	return 0;
10099 }
10100 #endif
10101 
10102 /**
10103  * Dump flow raw hw data to file
10104  *
10105  * @param[in] dev
10106  *    The pointer to Ethernet device.
10107  * @param[in] file
10108  *   A pointer to a file for output.
10109  * @param[out] error
10110  *   Perform verbose error reporting if not NULL. PMDs initialize this
10111  *   structure in case of error only.
10112  * @return
10113  *   0 on success, a negative value otherwise.
10114  */
10115 int
10116 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
10117 		   FILE *file,
10118 		   struct rte_flow_error *error __rte_unused)
10119 {
10120 	struct mlx5_priv *priv = dev->data->dev_private;
10121 	struct mlx5_dev_ctx_shared *sh = priv->sh;
10122 	uint32_t handle_idx;
10123 	int ret;
10124 	struct mlx5_flow_handle *dh;
10125 	struct rte_flow *flow;
10126 
10127 	if (!sh->config.dv_flow_en) {
10128 		if (fputs("device dv flow disabled\n", file) <= 0)
10129 			return -errno;
10130 		return -ENOTSUP;
10131 	}
10132 
10133 	/* dump all */
10134 	if (!flow_idx) {
10135 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10136 		if (mlx5_flow_dev_dump_sh_all(dev, file, error))
10137 			return -EINVAL;
10138 
10139 		if (sh->config.dv_flow_en == 2)
10140 			return mlx5dr_debug_dump(priv->dr_ctx, file);
10141 #endif
10142 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
10143 					       sh->rx_domain,
10144 					       sh->tx_domain, file);
10145 	}
10146 	/* dump one */
10147 	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
10148 			(uintptr_t)(void *)flow_idx);
10149 	if (!flow)
10150 		return -EINVAL;
10151 
10152 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10153 	mlx5_flow_dev_dump_ipool(dev, flow, file, error);
10154 #endif
10155 	handle_idx = flow->dev_handles;
10156 	while (handle_idx) {
10157 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10158 				handle_idx);
10159 		if (!dh)
10160 			return -ENOENT;
10161 		if (dh->drv_flow) {
10162 			if (sh->config.dv_flow_en == 2)
10163 				return -ENOTSUP;
10164 
10165 			ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
10166 							     file);
10167 			if (ret)
10168 				return -ENOENT;
10169 		}
10170 		handle_idx = dh->next.next;
10171 	}
10172 	return 0;
10173 }
10174 
10175 /**
10176  * Get aged-out flows.
10177  *
10178  * @param[in] dev
10179  *   Pointer to the Ethernet device structure.
10180  * @param[in] context
10181  *   The address of an array of pointers to the aged-out flows contexts.
10182  * @param[in] nb_countexts
10183  *   The length of context array pointers.
10184  * @param[out] error
10185  *   Perform verbose error reporting if not NULL. Initialized in case of
10186  *   error only.
10187  *
10188  * @return
10189  *   how many contexts get in success, otherwise negative errno value.
10190  *   if nb_contexts is 0, return the amount of all aged contexts.
10191  *   if nb_contexts is not 0 , return the amount of aged flows reported
10192  *   in the context array.
10193  */
10194 int
10195 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
10196 			uint32_t nb_contexts, struct rte_flow_error *error)
10197 {
10198 	struct rte_flow_attr attr = { .transfer = 0 };
10199 
10200 	return flow_get_drv_ops(flow_get_drv_type(dev, &attr))->get_aged_flows
10201 		(dev, contexts, nb_contexts, error);
10202 }
10203 
10204 /**
10205  * Get aged-out flows per HWS queue.
10206  *
10207  * @param[in] dev
10208  *   Pointer to the Ethernet device structure.
10209  * @param[in] queue_id
10210  *   Flow queue to query.
10211  * @param[in] context
10212  *   The address of an array of pointers to the aged-out flows contexts.
10213  * @param[in] nb_countexts
10214  *   The length of context array pointers.
10215  * @param[out] error
10216  *   Perform verbose error reporting if not NULL. Initialized in case of
10217  *   error only.
10218  *
10219  * @return
10220  *   how many contexts get in success, otherwise negative errno value.
10221  *   if nb_contexts is 0, return the amount of all aged contexts.
10222  *   if nb_contexts is not 0 , return the amount of aged flows reported
10223  *   in the context array.
10224  */
10225 int
10226 mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
10227 			   void **contexts, uint32_t nb_contexts,
10228 			   struct rte_flow_error *error)
10229 {
10230 	const struct mlx5_flow_driver_ops *fops;
10231 	struct rte_flow_attr attr = { 0 };
10232 
10233 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_HW) {
10234 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
10235 		return fops->get_q_aged_flows(dev, queue_id, contexts,
10236 					      nb_contexts, error);
10237 	}
10238 	DRV_LOG(ERR, "port %u queue %u get aged flows is not supported.",
10239 		dev->data->port_id, queue_id);
10240 	return rte_flow_error_set(error, ENOTSUP,
10241 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10242 				  "get Q aged flows with incorrect steering mode");
10243 }
10244 
10245 /* Wrapper for driver action_validate op callback */
10246 static int
10247 flow_drv_action_validate(struct rte_eth_dev *dev,
10248 			 const struct rte_flow_indir_action_conf *conf,
10249 			 const struct rte_flow_action *action,
10250 			 const struct mlx5_flow_driver_ops *fops,
10251 			 struct rte_flow_error *error)
10252 {
10253 	static const char err_msg[] = "indirect action validation unsupported";
10254 
10255 	if (!fops->action_validate) {
10256 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10257 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10258 				   NULL, err_msg);
10259 		return -rte_errno;
10260 	}
10261 	return fops->action_validate(dev, conf, action, error);
10262 }
10263 
10264 /**
10265  * Destroys the shared action by handle.
10266  *
10267  * @param dev
10268  *   Pointer to Ethernet device structure.
10269  * @param[in] handle
10270  *   Handle for the indirect action object to be destroyed.
10271  * @param[out] error
10272  *   Perform verbose error reporting if not NULL. PMDs initialize this
10273  *   structure in case of error only.
10274  *
10275  * @return
10276  *   0 on success, a negative errno value otherwise and rte_errno is set.
10277  *
10278  * @note: wrapper for driver action_create op callback.
10279  */
10280 static int
10281 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
10282 			   struct rte_flow_action_handle *handle,
10283 			   struct rte_flow_error *error)
10284 {
10285 	static const char err_msg[] = "indirect action destruction unsupported";
10286 	struct rte_flow_attr attr = { .transfer = 0 };
10287 	const struct mlx5_flow_driver_ops *fops =
10288 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10289 
10290 	if (!fops->action_destroy) {
10291 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10292 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10293 				   NULL, err_msg);
10294 		return -rte_errno;
10295 	}
10296 	return fops->action_destroy(dev, handle, error);
10297 }
10298 
10299 /* Wrapper for driver action_destroy op callback */
10300 static int
10301 flow_drv_action_update(struct rte_eth_dev *dev,
10302 		       struct rte_flow_action_handle *handle,
10303 		       const void *update,
10304 		       const struct mlx5_flow_driver_ops *fops,
10305 		       struct rte_flow_error *error)
10306 {
10307 	static const char err_msg[] = "indirect action update unsupported";
10308 
10309 	if (!fops->action_update) {
10310 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10311 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10312 				   NULL, err_msg);
10313 		return -rte_errno;
10314 	}
10315 	return fops->action_update(dev, handle, update, error);
10316 }
10317 
10318 /* Wrapper for driver action_destroy op callback */
10319 static int
10320 flow_drv_action_query(struct rte_eth_dev *dev,
10321 		      const struct rte_flow_action_handle *handle,
10322 		      void *data,
10323 		      const struct mlx5_flow_driver_ops *fops,
10324 		      struct rte_flow_error *error)
10325 {
10326 	static const char err_msg[] = "indirect action query unsupported";
10327 
10328 	if (!fops->action_query) {
10329 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10330 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10331 				   NULL, err_msg);
10332 		return -rte_errno;
10333 	}
10334 	return fops->action_query(dev, handle, data, error);
10335 }
10336 
10337 /**
10338  * Create indirect action for reuse in multiple flow rules.
10339  *
10340  * @param dev
10341  *   Pointer to Ethernet device structure.
10342  * @param conf
10343  *   Pointer to indirect action object configuration.
10344  * @param[in] action
10345  *   Action configuration for indirect action object creation.
10346  * @param[out] error
10347  *   Perform verbose error reporting if not NULL. PMDs initialize this
10348  *   structure in case of error only.
10349  * @return
10350  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
10351  */
10352 static struct rte_flow_action_handle *
10353 mlx5_action_handle_create(struct rte_eth_dev *dev,
10354 			  const struct rte_flow_indir_action_conf *conf,
10355 			  const struct rte_flow_action *action,
10356 			  struct rte_flow_error *error)
10357 {
10358 	static const char err_msg[] = "indirect action creation unsupported";
10359 	struct rte_flow_attr attr = { .transfer = 0 };
10360 	const struct mlx5_flow_driver_ops *fops =
10361 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10362 
10363 	if (flow_drv_action_validate(dev, conf, action, fops, error))
10364 		return NULL;
10365 	if (!fops->action_create) {
10366 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
10367 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10368 				   NULL, err_msg);
10369 		return NULL;
10370 	}
10371 	return fops->action_create(dev, conf, action, error);
10372 }
10373 
10374 /**
10375  * Updates inplace the indirect action configuration pointed by *handle*
10376  * with the configuration provided as *update* argument.
10377  * The update of the indirect action configuration effects all flow rules
10378  * reusing the action via handle.
10379  *
10380  * @param dev
10381  *   Pointer to Ethernet device structure.
10382  * @param[in] handle
10383  *   Handle for the indirect action to be updated.
10384  * @param[in] update
10385  *   Action specification used to modify the action pointed by handle.
10386  *   *update* could be of same type with the action pointed by the *handle*
10387  *   handle argument, or some other structures like a wrapper, depending on
10388  *   the indirect action type.
10389  * @param[out] error
10390  *   Perform verbose error reporting if not NULL. PMDs initialize this
10391  *   structure in case of error only.
10392  *
10393  * @return
10394  *   0 on success, a negative errno value otherwise and rte_errno is set.
10395  */
10396 static int
10397 mlx5_action_handle_update(struct rte_eth_dev *dev,
10398 		struct rte_flow_action_handle *handle,
10399 		const void *update,
10400 		struct rte_flow_error *error)
10401 {
10402 	struct rte_flow_attr attr = { .transfer = 0 };
10403 	const struct mlx5_flow_driver_ops *fops =
10404 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10405 	int ret;
10406 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10407 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10408 
10409 	switch (type) {
10410 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10411 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
10412 		ret = 0;
10413 		break;
10414 	default:
10415 		ret = flow_drv_action_validate(dev, NULL,
10416 				(const struct rte_flow_action *)update,
10417 				fops, error);
10418 	}
10419 	if (ret)
10420 		return ret;
10421 	return flow_drv_action_update(dev, handle, update, fops,
10422 				      error);
10423 }
10424 
10425 /**
10426  * Query the indirect action by handle.
10427  *
10428  * This function allows retrieving action-specific data such as counters.
10429  * Data is gathered by special action which may be present/referenced in
10430  * more than one flow rule definition.
10431  *
10432  * see @RTE_FLOW_ACTION_TYPE_COUNT
10433  *
10434  * @param dev
10435  *   Pointer to Ethernet device structure.
10436  * @param[in] handle
10437  *   Handle for the indirect action to query.
10438  * @param[in, out] data
10439  *   Pointer to storage for the associated query data type.
10440  * @param[out] error
10441  *   Perform verbose error reporting if not NULL. PMDs initialize this
10442  *   structure in case of error only.
10443  *
10444  * @return
10445  *   0 on success, a negative errno value otherwise and rte_errno is set.
10446  */
10447 static int
10448 mlx5_action_handle_query(struct rte_eth_dev *dev,
10449 			 const struct rte_flow_action_handle *handle,
10450 			 void *data,
10451 			 struct rte_flow_error *error)
10452 {
10453 	struct rte_flow_attr attr = { .transfer = 0 };
10454 	const struct mlx5_flow_driver_ops *fops =
10455 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10456 
10457 	return flow_drv_action_query(dev, handle, data, fops, error);
10458 }
10459 
10460 static int
10461 mlx5_action_handle_query_update(struct rte_eth_dev *dev,
10462 				struct rte_flow_action_handle *handle,
10463 				const void *update, void *query,
10464 				enum rte_flow_query_update_mode qu_mode,
10465 				struct rte_flow_error *error)
10466 {
10467 	struct rte_flow_attr attr = { .transfer = 0 };
10468 	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);
10469 	const struct mlx5_flow_driver_ops *fops;
10470 
10471 	if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10472 		return rte_flow_error_set(error, ENOTSUP,
10473 					  RTE_FLOW_ERROR_TYPE_ACTION,
10474 					  NULL, "invalid driver type");
10475 	fops = flow_get_drv_ops(drv_type);
10476 	if (!fops || !fops->action_query_update)
10477 		return rte_flow_error_set(error, ENOTSUP,
10478 					  RTE_FLOW_ERROR_TYPE_ACTION,
10479 					  NULL, "no query_update handler");
10480 	return fops->action_query_update(dev, handle, update,
10481 					 query, qu_mode, error);
10482 }
10483 
10484 
10485 #define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret)                           \
10486 {                                                                              \
10487 	struct rte_flow_attr attr = { .transfer = 0 };                         \
10488 	enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr);    \
10489 	if (drv_type == MLX5_FLOW_TYPE_MIN ||                                  \
10490 	    drv_type == MLX5_FLOW_TYPE_MAX) {                                  \
10491 		rte_flow_error_set(error, ENOTSUP,                             \
10492 				   RTE_FLOW_ERROR_TYPE_ACTION,                 \
10493 				   NULL, "invalid driver type");               \
10494 		return ret;                                                    \
10495 	}                                                                      \
10496 	(fops) = flow_get_drv_ops(drv_type);                                   \
10497 	if (!(fops) || !(fops)->drv_cb) {                                      \
10498 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
10499 				   NULL, "no action_list handler");            \
10500 		return ret;                                                    \
10501 	}                                                                      \
10502 }
10503 
10504 static struct rte_flow_action_list_handle *
10505 mlx5_action_list_handle_create(struct rte_eth_dev *dev,
10506 			       const struct rte_flow_indir_action_conf *conf,
10507 			       const struct rte_flow_action *actions,
10508 			       struct rte_flow_error *error)
10509 {
10510 	const struct mlx5_flow_driver_ops *fops;
10511 
10512 	MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
10513 	return fops->action_list_handle_create(dev, conf, actions, error);
10514 }
10515 
10516 static int
10517 mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
10518 				struct rte_flow_action_list_handle *handle,
10519 				struct rte_flow_error *error)
10520 {
10521 	const struct mlx5_flow_driver_ops *fops;
10522 
10523 	MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
10524 	return fops->action_list_handle_destroy(dev, handle, error);
10525 }
10526 
10527 static int
10528 mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
10529 					  const
10530 					  struct rte_flow_action_list_handle *handle,
10531 					  const void **update, void **query,
10532 					  enum rte_flow_query_update_mode mode,
10533 					  struct rte_flow_error *error)
10534 {
10535 	const struct mlx5_flow_driver_ops *fops;
10536 
10537 	MLX5_DRV_FOPS_OR_ERR(dev, fops,
10538 			     action_list_handle_query_update, ENOTSUP);
10539 	return fops->action_list_handle_query_update(dev, handle, update, query,
10540 						     mode, error);
10541 }
10542 static int
10543 mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
10544 			  const struct rte_flow_template_table *table,
10545 			  const struct rte_flow_item pattern[],
10546 			  uint8_t pattern_template_index,
10547 			  uint32_t *hash, struct rte_flow_error *error)
10548 {
10549 	struct rte_flow_attr attr = { .transfer = 0 };
10550 	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);
10551 	const struct mlx5_flow_driver_ops *fops;
10552 
10553 	if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10554 		return rte_flow_error_set(error, ENOTSUP,
10555 					  RTE_FLOW_ERROR_TYPE_ACTION,
10556 					  NULL, "invalid driver type");
10557 	fops = flow_get_drv_ops(drv_type);
10558 	if (!fops || !fops->action_query_update)
10559 		return rte_flow_error_set(error, ENOTSUP,
10560 					  RTE_FLOW_ERROR_TYPE_ACTION,
10561 					  NULL, "no query_update handler");
10562 	return fops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
10563 					  hash, error);
10564 }
10565 
10566 static int
10567 mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
10568 			  const struct rte_flow_item pattern[],
10569 			  enum rte_flow_encap_hash_field dest_field,
10570 			  uint8_t *hash,
10571 			  struct rte_flow_error *error)
10572 {
10573 	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, NULL);
10574 	const struct mlx5_flow_driver_ops *fops;
10575 
10576 	if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
10577 		return rte_flow_error_set(error, ENOTSUP,
10578 					  RTE_FLOW_ERROR_TYPE_ACTION,
10579 					  NULL, "invalid driver type");
10580 	fops = flow_get_drv_ops(drv_type);
10581 	if (!fops || !fops->flow_calc_encap_hash)
10582 		return rte_flow_error_set(error, ENOTSUP,
10583 					  RTE_FLOW_ERROR_TYPE_ACTION,
10584 					  NULL, "no calc encap hash handler");
10585 	return fops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
10586 }
10587 
10588 static int
10589 mlx5_template_table_resize(struct rte_eth_dev *dev,
10590 			   struct rte_flow_template_table *table,
10591 			   uint32_t nb_rules, struct rte_flow_error *error)
10592 {
10593 	const struct mlx5_flow_driver_ops *fops;
10594 
10595 	MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize, ENOTSUP);
10596 	return fops->table_resize(dev, table, nb_rules, error);
10597 }
10598 
10599 static int
10600 mlx5_table_resize_complete(struct rte_eth_dev *dev,
10601 			   struct rte_flow_template_table *table,
10602 			   struct rte_flow_error *error)
10603 {
10604 	const struct mlx5_flow_driver_ops *fops;
10605 
10606 	MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize_complete, ENOTSUP);
10607 	return fops->table_resize_complete(dev, table, error);
10608 }
10609 
10610 static int
10611 mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
10612 			       const struct rte_flow_op_attr *op_attr,
10613 			       struct rte_flow *rule, void *user_data,
10614 			       struct rte_flow_error *error)
10615 {
10616 	const struct mlx5_flow_driver_ops *fops;
10617 
10618 	MLX5_DRV_FOPS_OR_ERR(dev, fops, flow_update_resized, ENOTSUP);
10619 	return fops->flow_update_resized(dev, queue, op_attr, rule, user_data, error);
10620 }
10621 
10622 /**
10623  * Destroy all indirect actions (shared RSS).
10624  *
10625  * @param dev
10626  *   Pointer to Ethernet device.
10627  *
10628  * @return
10629  *   0 on success, a negative errno value otherwise and rte_errno is set.
10630  */
10631 int
10632 mlx5_action_handle_flush(struct rte_eth_dev *dev)
10633 {
10634 	struct rte_flow_error error;
10635 	struct mlx5_priv *priv = dev->data->dev_private;
10636 	struct mlx5_shared_action_rss *shared_rss;
10637 	int ret = 0;
10638 	uint32_t idx;
10639 
10640 	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
10641 		      priv->rss_shared_actions, idx, shared_rss, next) {
10642 		ret |= mlx5_action_handle_destroy(dev,
10643 		       (struct rte_flow_action_handle *)(uintptr_t)idx, &error);
10644 	}
10645 	return ret;
10646 }
10647 
10648 /**
10649  * Validate existing indirect actions against current device configuration
10650  * and attach them to device resources.
10651  *
10652  * @param dev
10653  *   Pointer to Ethernet device.
10654  *
10655  * @return
10656  *   0 on success, a negative errno value otherwise and rte_errno is set.
10657  */
10658 int
10659 mlx5_action_handle_attach(struct rte_eth_dev *dev)
10660 {
10661 	struct mlx5_priv *priv = dev->data->dev_private;
10662 	int ret = 0;
10663 	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
10664 
10665 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
10666 		const char *message;
10667 		uint32_t queue_idx;
10668 
10669 		ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
10670 					       ind_tbl->queues_n,
10671 					       &message, &queue_idx);
10672 		if (ret != 0) {
10673 			DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
10674 				dev->data->port_id, ind_tbl->queues[queue_idx],
10675 				message);
10676 			break;
10677 		}
10678 	}
10679 	if (ret != 0)
10680 		return ret;
10681 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
10682 		ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
10683 		if (ret != 0) {
10684 			DRV_LOG(ERR, "Port %u could not attach "
10685 				"indirection table obj %p",
10686 				dev->data->port_id, (void *)ind_tbl);
10687 			goto error;
10688 		}
10689 	}
10690 
10691 	return 0;
10692 error:
10693 	ind_tbl_last = ind_tbl;
10694 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
10695 		if (ind_tbl == ind_tbl_last)
10696 			break;
10697 		if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
10698 			DRV_LOG(CRIT, "Port %u could not detach "
10699 				"indirection table obj %p on rollback",
10700 				dev->data->port_id, (void *)ind_tbl);
10701 	}
10702 	return ret;
10703 }
10704 
10705 /**
10706  * Detach indirect actions of the device from its resources.
10707  *
10708  * @param dev
10709  *   Pointer to Ethernet device.
10710  *
10711  * @return
10712  *   0 on success, a negative errno value otherwise and rte_errno is set.
10713  */
10714 int
10715 mlx5_action_handle_detach(struct rte_eth_dev *dev)
10716 {
10717 	struct mlx5_priv *priv = dev->data->dev_private;
10718 	int ret = 0;
10719 	struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
10720 
10721 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
10722 		ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
10723 		if (ret != 0) {
10724 			DRV_LOG(ERR, "Port %u could not detach "
10725 				"indirection table obj %p",
10726 				dev->data->port_id, (void *)ind_tbl);
10727 			goto error;
10728 		}
10729 	}
10730 	return 0;
10731 error:
10732 	ind_tbl_last = ind_tbl;
10733 	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
10734 		if (ind_tbl == ind_tbl_last)
10735 			break;
10736 		if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
10737 			DRV_LOG(CRIT, "Port %u could not attach "
10738 				"indirection table obj %p on rollback",
10739 				dev->data->port_id, (void *)ind_tbl);
10740 	}
10741 	return ret;
10742 }
10743 
10744 #ifndef HAVE_MLX5DV_DR
10745 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
10746 #else
10747 #define MLX5_DOMAIN_SYNC_FLOW \
10748 	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
10749 #endif
10750 
10751 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
10752 {
10753 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
10754 	const struct mlx5_flow_driver_ops *fops;
10755 	int ret;
10756 	struct rte_flow_attr attr = { .transfer = 0 };
10757 
10758 	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
10759 	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
10760 	if (ret > 0)
10761 		ret = -ret;
10762 	return ret;
10763 }
10764 
10765 const struct mlx5_flow_tunnel *
10766 mlx5_get_tof(const struct rte_flow_item *item,
10767 	     const struct rte_flow_action *action,
10768 	     enum mlx5_tof_rule_type *rule_type)
10769 {
10770 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
10771 		if (item->type == (typeof(item->type))
10772 				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
10773 			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
10774 			return flow_items_to_tunnel(item);
10775 		}
10776 	}
10777 	for (; action->conf != RTE_FLOW_ACTION_TYPE_END; action++) {
10778 		if (action->type == (typeof(action->type))
10779 				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
10780 			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
10781 			return flow_actions_to_tunnel(action);
10782 		}
10783 	}
10784 	return NULL;
10785 }
10786 
10787 /**
10788  * tunnel offload functionality is defined for DV environment only
10789  */
10790 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10791 __extension__
10792 union tunnel_offload_mark {
10793 	uint32_t val;
10794 	struct {
10795 		uint32_t app_reserve:8;
10796 		uint32_t table_id:15;
10797 		uint32_t transfer:1;
10798 		uint32_t _unused_:8;
10799 	};
10800 };
10801 
10802 static bool
10803 mlx5_access_tunnel_offload_db
10804 	(struct rte_eth_dev *dev,
10805 	 bool (*match)(struct rte_eth_dev *,
10806 		       struct mlx5_flow_tunnel *, const void *),
10807 	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
10808 	 void (*miss)(struct rte_eth_dev *, void *),
10809 	 void *ctx, bool lock_op);
10810 
10811 static int
10812 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
10813 			     struct rte_flow *flow,
10814 			     const struct rte_flow_attr *attr,
10815 			     const struct rte_flow_action *app_actions,
10816 			     uint32_t flow_idx,
10817 			     const struct mlx5_flow_tunnel *tunnel,
10818 			     struct tunnel_default_miss_ctx *ctx,
10819 			     struct rte_flow_error *error)
10820 {
10821 	struct mlx5_priv *priv = dev->data->dev_private;
10822 	struct mlx5_flow *dev_flow;
10823 	struct rte_flow_attr miss_attr = *attr;
10824 	const struct rte_flow_item miss_items[2] = {
10825 		{
10826 			.type = RTE_FLOW_ITEM_TYPE_ETH,
10827 			.spec = NULL,
10828 			.last = NULL,
10829 			.mask = NULL
10830 		},
10831 		{
10832 			.type = RTE_FLOW_ITEM_TYPE_END,
10833 			.spec = NULL,
10834 			.last = NULL,
10835 			.mask = NULL
10836 		}
10837 	};
10838 	union tunnel_offload_mark mark_id;
10839 	struct rte_flow_action_mark miss_mark;
10840 	struct rte_flow_action miss_actions[3] = {
10841 		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
10842 		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
10843 	};
10844 	const struct rte_flow_action_jump *jump_data;
10845 	uint32_t i, flow_table = 0; /* prevent compilation warning */
10846 	struct flow_grp_info grp_info = {
10847 		.external = 1,
10848 		.transfer = attr->transfer,
10849 		.fdb_def_rule = !!priv->fdb_def_rule,
10850 		.std_tbl_fix = 0,
10851 	};
10852 	int ret;
10853 
10854 	if (!attr->transfer) {
10855 		uint32_t q_size;
10856 
10857 		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
10858 		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
10859 		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
10860 					 0, SOCKET_ID_ANY);
10861 		if (!ctx->queue)
10862 			return rte_flow_error_set
10863 				(error, ENOMEM,
10864 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
10865 				NULL, "invalid default miss RSS");
10866 		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
10867 		ctx->action_rss.level = 0,
10868 		ctx->action_rss.types = priv->rss_conf.rss_hf,
10869 		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
10870 		ctx->action_rss.queue_num = priv->reta_idx_n,
10871 		ctx->action_rss.key = priv->rss_conf.rss_key,
10872 		ctx->action_rss.queue = ctx->queue;
10873 		if (!priv->reta_idx_n || !priv->rxqs_n)
10874 			return rte_flow_error_set
10875 				(error, EINVAL,
10876 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
10877 				NULL, "invalid port configuration");
10878 		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
10879 			ctx->action_rss.types = 0;
10880 		for (i = 0; i != priv->reta_idx_n; ++i)
10881 			ctx->queue[i] = (*priv->reta_idx)[i];
10882 	} else {
10883 		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
10884 		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
10885 	}
10886 	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
10887 	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
10888 	jump_data = app_actions->conf;
10889 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
10890 	miss_attr.group = jump_data->group;
10891 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
10892 				       &flow_table, &grp_info, error);
10893 	if (ret)
10894 		return rte_flow_error_set(error, EINVAL,
10895 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
10896 					  NULL, "invalid tunnel id");
10897 	mark_id.app_reserve = 0;
10898 	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
10899 	mark_id.transfer = !!attr->transfer;
10900 	mark_id._unused_ = 0;
10901 	miss_mark.id = mark_id.val;
10902 	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
10903 				    miss_items, miss_actions, flow_idx, error);
10904 	if (!dev_flow)
10905 		return -rte_errno;
10906 	dev_flow->flow = flow;
10907 	dev_flow->external = true;
10908 	dev_flow->tunnel = tunnel;
10909 	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
10910 	/* Subflow object was created, we must include one in the list. */
10911 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
10912 		      dev_flow->handle, next);
10913 	DRV_LOG(DEBUG,
10914 		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
10915 		dev->data->port_id, tunnel->app_tunnel.type,
10916 		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
10917 	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
10918 				  miss_actions, error);
10919 	if (!ret)
10920 		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
10921 						  error);
10922 
10923 	return ret;
10924 }
10925 
10926 static const struct mlx5_flow_tbl_data_entry  *
10927 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
10928 {
10929 	struct mlx5_priv *priv = dev->data->dev_private;
10930 	struct mlx5_dev_ctx_shared *sh = priv->sh;
10931 	struct mlx5_list_entry *he;
10932 	union tunnel_offload_mark mbits = { .val = mark };
10933 	union mlx5_flow_tbl_key table_key = {
10934 		{
10935 			.level = tunnel_id_to_flow_tbl(mbits.table_id),
10936 			.id = 0,
10937 			.reserved = 0,
10938 			.dummy = 0,
10939 			.is_fdb = !!mbits.transfer,
10940 			.is_egress = 0,
10941 		}
10942 	};
10943 	struct mlx5_flow_cb_ctx ctx = {
10944 		.data = &table_key.v64,
10945 	};
10946 
10947 	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
10948 	return he ?
10949 	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
10950 }
10951 
10952 static void
10953 mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
10954 				   struct mlx5_list_entry *entry)
10955 {
10956 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
10957 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
10958 
10959 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
10960 			tunnel_flow_tbl_to_id(tte->flow_table));
10961 	mlx5_free(tte);
10962 }
10963 
10964 static int
10965 mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
10966 				  struct mlx5_list_entry *entry, void *cb_ctx)
10967 {
10968 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10969 	union tunnel_tbl_key tbl = {
10970 		.val = *(uint64_t *)(ctx->data),
10971 	};
10972 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
10973 
10974 	return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
10975 }
10976 
10977 static struct mlx5_list_entry *
10978 mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
10979 {
10980 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
10981 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10982 	struct tunnel_tbl_entry *tte;
10983 	union tunnel_tbl_key tbl = {
10984 		.val = *(uint64_t *)(ctx->data),
10985 	};
10986 
10987 	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
10988 			  sizeof(*tte), 0,
10989 			  SOCKET_ID_ANY);
10990 	if (!tte)
10991 		goto err;
10992 	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
10993 			  &tte->flow_table);
10994 	if (tte->flow_table >= MLX5_MAX_TABLES) {
10995 		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
10996 			tte->flow_table);
10997 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
10998 				tte->flow_table);
10999 		goto err;
11000 	} else if (!tte->flow_table) {
11001 		goto err;
11002 	}
11003 	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
11004 	tte->tunnel_id = tbl.tunnel_id;
11005 	tte->group = tbl.group;
11006 	return &tte->hash;
11007 err:
11008 	if (tte)
11009 		mlx5_free(tte);
11010 	return NULL;
11011 }
11012 
11013 static struct mlx5_list_entry *
11014 mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
11015 				  struct mlx5_list_entry *oentry,
11016 				  void *cb_ctx __rte_unused)
11017 {
11018 	struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
11019 						   0, SOCKET_ID_ANY);
11020 
11021 	if (!tte)
11022 		return NULL;
11023 	memcpy(tte, oentry, sizeof(*tte));
11024 	return &tte->hash;
11025 }
11026 
11027 static void
11028 mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
11029 				       struct mlx5_list_entry *entry)
11030 {
11031 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
11032 
11033 	mlx5_free(tte);
11034 }
11035 
11036 static uint32_t
11037 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
11038 				const struct mlx5_flow_tunnel *tunnel,
11039 				uint32_t group, uint32_t *table,
11040 				struct rte_flow_error *error)
11041 {
11042 	struct mlx5_list_entry *he;
11043 	struct tunnel_tbl_entry *tte;
11044 	union tunnel_tbl_key key = {
11045 		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
11046 		.group = group
11047 	};
11048 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
11049 	struct mlx5_hlist *group_hash;
11050 	struct mlx5_flow_cb_ctx ctx = {
11051 		.data = &key.val,
11052 	};
11053 
11054 	group_hash = tunnel ? tunnel->groups : thub->groups;
11055 	he = mlx5_hlist_register(group_hash, key.val, &ctx);
11056 	if (!he)
11057 		return rte_flow_error_set(error, EINVAL,
11058 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
11059 					  NULL,
11060 					  "tunnel group index not supported");
11061 	tte = container_of(he, typeof(*tte), hash);
11062 	*table = tte->flow_table;
11063 	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
11064 		dev->data->port_id, key.tunnel_id, group, *table);
11065 	return 0;
11066 }
11067 
11068 static void
11069 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
11070 		      struct mlx5_flow_tunnel *tunnel)
11071 {
11072 	struct mlx5_priv *priv = dev->data->dev_private;
11073 	struct mlx5_indexed_pool *ipool;
11074 
11075 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
11076 		dev->data->port_id, tunnel->tunnel_id);
11077 	LIST_REMOVE(tunnel, chain);
11078 	mlx5_hlist_destroy(tunnel->groups);
11079 	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
11080 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
11081 }
11082 
11083 static bool
11084 mlx5_access_tunnel_offload_db
11085 	(struct rte_eth_dev *dev,
11086 	 bool (*match)(struct rte_eth_dev *,
11087 		       struct mlx5_flow_tunnel *, const void *),
11088 	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
11089 	 void (*miss)(struct rte_eth_dev *, void *),
11090 	 void *ctx, bool lock_op)
11091 {
11092 	bool verdict = false;
11093 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
11094 	struct mlx5_flow_tunnel *tunnel;
11095 
11096 	rte_spinlock_lock(&thub->sl);
11097 	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
11098 		verdict = match(dev, tunnel, (const void *)ctx);
11099 		if (verdict)
11100 			break;
11101 	}
11102 	if (!lock_op)
11103 		rte_spinlock_unlock(&thub->sl);
11104 	if (verdict && hit)
11105 		hit(dev, tunnel, ctx);
11106 	if (!verdict && miss)
11107 		miss(dev, ctx);
11108 	if (lock_op)
11109 		rte_spinlock_unlock(&thub->sl);
11110 
11111 	return verdict;
11112 }
11113 
11114 struct tunnel_db_find_tunnel_id_ctx {
11115 	uint32_t tunnel_id;
11116 	struct mlx5_flow_tunnel *tunnel;
11117 };
11118 
11119 static bool
11120 find_tunnel_id_match(struct rte_eth_dev *dev,
11121 		     struct mlx5_flow_tunnel *tunnel, const void *x)
11122 {
11123 	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
11124 
11125 	RTE_SET_USED(dev);
11126 	return tunnel->tunnel_id == ctx->tunnel_id;
11127 }
11128 
11129 static void
11130 find_tunnel_id_hit(struct rte_eth_dev *dev,
11131 		   struct mlx5_flow_tunnel *tunnel, void *x)
11132 {
11133 	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
11134 	RTE_SET_USED(dev);
11135 	ctx->tunnel = tunnel;
11136 }
11137 
11138 static struct mlx5_flow_tunnel *
11139 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
11140 {
11141 	struct tunnel_db_find_tunnel_id_ctx ctx = {
11142 		.tunnel_id = id,
11143 	};
11144 
11145 	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
11146 				      find_tunnel_id_hit, NULL, &ctx, true);
11147 
11148 	return ctx.tunnel;
11149 }
11150 
11151 static struct mlx5_flow_tunnel *
11152 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
11153 			  const struct rte_flow_tunnel *app_tunnel)
11154 {
11155 	struct mlx5_priv *priv = dev->data->dev_private;
11156 	struct mlx5_indexed_pool *ipool;
11157 	struct mlx5_flow_tunnel *tunnel;
11158 	uint32_t id;
11159 
11160 	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
11161 	tunnel = mlx5_ipool_zmalloc(ipool, &id);
11162 	if (!tunnel)
11163 		return NULL;
11164 	if (id >= MLX5_MAX_TUNNELS) {
11165 		mlx5_ipool_free(ipool, id);
11166 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
11167 		return NULL;
11168 	}
11169 	tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
11170 					   priv->sh,
11171 					   mlx5_flow_tunnel_grp2tbl_create_cb,
11172 					   mlx5_flow_tunnel_grp2tbl_match_cb,
11173 					   mlx5_flow_tunnel_grp2tbl_remove_cb,
11174 					   mlx5_flow_tunnel_grp2tbl_clone_cb,
11175 					mlx5_flow_tunnel_grp2tbl_clone_free_cb);
11176 	if (!tunnel->groups) {
11177 		mlx5_ipool_free(ipool, id);
11178 		return NULL;
11179 	}
11180 	/* initiate new PMD tunnel */
11181 	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
11182 	tunnel->tunnel_id = id;
11183 	tunnel->action.type = (typeof(tunnel->action.type))
11184 			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
11185 	tunnel->action.conf = tunnel;
11186 	tunnel->item.type = (typeof(tunnel->item.type))
11187 			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
11188 	tunnel->item.spec = tunnel;
11189 	tunnel->item.last = NULL;
11190 	tunnel->item.mask = NULL;
11191 
11192 	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
11193 		dev->data->port_id, tunnel->tunnel_id);
11194 
11195 	return tunnel;
11196 }
11197 
11198 struct tunnel_db_get_tunnel_ctx {
11199 	const struct rte_flow_tunnel *app_tunnel;
11200 	struct mlx5_flow_tunnel *tunnel;
11201 };
11202 
11203 static bool get_tunnel_match(struct rte_eth_dev *dev,
11204 			     struct mlx5_flow_tunnel *tunnel, const void *x)
11205 {
11206 	const struct tunnel_db_get_tunnel_ctx *ctx = x;
11207 
11208 	RTE_SET_USED(dev);
11209 	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
11210 		       sizeof(*ctx->app_tunnel));
11211 }
11212 
11213 static void get_tunnel_hit(struct rte_eth_dev *dev,
11214 			   struct mlx5_flow_tunnel *tunnel, void *x)
11215 {
11216 	/* called under tunnel spinlock protection */
11217 	struct tunnel_db_get_tunnel_ctx *ctx = x;
11218 
11219 	RTE_SET_USED(dev);
11220 	tunnel->refctn++;
11221 	ctx->tunnel = tunnel;
11222 }
11223 
11224 static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
11225 {
11226 	/* called under tunnel spinlock protection */
11227 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
11228 	struct tunnel_db_get_tunnel_ctx *ctx = x;
11229 
11230 	rte_spinlock_unlock(&thub->sl);
11231 	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
11232 	rte_spinlock_lock(&thub->sl);
11233 	if (ctx->tunnel) {
11234 		ctx->tunnel->refctn = 1;
11235 		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
11236 	}
11237 }
11238 
11239 
11240 static int
11241 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
11242 		     const struct rte_flow_tunnel *app_tunnel,
11243 		     struct mlx5_flow_tunnel **tunnel)
11244 {
11245 	struct tunnel_db_get_tunnel_ctx ctx = {
11246 		.app_tunnel = app_tunnel,
11247 	};
11248 
11249 	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
11250 				      get_tunnel_miss, &ctx, true);
11251 	*tunnel = ctx.tunnel;
11252 	return ctx.tunnel ? 0 : -ENOMEM;
11253 }
11254 
11255 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
11256 {
11257 	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
11258 
11259 	if (!thub)
11260 		return;
11261 	if (!LIST_EMPTY(&thub->tunnels))
11262 		DRV_LOG(WARNING, "port %u tunnels present", port_id);
11263 	mlx5_hlist_destroy(thub->groups);
11264 	mlx5_free(thub);
11265 }
11266 
11267 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
11268 {
11269 	int err;
11270 	struct mlx5_flow_tunnel_hub *thub;
11271 
11272 	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
11273 			   0, SOCKET_ID_ANY);
11274 	if (!thub)
11275 		return -ENOMEM;
11276 	LIST_INIT(&thub->tunnels);
11277 	rte_spinlock_init(&thub->sl);
11278 	thub->groups = mlx5_hlist_create("flow groups", 64,
11279 					 false, true, sh,
11280 					 mlx5_flow_tunnel_grp2tbl_create_cb,
11281 					 mlx5_flow_tunnel_grp2tbl_match_cb,
11282 					 mlx5_flow_tunnel_grp2tbl_remove_cb,
11283 					 mlx5_flow_tunnel_grp2tbl_clone_cb,
11284 					mlx5_flow_tunnel_grp2tbl_clone_free_cb);
11285 	if (!thub->groups) {
11286 		err = -rte_errno;
11287 		goto err;
11288 	}
11289 	sh->tunnel_hub = thub;
11290 
11291 	return 0;
11292 
11293 err:
11294 	if (thub->groups)
11295 		mlx5_hlist_destroy(thub->groups);
11296 	if (thub)
11297 		mlx5_free(thub);
11298 	return err;
11299 }
11300 
11301 static inline int
11302 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
11303 			  struct rte_flow_tunnel *tunnel,
11304 			  struct rte_flow_error *error)
11305 {
11306 	struct mlx5_priv *priv = dev->data->dev_private;
11307 
11308 	if (!priv->sh->config.dv_flow_en)
11309 		return rte_flow_error_set(error, ENOTSUP,
11310 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
11311 					  "flow DV interface is off");
11312 	if (!is_tunnel_offload_active(dev))
11313 		return rte_flow_error_set(error, ENOTSUP,
11314 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
11315 					  "tunnel offload was not activated, consider setting dv_xmeta_en=3");
11316 	if (!tunnel)
11317 		return rte_flow_error_set(error, EINVAL,
11318 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
11319 					  "no application tunnel");
11320 	switch (tunnel->type) {
11321 	default:
11322 		return rte_flow_error_set(error, EINVAL,
11323 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
11324 					  "unsupported tunnel type");
11325 	case RTE_FLOW_ITEM_TYPE_VXLAN:
11326 	case RTE_FLOW_ITEM_TYPE_GRE:
11327 	case RTE_FLOW_ITEM_TYPE_NVGRE:
11328 	case RTE_FLOW_ITEM_TYPE_GENEVE:
11329 		break;
11330 	}
11331 	return 0;
11332 }
11333 
11334 static int
11335 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
11336 		    struct rte_flow_tunnel *app_tunnel,
11337 		    struct rte_flow_action **actions,
11338 		    uint32_t *num_of_actions,
11339 		    struct rte_flow_error *error)
11340 {
11341 	struct mlx5_flow_tunnel *tunnel;
11342 	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
11343 
11344 	if (ret)
11345 		return ret;
11346 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
11347 	if (ret < 0) {
11348 		return rte_flow_error_set(error, ret,
11349 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
11350 					  "failed to initialize pmd tunnel");
11351 	}
11352 	*actions = &tunnel->action;
11353 	*num_of_actions = 1;
11354 	return 0;
11355 }
11356 
11357 static int
11358 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
11359 		       struct rte_flow_tunnel *app_tunnel,
11360 		       struct rte_flow_item **items,
11361 		       uint32_t *num_of_items,
11362 		       struct rte_flow_error *error)
11363 {
11364 	struct mlx5_flow_tunnel *tunnel;
11365 	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
11366 
11367 	if (ret)
11368 		return ret;
11369 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
11370 	if (ret < 0) {
11371 		return rte_flow_error_set(error, ret,
11372 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
11373 					  "failed to initialize pmd tunnel");
11374 	}
11375 	*items = &tunnel->item;
11376 	*num_of_items = 1;
11377 	return 0;
11378 }
11379 
11380 struct tunnel_db_element_release_ctx {
11381 	struct rte_flow_item *items;
11382 	struct rte_flow_action *actions;
11383 	uint32_t num_elements;
11384 	struct rte_flow_error *error;
11385 	int ret;
11386 };
11387 
11388 static bool
11389 tunnel_element_release_match(struct rte_eth_dev *dev,
11390 			     struct mlx5_flow_tunnel *tunnel, const void *x)
11391 {
11392 	const struct tunnel_db_element_release_ctx *ctx = x;
11393 
11394 	RTE_SET_USED(dev);
11395 	if (ctx->num_elements != 1)
11396 		return false;
11397 	else if (ctx->items)
11398 		return ctx->items == &tunnel->item;
11399 	else if (ctx->actions)
11400 		return ctx->actions == &tunnel->action;
11401 
11402 	return false;
11403 }
11404 
11405 static void
11406 tunnel_element_release_hit(struct rte_eth_dev *dev,
11407 			   struct mlx5_flow_tunnel *tunnel, void *x)
11408 {
11409 	struct tunnel_db_element_release_ctx *ctx = x;
11410 	ctx->ret = 0;
11411 	if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
11412 		mlx5_flow_tunnel_free(dev, tunnel);
11413 }
11414 
11415 static void
11416 tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
11417 {
11418 	struct tunnel_db_element_release_ctx *ctx = x;
11419 	RTE_SET_USED(dev);
11420 	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
11421 				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
11422 				      "invalid argument");
11423 }
11424 
11425 static int
11426 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
11427 		       struct rte_flow_item *pmd_items,
11428 		       uint32_t num_items, struct rte_flow_error *err)
11429 {
11430 	struct tunnel_db_element_release_ctx ctx = {
11431 		.items = pmd_items,
11432 		.actions = NULL,
11433 		.num_elements = num_items,
11434 		.error = err,
11435 	};
11436 
11437 	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
11438 				      tunnel_element_release_hit,
11439 				      tunnel_element_release_miss, &ctx, false);
11440 
11441 	return ctx.ret;
11442 }
11443 
11444 static int
11445 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
11446 			 struct rte_flow_action *pmd_actions,
11447 			 uint32_t num_actions, struct rte_flow_error *err)
11448 {
11449 	struct tunnel_db_element_release_ctx ctx = {
11450 		.items = NULL,
11451 		.actions = pmd_actions,
11452 		.num_elements = num_actions,
11453 		.error = err,
11454 	};
11455 
11456 	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
11457 				      tunnel_element_release_hit,
11458 				      tunnel_element_release_miss, &ctx, false);
11459 
11460 	return ctx.ret;
11461 }
11462 
11463 static int
11464 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
11465 				  struct rte_mbuf *m,
11466 				  struct rte_flow_restore_info *info,
11467 				  struct rte_flow_error *err)
11468 {
11469 	uint64_t ol_flags = m->ol_flags;
11470 	const struct mlx5_flow_tbl_data_entry *tble;
11471 	const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
11472 	struct mlx5_priv *priv = dev->data->dev_private;
11473 
11474 	if (priv->tunnel_enabled == 0)
11475 		goto err;
11476 	if ((ol_flags & mask) != mask)
11477 		goto err;
11478 	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
11479 	if (!tble) {
11480 		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
11481 			dev->data->port_id, m->hash.fdir.hi);
11482 		goto err;
11483 	}
11484 	MLX5_ASSERT(tble->tunnel);
11485 	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
11486 	info->group_id = tble->group_id;
11487 	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
11488 		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
11489 		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
11490 
11491 	return 0;
11492 
11493 err:
11494 	return rte_flow_error_set(err, EINVAL,
11495 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11496 				  "failed to get restore info");
11497 }
11498 
11499 #else /* HAVE_IBV_FLOW_DV_SUPPORT */
11500 static int
11501 mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
11502 			   __rte_unused struct rte_flow_tunnel *app_tunnel,
11503 			   __rte_unused struct rte_flow_action **actions,
11504 			   __rte_unused uint32_t *num_of_actions,
11505 			   __rte_unused struct rte_flow_error *error)
11506 {
11507 	return -ENOTSUP;
11508 }
11509 
11510 static int
11511 mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
11512 		       __rte_unused struct rte_flow_tunnel *app_tunnel,
11513 		       __rte_unused struct rte_flow_item **items,
11514 		       __rte_unused uint32_t *num_of_items,
11515 		       __rte_unused struct rte_flow_error *error)
11516 {
11517 	return -ENOTSUP;
11518 }
11519 
11520 static int
11521 mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
11522 			      __rte_unused struct rte_flow_item *pmd_items,
11523 			      __rte_unused uint32_t num_items,
11524 			      __rte_unused struct rte_flow_error *err)
11525 {
11526 	return -ENOTSUP;
11527 }
11528 
11529 static int
11530 mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
11531 				__rte_unused struct rte_flow_action *pmd_action,
11532 				__rte_unused uint32_t num_actions,
11533 				__rte_unused struct rte_flow_error *err)
11534 {
11535 	return -ENOTSUP;
11536 }
11537 
11538 static int
11539 mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
11540 				  __rte_unused struct rte_mbuf *m,
11541 				  __rte_unused struct rte_flow_restore_info *i,
11542 				  __rte_unused struct rte_flow_error *err)
11543 {
11544 	return -ENOTSUP;
11545 }
11546 
11547 static int
11548 flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
11549 			     __rte_unused struct rte_flow *flow,
11550 			     __rte_unused const struct rte_flow_attr *attr,
11551 			     __rte_unused const struct rte_flow_action *actions,
11552 			     __rte_unused uint32_t flow_idx,
11553 			     __rte_unused const struct mlx5_flow_tunnel *tunnel,
11554 			     __rte_unused struct tunnel_default_miss_ctx *ctx,
11555 			     __rte_unused struct rte_flow_error *error)
11556 {
11557 	return -ENOTSUP;
11558 }
11559 
11560 static struct mlx5_flow_tunnel *
11561 mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
11562 		    __rte_unused uint32_t id)
11563 {
11564 	return NULL;
11565 }
11566 
11567 static void
11568 mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
11569 		      __rte_unused struct mlx5_flow_tunnel *tunnel)
11570 {
11571 }
11572 
11573 static uint32_t
11574 tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
11575 				__rte_unused const struct mlx5_flow_tunnel *t,
11576 				__rte_unused uint32_t group,
11577 				__rte_unused uint32_t *table,
11578 				struct rte_flow_error *error)
11579 {
11580 	return rte_flow_error_set(error, ENOTSUP,
11581 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11582 				  "tunnel offload requires DV support");
11583 }
11584 
11585 void
11586 mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
11587 			__rte_unused  uint16_t port_id)
11588 {
11589 }
11590 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
11591 
11592 /* Flex flow item API */
11593 static struct rte_flow_item_flex_handle *
11594 mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
11595 			   const struct rte_flow_item_flex_conf *conf,
11596 			   struct rte_flow_error *error)
11597 {
11598 	static const char err_msg[] = "flex item creation unsupported";
11599 	struct mlx5_priv *priv = dev->data->dev_private;
11600 	struct rte_flow_attr attr = { .transfer = 0 };
11601 	const struct mlx5_flow_driver_ops *fops =
11602 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
11603 
11604 	if (!priv->pci_dev) {
11605 		rte_flow_error_set(error, ENOTSUP,
11606 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11607 				   "create flex item on PF only");
11608 		return NULL;
11609 	}
11610 	switch (priv->pci_dev->id.device_id) {
11611 	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD2:
11612 	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3:
11613 		break;
11614 	default:
11615 		rte_flow_error_set(error, ENOTSUP,
11616 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11617 				   "flex item available on BlueField ports only");
11618 		return NULL;
11619 	}
11620 	if (!fops->item_create) {
11621 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
11622 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11623 				   NULL, err_msg);
11624 		return NULL;
11625 	}
11626 	return fops->item_create(dev, conf, error);
11627 }
11628 
11629 static int
11630 mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
11631 			    const struct rte_flow_item_flex_handle *handle,
11632 			    struct rte_flow_error *error)
11633 {
11634 	static const char err_msg[] = "flex item release unsupported";
11635 	struct rte_flow_attr attr = { .transfer = 0 };
11636 	const struct mlx5_flow_driver_ops *fops =
11637 			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
11638 
11639 	if (!fops->item_release) {
11640 		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
11641 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11642 				   NULL, err_msg);
11643 		return -rte_errno;
11644 	}
11645 	return fops->item_release(dev, handle, error);
11646 }
11647 
11648 static void
11649 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
11650 {
11651 	int ret;
11652 	struct rte_flow_error error;
11653 
11654 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
11655 		char *item_name;
11656 		ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
11657 				    sizeof(item_name),
11658 				    (void *)(uintptr_t)item->type, &error);
11659 		if (ret > 0)
11660 			printf("%s ", item_name);
11661 		else
11662 			printf("%d\n", (int)item->type);
11663 	}
11664 	printf("END\n");
11665 }
11666 
11667 static int
11668 mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
11669 {
11670 	const struct rte_flow_item_udp *spec = udp_item->spec;
11671 	const struct rte_flow_item_udp *mask = udp_item->mask;
11672 	uint16_t udp_dport = 0;
11673 
11674 	if (spec != NULL) {
11675 		if (!mask)
11676 			mask = &rte_flow_item_udp_mask;
11677 		udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
11678 				mask->hdr.dst_port);
11679 	}
11680 	return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
11681 }
11682 
11683 static const struct mlx5_flow_expand_node *
11684 mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
11685 		unsigned int item_idx,
11686 		const struct mlx5_flow_expand_node graph[],
11687 		const struct mlx5_flow_expand_node *node)
11688 {
11689 	const struct rte_flow_item *item = pattern + item_idx, *prev_item;
11690 
11691 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
11692 			node != NULL &&
11693 			node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
11694 		/*
11695 		 * The expansion node is VXLAN and it is also the last
11696 		 * expandable item in the pattern, so need to continue
11697 		 * expansion of the inner tunnel.
11698 		 */
11699 		MLX5_ASSERT(item_idx > 0);
11700 		prev_item = pattern + item_idx - 1;
11701 		MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
11702 		if (mlx5_flow_is_std_vxlan_port(prev_item))
11703 			return &graph[MLX5_EXPANSION_STD_VXLAN];
11704 		return &graph[MLX5_EXPANSION_L3_VXLAN];
11705 	}
11706 	return node;
11707 }
11708 
11709 /* Map of Verbs to Flow priority with 8 Verbs priorities. */
11710 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
11711 	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
11712 };
11713 
11714 /* Map of Verbs to Flow priority with 16 Verbs priorities. */
11715 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
11716 	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
11717 	{ 9, 10, 11 }, { 12, 13, 14 },
11718 };
11719 
11720 /**
11721  * Discover the number of available flow priorities.
11722  *
11723  * @param dev
11724  *   Ethernet device.
11725  *
11726  * @return
11727  *   On success, number of available flow priorities.
11728  *   On failure, a negative errno-style code and rte_errno is set.
11729  */
11730 int
11731 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
11732 {
11733 	static const uint16_t vprio[] = {8, 16};
11734 	const struct mlx5_priv *priv = dev->data->dev_private;
11735 	const struct mlx5_flow_driver_ops *fops;
11736 	enum mlx5_flow_drv_type type;
11737 	int ret;
11738 
11739 	type = mlx5_flow_os_get_type();
11740 	if (type == MLX5_FLOW_TYPE_MAX) {
11741 		type = MLX5_FLOW_TYPE_VERBS;
11742 		if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
11743 			type = MLX5_FLOW_TYPE_DV;
11744 	}
11745 	fops = flow_get_drv_ops(type);
11746 	if (fops->discover_priorities == NULL) {
11747 		DRV_LOG(ERR, "Priority discovery not supported");
11748 		rte_errno = ENOTSUP;
11749 		return -rte_errno;
11750 	}
11751 	ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
11752 	if (ret < 0)
11753 		return ret;
11754 	switch (ret) {
11755 	case 8:
11756 		ret = RTE_DIM(priority_map_3);
11757 		break;
11758 	case 16:
11759 		ret = RTE_DIM(priority_map_5);
11760 		break;
11761 	default:
11762 		rte_errno = ENOTSUP;
11763 		DRV_LOG(ERR,
11764 			"port %u maximum priority: %d expected 8/16",
11765 			dev->data->port_id, ret);
11766 		return -rte_errno;
11767 	}
11768 	DRV_LOG(INFO, "port %u supported flow priorities:"
11769 		" 0-%d for ingress or egress root table,"
11770 		" 0-%d for non-root table or transfer root table.",
11771 		dev->data->port_id, ret - 2,
11772 		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
11773 	return ret;
11774 }
11775 
11776 /**
11777  * Adjust flow priority based on the highest layer and the request priority.
11778  *
11779  * @param[in] dev
11780  *   Pointer to the Ethernet device structure.
11781  * @param[in] priority
11782  *   The rule base priority.
11783  * @param[in] subpriority
11784  *   The priority based on the items.
11785  *
11786  * @return
11787  *   The new priority.
11788  */
11789 uint32_t
11790 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
11791 			  uint32_t subpriority)
11792 {
11793 	uint32_t res = 0;
11794 	struct mlx5_priv *priv = dev->data->dev_private;
11795 
11796 	switch (priv->sh->flow_max_priority) {
11797 	case RTE_DIM(priority_map_3):
11798 		res = priority_map_3[priority][subpriority];
11799 		break;
11800 	case RTE_DIM(priority_map_5):
11801 		res = priority_map_5[priority][subpriority];
11802 		break;
11803 	}
11804 	return  res;
11805 }
11806 
11807 /**
11808  * Get the priority for sending traffic to kernel table.
11809  *
11810  * @param[in] dev
11811  *   Pointer to the Ethernet device structure.
11812  *
11813  * @return
11814  *   On success: the value of priority for sending traffic to kernel table
11815  *   On failure: -1
11816  */
11817 uint32_t
11818 mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev)
11819 {
11820 	struct mlx5_priv *priv = dev->data->dev_private;
11821 	uint32_t res;
11822 
11823 	switch (priv->sh->flow_max_priority) {
11824 	case RTE_DIM(priority_map_5):
11825 		res = 15;
11826 		break;
11827 	case RTE_DIM(priority_map_3):
11828 		res = 7;
11829 		break;
11830 	default:
11831 		DRV_LOG(ERR,
11832 			"port %u maximum priority: %d expected 8/16",
11833 			dev->data->port_id, priv->sh->flow_max_priority);
11834 		res = (uint32_t)-1;
11835 	}
11836 	return res;
11837 }
11838 
11839 /**
11840  * Get the E-Switch Manager vport id.
11841  *
11842  * @param[in] dev
11843  *   Pointer to the Ethernet device structure.
11844  *
11845  * @return
11846  *   The vport id.
11847  */
11848 int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev)
11849 {
11850 	struct mlx5_priv *priv = dev->data->dev_private;
11851 	struct mlx5_common_device *cdev = priv->sh->cdev;
11852 
11853 	/* New FW exposes E-Switch Manager vport ID, can use it directly. */
11854 	if (cdev->config.hca_attr.esw_mgr_vport_id_valid)
11855 		return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id;
11856 
11857 	if (priv->pci_dev == NULL)
11858 		return 0;
11859 	switch (priv->pci_dev->id.device_id) {
11860 	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD:
11861 	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD2:
11862 	case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3:
11863 	/*
11864 	 * In old FW which doesn't expose the E-Switch Manager vport ID in the capability,
11865 	 * only the BF embedded CPUs control the E-Switch Manager port. Hence,
11866 	 * ECPF vport ID is selected and not the host port (0) in any BF case.
11867 	 */
11868 		return (int16_t)MLX5_ECPF_VPORT_ID;
11869 	default:
11870 		return MLX5_PF_VPORT_ID;
11871 	}
11872 }
11873 
11874 /**
11875  * Parse item to get the vport id.
11876  *
11877  * @param[in] dev
11878  *   Pointer to the Ethernet device structure.
11879  * @param[in] item
11880  *   The src port id match item.
11881  * @param[out] vport_id
11882  *   Pointer to put the vport id.
11883  * @param[out] all_ports
11884  *   Indicate if the item matches all ports.
11885  * @param[out] error
11886  *   Pointer to error structure.
11887  *
11888  * @return
11889  *   0 on success, a negative errno value otherwise and rte_errno is set.
11890  */
11891 int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
11892 				const struct rte_flow_item *item,
11893 				uint16_t *vport_id,
11894 				bool *all_ports,
11895 				struct rte_flow_error *error)
11896 {
11897 	struct mlx5_priv *port_priv;
11898 	const struct rte_flow_item_port_id *pid_v = NULL;
11899 	const struct rte_flow_item_ethdev *dev_v = NULL;
11900 	uint32_t esw_mgr_port;
11901 	uint32_t src_port;
11902 
11903 	if (all_ports)
11904 		*all_ports = false;
11905 	switch (item->type) {
11906 	case RTE_FLOW_ITEM_TYPE_PORT_ID:
11907 		pid_v = item->spec;
11908 		if (!pid_v)
11909 			return 0;
11910 		src_port = pid_v->id;
11911 		esw_mgr_port = MLX5_PORT_ESW_MGR;
11912 		break;
11913 	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
11914 		dev_v = item->spec;
11915 		if (!dev_v) {
11916 			if (all_ports)
11917 				*all_ports = true;
11918 			return 0;
11919 		}
11920 		src_port = dev_v->port_id;
11921 		esw_mgr_port = MLX5_REPRESENTED_PORT_ESW_MGR;
11922 		break;
11923 	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
11924 		src_port = MLX5_REPRESENTED_PORT_ESW_MGR;
11925 		esw_mgr_port = MLX5_REPRESENTED_PORT_ESW_MGR;
11926 		break;
11927 	default:
11928 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
11929 					  NULL, "Incorrect item type.");
11930 	}
11931 	if (src_port == esw_mgr_port) {
11932 		*vport_id = mlx5_flow_get_esw_manager_vport_id(dev);
11933 	} else {
11934 		port_priv = mlx5_port_to_eswitch_info(src_port, false);
11935 		if (!port_priv)
11936 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
11937 						  NULL, "Failed to get port info.");
11938 		*vport_id = port_priv->representor_id;
11939 	}
11940 
11941 	return 0;
11942 }
11943 
11944 int
11945 mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
11946 			      uint16_t *proxy_port_id,
11947 			      struct rte_flow_error *error)
11948 {
11949 	const struct mlx5_priv *priv = dev->data->dev_private;
11950 	uint16_t port_id;
11951 
11952 	if (!priv->sh->config.dv_esw_en)
11953 		return rte_flow_error_set(error, EINVAL,
11954 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11955 					  NULL,
11956 					  "unable to provide a proxy port"
11957 					  " without E-Switch configured");
11958 	if (!priv->master && !priv->representor)
11959 		return rte_flow_error_set(error, EINVAL,
11960 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11961 					  NULL,
11962 					  "unable to provide a proxy port"
11963 					  " for port which is not a master"
11964 					  " or a representor port");
11965 	if (priv->master) {
11966 		*proxy_port_id = dev->data->port_id;
11967 		return 0;
11968 	}
11969 	MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
11970 		const struct rte_eth_dev *port_dev = &rte_eth_devices[port_id];
11971 		const struct mlx5_priv *port_priv = port_dev->data->dev_private;
11972 
11973 		if (port_priv->master &&
11974 		    port_priv->domain_id == priv->domain_id) {
11975 			*proxy_port_id = port_id;
11976 			return 0;
11977 		}
11978 	}
11979 	return rte_flow_error_set(error, ENODEV,
11980 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11981 				  NULL, "unable to find a proxy port");
11982 }
11983 
11984 /**
11985  * Discover IPv6 traffic class ID support in rdma-core and firmware.
11986  *
11987  * @param dev
11988  *   Ethernet device.
11989  *
11990  * @return
11991  *   0, rdma-core is good to work with firmware.
11992  *   -EOPNOTSUPP, rdma-core could not work with new IPv6 TC ID.
11993  */
11994 int
11995 mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev)
11996 {
11997 	struct rte_flow_action_set_dscp set_dscp;
11998 	struct rte_flow_attr attr;
11999 	struct rte_flow_action actions[2];
12000 	struct rte_flow_item items[3];
12001 	struct rte_flow_error error;
12002 	uint32_t flow_idx;
12003 
12004 	memset(&attr, 0, sizeof(attr));
12005 	memset(actions, 0, sizeof(actions));
12006 	memset(items, 0, sizeof(items));
12007 	attr.group = 1;
12008 	attr.egress = 1;
12009 	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
12010 	items[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
12011 	items[2].type = RTE_FLOW_ITEM_TYPE_END;
12012 	/* Random value */
12013 	set_dscp.dscp = 9;
12014 	actions[0].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
12015 	actions[0].conf = &set_dscp;
12016 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
12017 
12018 	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, items, actions, true, &error);
12019 	if (!flow_idx)
12020 		return -EOPNOTSUPP;
12021 
12022 	flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
12023 	return 0;
12024 }
12025 
12026 void *
12027 rte_pmd_mlx5_create_geneve_tlv_parser(uint16_t port_id,
12028 				      const struct rte_pmd_mlx5_geneve_tlv tlv_list[],
12029 				      uint8_t nb_options)
12030 {
12031 #ifdef HAVE_MLX5_HWS_SUPPORT
12032 	return mlx5_geneve_tlv_parser_create(port_id, tlv_list, nb_options);
12033 #else
12034 	(void)port_id;
12035 	(void)tlv_list;
12036 	(void)nb_options;
12037 	DRV_LOG(ERR, "%s is not supported.", __func__);
12038 	rte_errno = ENOTSUP;
12039 	return NULL;
12040 #endif
12041 }
12042 
12043 int
12044 rte_pmd_mlx5_destroy_geneve_tlv_parser(void *handle)
12045 {
12046 #ifdef HAVE_MLX5_HWS_SUPPORT
12047 	return mlx5_geneve_tlv_parser_destroy(handle);
12048 #else
12049 	(void)handle;
12050 	DRV_LOG(ERR, "%s is not supported.", __func__);
12051 	rte_errno = ENOTSUP;
12052 	return -rte_errno;
12053 #endif
12054 }
12055