/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
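
/*
 * Illustrative sketch (not part of the driver): the graph above is consumed
 * by the generic rte_flow_expand_rss() helper when an RSS flow is created.
 * Given the pattern ETH / IPV4 / END and RSS types covering UDP and TCP,
 * the walk from the root emits the original pattern plus the missing tails,
 * e.g. ETH / IPV4 / UDP / END and ETH / IPV4 / TCP / END, so one RSS rule
 * fans out into one device flow per expanded pattern. A hedged sketch of
 * such a call (buffer sizing assumed; the helper and its signature come
 * from the rte_flow expansion support, not from this file):
 *
 *	union {
 *		struct rte_flow_expand_rss buf;
 *		uint8_t buffer[2048];
 *	} expand;
 *	int n = rte_flow_expand_rss(&expand.buf, sizeof(expand.buffer),
 *				    pattern,
 *				    ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
 *				    mlx5_support_expansion,
 *				    MLX5_EXPANSION_ROOT);
 */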

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is used in the meter suffix
		 * sub-flow when a meter is present. It is safe to share the
		 * same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages registers for both color
		 * match and flow match. If the meter color match does not use
		 * REG_C_2, the REG_C_x used by the meter color match must be
		 * skipped.
		 * If no meter is enabled, all available registers are free to
		 * use.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NONE)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
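
/*
 * Illustrative usage sketch (hypothetical caller): resolve the REG_C_x
 * register backing application tag index 0. A negative return means
 * rte_errno is set and *error describes the failure.
 *
 *	struct rte_flow_error err;
 *	int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, &err);
 *	if (reg < 0)
 *		return reg;
 *	MLX5_ASSERT(reg != REG_NONE);
 */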

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata register, which could mean,
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}
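
/*
 * Worked example for the probe above: if a drop flow can be created at
 * Verbs priority 15 (the vprio[] entry 16), the device exposes 16 Verbs
 * priorities and this function returns RTE_DIM(priority_map_5) == 5;
 * if only priority 7 succeeds, it returns RTE_DIM(priority_map_3) == 3.
 */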

/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
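
/*
 * Worked example: with priv->config.flow_prio == 5 a rule with base
 * priority 1 and item subpriority 2 maps to priority_map_5[1][2] == 5;
 * with flow_prio == 3 the same rule maps to priority_map_3[1][2] == 4.
 */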

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering the fields supported by the NIC, to compare with the
 *   user mask.
 * @param[in] size
 *   Bit-mask size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non-supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
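
/*
 * Illustrative example: if nic_mask only covers the IPv4 addresses, a user
 * mask that also sets type_of_service bits fails the
 * (nic_mask[i] | mask[i]) != nic_mask[i] test above and is rejected with
 * ENOTSUP; a spec/last range such as 10.0.0.1..10.0.0.9 under a full mask
 * is rejected too, because the masked spec and last bytes differ.
 */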

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}
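
/*
 * Illustrative example: for a flow with rss.level == 2 (inner RSS), the
 * inner IPv4 item of a VXLAN pattern keeps its hash fields and gains
 * IBV_RX_HASH_INNER, while the outer IPv4 item of the same flow returns 0
 * so no hash is computed on the outer header; with rss.level <= 1 the
 * inner item returns 0 instead.
 */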

/**
 * Lookup and set the ptype in the data Rx part. A single ptype can be used;
 * if several tunnel rules are used on this queue, the tunnel ptype will be
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
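
/*
 * Illustrative example: a queue whose only tunnel flows are VXLAN ends up
 * with rxq.tunnel == (RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP), which the
 * datapath can report in the mbuf packet type; as soon as a GRE flow is
 * added to the same queue two counters are non-zero, so the loop above
 * resets tunnel_ptype to 0 and no tunnel ptype is reported.
 */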

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must always be enabled (metadata may arrive
		 * from another port - not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, flow, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to flow structure.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, flow, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}
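
/*
 * Usage sketch (hypothetical caller): check whether an action list carries
 * a QUEUE fate action and fetch its configuration.
 *
 *	const struct rte_flow_action *qa =
 *		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_QUEUE);
 *	if (qa) {
 *		const struct rte_flow_action_queue *conf = qa->conf;
 *		uint16_t rx_queue = conf->index;
 *	}
 */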

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}
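
/*
 * Illustrative example: a minimal queue action configuration that passes
 * the checks above on an ingress flow, assuming Rx queue 0 is configured.
 *
 *	struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */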

/*
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}
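
/*
 * Illustrative example: an RSS action configuration that satisfies the
 * checks above, hashing IPv4/UDP traffic over two configured Rx queues
 * with the default key (key == NULL with key_len == 0 is accepted).
 *
 *	uint16_t queues[] = { 0, 1 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 1,
 *		.types = ETH_RSS_IP | ETH_RSS_UDP,
 *		.key = NULL,
 *		.key_len = 0,
 *		.queue = queues,
 *		.queue_num = 2,
 *	};
 */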

/*
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups is not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow "
					  "L3 layers");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow VLAN");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we have a virtual machine
			 * with SR-IOV enabled, and we have to create a VLAN
			 * interface to make the hypervisor set up the
			 * E-Switch vport context correctly. We avoid creating
			 * multiple VLAN interfaces, so we cannot support a
			 * VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From the Verbs perspective an empty VLAN is equivalent
	 * to a packet without a VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}
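
/*
 * Illustrative example: a VLAN item matching VID 5 with the default
 * 0x0fff TCI mask passes the checks above, while a spec whose TCI masks
 * to zero is rejected because Verbs treats an all-zero VLAN match as a
 * packet without a VLAN header.
 */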

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the Ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV4)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 cannot follow L2/VLAN layer "
					  "which ether type is not IPv4");
	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}
1615 
1616 /**
1617  * Validate IPV6 item.
1618  *
1619  * @param[in] item
1620  *   Item specification.
1621  * @param[in] item_flags
1622  *   Bit-fields that holds the items detected until now.
1623  * @param[in] acc_mask
1624  *   Acceptable mask, if NULL default internal default mask
1625  *   will be used to check whether item fields are supported.
1626  * @param[out] error
1627  *   Pointer to error structure.
1628  *
1629  * @return
1630  *   0 on success, a negative errno value otherwise and rte_errno is set.
1631  */
1632 int
1633 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
1634 			     uint64_t item_flags,
1635 			     uint64_t last_item,
1636 			     uint16_t ether_type,
1637 			     const struct rte_flow_item_ipv6 *acc_mask,
1638 			     struct rte_flow_error *error)
1639 {
1640 	const struct rte_flow_item_ipv6 *mask = item->mask;
1641 	const struct rte_flow_item_ipv6 *spec = item->spec;
1642 	const struct rte_flow_item_ipv6 nic_mask = {
1643 		.hdr = {
1644 			.src_addr =
1645 				"\xff\xff\xff\xff\xff\xff\xff\xff"
1646 				"\xff\xff\xff\xff\xff\xff\xff\xff",
1647 			.dst_addr =
1648 				"\xff\xff\xff\xff\xff\xff\xff\xff"
1649 				"\xff\xff\xff\xff\xff\xff\xff\xff",
1650 			.vtc_flow = RTE_BE32(0xffffffff),
1651 			.proto = 0xff,
1652 		},
1653 	};
1654 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1655 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1656 				      MLX5_FLOW_LAYER_OUTER_L3;
1657 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1658 				      MLX5_FLOW_LAYER_OUTER_L4;
1659 	int ret;
1660 	uint8_t next_proto = 0xFF;
1661 	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
1662 				  MLX5_FLOW_LAYER_OUTER_VLAN |
1663 				  MLX5_FLOW_LAYER_INNER_VLAN);
1664 
1665 	if ((last_item & l2_vlan) && ether_type &&
1666 	    ether_type != RTE_ETHER_TYPE_IPV6)
1667 		return rte_flow_error_set(error, EINVAL,
1668 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1669 					  "IPv6 cannot follow L2/VLAN layer "
1670 					  "which ether type is not IPv6");
1671 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
1672 		if (mask && spec)
1673 			next_proto = mask->hdr.proto & spec->hdr.proto;
1674 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
1675 			return rte_flow_error_set(error, EINVAL,
1676 						  RTE_FLOW_ERROR_TYPE_ITEM,
1677 						  item,
1678 						  "multiple tunnel "
1679 						  "not supported");
1680 	}
1681 	if (item_flags & MLX5_FLOW_LAYER_IPIP)
1682 		return rte_flow_error_set(error, EINVAL,
1683 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1684 					  "wrong tunnel type - IPv4 specified "
1685 					  "but IPv6 item provided");
1686 	if (item_flags & l3m)
1687 		return rte_flow_error_set(error, ENOTSUP,
1688 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1689 					  "multiple L3 layers not supported");
1690 	else if (item_flags & l4m)
1691 		return rte_flow_error_set(error, EINVAL,
1692 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1693 					  "L3 cannot follow an L4 layer.");
1694 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
1695 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
1696 		return rte_flow_error_set(error, EINVAL,
1697 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1698 					  "L3 cannot follow an NVGRE layer.");
1699 	if (!mask)
1700 		mask = &rte_flow_item_ipv6_mask;
1701 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1702 					acc_mask ? (const uint8_t *)acc_mask
1703 						 : (const uint8_t *)&nic_mask,
1704 					sizeof(struct rte_flow_item_ipv6),
1705 					error);
1706 	if (ret < 0)
1707 		return ret;
1708 	return 0;
1709 }
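
/*
 * Usage sketch for the validator above (illustrative only; the flags and
 * values are hypothetical, derived by a caller while walking the pattern):
 *
 *	uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L2;
 *	const struct rte_flow_item ipv6 = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV6,
 *		.mask = &rte_flow_item_ipv6_mask,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (mlx5_flow_validate_item_ipv6(&ipv6, item_flags,
 *					 MLX5_FLOW_LAYER_OUTER_L2,
 *					 RTE_ETHER_TYPE_IPV6,
 *					 NULL, &err) < 0)
 *		return -rte_errno;
 *	item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 *
 * With ether_type RTE_ETHER_TYPE_IPV4 instead, the ether-type check above
 * rejects the item.
 */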
1710 
1711 /**
1712  * Validate UDP item.
1713  *
1714  * @param[in] item
1715  *   Item specification.
1716  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1722  * @param[out] error
1723  *   Pointer to error structure.
1724  *
1725  * @return
1726  *   0 on success, a negative errno value otherwise and rte_errno is set.
1727  */
1728 int
1729 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1730 			    uint64_t item_flags,
1731 			    uint8_t target_protocol,
1732 			    struct rte_flow_error *error)
1733 {
1734 	const struct rte_flow_item_udp *mask = item->mask;
1735 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1736 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1737 				      MLX5_FLOW_LAYER_OUTER_L3;
1738 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1739 				      MLX5_FLOW_LAYER_OUTER_L4;
1740 	int ret;
1741 
1742 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
1743 		return rte_flow_error_set(error, EINVAL,
1744 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1745 					  "protocol filtering not compatible"
1746 					  " with UDP layer");
1747 	if (!(item_flags & l3m))
1748 		return rte_flow_error_set(error, EINVAL,
1749 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1750 					  "L3 is mandatory to filter on L4");
1751 	if (item_flags & l4m)
1752 		return rte_flow_error_set(error, EINVAL,
1753 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1754 					  "multiple L4 layers not supported");
1755 	if (!mask)
1756 		mask = &rte_flow_item_udp_mask;
1757 	ret = mlx5_flow_item_acceptable
1758 		(item, (const uint8_t *)mask,
1759 		 (const uint8_t *)&rte_flow_item_udp_mask,
1760 		 sizeof(struct rte_flow_item_udp), error);
1761 	if (ret < 0)
1762 		return ret;
1763 	return 0;
1764 }
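
/*
 * Usage sketch (illustrative only, hypothetical values): the caller derives
 * target_protocol from the preceding L3 item and accumulates item_flags as
 * the pattern is walked.
 *
 *	const struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	const struct rte_flow_item udp = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &udp_spec,
 *		.mask = &rte_flow_item_udp_mask,
 *	};
 *	uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *	struct rte_flow_error err;
 *
 *	if (mlx5_flow_validate_item_udp(&udp, item_flags,
 *					IPPROTO_UDP, &err) < 0)
 *		return -rte_errno;
 *	item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 */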
1765 
1766 /**
1767  * Validate TCP item.
1768  *
1769  * @param[in] item
1770  *   Item specification.
1771  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1773  * @param[in] target_protocol
1774  *   The next protocol in the previous item.
1775  * @param[out] error
1776  *   Pointer to error structure.
1777  *
1778  * @return
1779  *   0 on success, a negative errno value otherwise and rte_errno is set.
1780  */
1781 int
1782 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
1783 			    uint64_t item_flags,
1784 			    uint8_t target_protocol,
1785 			    const struct rte_flow_item_tcp *flow_mask,
1786 			    struct rte_flow_error *error)
1787 {
1788 	const struct rte_flow_item_tcp *mask = item->mask;
1789 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1790 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1791 				      MLX5_FLOW_LAYER_OUTER_L3;
1792 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1793 				      MLX5_FLOW_LAYER_OUTER_L4;
1794 	int ret;
1795 
1796 	MLX5_ASSERT(flow_mask);
1797 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
1798 		return rte_flow_error_set(error, EINVAL,
1799 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1800 					  "protocol filtering not compatible"
1801 					  " with TCP layer");
1802 	if (!(item_flags & l3m))
1803 		return rte_flow_error_set(error, EINVAL,
1804 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1805 					  "L3 is mandatory to filter on L4");
1806 	if (item_flags & l4m)
1807 		return rte_flow_error_set(error, EINVAL,
1808 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1809 					  "multiple L4 layers not supported");
1810 	if (!mask)
1811 		mask = &rte_flow_item_tcp_mask;
1812 	ret = mlx5_flow_item_acceptable
1813 		(item, (const uint8_t *)mask,
1814 		 (const uint8_t *)flow_mask,
1815 		 sizeof(struct rte_flow_item_tcp), error);
1816 	if (ret < 0)
1817 		return ret;
1818 	return 0;
1819 }
1820 
1821 /**
1822  * Validate VXLAN item.
1823  *
1824  * @param[in] item
1825  *   Item specification.
1826  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1830  * @param[out] error
1831  *   Pointer to error structure.
1832  *
1833  * @return
1834  *   0 on success, a negative errno value otherwise and rte_errno is set.
1835  */
1836 int
1837 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1838 			      uint64_t item_flags,
1839 			      struct rte_flow_error *error)
1840 {
1841 	const struct rte_flow_item_vxlan *spec = item->spec;
1842 	const struct rte_flow_item_vxlan *mask = item->mask;
1843 	int ret;
1844 	union vni {
1845 		uint32_t vlan_id;
1846 		uint8_t vni[4];
	} id = { .vlan_id = 0, };

1850 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1851 		return rte_flow_error_set(error, ENOTSUP,
1852 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1853 					  "multiple tunnel layers not"
1854 					  " supported");
	/*
	 * Verify an outer UDP header is present as required by
	 * https://tools.ietf.org/html/rfc7348
	 */
1859 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1860 		return rte_flow_error_set(error, EINVAL,
1861 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1862 					  "no outer UDP layer found");
1863 	if (!mask)
1864 		mask = &rte_flow_item_vxlan_mask;
1865 	ret = mlx5_flow_item_acceptable
1866 		(item, (const uint8_t *)mask,
1867 		 (const uint8_t *)&rte_flow_item_vxlan_mask,
1868 		 sizeof(struct rte_flow_item_vxlan),
1869 		 error);
1870 	if (ret < 0)
1871 		return ret;
	if (spec) {
		/* Build the masked 24-bit VNI in the scratch union. */
		memcpy(&id.vni[1], spec->vni, 3);
		id.vni[1] &= mask->vni[0];
		id.vni[2] &= mask->vni[1];
		id.vni[3] &= mask->vni[2];
	}
1876 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1877 		return rte_flow_error_set(error, ENOTSUP,
1878 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1879 					  "VXLAN tunnel must be fully defined");
1880 	return 0;
1881 }
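
/*
 * Illustrative only: a VXLAN item passes the checks above when the outer
 * chain is fully specified, e.g. ETH / IPV4 / UDP / VXLAN, so that the
 * accumulated item_flags carry the outer L2/L3/L4 bits (hypothetical
 * flags below):
 *
 *	uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L2 |
 *			      MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *			      MLX5_FLOW_LAYER_OUTER_L4_UDP;
 *	const struct rte_flow_item vxlan = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.mask = &rte_flow_item_vxlan_mask,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (mlx5_flow_validate_item_vxlan(&vxlan, item_flags, &err) < 0)
 *		return -rte_errno;
 *	item_flags |= MLX5_FLOW_LAYER_VXLAN;
 */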
1882 
1883 /**
1884  * Validate VXLAN_GPE item.
1885  *
1886  * @param[in] item
1887  *   Item specification.
1888  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
1894  * @param[out] error
1895  *   Pointer to error structure.
1896  *
1897  * @return
1898  *   0 on success, a negative errno value otherwise and rte_errno is set.
1899  */
1900 int
1901 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1902 				  uint64_t item_flags,
1903 				  struct rte_eth_dev *dev,
1904 				  struct rte_flow_error *error)
1905 {
1906 	struct mlx5_priv *priv = dev->data->dev_private;
1907 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1908 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1909 	int ret;
1910 	union vni {
1911 		uint32_t vlan_id;
1912 		uint8_t vni[4];
1913 	} id = { .vlan_id = 0, };
1914 
1915 	if (!priv->config.l3_vxlan_en)
1916 		return rte_flow_error_set(error, ENOTSUP,
1917 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1918 					  "L3 VXLAN is not enabled by device"
1919 					  " parameter and/or not configured in"
1920 					  " firmware");
1921 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1922 		return rte_flow_error_set(error, ENOTSUP,
1923 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1924 					  "multiple tunnel layers not"
1925 					  " supported");
	/*
	 * Verify an outer UDP header is present as required by the
	 * VXLAN-GPE specification (draft-ietf-nvo3-vxlan-gpe).
	 */
1930 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1931 		return rte_flow_error_set(error, EINVAL,
1932 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1933 					  "no outer UDP layer found");
1934 	if (!mask)
1935 		mask = &rte_flow_item_vxlan_gpe_mask;
1936 	ret = mlx5_flow_item_acceptable
1937 		(item, (const uint8_t *)mask,
1938 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1939 		 sizeof(struct rte_flow_item_vxlan_gpe),
1940 		 error);
1941 	if (ret < 0)
1942 		return ret;
1943 	if (spec) {
1944 		if (spec->protocol)
1945 			return rte_flow_error_set(error, ENOTSUP,
1946 						  RTE_FLOW_ERROR_TYPE_ITEM,
1947 						  item,
1948 						  "VxLAN-GPE protocol"
1949 						  " not supported");
		/* Build the masked 24-bit VNI in the scratch union. */
		memcpy(&id.vni[1], spec->vni, 3);
		id.vni[1] &= mask->vni[0];
		id.vni[2] &= mask->vni[1];
		id.vni[3] &= mask->vni[2];
1952 	}
1953 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1954 		return rte_flow_error_set(error, ENOTSUP,
1955 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1956 					  "VXLAN-GPE tunnel must be fully"
1957 					  " defined");
1958 	return 0;
}

/**
1961  * Validate GRE Key item.
1962  *
1963  * @param[in] item
1964  *   Item specification.
1965  * @param[in] item_flags
1966  *   Bit flags to mark detected items.
 * @param[in] gre_item
 *   Pointer to the GRE item that precedes this GRE key item.
1969  * @param[out] error
1970  *   Pointer to error structure.
1971  *
1972  * @return
1973  *   0 on success, a negative errno value otherwise and rte_errno is set.
1974  */
1975 int
1976 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
1977 				uint64_t item_flags,
1978 				const struct rte_flow_item *gre_item,
1979 				struct rte_flow_error *error)
1980 {
1981 	const rte_be32_t *mask = item->mask;
1982 	int ret = 0;
1983 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
1984 	const struct rte_flow_item_gre *gre_spec;
1985 	const struct rte_flow_item_gre *gre_mask;
1986 
1987 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
1988 		return rte_flow_error_set(error, ENOTSUP,
1989 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Multiple GRE keys are not supported");
1991 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
1992 		return rte_flow_error_set(error, ENOTSUP,
1993 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1994 					  "No preceding GRE header");
1995 	if (item_flags & MLX5_FLOW_LAYER_INNER)
1996 		return rte_flow_error_set(error, ENOTSUP,
1997 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GRE key cannot follow inner layers");
1999 	gre_mask = gre_item->mask;
2000 	if (!gre_mask)
2001 		gre_mask = &rte_flow_item_gre_mask;
2002 	gre_spec = gre_item->spec;
2003 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2004 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2005 		return rte_flow_error_set(error, EINVAL,
2006 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2007 					  "Key bit must be on");
2008 
2009 	if (!mask)
2010 		mask = &gre_key_default_mask;
2011 	ret = mlx5_flow_item_acceptable
2012 		(item, (const uint8_t *)mask,
2013 		 (const uint8_t *)&gre_key_default_mask,
2014 		 sizeof(rte_be32_t), error);
2015 	return ret;
2016 }
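
/*
 * Illustrative only (hypothetical values; item_flags is assumed to have
 * been accumulated earlier in the pattern walk): a GRE_KEY item is
 * accepted when the preceding GRE item leaves the K bit
 * (RTE_BE16(0x2000) in c_rsvd0_ver) either unmasked or set in its spec:
 *
 *	const struct rte_flow_item_gre gre_spec = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),
 *	};
 *	const struct rte_flow_item_gre gre_mask = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),
 *	};
 *	const struct rte_flow_item gre = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE,
 *		.spec = &gre_spec, .mask = &gre_mask,
 *	};
 *	rte_be32_t key = RTE_BE32(0x1234);
 *	const struct rte_flow_item gre_key = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
 *		.spec = &key,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (mlx5_flow_validate_item_gre_key(&gre_key,
 *					    item_flags | MLX5_FLOW_LAYER_GRE,
 *					    &gre, &err) < 0)
 *		return -rte_errno;
 */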
2017 
2018 /**
2019  * Validate GRE item.
2020  *
2021  * @param[in] item
2022  *   Item specification.
2023  * @param[in] item_flags
2024  *   Bit flags to mark detected items.
2025  * @param[in] target_protocol
2026  *   The next protocol in the previous item.
2027  * @param[out] error
2028  *   Pointer to error structure.
2029  *
2030  * @return
2031  *   0 on success, a negative errno value otherwise and rte_errno is set.
2032  */
2033 int
2034 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2035 			    uint64_t item_flags,
2036 			    uint8_t target_protocol,
2037 			    struct rte_flow_error *error)
2038 {
2039 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2040 	const struct rte_flow_item_gre *mask = item->mask;
2041 	int ret;
2042 	const struct rte_flow_item_gre nic_mask = {
2043 		.c_rsvd0_ver = RTE_BE16(0xB000),
2044 		.protocol = RTE_BE16(UINT16_MAX),
2045 	};
2046 
2047 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2048 		return rte_flow_error_set(error, EINVAL,
2049 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2050 					  "protocol filtering not compatible"
2051 					  " with this GRE layer");
2052 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2053 		return rte_flow_error_set(error, ENOTSUP,
2054 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2055 					  "multiple tunnel layers not"
2056 					  " supported");
2057 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2058 		return rte_flow_error_set(error, ENOTSUP,
2059 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2060 					  "L3 Layer is missing");
2061 	if (!mask)
2062 		mask = &rte_flow_item_gre_mask;
2063 	ret = mlx5_flow_item_acceptable
2064 		(item, (const uint8_t *)mask,
2065 		 (const uint8_t *)&nic_mask,
2066 		 sizeof(struct rte_flow_item_gre), error);
2067 	if (ret < 0)
2068 		return ret;
2069 #ifndef HAVE_MLX5DV_DR
2070 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2071 	if (spec && (spec->protocol & mask->protocol))
2072 		return rte_flow_error_set(error, ENOTSUP,
2073 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2074 					  "without MPLS support the"
2075 					  " specification cannot be used for"
2076 					  " filtering");
2077 #endif
2078 #endif
2079 	return 0;
2080 }
2081 
2082 /**
2083  * Validate Geneve item.
2084  *
2085  * @param[in] item
2086  *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
2091  * @param[out] error
2092  *   Pointer to error structure.
2093  *
2094  * @return
2095  *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
2099 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2100 			       uint64_t item_flags,
2101 			       struct rte_eth_dev *dev,
2102 			       struct rte_flow_error *error)
2103 {
2104 	struct mlx5_priv *priv = dev->data->dev_private;
2105 	const struct rte_flow_item_geneve *spec = item->spec;
2106 	const struct rte_flow_item_geneve *mask = item->mask;
2107 	int ret;
2108 	uint16_t gbhdr;
2109 	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2110 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2111 	const struct rte_flow_item_geneve nic_mask = {
2112 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2113 		.vni = "\xff\xff\xff",
2114 		.protocol = RTE_BE16(UINT16_MAX),
2115 	};
2116 
2117 	if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2118 		return rte_flow_error_set(error, ENOTSUP,
2119 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Geneve is not enabled by device"
2121 					  " parameter and/or not configured in"
2122 					  " firmware");
2123 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2124 		return rte_flow_error_set(error, ENOTSUP,
2125 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2126 					  "multiple tunnel layers not"
2127 					  " supported");
	/*
	 * Verify an outer UDP header is present as required by the
	 * Geneve specification (draft-ietf-nvo3-geneve).
	 */
2132 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2133 		return rte_flow_error_set(error, EINVAL,
2134 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2135 					  "no outer UDP layer found");
2136 	if (!mask)
2137 		mask = &rte_flow_item_geneve_mask;
2138 	ret = mlx5_flow_item_acceptable
2139 				  (item, (const uint8_t *)mask,
2140 				   (const uint8_t *)&nic_mask,
2141 				   sizeof(struct rte_flow_item_geneve), error);
2142 	if (ret)
2143 		return ret;
2144 	if (spec) {
2145 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2146 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2147 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2148 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2149 			return rte_flow_error_set(error, ENOTSUP,
2150 						  RTE_FLOW_ERROR_TYPE_ITEM,
2151 						  item,
						  "unsupported Geneve protocol"
						  " fields are being used");
2154 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2155 			return rte_flow_error_set
2156 					(error, ENOTSUP,
2157 					 RTE_FLOW_ERROR_TYPE_ITEM,
2158 					 item,
2159 					 "Unsupported Geneve options length");
2160 	}
2161 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2162 		return rte_flow_error_set
2163 				    (error, ENOTSUP,
2164 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
2165 				     "Geneve tunnel must be fully defined");
2166 	return 0;
2167 }
2168 
2169 /**
2170  * Validate MPLS item.
2171  *
2172  * @param[in] dev
2173  *   Pointer to the rte_eth_dev structure.
2174  * @param[in] item
2175  *   Item specification.
2176  * @param[in] item_flags
2177  *   Bit-fields that holds the items detected until now.
2178  * @param[in] prev_layer
2179  *   The protocol layer indicated in previous item.
2180  * @param[out] error
2181  *   Pointer to error structure.
2182  *
2183  * @return
2184  *   0 on success, a negative errno value otherwise and rte_errno is set.
2185  */
2186 int
2187 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2188 			     const struct rte_flow_item *item __rte_unused,
2189 			     uint64_t item_flags __rte_unused,
2190 			     uint64_t prev_layer __rte_unused,
2191 			     struct rte_flow_error *error)
2192 {
2193 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2194 	const struct rte_flow_item_mpls *mask = item->mask;
2195 	struct mlx5_priv *priv = dev->data->dev_private;
2196 	int ret;
2197 
2198 	if (!priv->config.mpls_en)
2199 		return rte_flow_error_set(error, ENOTSUP,
2200 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2201 					  "MPLS not supported or"
2202 					  " disabled in firmware"
2203 					  " configuration.");
	/* MPLS over IP, UDP or GRE is allowed. */
2205 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2206 			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
2207 			    MLX5_FLOW_LAYER_GRE)))
2208 		return rte_flow_error_set(error, EINVAL,
2209 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2210 					  "protocol filtering not compatible"
2211 					  " with MPLS layer");
2212 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2213 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2214 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
2215 		return rte_flow_error_set(error, ENOTSUP,
2216 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2217 					  "multiple tunnel layers not"
2218 					  " supported");
2219 	if (!mask)
2220 		mask = &rte_flow_item_mpls_mask;
2221 	ret = mlx5_flow_item_acceptable
2222 		(item, (const uint8_t *)mask,
2223 		 (const uint8_t *)&rte_flow_item_mpls_mask,
2224 		 sizeof(struct rte_flow_item_mpls), error);
2225 	if (ret < 0)
2226 		return ret;
2227 	return 0;
2228 #endif
2229 	return rte_flow_error_set(error, ENOTSUP,
2230 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
				  "MPLS is not supported by Verbs, please"
				  " update the rdma-core library.");
2233 }
2234 
2235 /**
2236  * Validate NVGRE item.
2237  *
2238  * @param[in] item
2239  *   Item specification.
2240  * @param[in] item_flags
2241  *   Bit flags to mark detected items.
2242  * @param[in] target_protocol
2243  *   The next protocol in the previous item.
2244  * @param[out] error
2245  *   Pointer to error structure.
2246  *
2247  * @return
2248  *   0 on success, a negative errno value otherwise and rte_errno is set.
2249  */
2250 int
2251 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2252 			      uint64_t item_flags,
2253 			      uint8_t target_protocol,
2254 			      struct rte_flow_error *error)
2255 {
2256 	const struct rte_flow_item_nvgre *mask = item->mask;
2257 	int ret;
2258 
2259 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2260 		return rte_flow_error_set(error, EINVAL,
2261 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2262 					  "protocol filtering not compatible"
2263 					  " with this GRE layer");
2264 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2265 		return rte_flow_error_set(error, ENOTSUP,
2266 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2267 					  "multiple tunnel layers not"
2268 					  " supported");
2269 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2270 		return rte_flow_error_set(error, ENOTSUP,
2271 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2272 					  "L3 Layer is missing");
2273 	if (!mask)
2274 		mask = &rte_flow_item_nvgre_mask;
2275 	ret = mlx5_flow_item_acceptable
2276 		(item, (const uint8_t *)mask,
2277 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
2278 		 sizeof(struct rte_flow_item_nvgre), error);
2279 	if (ret < 0)
2280 		return ret;
2281 	return 0;
2282 }
2283 
2284 /* Allocate unique ID for the split Q/RSS subflows. */
2285 static uint32_t
2286 flow_qrss_get_id(struct rte_eth_dev *dev)
2287 {
2288 	struct mlx5_priv *priv = dev->data->dev_private;
2289 	uint32_t qrss_id, ret;
2290 
2291 	ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
2292 	if (ret)
2293 		return 0;
2294 	MLX5_ASSERT(qrss_id);
2295 	return qrss_id;
2296 }
2297 
2298 /* Free unique ID for the split Q/RSS subflows. */
2299 static void
flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
2301 {
2302 	struct mlx5_priv *priv = dev->data->dev_private;
2303 
2304 	if (qrss_id)
2305 		mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
2306 }
2307 
2308 /**
 * Release resources related to the QUEUE/RSS action split.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Flow to release IDs from.
2315  */
2316 static void
2317 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2318 			     struct rte_flow *flow)
2319 {
2320 	struct mlx5_priv *priv = dev->data->dev_private;
2321 	uint32_t handle_idx;
2322 	struct mlx5_flow_handle *dev_handle;
2323 
2324 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2325 		       handle_idx, dev_handle, next)
2326 		if (dev_handle->split_flow_id)
2327 			flow_qrss_free_id(dev, dev_handle->split_flow_id);
2328 }
2329 
2330 static int
2331 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2332 		   const struct rte_flow_attr *attr __rte_unused,
2333 		   const struct rte_flow_item items[] __rte_unused,
2334 		   const struct rte_flow_action actions[] __rte_unused,
2335 		   bool external __rte_unused,
2336 		   struct rte_flow_error *error)
2337 {
2338 	return rte_flow_error_set(error, ENOTSUP,
2339 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2340 }
2341 
2342 static struct mlx5_flow *
2343 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
2344 		  const struct rte_flow_attr *attr __rte_unused,
2345 		  const struct rte_flow_item items[] __rte_unused,
2346 		  const struct rte_flow_action actions[] __rte_unused,
2347 		  struct rte_flow_error *error)
2348 {
2349 	rte_flow_error_set(error, ENOTSUP,
2350 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2351 	return NULL;
2352 }
2353 
2354 static int
2355 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2356 		    struct mlx5_flow *dev_flow __rte_unused,
2357 		    const struct rte_flow_attr *attr __rte_unused,
2358 		    const struct rte_flow_item items[] __rte_unused,
2359 		    const struct rte_flow_action actions[] __rte_unused,
2360 		    struct rte_flow_error *error)
2361 {
2362 	return rte_flow_error_set(error, ENOTSUP,
2363 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2364 }
2365 
2366 static int
2367 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
2368 		struct rte_flow *flow __rte_unused,
2369 		struct rte_flow_error *error)
2370 {
2371 	return rte_flow_error_set(error, ENOTSUP,
2372 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2373 }
2374 
2375 static void
2376 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
2377 		 struct rte_flow *flow __rte_unused)
2378 {
2379 }
2380 
2381 static void
2382 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
2383 		  struct rte_flow *flow __rte_unused)
2384 {
2385 }
2386 
2387 static int
2388 flow_null_query(struct rte_eth_dev *dev __rte_unused,
2389 		struct rte_flow *flow __rte_unused,
2390 		const struct rte_flow_action *actions __rte_unused,
2391 		void *data __rte_unused,
2392 		struct rte_flow_error *error)
2393 {
2394 	return rte_flow_error_set(error, ENOTSUP,
2395 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2396 }
2397 
2398 /* Void driver to protect from null pointer reference. */
2399 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
2400 	.validate = flow_null_validate,
2401 	.prepare = flow_null_prepare,
2402 	.translate = flow_null_translate,
2403 	.apply = flow_null_apply,
2404 	.remove = flow_null_remove,
2405 	.destroy = flow_null_destroy,
2406 	.query = flow_null_query,
2407 };
2408 
2409 /**
2410  * Select flow driver type according to flow attributes and device
2411  * configuration.
2412  *
2413  * @param[in] dev
2414  *   Pointer to the dev structure.
2415  * @param[in] attr
2416  *   Pointer to the flow attributes.
2417  *
2418  * @return
 *   Flow driver type on success, MLX5_FLOW_TYPE_MAX otherwise.
2420  */
2421 static enum mlx5_flow_drv_type
2422 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
2423 {
2424 	struct mlx5_priv *priv = dev->data->dev_private;
2425 	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
2426 
2427 	if (attr->transfer && priv->config.dv_esw_en)
2428 		type = MLX5_FLOW_TYPE_DV;
2429 	if (!attr->transfer)
2430 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
2431 						 MLX5_FLOW_TYPE_VERBS;
2432 	return type;
2433 }
2434 
2435 #define flow_get_drv_ops(type) flow_drv_ops[type]
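
/*
 * Illustrative dispatch through the selection logic and table above
 * (dev is a hypothetical started port): with dv_flow_en=1 a non-transfer
 * flow resolves to the DV driver, otherwise to the Verbs one; a transfer
 * flow without dv_esw_en resolves to MLX5_FLOW_TYPE_MAX, i.e. the null
 * driver, which rejects it.
 *
 *	const struct rte_flow_attr attr = { .ingress = 1 };
 *	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, &attr);
 *	const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(type);
 */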
2436 
2437 /**
2438  * Flow driver validation API. This abstracts calling driver specific functions.
2439  * The type of flow driver is determined according to flow attributes.
2440  *
2441  * @param[in] dev
2442  *   Pointer to the dev structure.
2443  * @param[in] attr
2444  *   Pointer to the flow attributes.
2445  * @param[in] items
2446  *   Pointer to the list of items.
2447  * @param[in] actions
2448  *   Pointer to the list of actions.
2449  * @param[in] external
 *   This flow rule is created by a request external to the PMD.
2451  * @param[out] error
2452  *   Pointer to the error structure.
2453  *
2454  * @return
2455  *   0 on success, a negative errno value otherwise and rte_errno is set.
2456  */
2457 static inline int
2458 flow_drv_validate(struct rte_eth_dev *dev,
2459 		  const struct rte_flow_attr *attr,
2460 		  const struct rte_flow_item items[],
2461 		  const struct rte_flow_action actions[],
2462 		  bool external, struct rte_flow_error *error)
2463 {
2464 	const struct mlx5_flow_driver_ops *fops;
2465 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2466 
2467 	fops = flow_get_drv_ops(type);
2468 	return fops->validate(dev, attr, items, actions, external, error);
2469 }
2470 
2471 /**
2472  * Flow driver preparation API. This abstracts calling driver specific
2473  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2474  * calculates the size of memory required for device flow, allocates the memory,
2475  * initializes the device flow and returns the pointer.
2476  *
2477  * @note
 *   This function initializes the device flow structure, such as dv or verbs
 *   in struct mlx5_flow. However, it is the caller's responsibility to
 *   initialize the rest. For example, adding the returned device flow to the
 *   flow's dev_handles list and setting the backward reference to the flow
 *   must be done outside of this function. The layers field is not filled
 *   either.
2483  *
2484  * @param[in] dev
2485  *   Pointer to the dev structure.
2486  * @param[in] attr
2487  *   Pointer to the flow attributes.
2488  * @param[in] items
2489  *   Pointer to the list of items.
2490  * @param[in] actions
2491  *   Pointer to the list of actions.
2492  * @param[out] error
2493  *   Pointer to the error structure.
2494  *
2495  * @return
2496  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
2497  */
2498 static inline struct mlx5_flow *
2499 flow_drv_prepare(struct rte_eth_dev *dev,
2500 		 const struct rte_flow *flow,
2501 		 const struct rte_flow_attr *attr,
2502 		 const struct rte_flow_item items[],
2503 		 const struct rte_flow_action actions[],
2504 		 struct rte_flow_error *error)
2505 {
2506 	const struct mlx5_flow_driver_ops *fops;
2507 	enum mlx5_flow_drv_type type = flow->drv_type;
2508 
2509 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2510 	fops = flow_get_drv_ops(type);
2511 	return fops->prepare(dev, attr, items, actions, error);
2512 }
2513 
2514 /**
2515  * Flow driver translation API. This abstracts calling driver specific
2516  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2517  * translates a generic flow into a driver flow. flow_drv_prepare() must
2518  * precede.
2519  *
2520  * @note
2521  *   dev_flow->layers could be filled as a result of parsing during translation
2522  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
2523  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
2524  *   flow->actions could be overwritten even though all the expanded dev_flows
2525  *   have the same actions.
2526  *
2527  * @param[in] dev
2528  *   Pointer to the rte dev structure.
2529  * @param[in, out] dev_flow
2530  *   Pointer to the mlx5 flow.
2531  * @param[in] attr
2532  *   Pointer to the flow attributes.
2533  * @param[in] items
2534  *   Pointer to the list of items.
2535  * @param[in] actions
2536  *   Pointer to the list of actions.
2537  * @param[out] error
2538  *   Pointer to the error structure.
2539  *
2540  * @return
2541  *   0 on success, a negative errno value otherwise and rte_errno is set.
2542  */
2543 static inline int
2544 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
2545 		   const struct rte_flow_attr *attr,
2546 		   const struct rte_flow_item items[],
2547 		   const struct rte_flow_action actions[],
2548 		   struct rte_flow_error *error)
2549 {
2550 	const struct mlx5_flow_driver_ops *fops;
2551 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
2552 
2553 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2554 	fops = flow_get_drv_ops(type);
2555 	return fops->translate(dev, dev_flow, attr, items, actions, error);
2556 }
2557 
2558 /**
2559  * Flow driver apply API. This abstracts calling driver specific functions.
2560  * Parent flow (rte_flow) should have driver type (drv_type). It applies
2561  * translated driver flows on to device. flow_drv_translate() must precede.
2562  *
2563  * @param[in] dev
2564  *   Pointer to Ethernet device structure.
2565  * @param[in, out] flow
2566  *   Pointer to flow structure.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
2573 static inline int
2574 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2575 	       struct rte_flow_error *error)
2576 {
2577 	const struct mlx5_flow_driver_ops *fops;
2578 	enum mlx5_flow_drv_type type = flow->drv_type;
2579 
2580 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2581 	fops = flow_get_drv_ops(type);
2582 	return fops->apply(dev, flow, error);
2583 }
2584 
2585 /**
2586  * Flow driver remove API. This abstracts calling driver specific functions.
2587  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2588  * on device. All the resources of the flow should be freed by calling
2589  * flow_drv_destroy().
2590  *
2591  * @param[in] dev
2592  *   Pointer to Ethernet device.
2593  * @param[in, out] flow
2594  *   Pointer to flow structure.
2595  */
2596 static inline void
2597 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2598 {
2599 	const struct mlx5_flow_driver_ops *fops;
2600 	enum mlx5_flow_drv_type type = flow->drv_type;
2601 
2602 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2603 	fops = flow_get_drv_ops(type);
2604 	fops->remove(dev, flow);
2605 }
2606 
2607 /**
2608  * Flow driver destroy API. This abstracts calling driver specific functions.
2609  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2610  * on device and releases resources of the flow.
2611  *
2612  * @param[in] dev
2613  *   Pointer to Ethernet device.
2614  * @param[in, out] flow
2615  *   Pointer to flow structure.
2616  */
2617 static inline void
2618 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2619 {
2620 	const struct mlx5_flow_driver_ops *fops;
2621 	enum mlx5_flow_drv_type type = flow->drv_type;
2622 
2623 	flow_mreg_split_qrss_release(dev, flow);
2624 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2625 	fops = flow_get_drv_ops(type);
2626 	fops->destroy(dev, flow);
2627 }
2628 
2629 /**
2630  * Validate a flow supported by the NIC.
2631  *
2632  * @see rte_flow_validate()
2633  * @see rte_flow_ops
2634  */
2635 int
2636 mlx5_flow_validate(struct rte_eth_dev *dev,
2637 		   const struct rte_flow_attr *attr,
2638 		   const struct rte_flow_item items[],
2639 		   const struct rte_flow_action actions[],
2640 		   struct rte_flow_error *error)
2641 {
2642 	int ret;
2643 
2644 	ret = flow_drv_validate(dev, attr, items, actions, true, error);
2645 	if (ret < 0)
2646 		return ret;
2647 	return 0;
2648 }
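
/*
 * Illustrative application-side usage reaching the entry point above via
 * rte_flow_validate(); port_id and the pattern below are hypothetical:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *		printf("flow not supported: %s\n",
 *		       err.message ? err.message : "unknown");
 */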
2649 
2650 /**
2651  * Get RSS action from the action list.
2652  *
2653  * @param[in] actions
2654  *   Pointer to the list of actions.
2655  *
2656  * @return
 *   Pointer to the RSS action if it exists, NULL otherwise.
2658  */
static const struct rte_flow_action_rss *
2660 flow_get_rss_action(const struct rte_flow_action actions[])
2661 {
2662 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2663 		switch (actions->type) {
2664 		case RTE_FLOW_ACTION_TYPE_RSS:
2665 			return (const struct rte_flow_action_rss *)
2666 			       actions->conf;
2667 		default:
2668 			break;
2669 		}
2670 	}
2671 	return NULL;
2672 }
2673 
2674 static unsigned int
2675 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
2676 {
2677 	const struct rte_flow_item *item;
2678 	unsigned int has_vlan = 0;
2679 
2680 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2681 		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2682 			has_vlan = 1;
2683 			break;
2684 		}
2685 	}
2686 	if (has_vlan)
2687 		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2688 				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2689 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2690 			       MLX5_EXPANSION_ROOT_OUTER;
2691 }
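
/*
 * For example (hypothetical pattern): ETH / VLAN / IPV4 with outer RSS
 * (level < 2) expands from MLX5_EXPANSION_ROOT_ETH_VLAN, while the same
 * pattern with inner RSS (level 2) expands from
 * MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	unsigned int root = find_graph_root(pattern, 1);
 */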
2692 
2693 /**
2694  *  Get layer flags from the prefix flow.
2695  *
 *  Some flows may be split into several subflows: the prefix subflow gets the
 *  match items and the suffix subflow gets the actions.
 *  Some actions need the user-defined match item flags to get the details of
 *  the action.
 *  This function helps the suffix flow to get the item layer flags from the
 *  prefix subflow.
 *
 * @param[in] dev_flow
 *   Pointer to the created prefix subflow.
 *
 * @return
 *   The layers obtained from the prefix subflow.
2708  */
2709 static inline uint64_t
2710 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
2711 {
2712 	uint64_t layers = 0;
2713 
2714 	/*
	 * The layer bits could be cached in local variables, but the compiler
	 * usually optimizes such repeated accesses well enough.
	 * If there is no decap action, use the layers directly.
2718 	 */
2719 	if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
2720 		return dev_flow->handle->layers;
2721 	/* Convert L3 layers with decap action. */
2722 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2723 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2724 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2725 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2726 	/* Convert L4 layers with decap action.  */
2727 	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
2728 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
2729 	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
2730 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
2731 	return layers;
2732 }
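
/*
 * Example of the conversion above (hypothetical handle state): a prefix
 * subflow matching inner IPv4/UDP under a decap action reports
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP to the
 * suffix subflow, since after decapsulation the former inner headers are
 * the outermost ones.
 */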
2733 
2734 /**
2735  * Get metadata split action information.
2736  *
2737  * @param[in] actions
2738  *   Pointer to the list of actions.
 * @param[out] qrss
 *   Pointer to the return pointer, set to the QUEUE/RSS action if one is
 *   found, left untouched otherwise.
2744  * @param[out] encap_idx
2745  *   Pointer to the index of the encap action if exists, otherwise the last
2746  *   action index.
2747  *
2748  * @return
2749  *   Total number of actions.
2750  */
2751 static int
2752 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
2753 				       const struct rte_flow_action **qrss,
2754 				       int *encap_idx)
2755 {
2756 	const struct rte_flow_action_raw_encap *raw_encap;
2757 	int actions_n = 0;
2758 	int raw_decap_idx = -1;
2759 
2760 	*encap_idx = -1;
2761 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2762 		switch (actions->type) {
2763 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2764 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2765 			*encap_idx = actions_n;
2766 			break;
2767 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2768 			raw_decap_idx = actions_n;
2769 			break;
2770 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2771 			raw_encap = actions->conf;
2772 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2773 				*encap_idx = raw_decap_idx != -1 ?
2774 						      raw_decap_idx : actions_n;
2775 			break;
2776 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2777 		case RTE_FLOW_ACTION_TYPE_RSS:
2778 			*qrss = actions;
2779 			break;
2780 		default:
2781 			break;
2782 		}
2783 		actions_n++;
2784 	}
2785 	if (*encap_idx == -1)
2786 		*encap_idx = actions_n;
2787 	/* Count RTE_FLOW_ACTION_TYPE_END. */
2788 	return actions_n + 1;
2789 }
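
/*
 * Usage sketch (illustrative only): for a hypothetical action list
 * RAW_DECAP / RAW_ENCAP(size > decision size) / QUEUE / END the helper
 * reports 4 actions (END included), *qrss pointing at the QUEUE action
 * and *encap_idx = 0, the RAW_DECAP position:
 *
 *	const struct rte_flow_action *qrss = NULL;
 *	int encap_idx;
 *	int actions_n = flow_parse_metadata_split_actions_info(actions,
 *							       &qrss,
 *							       &encap_idx);
 */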
2790 
2791 /**
2792  * Check meter action from the action list.
2793  *
2794  * @param[in] actions
2795  *   Pointer to the list of actions.
2796  * @param[out] mtr
2797  *   Pointer to the meter exist flag.
2798  *
2799  * @return
2800  *   Total number of actions.
2801  */
2802 static int
2803 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
2804 {
2805 	int actions_n = 0;
2806 
2807 	MLX5_ASSERT(mtr);
2808 	*mtr = 0;
2809 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2810 		switch (actions->type) {
2811 		case RTE_FLOW_ACTION_TYPE_METER:
2812 			*mtr = 1;
2813 			break;
2814 		default:
2815 			break;
2816 		}
2817 		actions_n++;
2818 	}
2819 	/* Count RTE_FLOW_ACTION_TYPE_END. */
2820 	return actions_n + 1;
2821 }
2822 
2823 /**
 * Check if the flow should be split due to hairpin.
 * The reason for the split is that in current HW we can't
 * support encap on Rx, so if a flow has encap we move it
 * to Tx.
2828  *
2829  * @param dev
2830  *   Pointer to Ethernet device.
2831  * @param[in] attr
2832  *   Flow rule attributes.
2833  * @param[in] actions
2834  *   Associated actions (list terminated by the END action).
2835  *
2836  * @return
2837  *   > 0 the number of actions and the flow should be split,
2838  *   0 when no split required.
2839  */
2840 static int
2841 flow_check_hairpin_split(struct rte_eth_dev *dev,
2842 			 const struct rte_flow_attr *attr,
2843 			 const struct rte_flow_action actions[])
2844 {
2845 	int queue_action = 0;
2846 	int action_n = 0;
2847 	int encap = 0;
2848 	const struct rte_flow_action_queue *queue;
2849 	const struct rte_flow_action_rss *rss;
2850 	const struct rte_flow_action_raw_encap *raw_encap;
2851 
2852 	if (!attr->ingress)
2853 		return 0;
2854 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2855 		switch (actions->type) {
2856 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2857 			queue = actions->conf;
2858 			if (queue == NULL)
2859 				return 0;
2860 			if (mlx5_rxq_get_type(dev, queue->index) !=
2861 			    MLX5_RXQ_TYPE_HAIRPIN)
2862 				return 0;
2863 			queue_action = 1;
2864 			action_n++;
2865 			break;
2866 		case RTE_FLOW_ACTION_TYPE_RSS:
2867 			rss = actions->conf;
2868 			if (rss == NULL || rss->queue_num == 0)
2869 				return 0;
2870 			if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
2871 			    MLX5_RXQ_TYPE_HAIRPIN)
2872 				return 0;
2873 			queue_action = 1;
2874 			action_n++;
2875 			break;
2876 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2877 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2878 			encap = 1;
2879 			action_n++;
2880 			break;
2881 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2882 			raw_encap = actions->conf;
2883 			if (raw_encap->size >
2884 			    (sizeof(struct rte_flow_item_eth) +
2885 			     sizeof(struct rte_flow_item_ipv4)))
2886 				encap = 1;
2887 			action_n++;
2888 			break;
2889 		default:
2890 			action_n++;
2891 			break;
2892 		}
2893 	}
2894 	if (encap == 1 && queue_action)
2895 		return action_n;
2896 	return 0;
2897 }
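
/*
 * Illustrative only: QUEUE targeting a hairpin Rx queue combined with
 * VXLAN_ENCAP makes the helper above return the action count, requesting
 * the hairpin split; the same actions targeting a regular Rx queue (or an
 * action list without encap) return 0 and the flow stays unsplit.
 */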
2898 
2899 /* Declare flow create/destroy prototype in advance. */
2900 static struct rte_flow *
2901 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2902 		 const struct rte_flow_attr *attr,
2903 		 const struct rte_flow_item items[],
2904 		 const struct rte_flow_action actions[],
2905 		 bool external, struct rte_flow_error *error);
2906 
2907 static void
2908 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2909 		  struct rte_flow *flow);
2910 
2911 /**
2912  * Add a flow of copying flow metadata registers in RX_CP_TBL.
2913  *
2914  * As mark_id is unique, if there's already a registered flow for the mark_id,
2915  * return by increasing the reference counter of the resource. Otherwise, create
2916  * the resource (mcp_res) and flow.
2917  *
2918  * Flow looks like,
2919  *   - If ingress port is ANY and reg_c[1] is mark_id,
2920  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
2921  *
2922  * For default flow (zero mark_id), flow is like,
2923  *   - If ingress port is ANY,
2924  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
2925  *
2926  * @param dev
2927  *   Pointer to Ethernet device.
2928  * @param mark_id
2929  *   ID of MARK action, zero means default flow for META.
2930  * @param[out] error
2931  *   Perform verbose error reporting if not NULL.
2932  *
2933  * @return
2934  *   Associated resource on success, NULL otherwise and rte_errno is set.
2935  */
2936 static struct mlx5_flow_mreg_copy_resource *
2937 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
2938 			  struct rte_flow_error *error)
2939 {
2940 	struct mlx5_priv *priv = dev->data->dev_private;
2941 	struct rte_flow_attr attr = {
2942 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
2943 		.ingress = 1,
2944 	};
2945 	struct mlx5_rte_flow_item_tag tag_spec = {
2946 		.data = mark_id,
2947 	};
2948 	struct rte_flow_item items[] = {
2949 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
2950 	};
2951 	struct rte_flow_action_mark ftag = {
2952 		.id = mark_id,
2953 	};
2954 	struct mlx5_flow_action_copy_mreg cp_mreg = {
2955 		.dst = REG_B,
2956 		.src = 0,
2957 	};
2958 	struct rte_flow_action_jump jump = {
2959 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
2960 	};
2961 	struct rte_flow_action actions[] = {
2962 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
2963 	};
2964 	struct mlx5_flow_mreg_copy_resource *mcp_res;
2965 	uint32_t idx = 0;
2966 	int ret;
2967 
	/* Fill the register fields in the flow. */
2969 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2970 	if (ret < 0)
2971 		return NULL;
2972 	tag_spec.id = ret;
2973 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
2974 	if (ret < 0)
2975 		return NULL;
2976 	cp_mreg.src = ret;
2977 	/* Check if already registered. */
2978 	MLX5_ASSERT(priv->mreg_cp_tbl);
2979 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
2980 	if (mcp_res) {
2981 		/* For non-default rule. */
2982 		if (mark_id != MLX5_DEFAULT_COPY_ID)
2983 			mcp_res->refcnt++;
2984 		MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
2985 			    mcp_res->refcnt == 1);
2986 		return mcp_res;
2987 	}
2988 	/* Provide the full width of FLAG specific value. */
2989 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
2990 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
2991 	/* Build a new flow. */
2992 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
2993 		items[0] = (struct rte_flow_item){
2994 			.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
2995 			.spec = &tag_spec,
2996 		};
2997 		items[1] = (struct rte_flow_item){
2998 			.type = RTE_FLOW_ITEM_TYPE_END,
2999 		};
3000 		actions[0] = (struct rte_flow_action){
3001 			.type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3002 			.conf = &ftag,
3003 		};
3004 		actions[1] = (struct rte_flow_action){
3005 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3006 			.conf = &cp_mreg,
3007 		};
3008 		actions[2] = (struct rte_flow_action){
3009 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3010 			.conf = &jump,
3011 		};
3012 		actions[3] = (struct rte_flow_action){
3013 			.type = RTE_FLOW_ACTION_TYPE_END,
3014 		};
3015 	} else {
3016 		/* Default rule, wildcard match. */
3017 		attr.priority = MLX5_FLOW_PRIO_RSVD;
3018 		items[0] = (struct rte_flow_item){
3019 			.type = RTE_FLOW_ITEM_TYPE_END,
3020 		};
3021 		actions[0] = (struct rte_flow_action){
3022 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3023 			.conf = &cp_mreg,
3024 		};
3025 		actions[1] = (struct rte_flow_action){
3026 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3027 			.conf = &jump,
3028 		};
3029 		actions[2] = (struct rte_flow_action){
3030 			.type = RTE_FLOW_ACTION_TYPE_END,
3031 		};
3032 	}
3033 	/* Build a new entry. */
3034 	mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3035 	if (!mcp_res) {
3036 		rte_errno = ENOMEM;
3037 		return NULL;
3038 	}
3039 	mcp_res->idx = idx;
3040 	/*
	 * The copy flows are not included in any list. These
	 * ones are referenced from other flows and cannot
	 * be applied, removed or deleted in arbitrary order
	 * by list traversing.
3045 	 */
3046 	mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
3047 					 actions, false, error);
3048 	if (!mcp_res->flow)
3049 		goto error;
3050 	mcp_res->refcnt++;
3051 	mcp_res->hlist_ent.key = mark_id;
3052 	ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
3053 				&mcp_res->hlist_ent);
3054 	MLX5_ASSERT(!ret);
3055 	if (ret)
3056 		goto error;
3057 	return mcp_res;
3058 error:
3059 	if (mcp_res->flow)
3060 		flow_list_destroy(dev, NULL, mcp_res->flow);
3061 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3062 	return NULL;
3063 }
3064 
3065 /**
3066  * Release flow in RX_CP_TBL.
3067  *
3068  * @param dev
3069  *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3072  */
3073 static void
3074 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3075 			  struct rte_flow *flow)
3076 {
3077 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3078 	struct mlx5_priv *priv = dev->data->dev_private;
3079 
3080 	if (!flow->rix_mreg_copy)
3081 		return;
3082 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3083 				 flow->rix_mreg_copy);
3084 	if (!mcp_res || !priv->mreg_cp_tbl)
3085 		return;
3086 	if (flow->copy_applied) {
3087 		MLX5_ASSERT(mcp_res->appcnt);
3088 		flow->copy_applied = 0;
3089 		--mcp_res->appcnt;
3090 		if (!mcp_res->appcnt)
3091 			flow_drv_remove(dev, mcp_res->flow);
3092 	}
3093 	/*
3094 	 * We do not check availability of metadata registers here,
3095 	 * because copy resources are not allocated in this case.
3096 	 */
3097 	if (--mcp_res->refcnt)
3098 		return;
3099 	MLX5_ASSERT(mcp_res->flow);
3100 	flow_list_destroy(dev, NULL, mcp_res->flow);
3101 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3102 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3103 	flow->rix_mreg_copy = 0;
3104 }
3105 
3106 /**
3107  * Start flow in RX_CP_TBL.
3108  *
3109  * @param dev
3110  *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3113  *
3114  * @return
3115  *   0 on success, a negative errno value otherwise and rte_errno is set.
3116  */
3117 static int
3118 flow_mreg_start_copy_action(struct rte_eth_dev *dev,
3119 			    struct rte_flow *flow)
3120 {
3121 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3122 	struct mlx5_priv *priv = dev->data->dev_private;
3123 	int ret;
3124 
3125 	if (!flow->rix_mreg_copy || flow->copy_applied)
3126 		return 0;
3127 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3128 				 flow->rix_mreg_copy);
3129 	if (!mcp_res)
3130 		return 0;
3131 	if (!mcp_res->appcnt) {
3132 		ret = flow_drv_apply(dev, mcp_res->flow, NULL);
3133 		if (ret)
3134 			return ret;
3135 	}
3136 	++mcp_res->appcnt;
3137 	flow->copy_applied = 1;
3138 	return 0;
3139 }
3140 
3141 /**
3142  * Stop flow in RX_CP_TBL.
3143  *
3144  * @param dev
3145  *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3148  */
3149 static void
3150 flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
3151 			   struct rte_flow *flow)
3152 {
3153 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3154 	struct mlx5_priv *priv = dev->data->dev_private;
3155 
3156 	if (!flow->rix_mreg_copy || !flow->copy_applied)
3157 		return;
3158 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3159 				 flow->rix_mreg_copy);
3160 	if (!mcp_res)
3161 		return;
3162 	MLX5_ASSERT(mcp_res->appcnt);
3163 	--mcp_res->appcnt;
3164 	flow->copy_applied = 0;
3165 	if (!mcp_res->appcnt)
3166 		flow_drv_remove(dev, mcp_res->flow);
3167 }
3168 
3169 /**
3170  * Remove the default copy action from RX_CP_TBL.
3171  *
3172  * @param dev
3173  *   Pointer to Ethernet device.
3174  */
3175 static void
3176 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3177 {
3178 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3179 	struct mlx5_priv *priv = dev->data->dev_private;
3180 
3181 	/* Check if default flow is registered. */
3182 	if (!priv->mreg_cp_tbl)
3183 		return;
3184 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
3185 					    MLX5_DEFAULT_COPY_ID);
3186 	if (!mcp_res)
3187 		return;
3188 	MLX5_ASSERT(mcp_res->flow);
3189 	flow_list_destroy(dev, NULL, mcp_res->flow);
3190 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3191 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3192 }
3193 
3194 /**
 * Add the default copy action in RX_CP_TBL.
3196  *
3197  * @param dev
3198  *   Pointer to Ethernet device.
3199  * @param[out] error
3200  *   Perform verbose error reporting if not NULL.
3201  *
3202  * @return
3203  *   0 for success, negative value otherwise and rte_errno is set.
3204  */
3205 static int
3206 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3207 				  struct rte_flow_error *error)
3208 {
3209 	struct mlx5_priv *priv = dev->data->dev_private;
3210 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3211 
3212 	/* Check whether extensive metadata feature is engaged. */
3213 	if (!priv->config.dv_flow_en ||
3214 	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3215 	    !mlx5_flow_ext_mreg_supported(dev) ||
3216 	    !priv->sh->dv_regc0_mask)
3217 		return 0;
3218 	mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3219 	if (!mcp_res)
3220 		return -rte_errno;
3221 	return 0;
3222 }
3223 
3224 /**
3225  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3226  *
 * All the flows having a Q/RSS action should be split by
3228  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
3229  * performs the following,
3230  *   - CQE->flow_tag := reg_c[1] (MARK)
3231  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3232  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
3233  * but there should be a flow per each MARK ID set by MARK action.
3234  *
3235  * For the aforementioned reason, if there's a MARK action in flow's action
3236  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3237  * the MARK ID to CQE's flow_tag like,
3238  *   - If reg_c[1] is mark_id,
3239  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3240  *
3241  * For SET_META action which stores value in reg_c[0], as the destination is
3242  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3243  * MARK ID means the default flow. The default flow looks like,
3244  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3245  *
3246  * @param dev
3247  *   Pointer to Ethernet device.
3248  * @param flow
3249  *   Pointer to flow structure.
3250  * @param[in] actions
3251  *   Pointer to the list of actions.
3252  * @param[out] error
3253  *   Perform verbose error reporting if not NULL.
3254  *
3255  * @return
3256  *   0 on success, negative value otherwise and rte_errno is set.
3257  */
3258 static int
3259 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3260 			    struct rte_flow *flow,
3261 			    const struct rte_flow_action *actions,
3262 			    struct rte_flow_error *error)
3263 {
3264 	struct mlx5_priv *priv = dev->data->dev_private;
3265 	struct mlx5_dev_config *config = &priv->config;
3266 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3267 	const struct rte_flow_action_mark *mark;
3268 
3269 	/* Check whether extensive metadata feature is engaged. */
3270 	if (!config->dv_flow_en ||
3271 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3272 	    !mlx5_flow_ext_mreg_supported(dev) ||
3273 	    !priv->sh->dv_regc0_mask)
3274 		return 0;
3275 	/* Find MARK action. */
3276 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3277 		switch (actions->type) {
3278 		case RTE_FLOW_ACTION_TYPE_FLAG:
3279 			mcp_res = flow_mreg_add_copy_action
3280 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
3281 			if (!mcp_res)
3282 				return -rte_errno;
3283 			flow->rix_mreg_copy = mcp_res->idx;
3284 			if (dev->data->dev_started) {
3285 				mcp_res->appcnt++;
3286 				flow->copy_applied = 1;
3287 			}
3288 			return 0;
3289 		case RTE_FLOW_ACTION_TYPE_MARK:
3290 			mark = (const struct rte_flow_action_mark *)
3291 				actions->conf;
3292 			mcp_res =
3293 				flow_mreg_add_copy_action(dev, mark->id, error);
3294 			if (!mcp_res)
3295 				return -rte_errno;
3296 			flow->rix_mreg_copy = mcp_res->idx;
3297 			if (dev->data->dev_started) {
3298 				mcp_res->appcnt++;
3299 				flow->copy_applied = 1;
3300 			}
3301 			return 0;
3302 		default:
3303 			break;
3304 		}
3305 	}
3306 	return 0;
3307 }
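
/*
 * Illustrative only: a flow carrying MARK with id 0x1234 makes the helper
 * above add (or reference) the copy flow keyed by 0x1234 in
 * priv->mreg_cp_tbl so the Rx datapath sees the mark in CQE->flow_tag,
 * while a FLAG action maps to the MLX5_FLOW_MARK_DEFAULT key instead.
 */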
3308 
3309 #define MLX5_MAX_SPLIT_ACTIONS 24
3310 #define MLX5_MAX_SPLIT_ITEMS 24
3311 
3312 /**
3313  * Split the hairpin flow.
3314  * Since HW can't support encap on Rx we move the encap to Tx.
3315  * If the count action is after the encap then we also
 * move the count action. In this case the count will also measure
3317  * the outer bytes.
3318  *
3319  * @param dev
3320  *   Pointer to Ethernet device.
3321  * @param[in] actions
3322  *   Associated actions (list terminated by the END action).
3323  * @param[out] actions_rx
3324  *   Rx flow actions.
3325  * @param[out] actions_tx
 *   Tx flow actions.
3327  * @param[out] pattern_tx
3328  *   The pattern items for the Tx flow.
3329  * @param[out] flow_id
3330  *   The flow ID connected to this flow.
3331  *
3332  * @return
3333  *   0 on success.
3334  */
3335 static int
3336 flow_hairpin_split(struct rte_eth_dev *dev,
3337 		   const struct rte_flow_action actions[],
3338 		   struct rte_flow_action actions_rx[],
3339 		   struct rte_flow_action actions_tx[],
3340 		   struct rte_flow_item pattern_tx[],
3341 		   uint32_t *flow_id)
3342 {
3343 	struct mlx5_priv *priv = dev->data->dev_private;
3344 	const struct rte_flow_action_raw_encap *raw_encap;
3345 	const struct rte_flow_action_raw_decap *raw_decap;
3346 	struct mlx5_rte_flow_action_set_tag *set_tag;
3347 	struct rte_flow_action *tag_action;
3348 	struct mlx5_rte_flow_item_tag *tag_item;
3349 	struct rte_flow_item *item;
3350 	char *addr;
3351 	int encap = 0;
3352 
3353 	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
3354 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3355 		switch (actions->type) {
3356 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3357 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3358 			rte_memcpy(actions_tx, actions,
3359 			       sizeof(struct rte_flow_action));
3360 			actions_tx++;
3361 			break;
3362 		case RTE_FLOW_ACTION_TYPE_COUNT:
3363 			if (encap) {
3364 				rte_memcpy(actions_tx, actions,
3365 					   sizeof(struct rte_flow_action));
3366 				actions_tx++;
3367 			} else {
3368 				rte_memcpy(actions_rx, actions,
3369 					   sizeof(struct rte_flow_action));
3370 				actions_rx++;
3371 			}
3372 			break;
3373 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3374 			raw_encap = actions->conf;
3375 			if (raw_encap->size >
3376 			    (sizeof(struct rte_flow_item_eth) +
3377 			     sizeof(struct rte_flow_item_ipv4))) {
3378 				memcpy(actions_tx, actions,
3379 				       sizeof(struct rte_flow_action));
3380 				actions_tx++;
3381 				encap = 1;
3382 			} else {
3383 				rte_memcpy(actions_rx, actions,
3384 					   sizeof(struct rte_flow_action));
3385 				actions_rx++;
3386 			}
3387 			break;
3388 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3389 			raw_decap = actions->conf;
3390 			if (raw_decap->size <
3391 			    (sizeof(struct rte_flow_item_eth) +
3392 			     sizeof(struct rte_flow_item_ipv4))) {
3393 				rte_memcpy(actions_tx, actions,
3394 					   sizeof(struct rte_flow_action));
3395 				actions_tx++;
3396 			} else {
3397 				rte_memcpy(actions_rx, actions,
3398 					   sizeof(struct rte_flow_action));
3399 				actions_rx++;
3400 			}
3401 			break;
3402 		default:
3403 			rte_memcpy(actions_rx, actions,
3404 				   sizeof(struct rte_flow_action));
3405 			actions_rx++;
3406 			break;
3407 		}
3408 	}
3409 	/* Add set meta action and end action for the Rx flow. */
3410 	tag_action = actions_rx;
3411 	tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3412 	actions_rx++;
3413 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
3414 	actions_rx++;
3415 	set_tag = (void *)actions_rx;
3416 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
3417 	MLX5_ASSERT(set_tag->id > REG_NONE);
3418 	set_tag->data = *flow_id;
3419 	tag_action->conf = set_tag;
3420 	/* Create Tx item list. */
3421 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
3422 	addr = (void *)&pattern_tx[2];
3423 	item = pattern_tx;
3424 	item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3425 	tag_item = (void *)addr;
3426 	tag_item->data = *flow_id;
3427 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
3428 	MLX5_ASSERT(tag_item->id > REG_NONE);
3429 	item->spec = tag_item;
3430 	addr += sizeof(struct mlx5_rte_flow_item_tag);
3431 	tag_item = (void *)addr;
3432 	tag_item->data = UINT32_MAX;
3433 	tag_item->id = UINT16_MAX;
3434 	item->mask = tag_item;
3435 	addr += sizeof(struct mlx5_rte_flow_item_tag);
3436 	item->last = NULL;
3437 	item++;
3438 	item->type = RTE_FLOW_ITEM_TYPE_END;
3439 	return 0;
3440 }
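
/*
 * Usage sketch (illustrative, not part of the driver): laying out the
 * buffers for flow_hairpin_split(). The unions oversize the arrays because
 * the routine stores the internal SET_TAG configuration right behind the
 * action list, as flow_list_create() does further below.
 */
static int
flow_hairpin_split_usage(struct rte_eth_dev *dev,
			 const struct rte_flow_action actions[])
{
	union {
		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
		uint8_t buffer[2048];
	} actions_rx, actions_tx;
	union {
		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
		uint8_t buffer[2048];
	} pattern_tx;
	uint32_t flow_id = 0;

	/* Rx keeps most actions; encap (and a following count) go to Tx. */
	return flow_hairpin_split(dev, actions, actions_rx.actions,
				  actions_tx.actions, pattern_tx.items,
				  &flow_id);
}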
3441 
3442 /**
3443  * The last stage of splitting chain, just creates the subflow
3444  * without any modification.
3445  *
3446  * @param[in] dev
3447  *   Pointer to Ethernet device.
3448  * @param[in] flow
3449  *   Parent flow structure pointer.
3450  * @param[in, out] sub_flow
3451  *   Pointer to return the created subflow, may be NULL.
3452  * @param[in] prefix_layers
3453  *   Prefix subflow layers, may be 0.
3454  * @param[in] attr
3455  *   Flow rule attributes.
3456  * @param[in] items
3457  *   Pattern specification (list terminated by the END pattern item).
3458  * @param[in] actions
3459  *   Associated actions (list terminated by the END action).
3460  * @param[in] external
3461  *   This flow rule is created by a request external to the PMD.
3462  * @param[out] error
3463  *   Perform verbose error reporting if not NULL.
3464  * @return
3465  *   0 on success, negative value otherwise
3466  */
3467 static int
3468 flow_create_split_inner(struct rte_eth_dev *dev,
3469 			struct rte_flow *flow,
3470 			struct mlx5_flow **sub_flow,
3471 			uint64_t prefix_layers,
3472 			const struct rte_flow_attr *attr,
3473 			const struct rte_flow_item items[],
3474 			const struct rte_flow_action actions[],
3475 			bool external, struct rte_flow_error *error)
3476 {
3477 	struct mlx5_flow *dev_flow;
3478 
3479 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
3480 	if (!dev_flow)
3481 		return -rte_errno;
3482 	dev_flow->flow = flow;
3483 	dev_flow->external = external;
3484 	/* Subflow object was created, we must include it in the list. */
3485 	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
3486 		      dev_flow->handle, next);
3487 	/*
3488 	 * If dev_flow is one of the suffix flows, some actions in the suffix
3489 	 * flow may need user-defined item layer flags.
3490 	 */
3491 	if (prefix_layers)
3492 		dev_flow->handle->layers = prefix_layers;
3493 	if (sub_flow)
3494 		*sub_flow = dev_flow;
3495 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
3496 }
3497 
3498 /**
3499  * Split the meter flow.
3500  *
3501  * As the meter flow will be split to three sub flows, the actions
3502  * other than the meter action make sense only if the meter accepts
3503  * the packet. If the packet needs to be dropped, no additional
3504  * actions should be taken.
3505  *
3506  * One kind of special action which decapsulates the L3 tunnel
3507  * header will be put in the prefix sub flow, so as not to take the
3508  * L3 tunnel header into account.
3509  *
3510  * @param dev
3511  *   Pointer to Ethernet device.
3512  * @param[in] items
3513  *   Pattern specification (list terminated by the END pattern item).
3514  * @param[out] sfx_items
3515  *   Suffix flow match items (list terminated by the END pattern item).
3516  * @param[in] actions
3517  *   Associated actions (list terminated by the END action).
3518  * @param[out] actions_sfx
3519  *   Suffix flow actions.
3520  * @param[out] actions_pre
3521  *   Prefix flow actions.
3522  *
3523  * @return
3524  *   The allocated tag ID on success, 0 otherwise.
3529  */
3530 static int
3531 flow_meter_split_prep(struct rte_eth_dev *dev,
3532 		 const struct rte_flow_item items[],
3533 		 struct rte_flow_item sfx_items[],
3534 		 const struct rte_flow_action actions[],
3535 		 struct rte_flow_action actions_sfx[],
3536 		 struct rte_flow_action actions_pre[])
3537 {
3538 	struct rte_flow_action *tag_action = NULL;
3539 	struct rte_flow_item *tag_item;
3540 	struct mlx5_rte_flow_action_set_tag *set_tag;
3541 	struct rte_flow_error error;
3542 	const struct rte_flow_action_raw_encap *raw_encap;
3543 	const struct rte_flow_action_raw_decap *raw_decap;
3544 	struct mlx5_rte_flow_item_tag *tag_spec;
3545 	struct mlx5_rte_flow_item_tag *tag_mask;
3546 	uint32_t tag_id;
3547 	bool copy_vlan = false;
3548 
3549 	/* Prepare the actions for prefix and suffix flow. */
3550 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3551 		struct rte_flow_action **action_cur = NULL;
3552 
3553 		switch (actions->type) {
3554 		case RTE_FLOW_ACTION_TYPE_METER:
3555 			/* Add the extra tag action first. */
3556 			tag_action = actions_pre;
3557 			tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3558 			actions_pre++;
3559 			action_cur = &actions_pre;
3560 			break;
3561 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3562 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3563 			action_cur = &actions_pre;
3564 			break;
3565 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3566 			raw_encap = actions->conf;
3567 			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
3568 				action_cur = &actions_pre;
3569 			break;
3570 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3571 			raw_decap = actions->conf;
3572 			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3573 				action_cur = &actions_pre;
3574 			break;
3575 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3576 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3577 			copy_vlan = true;
3578 			break;
3579 		default:
3580 			break;
3581 		}
3582 		if (!action_cur)
3583 			action_cur = &actions_sfx;
3584 		memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
3585 		(*action_cur)++;
3586 	}
3587 	/* Add end action to the actions. */
3588 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
3589 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
3590 	actions_pre++;
3591 	/* Set the tag. */
3592 	set_tag = (void *)actions_pre;
3593 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3594 	/*
3595 	 * Get the ID from the qrss_pool so that Q/RSS shares the ID with meter.
3596 	 */
3597 	tag_id = flow_qrss_get_id(dev);
3598 	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
3599 	MLX5_ASSERT(tag_action);
3600 	tag_action->conf = set_tag;
3601 	/* Prepare the suffix subflow items. */
3602 	tag_item = sfx_items++;
3603 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3604 		int item_type = items->type;
3605 
3606 		switch (item_type) {
3607 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
3608 			memcpy(sfx_items, items, sizeof(*sfx_items));
3609 			sfx_items++;
3610 			break;
3611 		case RTE_FLOW_ITEM_TYPE_VLAN:
3612 			if (copy_vlan) {
3613 				memcpy(sfx_items, items, sizeof(*sfx_items));
3614 				 * Convert to an internal match item; it is
3615 				 * used for VLAN push and set VID.
3616 				 * for vlan push and set vid.
3617 				 */
3618 				sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
3619 				sfx_items++;
3620 			}
3621 			break;
3622 		default:
3623 			break;
3624 		}
3625 	}
3626 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
3627 	sfx_items++;
3628 	tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
3629 	tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
3630 	tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3631 	tag_mask = tag_spec + 1;
3632 	tag_mask->data = 0xffffff00;
3633 	tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3634 	tag_item->spec = tag_spec;
3635 	tag_item->last = NULL;
3636 	tag_item->mask = tag_mask;
3637 	return tag_id;
3638 }
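
/*
 * Worked example (illustrative): for a flow with
 *     actions = [METER, QUEUE, END]
 * flow_meter_split_prep() produces roughly:
 *     actions_pre = [TAG(data = tag_id << MLX5_MTR_COLOR_BITS), METER, END]
 *     actions_sfx = [QUEUE, END]
 *     sfx_items   = [TAG(spec data = tag_id << MLX5_MTR_COLOR_BITS,
 *                        mask data = 0xffffff00), END]
 * so the suffix flow matches only packets that passed the prefix meter
 * and carry the allocated tag ID.
 */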
3639 
3640 /**
3641  * Split action list having QUEUE/RSS for metadata register copy.
3642  *
3643  * Once Q/RSS action is detected in user's action list, the flow action
3644  * should be split in order to copy metadata registers, which will happen in
3645  * RX_CP_TBL like,
3646  *   - CQE->flow_tag := reg_c[1] (MARK)
3647  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3648  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
3649  * This is because the last action of each flow must be a terminal action
3650  * (QUEUE, RSS or DROP).
3651  *
3652  * A flow ID must be allocated to identify actions in the RX_ACT_TBL, and it
3653  * is stored and kept in the mlx5_flow structure for each sub_flow.
3654  *
3655  * The Q/RSS action is replaced with,
3656  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
3657  * And the following JUMP action is added at the end,
3658  *   - JUMP, to RX_CP_TBL.
3659  *
3660  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL
3661  * by the flow_create_split_metadata() routine. The flow will look like,
3662  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
3663  *
3664  * @param dev
3665  *   Pointer to Ethernet device.
3666  * @param[out] split_actions
3667  *   Pointer to store split actions to jump to CP_TBL.
3668  * @param[in] actions
3669  *   Pointer to the list of original flow actions.
3670  * @param[in] qrss
3671  *   Pointer to the Q/RSS action.
3672  * @param[in] actions_n
3673  *   Number of original actions.
3674  * @param[out] error
3675  *   Perform verbose error reporting if not NULL.
3676  *
3677  * @return
3678  *   non-zero unique flow_id on success, otherwise 0 and
3679  *   error/rte_error are set.
3680  */
3681 static uint32_t
3682 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
3683 			  struct rte_flow_action *split_actions,
3684 			  const struct rte_flow_action *actions,
3685 			  const struct rte_flow_action *qrss,
3686 			  int actions_n, struct rte_flow_error *error)
3687 {
3688 	struct mlx5_rte_flow_action_set_tag *set_tag;
3689 	struct rte_flow_action_jump *jump;
3690 	const int qrss_idx = qrss - actions;
3691 	uint32_t flow_id = 0;
3692 	int ret = 0;
3693 
3694 	/*
3695 	 * Given actions will be split
3696 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
3697 	 * - Add jump to mreg CP_TBL.
3698 	 * As a result, there will be one more action.
3699 	 */
3700 	++actions_n;
3701 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
3702 	set_tag = (void *)(split_actions + actions_n);
3703 	/*
3704 	 * If the tag action is not set to VOID (meaning we are not the meter
3705 	 * suffix flow), add the tag action; the meter suffix flow already
3706 	 * has the tag added.
3707 	 */
3708 	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
3709 		/*
3710 		 * Allocate the new subflow ID. This one is unique within
3711 		 * device and not shared with representors. Otherwise,
3712 		 * we would have to resolve multi-thread access synch
3713 		 * issue. Each flow on the shared device is appended
3714 		 * with source vport identifier, so the resulting
3715 		 * flows will be unique in the shared (by master and
3716 		 * representors) domain even if they have coinciding
3717 		 * IDs.
3718 		 */
3719 		flow_id = flow_qrss_get_id(dev);
3720 		if (!flow_id)
3721 			return rte_flow_error_set(error, ENOMEM,
3722 						  RTE_FLOW_ERROR_TYPE_ACTION,
3723 						  NULL, "can't allocate id "
3724 						  "for split Q/RSS subflow");
3725 		/* Internal SET_TAG action to set flow ID. */
3726 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
3727 			.data = flow_id,
3728 		};
3729 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
3730 		if (ret < 0)
3731 			return ret;
3732 		set_tag->id = ret;
3733 		/* Construct new actions array. */
3734 		/* Replace QUEUE/RSS action. */
3735 		split_actions[qrss_idx] = (struct rte_flow_action){
3736 			.type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
3737 			.conf = set_tag,
3738 		};
3739 	}
3740 	/* JUMP action to jump to mreg copy table (CP_TBL). */
3741 	jump = (void *)(set_tag + 1);
3742 	*jump = (struct rte_flow_action_jump){
3743 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3744 	};
3745 	split_actions[actions_n - 2] = (struct rte_flow_action){
3746 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
3747 		.conf = jump,
3748 	};
3749 	split_actions[actions_n - 1] = (struct rte_flow_action){
3750 		.type = RTE_FLOW_ACTION_TYPE_END,
3751 	};
3752 	return flow_id;
3753 }
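
/*
 * Worked example (illustrative): for original actions
 *     [MARK, RSS, END]
 * the prepared split_actions become
 *     [MARK, SET_TAG(reg_c[2] := flow_id), JUMP(RX_CP_TBL), END]
 * The RSS itself is re-created later in RX_ACT_TBL, matching on the
 * internal TAG item carrying the same flow_id (see
 * flow_create_split_metadata() below).
 */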
3754 
3755 /**
3756  * Extend the given action list for Tx metadata copy.
3757  *
3758  * Copy the given action list to the ext_actions and add flow metadata register
3759  * copy action in order to copy reg_a set by WQE to reg_c[0].
3760  *
3761  * @param[out] ext_actions
3762  *   Pointer to the extended action list.
3763  * @param[in] actions
3764  *   Pointer to the list of actions.
3765  * @param[in] actions_n
3766  *   Number of actions in the list.
3767  * @param[out] error
3768  *   Perform verbose error reporting if not NULL.
3769  * @param[in] encap_idx
3770  *   The encap action index.
3771  *
3772  * @return
3773  *   0 on success, negative value otherwise
3774  */
3775 static int
3776 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
3777 		       struct rte_flow_action *ext_actions,
3778 		       const struct rte_flow_action *actions,
3779 		       int actions_n, struct rte_flow_error *error,
3780 		       int encap_idx)
3781 {
3782 	struct mlx5_flow_action_copy_mreg *cp_mreg =
3783 		(struct mlx5_flow_action_copy_mreg *)
3784 			(ext_actions + actions_n + 1);
3785 	int ret;
3786 
3787 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3788 	if (ret < 0)
3789 		return ret;
3790 	cp_mreg->dst = ret;
3791 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
3792 	if (ret < 0)
3793 		return ret;
3794 	cp_mreg->src = ret;
3795 	if (encap_idx != 0)
3796 		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
3797 	if (encap_idx == actions_n - 1) {
3798 		ext_actions[actions_n - 1] = (struct rte_flow_action){
3799 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3800 			.conf = cp_mreg,
3801 		};
3802 		ext_actions[actions_n] = (struct rte_flow_action){
3803 			.type = RTE_FLOW_ACTION_TYPE_END,
3804 		};
3805 	} else {
3806 		ext_actions[encap_idx] = (struct rte_flow_action){
3807 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3808 			.conf = cp_mreg,
3809 		};
3810 		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
3811 				sizeof(*ext_actions) * (actions_n - encap_idx));
3812 	}
3813 	return 0;
3814 }
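
/*
 * Worked example (illustrative): on NIC Tx with
 *     actions = [RAW_ENCAP, END]        (encap_idx = 0)
 * the extended list becomes
 *     [COPY_MREG(reg_a -> reg_c[0]), RAW_ENCAP, END]
 * i.e. the metadata register copy happens before the packet headers are
 * encapsulated. Without an encap action the copy is simply placed right
 * before the END action.
 */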
3815 
3816 /**
3817  * The splitting for metadata feature.
3818  *
3819  * - Q/RSS action on NIC Rx should be split in order to pass by
3820  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
3821  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
3822  *
3823  * - All the actions on NIC Tx should have a mreg copy action to
3824  *   copy reg_a from WQE to reg_c[0].
3825  *
3826  * @param dev
3827  *   Pointer to Ethernet device.
3828  * @param[in] flow
3829  *   Parent flow structure pointer.
3830  * @param[in] prefix_layers
3831  *   Prefix flow layer flags.
3832  * @param[in] attr
3833  *   Flow rule attributes.
3834  * @param[in] items
3835  *   Pattern specification (list terminated by the END pattern item).
3836  * @param[in] actions
3837  *   Associated actions (list terminated by the END action).
3838  * @param[in] external
3839  *   This flow rule is created by a request external to the PMD.
3840  * @param[out] error
3841  *   Perform verbose error reporting if not NULL.
3842  * @return
3843  *   0 on success, negative value otherwise
3844  */
3845 static int
3846 flow_create_split_metadata(struct rte_eth_dev *dev,
3847 			   struct rte_flow *flow,
3848 			   uint64_t prefix_layers,
3849 			   const struct rte_flow_attr *attr,
3850 			   const struct rte_flow_item items[],
3851 			   const struct rte_flow_action actions[],
3852 			   bool external, struct rte_flow_error *error)
3853 {
3854 	struct mlx5_priv *priv = dev->data->dev_private;
3855 	struct mlx5_dev_config *config = &priv->config;
3856 	const struct rte_flow_action *qrss = NULL;
3857 	struct rte_flow_action *ext_actions = NULL;
3858 	struct mlx5_flow *dev_flow = NULL;
3859 	uint32_t qrss_id = 0;
3860 	int mtr_sfx = 0;
3861 	size_t act_size;
3862 	int actions_n;
3863 	int encap_idx;
3864 	int ret;
3865 
3866 	/* Check whether extensive metadata feature is engaged. */
3867 	if (!config->dv_flow_en ||
3868 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3869 	    !mlx5_flow_ext_mreg_supported(dev))
3870 		return flow_create_split_inner(dev, flow, NULL, prefix_layers,
3871 					       attr, items, actions, external,
3872 					       error);
3873 	actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
3874 							   &encap_idx);
3875 	if (qrss) {
3876 		/* Exclude hairpin flows from splitting. */
3877 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3878 			const struct rte_flow_action_queue *queue;
3879 
3880 			queue = qrss->conf;
3881 			if (mlx5_rxq_get_type(dev, queue->index) ==
3882 			    MLX5_RXQ_TYPE_HAIRPIN)
3883 				qrss = NULL;
3884 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
3885 			const struct rte_flow_action_rss *rss;
3886 
3887 			rss = qrss->conf;
3888 			if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
3889 			    MLX5_RXQ_TYPE_HAIRPIN)
3890 				qrss = NULL;
3891 		}
3892 	}
3893 	if (qrss) {
3894 		/* Check if it is in meter suffix table. */
3895 		mtr_sfx = attr->group == (attr->transfer ?
3896 			  (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
3897 			  MLX5_FLOW_TABLE_LEVEL_SUFFIX);
3898 		/*
3899 		 * Q/RSS action on NIC Rx should be split in order to pass by
3900 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
3901 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
3902 		 */
3903 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3904 			   sizeof(struct rte_flow_action_set_tag) +
3905 			   sizeof(struct rte_flow_action_jump);
3906 		ext_actions = rte_zmalloc(__func__, act_size, 0);
3907 		if (!ext_actions)
3908 			return rte_flow_error_set(error, ENOMEM,
3909 						  RTE_FLOW_ERROR_TYPE_ACTION,
3910 						  NULL, "no memory to split "
3911 						  "metadata flow");
3912 		/*
3913 		 * If we are the meter suffix flow, the tag already exists.
3914 		 * Set the tag action to VOID.
3915 		 */
3916 		if (mtr_sfx)
3917 			ext_actions[qrss - actions].type =
3918 						RTE_FLOW_ACTION_TYPE_VOID;
3919 		else
3920 			ext_actions[qrss - actions].type =
3921 						MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3922 		/*
3923 		 * Create the new actions list with the Q/RSS action removed
3924 		 * and set tag and jump to the register copy table
3925 		 * (RX_CP_TBL) appended. The unique tag ID must be preallocated
3926 		 * here, because it is needed for the set tag action.
3927 		 */
3928 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
3929 						    qrss, actions_n, error);
3930 		if (!mtr_sfx && !qrss_id) {
3931 			ret = -rte_errno;
3932 			goto exit;
3933 		}
3934 	} else if (attr->egress && !attr->transfer) {
3935 		/*
3936 		 * All the actions on NIC Tx should have a metadata register
3937 		 * copy action to copy reg_a from WQE to reg_c[meta].
3938 		 */
3939 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3940 			   sizeof(struct mlx5_flow_action_copy_mreg);
3941 		ext_actions = rte_zmalloc(__func__, act_size, 0);
3942 		if (!ext_actions)
3943 			return rte_flow_error_set(error, ENOMEM,
3944 						  RTE_FLOW_ERROR_TYPE_ACTION,
3945 						  NULL, "no memory to split "
3946 						  "metadata flow");
3947 		/* Create the action list appended with copy register. */
3948 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
3949 					     actions_n, error, encap_idx);
3950 		if (ret < 0)
3951 			goto exit;
3952 	}
3953 	/* Add the unmodified original or prefix subflow. */
3954 	ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
3955 				      items, ext_actions ? ext_actions :
3956 				      actions, external, error);
3957 	if (ret < 0)
3958 		goto exit;
3959 	MLX5_ASSERT(dev_flow);
3960 	if (qrss) {
3961 		const struct rte_flow_attr q_attr = {
3962 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3963 			.ingress = 1,
3964 		};
3965 		/* Internal PMD action to set register. */
3966 		struct mlx5_rte_flow_item_tag q_tag_spec = {
3967 			.data = qrss_id,
3968 			.id = 0,
3969 		};
3970 		struct rte_flow_item q_items[] = {
3971 			{
3972 				.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3973 				.spec = &q_tag_spec,
3974 				.last = NULL,
3975 				.mask = NULL,
3976 			},
3977 			{
3978 				.type = RTE_FLOW_ITEM_TYPE_END,
3979 			},
3980 		};
3981 		struct rte_flow_action q_actions[] = {
3982 			{
3983 				.type = qrss->type,
3984 				.conf = qrss->conf,
3985 			},
3986 			{
3987 				.type = RTE_FLOW_ACTION_TYPE_END,
3988 			},
3989 		};
3990 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
3991 
3992 		/*
3993 		 * Configure the tag item only if there is no meter subflow.
3994 		 * Since tag is already marked in the meter suffix subflow
3995 		 * we can just use the meter suffix items as is.
3996 		 */
3997 		if (qrss_id) {
3998 			/* Not meter subflow. */
3999 			MLX5_ASSERT(!mtr_sfx);
4000 			/*
4001 			 * Put the unique ID in the prefix flow because it is
4002 			 * destroyed after the suffix flow, and the ID is freed
4003 			 * only when no actual flows use it anymore, at which
4004 			 * point identifier reallocation becomes possible (for
4005 			 * example, for other flows in other threads).
4006 			 */
4007 			dev_flow->handle->split_flow_id = qrss_id;
4008 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
4009 						   error);
4010 			if (ret < 0)
4011 				goto exit;
4012 			q_tag_spec.id = ret;
4013 		}
4014 		dev_flow = NULL;
4015 		/* Add suffix subflow to execute Q/RSS. */
4016 		ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
4017 					      &q_attr, mtr_sfx ? items :
4018 					      q_items, q_actions,
4019 					      external, error);
4020 		if (ret < 0)
4021 			goto exit;
4022 		/* Clear qrss_id: it should be freed only on failure. */
4023 		qrss_id = 0;
4024 		MLX5_ASSERT(dev_flow);
4025 	}
4026 
4027 exit:
4028 	/*
4029 	 * We do not destroy the partially created sub_flows in case of error.
4030 	 * They are included in the parent flow list and will be destroyed
4031 	 * by flow_drv_destroy.
4032 	 */
4033 	flow_qrss_free_id(dev, qrss_id);
4034 	rte_free(ext_actions);
4035 	return ret;
4036 }
4037 
4038 /**
4039  * The splitting for meter feature.
4040  *
4041  * - The meter flow will be split to two flows, the prefix flow and
4042  *   the suffix flow. The packets make sense only if they pass the
4043  *   prefix meter action.
4044  *
4045  * - Reg_C_5 is used for the packet to match between the prefix and
4046  *   suffix flows.
4047  *
4048  * @param dev
4049  *   Pointer to Ethernet device.
4050  * @param[in] flow
4051  *   Parent flow structure pointer.
4052  * @param[in] attr
4053  *   Flow rule attributes.
4054  * @param[in] items
4055  *   Pattern specification (list terminated by the END pattern item).
4056  * @param[in] actions
4057  *   Associated actions (list terminated by the END action).
4058  * @param[in] external
4059  *   This flow rule is created by a request external to the PMD.
4060  * @param[out] error
4061  *   Perform verbose error reporting if not NULL.
4062  * @return
4063  *   0 on success, negative value otherwise
4064  */
4065 static int
4066 flow_create_split_meter(struct rte_eth_dev *dev,
4067 			   struct rte_flow *flow,
4068 			   const struct rte_flow_attr *attr,
4069 			   const struct rte_flow_item items[],
4070 			   const struct rte_flow_action actions[],
4071 			   bool external, struct rte_flow_error *error)
4072 {
4073 	struct mlx5_priv *priv = dev->data->dev_private;
4074 	struct rte_flow_action *sfx_actions = NULL;
4075 	struct rte_flow_action *pre_actions = NULL;
4076 	struct rte_flow_item *sfx_items = NULL;
4077 	struct mlx5_flow *dev_flow = NULL;
4078 	struct rte_flow_attr sfx_attr = *attr;
4079 	uint32_t mtr = 0;
4080 	uint32_t mtr_tag_id = 0;
4081 	size_t act_size;
4082 	size_t item_size;
4083 	int actions_n = 0;
4084 	int ret;
4085 
4086 	if (priv->mtr_en)
4087 		actions_n = flow_check_meter_action(actions, &mtr);
4088 	if (mtr) {
4089 		/* The five prefix actions: meter, decap, encap, tag, end. */
4090 		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
4091 			   sizeof(struct mlx5_rte_flow_action_set_tag);
4092 		/* tag, vlan, port id, end. */
4093 #define METER_SUFFIX_ITEM 4
4094 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
4095 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
4096 		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4097 		if (!sfx_actions)
4098 			return rte_flow_error_set(error, ENOMEM,
4099 						  RTE_FLOW_ERROR_TYPE_ACTION,
4100 						  NULL, "no memory to split "
4101 						  "meter flow");
4102 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4103 			     act_size);
4104 		pre_actions = sfx_actions + actions_n;
4105 		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
4106 						   actions, sfx_actions,
4107 						   pre_actions);
4108 		if (!mtr_tag_id) {
4109 			ret = -rte_errno;
4110 			goto exit;
4111 		}
4112 		/* Add the prefix subflow. */
4113 		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
4114 					      items, pre_actions, external,
4115 					      error);
4116 		if (ret) {
4117 			ret = -rte_errno;
4118 			goto exit;
4119 		}
4120 		dev_flow->handle->split_flow_id = mtr_tag_id;
4121 		/* Set the suffix group attribute. */
4122 		sfx_attr.group = sfx_attr.transfer ?
4123 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4124 				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4125 	}
4126 	/* Add the suffix subflow (or the original flow if no meter split). */
4127 	ret = flow_create_split_metadata(dev, flow, dev_flow ?
4128 					 flow_get_prefix_layer_flags(dev_flow) :
4129 					 0, &sfx_attr,
4130 					 sfx_items ? sfx_items : items,
4131 					 sfx_actions ? sfx_actions : actions,
4132 					 external, error);
4133 exit:
4134 	if (sfx_actions)
4135 		rte_free(sfx_actions);
4136 	return ret;
4137 }
4138 
4139 /**
4140  * Split the flow to subflow set. The splitters might be linked
4141  * in the chain, like this:
4142  * flow_create_split_outer() calls:
4143  *   flow_create_split_meter() calls:
4144  *     flow_create_split_metadata(meter_subflow_0) calls:
4145  *       flow_create_split_inner(metadata_subflow_0)
4146  *       flow_create_split_inner(metadata_subflow_1)
4147  *       flow_create_split_inner(metadata_subflow_2)
4148  *     flow_create_split_metadata(meter_subflow_1) calls:
4149  *       flow_create_split_inner(metadata_subflow_0)
4150  *       flow_create_split_inner(metadata_subflow_1)
4151  *       flow_create_split_inner(metadata_subflow_2)
4152  *
4153  * This provides a flexible way to add new levels of flow splitting.
4154  * All successfully created subflows are included in the
4155  * parent flow dev_flow list.
4156  *
4157  * @param dev
4158  *   Pointer to Ethernet device.
4159  * @param[in] flow
4160  *   Parent flow structure pointer.
4161  * @param[in] attr
4162  *   Flow rule attributes.
4163  * @param[in] items
4164  *   Pattern specification (list terminated by the END pattern item).
4165  * @param[in] actions
4166  *   Associated actions (list terminated by the END action).
4167  * @param[in] external
4168  *   This flow rule is created by a request external to the PMD.
4169  * @param[out] error
4170  *   Perform verbose error reporting if not NULL.
4171  * @return
4172  *   0 on success, negative value otherwise
4173  */
4174 static int
4175 flow_create_split_outer(struct rte_eth_dev *dev,
4176 			struct rte_flow *flow,
4177 			const struct rte_flow_attr *attr,
4178 			const struct rte_flow_item items[],
4179 			const struct rte_flow_action actions[],
4180 			bool external, struct rte_flow_error *error)
4181 {
4182 	int ret;
4183 
4184 	ret = flow_create_split_meter(dev, flow, attr, items,
4185 					 actions, external, error);
4186 	MLX5_ASSERT(ret <= 0);
4187 	return ret;
4188 }
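
/*
 * Worked example (illustrative): a user flow with
 *     actions = [MARK, RSS, END]
 * and no meter passes through flow_create_split_meter() unchanged and is
 * then split by flow_create_split_metadata() into
 *     prefix subflow: [MARK, SET_TAG(flow_id), JUMP(RX_CP_TBL), END]
 *     suffix subflow: RSS performed in RX_ACT_TBL, matching the flow_id tag.
 * Both subflows are linked into the same parent flow dev_flow list.
 */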
4189 
4190 /**
4191  * Create a flow and add it to @p list.
4192  *
4193  * @param dev
4194  *   Pointer to Ethernet device.
4195  * @param list
4196  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4197  *   no list insertion occurs; the flow is just created and
4198  *   it is the caller's responsibility to track the
4199  *   created flow.
4200  * @param[in] attr
4201  *   Flow rule attributes.
4202  * @param[in] items
4203  *   Pattern specification (list terminated by the END pattern item).
4204  * @param[in] actions
4205  *   Associated actions (list terminated by the END action).
4206  * @param[in] external
4207  *   This flow rule is created by a request external to the PMD.
4208  * @param[out] error
4209  *   Perform verbose error reporting if not NULL.
4210  *
4211  * @return
4212  *   A flow on success, NULL otherwise and rte_errno is set.
4213  */
4214 static struct rte_flow *
4215 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
4216 		 const struct rte_flow_attr *attr,
4217 		 const struct rte_flow_item items[],
4218 		 const struct rte_flow_action actions[],
4219 		 bool external, struct rte_flow_error *error)
4220 {
4221 	struct mlx5_priv *priv = dev->data->dev_private;
4222 	struct rte_flow *flow = NULL;
4223 	struct mlx5_flow *dev_flow;
4224 	const struct rte_flow_action_rss *rss;
4225 	union {
4226 		struct rte_flow_expand_rss buf;
4227 		uint8_t buffer[2048];
4228 	} expand_buffer;
4229 	union {
4230 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4231 		uint8_t buffer[2048];
4232 	} actions_rx;
4233 	union {
4234 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4235 		uint8_t buffer[2048];
4236 	} actions_hairpin_tx;
4237 	union {
4238 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
4239 		uint8_t buffer[2048];
4240 	} items_tx;
4241 	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
4242 	const struct rte_flow_action *p_actions_rx = actions;
4243 	uint32_t i;
4244 	uint32_t flow_size;
4245 	int hairpin_flow = 0;
4246 	uint32_t hairpin_id = 0;
4247 	struct rte_flow_attr attr_tx = { .priority = 0 };
4248 	int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
4249 				    error);
4250 
4251 	if (ret < 0)
4252 		return NULL;
4253 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
4254 	if (hairpin_flow > 0) {
4255 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
4256 			rte_errno = EINVAL;
4257 			return NULL;
4258 		}
4259 		flow_hairpin_split(dev, actions, actions_rx.actions,
4260 				   actions_hairpin_tx.actions, items_tx.items,
4261 				   &hairpin_id);
4262 		p_actions_rx = actions_rx.actions;
4263 	}
4264 	flow_size = sizeof(struct rte_flow);
4265 	rss = flow_get_rss_action(p_actions_rx);
4266 	if (rss)
4267 		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
4268 					    sizeof(void *));
4269 	else
4270 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
4271 	flow = rte_calloc(__func__, 1, flow_size, 0);
4272 	if (!flow) {
4273 		rte_errno = ENOMEM;
4274 		goto error_before_flow;
4275 	}
4276 	flow->drv_type = flow_get_drv_type(dev, attr);
4277 	if (hairpin_id != 0)
4278 		flow->hairpin_flow_id = hairpin_id;
4279 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
4280 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
4281 	flow->rss.queue = (void *)(flow + 1);
4282 	if (rss) {
4283 		/*
4284 		 * The following information is required by
4285 		 * mlx5_flow_hashfields_adjust() in advance.
4286 		 */
4287 		flow->rss.level = rss->level;
4288 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
4289 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4290 	}
4291 	flow->dev_handles = 0;
4292 	if (rss && rss->types) {
4293 		unsigned int graph_root;
4294 
4295 		graph_root = find_graph_root(items, rss->level);
4296 		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
4297 					  items, rss->types,
4298 					  mlx5_support_expansion,
4299 					  graph_root);
4300 		MLX5_ASSERT(ret > 0 &&
4301 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
4302 	} else {
4303 		buf->entries = 1;
4304 		buf->entry[0].pattern = (void *)(uintptr_t)items;
4305 	}
4306 	/*
4307 	 * Record the start index when there is a nested call. All sub-flows
4308 	 * need to be translated before another call is made.
4309 	 * No ping-pong buffer is needed here, which saves memory.
4310 	 */
4311 	if (priv->flow_idx) {
4312 		MLX5_ASSERT(!priv->flow_nested_idx);
4313 		priv->flow_nested_idx = priv->flow_idx;
4314 	}
4315 	for (i = 0; i < buf->entries; ++i) {
4316 		/*
4317 		 * The splitter may create multiple dev_flows,
4318 		 * depending on configuration. In the simplest
4319 		 * case it just creates the unmodified original flow.
4320 		 */
4321 		ret = flow_create_split_outer(dev, flow, attr,
4322 					      buf->entry[i].pattern,
4323 					      p_actions_rx, external,
4324 					      error);
4325 		if (ret < 0)
4326 			goto error;
4327 	}
4328 	/* Create the tx flow. */
4329 	if (hairpin_flow) {
4330 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
4331 		attr_tx.ingress = 0;
4332 		attr_tx.egress = 1;
4333 		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
4334 					    actions_hairpin_tx.actions, error);
4335 		if (!dev_flow)
4336 			goto error;
4337 		dev_flow->flow = flow;
4338 		dev_flow->external = 0;
4339 		SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4340 			      dev_flow->handle, next);
4341 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
4342 					 items_tx.items,
4343 					 actions_hairpin_tx.actions, error);
4344 		if (ret < 0)
4345 			goto error;
4346 	}
4347 	/*
4348 	 * Update the metadata register copy table. If extensive
4349 	 * metadata feature is enabled and registers are supported
4350 	 * we might create the extra rte_flow for each unique
4351 	 * MARK/FLAG action ID.
4352 	 *
4353 	 * The table is updated for ingress flows only, because
4354 	 * the egress flows belong to a different device and the
4355 	 * copy table should be updated in the peer NIC Rx domain.
4356 	 */
4357 	if (attr->ingress &&
4358 	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
4359 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
4360 		if (ret)
4361 			goto error;
4362 	}
4363 	/*
4364 	 * If the flow is external (from the application) or the device is
4365 	 * started, the flow will be applied immediately.
4366 	 */
4367 	if (external || dev->data->dev_started) {
4368 		ret = flow_drv_apply(dev, flow, error);
4369 		if (ret < 0)
4370 			goto error;
4371 	}
4372 	if (list)
4373 		TAILQ_INSERT_TAIL(list, flow, next);
4374 	flow_rxq_flags_set(dev, flow);
4375 	/* Nested flow creation index recovery. */
4376 	priv->flow_idx = priv->flow_nested_idx;
4377 	if (priv->flow_nested_idx)
4378 		priv->flow_nested_idx = 0;
4379 	return flow;
4380 error:
4381 	MLX5_ASSERT(flow);
4382 	ret = rte_errno; /* Save rte_errno before cleanup. */
4383 	flow_mreg_del_copy_action(dev, flow);
4384 	flow_drv_destroy(dev, flow);
4385 	rte_free(flow);
4386 	rte_errno = ret; /* Restore rte_errno. */
4387 error_before_flow:
4388 	ret = rte_errno;
4389 	if (hairpin_id)
4390 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4391 				     hairpin_id);
4392 	rte_errno = ret;
4393 	priv->flow_idx = priv->flow_nested_idx;
4394 	if (priv->flow_nested_idx)
4395 		priv->flow_nested_idx = 0;
4396 	return NULL;
4397 }
4398 
4399 /**
4400  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
4401  * incoming packets to table 1.
4402  *
4403  * Other flow rules, requested for group n, will be created in
4404  * e-switch table n+1.
4405  * A jump action to e-switch group n will be created as a jump to group n+1.
4406  *
4407  * Used when working in switchdev mode, to utilise the advantages of table 1
4408  * and above.
4409  *
4410  * @param dev
4411  *   Pointer to Ethernet device.
4412  *
4413  * @return
4414  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
4415  */
4416 struct rte_flow *
4417 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
4418 {
4419 	const struct rte_flow_attr attr = {
4420 		.group = 0,
4421 		.priority = 0,
4422 		.ingress = 1,
4423 		.egress = 0,
4424 		.transfer = 1,
4425 	};
4426 	const struct rte_flow_item pattern = {
4427 		.type = RTE_FLOW_ITEM_TYPE_END,
4428 	};
4429 	struct rte_flow_action_jump jump = {
4430 		.group = 1,
4431 	};
4432 	const struct rte_flow_action actions[] = {
4433 		{
4434 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4435 			.conf = &jump,
4436 		},
4437 		{
4438 			.type = RTE_FLOW_ACTION_TYPE_END,
4439 		},
4440 	};
4441 	struct mlx5_priv *priv = dev->data->dev_private;
4442 	struct rte_flow_error error;
4443 
4444 	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
4445 				actions, false, &error);
4446 }
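
/*
 * Usage sketch (hypothetical): the rule is typically installed once when a
 * port starts in switchdev mode. The created flow is tracked in
 * priv->ctrl_flows by flow_list_create(), so a NULL check is sufficient.
 */
static int
esw_table_zero_flow_usage(struct rte_eth_dev *dev)
{
	/* Redirect everything arriving at e-switch table 0 to table 1. */
	if (!mlx5_flow_create_esw_table_zero_flow(dev))
		return -rte_errno;
	return 0;
}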
4447 
4448 /**
4449  * Create a flow.
4450  *
4451  * @see rte_flow_create()
4452  * @see rte_flow_ops
4453  */
4454 struct rte_flow *
4455 mlx5_flow_create(struct rte_eth_dev *dev,
4456 		 const struct rte_flow_attr *attr,
4457 		 const struct rte_flow_item items[],
4458 		 const struct rte_flow_action actions[],
4459 		 struct rte_flow_error *error)
4460 {
4461 	struct mlx5_priv *priv = dev->data->dev_private;
4462 
4463 	/*
4464 	 * If the device is not started yet, it is not allowed to create a
4465 	 * flow from the application. PMD default flows and traffic control flows
4466 	 * are not affected.
4467 	 */
4468 	if (unlikely(!dev->data->dev_started)) {
4469 		rte_errno = ENODEV;
4470 		DRV_LOG(DEBUG, "port %u is not started when "
4471 			"inserting a flow", dev->data->port_id);
4472 		return NULL;
4473 	}
4474 	return flow_list_create(dev, &priv->flows,
4475 				attr, items, actions, true, error);
4476 }
4477 
4478 /**
4479  * Destroy a flow in a list.
4480  *
4481  * @param dev
4482  *   Pointer to Ethernet device.
4483  * @param list
4484  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4485  *   there is no flow removal from the list.
4486  * @param[in] flow
4487  *   Flow to destroy.
4488  */
4489 static void
4490 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
4491 		  struct rte_flow *flow)
4492 {
4493 	struct mlx5_priv *priv = dev->data->dev_private;
4494 
4495 	/*
4496 	 * Update RX queue flags only if port is started, otherwise it is
4497 	 * already clean.
4498 	 */
4499 	if (dev->data->dev_started)
4500 		flow_rxq_flags_trim(dev, flow);
4501 	if (flow->hairpin_flow_id)
4502 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4503 				     flow->hairpin_flow_id);
4504 	flow_drv_destroy(dev, flow);
4505 	if (list)
4506 		TAILQ_REMOVE(list, flow, next);
4507 	flow_mreg_del_copy_action(dev, flow);
4508 	rte_free(flow->fdir);
4509 	rte_free(flow);
4510 }
4511 
4512 /**
4513  * Destroy all flows.
4514  *
4515  * @param dev
4516  *   Pointer to Ethernet device.
4517  * @param list
4518  *   Pointer to a TAILQ flow list.
4519  * @param active
4520  *   Specifies whether the flush is called actively.
4521  */
4522 void
4523 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
4524 		     bool active)
4525 {
4526 	uint32_t num_flushed = 0;
4527 
4528 	while (!TAILQ_EMPTY(list)) {
4529 		struct rte_flow *flow;
4530 
4531 		flow = TAILQ_FIRST(list);
4532 		flow_list_destroy(dev, list, flow);
4533 		num_flushed++;
4534 	}
4535 	if (active) {
4536 		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
4537 			dev->data->port_id, num_flushed);
4538 	}
4539 }
4540 
4541 /**
4542  * Remove all flows.
4543  *
4544  * @param dev
4545  *   Pointer to Ethernet device.
4546  * @param list
4547  *   Pointer to a TAILQ flow list.
4548  */
4549 void
4550 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
4551 {
4552 	struct rte_flow *flow;
4553 
4554 	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
4555 		flow_drv_remove(dev, flow);
4556 		flow_mreg_stop_copy_action(dev, flow);
4557 	}
4558 	flow_mreg_del_default_copy_action(dev);
4559 	flow_rxq_flags_clear(dev);
4560 }
4561 
4562 /**
4563  * Add all flows.
4564  *
4565  * @param dev
4566  *   Pointer to Ethernet device.
4567  * @param list
4568  *   Pointer to a TAILQ flow list.
4569  *
4570  * @return
4571  *   0 on success, a negative errno value otherwise and rte_errno is set.
4572  */
4573 int
4574 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
4575 {
4576 	struct rte_flow *flow;
4577 	struct rte_flow_error error;
4578 	int ret = 0;
4579 
4580 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
4581 	ret = flow_mreg_add_default_copy_action(dev, &error);
4582 	if (ret < 0)
4583 		return -rte_errno;
4584 	/* Apply Flows created by application. */
4585 	TAILQ_FOREACH(flow, list, next) {
4586 		ret = flow_mreg_start_copy_action(dev, flow);
4587 		if (ret < 0)
4588 			goto error;
4589 		ret = flow_drv_apply(dev, flow, &error);
4590 		if (ret < 0)
4591 			goto error;
4592 		flow_rxq_flags_set(dev, flow);
4593 	}
4594 	return 0;
4595 error:
4596 	ret = rte_errno; /* Save rte_errno before cleanup. */
4597 	mlx5_flow_stop(dev, list);
4598 	rte_errno = ret; /* Restore rte_errno. */
4599 	return -rte_errno;
4600 }
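
/*
 * Restart sketch (illustrative): how the stop/start helpers pair around a
 * port reconfiguration; error handling is reduced to the essentials.
 */
static int
flow_restart_usage(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Remove the flows from HW but keep them in the list. */
	mlx5_flow_stop(dev, &priv->flows);
	/* ... reconfigure queues/ports here ... */
	/* Re-apply each flow, restoring the default copy action first. */
	return mlx5_flow_start(dev, &priv->flows);
}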
4601 
4602 /**
4603  * Stop all default actions for flows.
4604  *
4605  * @param dev
4606  *   Pointer to Ethernet device.
4607  */
4608 void
4609 mlx5_flow_stop_default(struct rte_eth_dev *dev)
4610 {
4611 	flow_mreg_del_default_copy_action(dev);
4612 }
4613 
4614 /**
4615  * Start all default actions for flows.
4616  *
4617  * @param dev
4618  *   Pointer to Ethernet device.
4619  * @return
4620  *   0 on success, a negative errno value otherwise and rte_errno is set.
4621  */
4622 int
4623 mlx5_flow_start_default(struct rte_eth_dev *dev)
4624 {
4625 	struct rte_flow_error error;
4626 
4627 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
4628 	return flow_mreg_add_default_copy_action(dev, &error);
4629 }
4630 
4631 /**
4632  * Allocate intermediate resources for flow creation.
4633  *
4634  * @param dev
4635  *   Pointer to Ethernet device.
4636  */
4637 void
4638 mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
4639 {
4640 	struct mlx5_priv *priv = dev->data->dev_private;
4641 
4642 	if (!priv->inter_flows)
4643 		priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
4644 					       sizeof(struct mlx5_flow), 0);
4645 	/* Reset the index. */
4646 	priv->flow_idx = 0;
4647 	priv->flow_nested_idx = 0;
4648 }
4649 
4650 /**
4651  * Free intermediate resources for flows.
4652  *
4653  * @param dev
4654  *   Pointer to Ethernet device.
4655  */
4656 void
4657 mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
4658 {
4659 	struct mlx5_priv *priv = dev->data->dev_private;
4660 
4661 	rte_free(priv->inter_flows);
4662 	priv->inter_flows = NULL;
4663 }
4664 
4665 /**
4666  * Verify the flow list is empty.
4667  *
4668  * @param dev
4669  *  Pointer to Ethernet device.
4670  *
4671  * @return the number of flows not released.
4672  */
4673 int
4674 mlx5_flow_verify(struct rte_eth_dev *dev)
4675 {
4676 	struct mlx5_priv *priv = dev->data->dev_private;
4677 	struct rte_flow *flow;
4678 	int ret = 0;
4679 
4680 	TAILQ_FOREACH(flow, &priv->flows, next) {
4681 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
4682 			dev->data->port_id, (void *)flow);
4683 		++ret;
4684 	}
4685 	return ret;
4686 }
4687 
4688 /**
4689  * Enable default hairpin egress flow.
4690  *
4691  * @param dev
4692  *   Pointer to Ethernet device.
4693  * @param queue
4694  *   The queue index.
4695  *
4696  * @return
4697  *   0 on success, a negative errno value otherwise and rte_errno is set.
4698  */
4699 int
4700 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
4701 			    uint32_t queue)
4702 {
4703 	struct mlx5_priv *priv = dev->data->dev_private;
4704 	const struct rte_flow_attr attr = {
4705 		.egress = 1,
4706 		.priority = 0,
4707 	};
4708 	struct mlx5_rte_flow_item_tx_queue queue_spec = {
4709 		.queue = queue,
4710 	};
4711 	struct mlx5_rte_flow_item_tx_queue queue_mask = {
4712 		.queue = UINT32_MAX,
4713 	};
4714 	struct rte_flow_item items[] = {
4715 		{
4716 			.type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
4717 			.spec = &queue_spec,
4718 			.last = NULL,
4719 			.mask = &queue_mask,
4720 		},
4721 		{
4722 			.type = RTE_FLOW_ITEM_TYPE_END,
4723 		},
4724 	};
4725 	struct rte_flow_action_jump jump = {
4726 		.group = MLX5_HAIRPIN_TX_TABLE,
4727 	};
4728 	struct rte_flow_action actions[2];
4729 	struct rte_flow *flow;
4730 	struct rte_flow_error error;
4731 
4732 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
4733 	actions[0].conf = &jump;
4734 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
4735 	flow = flow_list_create(dev, &priv->ctrl_flows,
4736 				&attr, items, actions, false, &error);
4737 	if (!flow) {
4738 		DRV_LOG(DEBUG,
4739 			"Failed to create ctrl flow: rte_errno(%d),"
4740 			" type(%d), message(%s)",
4741 			rte_errno, error.type,
4742 			error.message ? error.message : " (no stated reason)");
4743 		return -rte_errno;
4744 	}
4745 	return 0;
4746 }
4747 
4748 /**
4749  * Enable a control flow configured from the control plane.
4750  *
4751  * @param dev
4752  *   Pointer to Ethernet device.
4753  * @param eth_spec
4754  *   An Ethernet flow spec to apply.
4755  * @param eth_mask
4756  *   An Ethernet flow mask to apply.
4757  * @param vlan_spec
4758  *   A VLAN flow spec to apply.
4759  * @param vlan_mask
4760  *   A VLAN flow mask to apply.
4761  *
4762  * @return
4763  *   0 on success, a negative errno value otherwise and rte_errno is set.
4764  */
4765 int
4766 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
4767 		    struct rte_flow_item_eth *eth_spec,
4768 		    struct rte_flow_item_eth *eth_mask,
4769 		    struct rte_flow_item_vlan *vlan_spec,
4770 		    struct rte_flow_item_vlan *vlan_mask)
4771 {
4772 	struct mlx5_priv *priv = dev->data->dev_private;
4773 	const struct rte_flow_attr attr = {
4774 		.ingress = 1,
4775 		.priority = MLX5_FLOW_PRIO_RSVD,
4776 	};
4777 	struct rte_flow_item items[] = {
4778 		{
4779 			.type = RTE_FLOW_ITEM_TYPE_ETH,
4780 			.spec = eth_spec,
4781 			.last = NULL,
4782 			.mask = eth_mask,
4783 		},
4784 		{
4785 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
4786 					      RTE_FLOW_ITEM_TYPE_END,
4787 			.spec = vlan_spec,
4788 			.last = NULL,
4789 			.mask = vlan_mask,
4790 		},
4791 		{
4792 			.type = RTE_FLOW_ITEM_TYPE_END,
4793 		},
4794 	};
4795 	uint16_t queue[priv->reta_idx_n];
4796 	struct rte_flow_action_rss action_rss = {
4797 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4798 		.level = 0,
4799 		.types = priv->rss_conf.rss_hf,
4800 		.key_len = priv->rss_conf.rss_key_len,
4801 		.queue_num = priv->reta_idx_n,
4802 		.key = priv->rss_conf.rss_key,
4803 		.queue = queue,
4804 	};
4805 	struct rte_flow_action actions[] = {
4806 		{
4807 			.type = RTE_FLOW_ACTION_TYPE_RSS,
4808 			.conf = &action_rss,
4809 		},
4810 		{
4811 			.type = RTE_FLOW_ACTION_TYPE_END,
4812 		},
4813 	};
4814 	struct rte_flow *flow;
4815 	struct rte_flow_error error;
4816 	unsigned int i;
4817 
4818 	if (!priv->reta_idx_n || !priv->rxqs_n)
4819 		return 0;
4821 	for (i = 0; i != priv->reta_idx_n; ++i)
4822 		queue[i] = (*priv->reta_idx)[i];
4823 	flow = flow_list_create(dev, &priv->ctrl_flows,
4824 				&attr, items, actions, false, &error);
4825 	if (!flow)
4826 		return -rte_errno;
4827 	return 0;
4828 }
4829 
4830 /**
4831  * Enable a control flow configured from the control plane.
4832  *
4833  * @param dev
4834  *   Pointer to Ethernet device.
4835  * @param eth_spec
4836  *   An Ethernet flow spec to apply.
4837  * @param eth_mask
4838  *   An Ethernet flow mask to apply.
4839  *
4840  * @return
4841  *   0 on success, a negative errno value otherwise and rte_errno is set.
4842  */
4843 int
4844 mlx5_ctrl_flow(struct rte_eth_dev *dev,
4845 	       struct rte_flow_item_eth *eth_spec,
4846 	       struct rte_flow_item_eth *eth_mask)
4847 {
4848 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
4849 }
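
/*
 * Usage sketch (illustrative, modeled on the PMD traffic setup): enable a
 * control flow matching one unicast destination MAC address.
 */
static int
ctrl_flow_unicast_usage(struct rte_eth_dev *dev,
			const struct rte_ether_addr *mac)
{
	struct rte_flow_item_eth spec = { .type = 0 };
	struct rte_flow_item_eth mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	/* Match only on the destination MAC; RSS over all Rx queues. */
	memcpy(&spec.dst, mac, sizeof(spec.dst));
	return mlx5_ctrl_flow(dev, &spec, &mask);
}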
4850 
4851 /**
4852  * Destroy a flow.
4853  *
4854  * @see rte_flow_destroy()
4855  * @see rte_flow_ops
4856  */
4857 int
4858 mlx5_flow_destroy(struct rte_eth_dev *dev,
4859 		  struct rte_flow *flow,
4860 		  struct rte_flow_error *error __rte_unused)
4861 {
4862 	struct mlx5_priv *priv = dev->data->dev_private;
4863 
4864 	flow_list_destroy(dev, &priv->flows, flow);
4865 	return 0;
4866 }
4867 
4868 /**
4869  * Destroy all flows.
4870  *
4871  * @see rte_flow_flush()
4872  * @see rte_flow_ops
4873  */
4874 int
4875 mlx5_flow_flush(struct rte_eth_dev *dev,
4876 		struct rte_flow_error *error __rte_unused)
4877 {
4878 	struct mlx5_priv *priv = dev->data->dev_private;
4879 
4880 	mlx5_flow_list_flush(dev, &priv->flows, false);
4881 	return 0;
4882 }
4883 
4884 /**
4885  * Isolated mode.
4886  *
4887  * @see rte_flow_isolate()
4888  * @see rte_flow_ops
4889  */
4890 int
4891 mlx5_flow_isolate(struct rte_eth_dev *dev,
4892 		  int enable,
4893 		  struct rte_flow_error *error)
4894 {
4895 	struct mlx5_priv *priv = dev->data->dev_private;
4896 
4897 	if (dev->data->dev_started) {
4898 		rte_flow_error_set(error, EBUSY,
4899 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4900 				   NULL,
4901 				   "port must be stopped first");
4902 		return -rte_errno;
4903 	}
4904 	priv->isolated = !!enable;
4905 	if (enable)
4906 		dev->dev_ops = &mlx5_dev_ops_isolate;
4907 	else
4908 		dev->dev_ops = &mlx5_dev_ops;
4909 	return 0;
4910 }
4911 
4912 /**
4913  * Query a flow.
4914  *
4915  * @see rte_flow_query()
4916  * @see rte_flow_ops
4917  */
4918 static int
4919 flow_drv_query(struct rte_eth_dev *dev,
4920 	       struct rte_flow *flow,
4921 	       const struct rte_flow_action *actions,
4922 	       void *data,
4923 	       struct rte_flow_error *error)
4924 {
4925 	const struct mlx5_flow_driver_ops *fops;
4926 	enum mlx5_flow_drv_type ftype = flow->drv_type;
4927 
4928 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
4929 	fops = flow_get_drv_ops(ftype);
4930 
4931 	return fops->query(dev, flow, actions, data, error);
4932 }
4933 
4934 /**
4935  * Query a flow.
4936  *
4937  * @see rte_flow_query()
4938  * @see rte_flow_ops
4939  */
4940 int
4941 mlx5_flow_query(struct rte_eth_dev *dev,
4942 		struct rte_flow *flow,
4943 		const struct rte_flow_action *actions,
4944 		void *data,
4945 		struct rte_flow_error *error)
4946 {
4947 	int ret;
4948 
4949 	ret = flow_drv_query(dev, flow, actions, data, error);
4950 	if (ret < 0)
4951 		return ret;
4952 	return 0;
4953 }
4954 
4955 /**
4956  * Convert a flow director filter to a generic flow.
4957  *
4958  * @param dev
4959  *   Pointer to Ethernet device.
4960  * @param fdir_filter
4961  *   Flow director filter to add.
4962  * @param attributes
4963  *   Generic flow parameters structure.
4964  *
4965  * @return
4966  *   0 on success, a negative errno value otherwise and rte_errno is set.
4967  */
4968 static int
4969 flow_fdir_filter_convert(struct rte_eth_dev *dev,
4970 			 const struct rte_eth_fdir_filter *fdir_filter,
4971 			 struct mlx5_fdir *attributes)
4972 {
4973 	struct mlx5_priv *priv = dev->data->dev_private;
4974 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
4975 	const struct rte_eth_fdir_masks *mask =
4976 		&dev->data->dev_conf.fdir_conf.mask;
4977 
4978 	/* Validate queue number. */
4979 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
4980 		DRV_LOG(ERR, "port %u invalid queue number %d",
4981 			dev->data->port_id, fdir_filter->action.rx_queue);
4982 		rte_errno = EINVAL;
4983 		return -rte_errno;
4984 	}
4985 	attributes->attr.ingress = 1;
4986 	attributes->items[0] = (struct rte_flow_item) {
4987 		.type = RTE_FLOW_ITEM_TYPE_ETH,
4988 		.spec = &attributes->l2,
4989 		.mask = &attributes->l2_mask,
4990 	};
4991 	switch (fdir_filter->action.behavior) {
4992 	case RTE_ETH_FDIR_ACCEPT:
4993 		attributes->actions[0] = (struct rte_flow_action){
4994 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
4995 			.conf = &attributes->queue,
4996 		};
4997 		break;
4998 	case RTE_ETH_FDIR_REJECT:
4999 		attributes->actions[0] = (struct rte_flow_action){
5000 			.type = RTE_FLOW_ACTION_TYPE_DROP,
5001 		};
5002 		break;
5003 	default:
5004 		DRV_LOG(ERR, "port %u invalid behavior %d",
5005 			dev->data->port_id,
5006 			fdir_filter->action.behavior);
5007 		rte_errno = ENOTSUP;
5008 		return -rte_errno;
5009 	}
5010 	attributes->queue.index = fdir_filter->action.rx_queue;
5011 	/* Handle L3. */
5012 	switch (fdir_filter->input.flow_type) {
5013 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
5014 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
5015 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
5016 		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
5017 			.src_addr = input->flow.ip4_flow.src_ip,
5018 			.dst_addr = input->flow.ip4_flow.dst_ip,
5019 			.time_to_live = input->flow.ip4_flow.ttl,
5020 			.type_of_service = input->flow.ip4_flow.tos,
5021 		};
5022 		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
5023 			.src_addr = mask->ipv4_mask.src_ip,
5024 			.dst_addr = mask->ipv4_mask.dst_ip,
5025 			.time_to_live = mask->ipv4_mask.ttl,
5026 			.type_of_service = mask->ipv4_mask.tos,
5027 			.next_proto_id = mask->ipv4_mask.proto,
5028 		};
5029 		attributes->items[1] = (struct rte_flow_item){
5030 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
5031 			.spec = &attributes->l3,
5032 			.mask = &attributes->l3_mask,
5033 		};
5034 		break;
5035 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
5036 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
5037 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
5038 		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
5039 			.hop_limits = input->flow.ipv6_flow.hop_limits,
5040 			.proto = input->flow.ipv6_flow.proto,
5041 		};
5042 
5043 		memcpy(attributes->l3.ipv6.hdr.src_addr,
5044 		       input->flow.ipv6_flow.src_ip,
5045 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
5046 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
5047 		       input->flow.ipv6_flow.dst_ip,
5048 		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
5049 		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
5050 		       mask->ipv6_mask.src_ip,
5051 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
5052 		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
5053 		       mask->ipv6_mask.dst_ip,
5054 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
5055 		attributes->items[1] = (struct rte_flow_item){
5056 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
5057 			.spec = &attributes->l3,
5058 			.mask = &attributes->l3_mask,
5059 		};
5060 		break;
5061 	default:
5062 		DRV_LOG(ERR, "port %u invalid flow type %d",
5063 			dev->data->port_id, fdir_filter->input.flow_type);
5064 		rte_errno = ENOTSUP;
5065 		return -rte_errno;
5066 	}
5067 	/* Handle L4. */
5068 	switch (fdir_filter->input.flow_type) {
5069 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
5070 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
5071 			.src_port = input->flow.udp4_flow.src_port,
5072 			.dst_port = input->flow.udp4_flow.dst_port,
5073 		};
5074 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
5075 			.src_port = mask->src_port_mask,
5076 			.dst_port = mask->dst_port_mask,
5077 		};
5078 		attributes->items[2] = (struct rte_flow_item){
5079 			.type = RTE_FLOW_ITEM_TYPE_UDP,
5080 			.spec = &attributes->l4,
5081 			.mask = &attributes->l4_mask,
5082 		};
5083 		break;
5084 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
5085 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
5086 			.src_port = input->flow.tcp4_flow.src_port,
5087 			.dst_port = input->flow.tcp4_flow.dst_port,
5088 		};
5089 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
5090 			.src_port = mask->src_port_mask,
5091 			.dst_port = mask->dst_port_mask,
5092 		};
5093 		attributes->items[2] = (struct rte_flow_item){
5094 			.type = RTE_FLOW_ITEM_TYPE_TCP,
5095 			.spec = &attributes->l4,
5096 			.mask = &attributes->l4_mask,
5097 		};
5098 		break;
5099 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
5100 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
5101 			.src_port = input->flow.udp6_flow.src_port,
5102 			.dst_port = input->flow.udp6_flow.dst_port,
5103 		};
5104 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
5105 			.src_port = mask->src_port_mask,
5106 			.dst_port = mask->dst_port_mask,
5107 		};
5108 		attributes->items[2] = (struct rte_flow_item){
5109 			.type = RTE_FLOW_ITEM_TYPE_UDP,
5110 			.spec = &attributes->l4,
5111 			.mask = &attributes->l4_mask,
5112 		};
5113 		break;
5114 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
5115 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
5116 			.src_port = input->flow.tcp6_flow.src_port,
5117 			.dst_port = input->flow.tcp6_flow.dst_port,
5118 		};
5119 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
5120 			.src_port = mask->src_port_mask,
5121 			.dst_port = mask->dst_port_mask,
5122 		};
5123 		attributes->items[2] = (struct rte_flow_item){
5124 			.type = RTE_FLOW_ITEM_TYPE_TCP,
5125 			.spec = &attributes->l4,
5126 			.mask = &attributes->l4_mask,
5127 		};
5128 		break;
5129 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
5130 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
5131 		break;
5132 	default:
5133 		DRV_LOG(ERR, "port %u invalid flow type %d",
5134 			dev->data->port_id, fdir_filter->input.flow_type);
5135 		rte_errno = ENOTSUP;
5136 		return -rte_errno;
5137 	}
5138 	return 0;
5139 }
5140 
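/*
 * Illustrative usage sketch (hypothetical, not part of the driver): a
 * classic IPv4/UDP flow director filter and the rte_flow layout that
 * flow_fdir_filter_convert() derives from it, i.e. an item chain of
 * { ETH, IPV4, UDP, END }. All field values are arbitrary examples.
 */
static __rte_unused void
flow_fdir_convert_sketch(struct rte_eth_dev *dev)
{
	struct rte_eth_fdir_filter fdir = {
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				.ip = {
					.src_ip = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 1)),
					.dst_ip = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 2)),
				},
				.src_port = rte_cpu_to_be_16(4000),
				.dst_port = rte_cpu_to_be_16(5000),
			},
		},
		.action.rx_queue = 1,
	};
	struct mlx5_fdir fdir_flow = { .attr.group = 0 };

	/* On success items[1] is the IPv4 spec and items[2] the UDP spec. */
	if (!flow_fdir_filter_convert(dev, &fdir, &fdir_flow))
		MLX5_ASSERT(fdir_flow.items[2].type == RTE_FLOW_ITEM_TYPE_UDP);
}
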
5141 #define FLOW_FDIR_CMP(f1, f2, fld) \
5142 	memcmp(&(f1)->fld, &(f2)->fld, sizeof((f1)->fld))
5143 
5144 /**
5145  * Compare two FDIR flows. If the attributes as well as the L2, L3 and
5146  * L4 specs, masks and actions of two flows are identical, they are
5147  * regarded as the same flow; for queue actions the queue indexes must
5148  * match as well.
5149  *
5150  * @param f1
5151  *   FDIR flow to compare.
5152  * @param f2
5153  *   FDIR flow to compare.
5154  *
5155  * @return
5156  *   Zero on match, 1 otherwise.
5157  */
5158 static int
5159 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
5160 {
5161 	if (FLOW_FDIR_CMP(f1, f2, attr) ||
5162 	    FLOW_FDIR_CMP(f1, f2, l2) ||
5163 	    FLOW_FDIR_CMP(f1, f2, l2_mask) ||
5164 	    FLOW_FDIR_CMP(f1, f2, l3) ||
5165 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
5166 	    FLOW_FDIR_CMP(f1, f2, l4) ||
5167 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
5168 	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
5169 		return 1;
5170 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
5171 	    FLOW_FDIR_CMP(f1, f2, queue))
5172 		return 1;
5173 	return 0;
5174 }
5175 
5176 /**
5177  * Search the device flow list for a matching FDIR flow.
5178  *
5179  * @param dev
5180  *   Pointer to Ethernet device.
5181  * @param fdir_flow
5182  *   FDIR flow to lookup.
5183  *
5184  * @return
5185  *   Pointer to the flow if found, NULL otherwise.
5186  */
5187 static struct rte_flow *
5188 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
5189 {
5190 	struct mlx5_priv *priv = dev->data->dev_private;
5191 	struct rte_flow *flow = NULL;
5192 
5193 	MLX5_ASSERT(fdir_flow);
5194 	TAILQ_FOREACH(flow, &priv->flows, next) {
5195 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
5196 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
5197 				dev->data->port_id, (void *)flow);
5198 			break;
5199 		}
5200 	}
5201 	return flow;
5202 }
5203 
5204 /**
5205  * Add new flow director filter and store it in list.
5206  *
5207  * @param dev
5208  *   Pointer to Ethernet device.
5209  * @param fdir_filter
5210  *   Flow director filter to add.
5211  *
5212  * @return
5213  *   0 on success, a negative errno value otherwise and rte_errno is set.
5214  */
5215 static int
5216 flow_fdir_filter_add(struct rte_eth_dev *dev,
5217 		     const struct rte_eth_fdir_filter *fdir_filter)
5218 {
5219 	struct mlx5_priv *priv = dev->data->dev_private;
5220 	struct mlx5_fdir *fdir_flow;
5221 	struct rte_flow *flow;
5222 	int ret;
5223 
5224 	fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
5225 	if (!fdir_flow) {
5226 		rte_errno = ENOMEM;
5227 		return -rte_errno;
5228 	}
5229 	ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
5230 	if (ret)
5231 		goto error;
5232 	flow = flow_fdir_filter_lookup(dev, fdir_flow);
5233 	if (flow) {
5234 		rte_errno = EEXIST;
5235 		goto error;
5236 	}
5237 	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
5238 				fdir_flow->items, fdir_flow->actions, true,
5239 				NULL);
5240 	if (!flow)
5241 		goto error;
5242 	MLX5_ASSERT(!flow->fdir);
5243 	flow->fdir = fdir_flow;
5244 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
5245 		dev->data->port_id, (void *)flow);
5246 	return 0;
5247 error:
5248 	rte_free(fdir_flow);
5249 	return -rte_errno;
5250 }
5251 
5252 /**
5253  * Delete specific filter.
5254  *
5255  * @param dev
5256  *   Pointer to Ethernet device.
5257  * @param fdir_filter
5258  *   Filter to be deleted.
5259  *
5260  * @return
5261  *   0 on success, a negative errno value otherwise and rte_errno is set.
5262  */
5263 static int
5264 flow_fdir_filter_delete(struct rte_eth_dev *dev,
5265 			const struct rte_eth_fdir_filter *fdir_filter)
5266 {
5267 	struct mlx5_priv *priv = dev->data->dev_private;
5268 	struct rte_flow *flow;
5269 	struct mlx5_fdir fdir_flow = {
5270 		.attr.group = 0,
5271 	};
5272 	int ret;
5273 
5274 	ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
5275 	if (ret)
5276 		return -rte_errno;
5277 	flow = flow_fdir_filter_lookup(dev, &fdir_flow);
5278 	if (!flow) {
5279 		rte_errno = ENOENT;
5280 		return -rte_errno;
5281 	}
5282 	flow_list_destroy(dev, &priv->flows, flow);
5283 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
5284 		dev->data->port_id, (void *)flow);
5285 	return 0;
5286 }
5287 
5288 /**
5289  * Update queue for a specific filter (a non-atomic delete then add).
5290  *
5291  * @param dev
5292  *   Pointer to Ethernet device.
5293  * @param fdir_filter
5294  *   Filter to be updated.
5295  *
5296  * @return
5297  *   0 on success, a negative errno value otherwise and rte_errno is set.
5298  */
5299 static int
5300 flow_fdir_filter_update(struct rte_eth_dev *dev,
5301 			const struct rte_eth_fdir_filter *fdir_filter)
5302 {
5303 	int ret;
5304 
5305 	ret = flow_fdir_filter_delete(dev, fdir_filter);
5306 	if (ret)
5307 		return ret;
5308 	return flow_fdir_filter_add(dev, fdir_filter);
5309 }
5310 
5311 /**
5312  * Flush all filters.
5313  *
5314  * @param dev
5315  *   Pointer to Ethernet device.
5316  */
5317 static void
5318 flow_fdir_filter_flush(struct rte_eth_dev *dev)
5319 {
5320 	struct mlx5_priv *priv = dev->data->dev_private;
5321 
5322 	mlx5_flow_list_flush(dev, &priv->flows, false);
5323 }
5324 
5325 /**
5326  * Get flow director information.
5327  *
5328  * @param dev
5329  *   Pointer to Ethernet device.
5330  * @param[out] fdir_info
5331  *   Resulting flow director information.
5332  */
5333 static void
5334 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
5335 {
5336 	struct rte_eth_fdir_masks *mask =
5337 		&dev->data->dev_conf.fdir_conf.mask;
5338 
5339 	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
5340 	fdir_info->guarant_spc = 0;
5341 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
5342 	fdir_info->max_flexpayload = 0;
5343 	fdir_info->flow_types_mask[0] = 0;
5344 	fdir_info->flex_payload_unit = 0;
5345 	fdir_info->max_flex_payload_segment_num = 0;
5346 	fdir_info->flex_payload_limit = 0;
5347 	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
5348 }
5349 
5350 /**
5351  * Deal with flow director operations.
5352  *
5353  * @param dev
5354  *   Pointer to Ethernet device.
5355  * @param filter_op
5356  *   Operation to perform.
5357  * @param arg
5358  *   Pointer to operation-specific structure.
5359  *
5360  * @return
5361  *   0 on success, a negative errno value otherwise and rte_errno is set.
5362  */
5363 static int
5364 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5365 		    void *arg)
5366 {
5367 	enum rte_fdir_mode fdir_mode =
5368 		dev->data->dev_conf.fdir_conf.mode;
5369 
5370 	if (filter_op == RTE_ETH_FILTER_NOP)
5371 		return 0;
5372 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
5373 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
5374 		DRV_LOG(ERR, "port %u flow director mode %d not supported",
5375 			dev->data->port_id, fdir_mode);
5376 		rte_errno = EINVAL;
5377 		return -rte_errno;
5378 	}
5379 	switch (filter_op) {
5380 	case RTE_ETH_FILTER_ADD:
5381 		return flow_fdir_filter_add(dev, arg);
5382 	case RTE_ETH_FILTER_UPDATE:
5383 		return flow_fdir_filter_update(dev, arg);
5384 	case RTE_ETH_FILTER_DELETE:
5385 		return flow_fdir_filter_delete(dev, arg);
5386 	case RTE_ETH_FILTER_FLUSH:
5387 		flow_fdir_filter_flush(dev);
5388 		break;
5389 	case RTE_ETH_FILTER_INFO:
5390 		flow_fdir_info_get(dev, arg);
5391 		break;
5392 	default:
5393 		DRV_LOG(DEBUG, "port %u unknown operation %u",
5394 			dev->data->port_id, filter_op);
5395 		rte_errno = EINVAL;
5396 		return -rte_errno;
5397 	}
5398 	return 0;
5399 }
5400 
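/*
 * Hypothetical application-side sketch: the legacy filter-control entry
 * point that ends up in flow_fdir_ctrl_func() above; mlx5_dev_filter_ctrl()
 * below does the dispatching.
 */
static __rte_unused int
flow_fdir_add_sketch(uint16_t port_id, struct rte_eth_fdir_filter *filter)
{
	/* RTE_ETH_FILTER_ADD maps to flow_fdir_filter_add(). */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, filter);
}
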
5401 /**
5402  * Manage filter operations.
5403  *
5404  * @param dev
5405  *   Pointer to Ethernet device structure.
5406  * @param filter_type
5407  *   Filter type.
5408  * @param filter_op
5409  *   Operation to perform.
5410  * @param arg
5411  *   Pointer to operation-specific structure.
5412  *
5413  * @return
5414  *   0 on success, a negative errno value otherwise and rte_errno is set.
5415  */
5416 int
5417 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
5418 		     enum rte_filter_type filter_type,
5419 		     enum rte_filter_op filter_op,
5420 		     void *arg)
5421 {
5422 	switch (filter_type) {
5423 	case RTE_ETH_FILTER_GENERIC:
5424 		if (filter_op != RTE_ETH_FILTER_GET) {
5425 			rte_errno = EINVAL;
5426 			return -rte_errno;
5427 		}
5428 		*(const void **)arg = &mlx5_flow_ops;
5429 		return 0;
5430 	case RTE_ETH_FILTER_FDIR:
5431 		return flow_fdir_ctrl_func(dev, filter_op, arg);
5432 	default:
5433 		DRV_LOG(ERR, "port %u filter type (%d) not supported",
5434 			dev->data->port_id, filter_type);
5435 		rte_errno = ENOTSUP;
5436 		return -rte_errno;
5437 	}
5438 	return 0;
5439 }
5440 
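/*
 * Sketch (hypothetical) of the RTE_ETH_FILTER_GENERIC branch above: this
 * is how the rte_flow layer fetches the driver's flow ops before invoking,
 * e.g., rte_flow_create() on a mlx5 port.
 */
static __rte_unused const struct rte_flow_ops *
flow_ops_get_sketch(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops; /* Points to mlx5_flow_ops on success. */
}
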
5441 /**
5442  * Create the needed meter and suffix tables.
5443  *
5444  * @param[in] dev
5445  *   Pointer to Ethernet device.
5446  * @param[in] fm
5447  *   Pointer to the flow meter.
5448  *
5449  * @return
5450  *   Pointer to table set on success, NULL otherwise.
5451  */
5452 struct mlx5_meter_domains_infos *
5453 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
5454 			  const struct mlx5_flow_meter *fm)
5455 {
5456 	const struct mlx5_flow_driver_ops *fops;
5457 
5458 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5459 	return fops->create_mtr_tbls(dev, fm);
5460 }
5461 
5462 /**
5463  * Destroy the meter table set.
5464  *
5465  * @param[in] dev
5466  *   Pointer to Ethernet device.
5467  * @param[in] tbl
5468  *   Pointer to the meter table set.
5469  *
5470  * @return
5471  *   0 on success.
5472  */
5473 int
5474 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
5475 			   struct mlx5_meter_domains_infos *tbls)
5476 {
5477 	const struct mlx5_flow_driver_ops *fops;
5478 
5479 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5480 	return fops->destroy_mtr_tbls(dev, tbls);
5481 }
5482 
5483 /**
5484  * Create policer rules.
5485  *
5486  * @param[in] dev
5487  *   Pointer to Ethernet device.
5488  * @param[in] fm
5489  *   Pointer to flow meter structure.
5490  * @param[in] attr
5491  *   Pointer to flow attributes.
5492  *
5493  * @return
5494  *   0 on success, -1 otherwise.
5495  */
5496 int
5497 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
5498 			       struct mlx5_flow_meter *fm,
5499 			       const struct rte_flow_attr *attr)
5500 {
5501 	const struct mlx5_flow_driver_ops *fops;
5502 
5503 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5504 	return fops->create_policer_rules(dev, fm, attr);
5505 }
5506 
5507 /**
5508  * Destroy policer rules.
5509  *
5510  * @param[in] fm
5511  *   Pointer to flow meter structure.
5512  * @param[in] attr
5513  *   Pointer to flow attributes.
5514  *
5515  * @return
5516  *   0 on success, -1 otherwise.
5517  */
5518 int
5519 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
5520 				struct mlx5_flow_meter *fm,
5521 				const struct rte_flow_attr *attr)
5522 {
5523 	const struct mlx5_flow_driver_ops *fops;
5524 
5525 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5526 	return fops->destroy_policer_rules(dev, fm, attr);
5527 }
5528 
5529 /**
5530  * Allocate a counter.
5531  *
5532  * @param[in] dev
5533  *   Pointer to Ethernet device structure.
5534  *
5535  * @return
5536  *   Index to the allocated counter on success, 0 otherwise.
5537  */
5538 uint32_t
5539 mlx5_counter_alloc(struct rte_eth_dev *dev)
5540 {
5541 	const struct mlx5_flow_driver_ops *fops;
5542 	struct rte_flow_attr attr = { .transfer = 0 };
5543 
5544 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5545 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5546 		return fops->counter_alloc(dev);
5547 	}
5548 	DRV_LOG(ERR,
5549 		"port %u counter allocation is not supported.",
5550 		 dev->data->port_id);
5551 	return 0;
5552 }
5553 
5554 /**
5555  * Free a counter.
5556  *
5557  * @param[in] dev
5558  *   Pointer to Ethernet device structure.
5559  * @param[in] cnt
5560  *   Index to the counter to be freed.
5561  */
5562 void
5563 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
5564 {
5565 	const struct mlx5_flow_driver_ops *fops;
5566 	struct rte_flow_attr attr = { .transfer = 0 };
5567 
5568 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5569 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5570 		fops->counter_free(dev, cnt);
5571 		return;
5572 	}
5573 	DRV_LOG(ERR,
5574 		"port %u counter free is not supported.",
5575 		 dev->data->port_id);
5576 }
5577 
5578 /**
5579  * Query counter statistics.
5580  *
5581  * @param[in] dev
5582  *   Pointer to Ethernet device structure.
5583  * @param[in] cnt
5584  *   Index to counter to query.
5585  * @param[in] clear
5586  *   Set to clear counter statistics.
5587  * @param[out] pkts
5588  *   Where to save the number of packets hitting the counter.
5589  * @param[out] bytes
5590  *   Where to save the number of bytes hitting the counter.
5591  *
5592  * @return
5593  *   0 on success, a negative errno value otherwise.
5594  */
5595 int
5596 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
5597 		   bool clear, uint64_t *pkts, uint64_t *bytes)
5598 {
5599 	const struct mlx5_flow_driver_ops *fops;
5600 	struct rte_flow_attr attr = { .transfer = 0 };
5601 
5602 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5603 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5604 		return fops->counter_query(dev, cnt, clear, pkts, bytes);
5605 	}
5606 	DRV_LOG(ERR,
5607 		"port %u counter query is not supported.",
5608 		 dev->data->port_id);
5609 	return -ENOTSUP;
5610 }
5611 
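/*
 * Sketch (hypothetical) of the counter life cycle exposed by the three
 * wrappers above; the calls only succeed when the DV flow engine is in
 * use.
 */
static __rte_unused void
counter_cycle_sketch(struct rte_eth_dev *dev)
{
	uint64_t pkts = 0;
	uint64_t bytes = 0;
	uint32_t cnt = mlx5_counter_alloc(dev);

	if (!cnt)
		return; /* 0 means allocation failed or is unsupported. */
	/* clear=false reads the statistics without resetting them. */
	(void)mlx5_counter_query(dev, cnt, false, &pkts, &bytes);
	mlx5_counter_free(dev, cnt);
}
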
5612 #define MLX5_POOL_QUERY_FREQ_US 1000000
5613 
5614 /**
5615  * Set the periodic procedure for triggering asynchronous batch queries for all
5616  * the counter pools.
5617  *
5618  * @param[in] sh
5619  *   Pointer to mlx5_ibv_shared object.
5620  */
5621 void
5622 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
5623 {
5624 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
5625 	uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
5626 	uint32_t us;
5627 
5628 	cont = MLX5_CNT_CONTAINER(sh, 1, 0);
5629 	pools_n += rte_atomic16_read(&cont->n_valid);
5630 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
5631 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
5632 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
5633 		sh->cmng.query_thread_on = 0;
5634 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
5635 	} else {
5636 		sh->cmng.query_thread_on = 1;
5637 	}
5638 }
5639 
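/*
 * Worked example of the pacing above: with MLX5_POOL_QUERY_FREQ_US at
 * 1000000 and, say, 4 valid pools across both containers, the alarm fires
 * every 1000000 / 4 = 250000 us, so each pool is queried roughly once per
 * second no matter how many pools exist.
 */
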
5640 /**
5641  * The periodic procedure for triggering asynchronous batch queries for all the
5642  * counter pools. This function is invoked by the host thread.
5643  *
5644  * @param[in] arg
5645  *   The parameter for the alarm process.
5646  */
5647 void
5648 mlx5_flow_query_alarm(void *arg)
5649 {
5650 	struct mlx5_ibv_shared *sh = arg;
5651 	struct mlx5_devx_obj *dcs;
5652 	uint16_t offset;
5653 	int ret;
5654 	uint8_t batch = sh->cmng.batch;
5655 	uint16_t pool_index = sh->cmng.pool_index;
5656 	struct mlx5_pools_container *cont;
5657 	struct mlx5_pools_container *mcont;
5658 	struct mlx5_flow_counter_pool *pool;
5659 
5660 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
5661 		goto set_alarm;
5662 next_container:
5663 	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
5664 	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
5665 	/* Check if a resize was done and the container needs flipping. */
5666 	if (cont != mcont) {
5667 		if (cont->pools) {
5668 			/* Clean the old container. */
5669 			rte_free(cont->pools);
5670 			memset(cont, 0, sizeof(*cont));
5671 		}
5672 		rte_cio_wmb();
5673 		/* Flip the host container. */
5674 		sh->cmng.mhi[batch] ^= (uint8_t)2;
5675 		cont = mcont;
5676 	}
5677 	if (!cont->pools) {
5678 		/* 2 empty containers case is unexpected. */
5679 		if (unlikely(batch != sh->cmng.batch))
5680 			goto set_alarm;
5681 		batch ^= 0x1;
5682 		pool_index = 0;
5683 		goto next_container;
5684 	}
5685 	pool = cont->pools[pool_index];
5686 	if (pool->raw_hw)
5687 		/* There is a pool query in progress. */
5688 		goto set_alarm;
5689 	pool->raw_hw =
5690 		LIST_FIRST(&sh->cmng.free_stat_raws);
5691 	if (!pool->raw_hw)
5692 		/* No free counter statistics raw memory. */
5693 		goto set_alarm;
5694 	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
5695 							      (&pool->a64_dcs);
5696 	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
5697 	/*
5698 	 * Identify the counters released between query trigger and query
5699 	 * handling more efficiently. A counter released in this gap period
5700 	 * should wait for a new query round, since the newly arrived packets
5701 	 * are not yet taken into account.
5702 	 */
5703 	rte_atomic64_add(&pool->start_query_gen, 1);
5704 	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
5705 					       offset, NULL, NULL,
5706 					       pool->raw_hw->mem_mng->dm->id,
5707 					       (void *)(uintptr_t)
5708 					       (pool->raw_hw->data + offset),
5709 					       sh->devx_comp,
5710 					       (uint64_t)(uintptr_t)pool);
5711 	if (ret) {
5712 		rte_atomic64_sub(&pool->start_query_gen, 1);
5713 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
5714 			" %d", pool->min_dcs->id);
5715 		pool->raw_hw = NULL;
5716 		goto set_alarm;
5717 	}
5718 	pool->raw_hw->min_dcs_id = dcs->id;
5719 	LIST_REMOVE(pool->raw_hw, next);
5720 	sh->cmng.pending_queries++;
5721 	pool_index++;
5722 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
5723 		batch ^= 0x1;
5724 		pool_index = 0;
5725 	}
5726 set_alarm:
5727 	sh->cmng.batch = batch;
5728 	sh->cmng.pool_index = pool_index;
5729 	mlx5_set_query_alarm(sh);
5730 }
5731 
5732 /**
5733  * Handler for the HW response with the ready values of an asynchronous
5734  * batch query. This function is invoked by the host thread.
5735  *
5736  * @param[in] sh
5737  *   The pointer to the shared IB device context.
5738  * @param[in] async_id
5739  *   The Devx async ID.
5740  * @param[in] status
5741  *   The status of the completion.
5742  */
5743 void
5744 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
5745 				  uint64_t async_id, int status)
5746 {
5747 	struct mlx5_flow_counter_pool *pool =
5748 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
5749 	struct mlx5_counter_stats_raw *raw_to_free;
5750 
5751 	if (unlikely(status)) {
5752 		rte_atomic64_sub(&pool->start_query_gen, 1);
5753 		raw_to_free = pool->raw_hw;
5754 	} else {
5755 		raw_to_free = pool->raw;
5756 		rte_spinlock_lock(&pool->sl);
5757 		pool->raw = pool->raw_hw;
5758 		rte_spinlock_unlock(&pool->sl);
5759 		MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
5760 			    rte_atomic64_read(&pool->start_query_gen));
5761 		rte_atomic64_set(&pool->end_query_gen,
5762 				 rte_atomic64_read(&pool->start_query_gen));
5763 		/* Be sure the new raw counters data is updated in memory. */
5764 		rte_cio_wmb();
5765 	}
5766 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
5767 	pool->raw_hw = NULL;
5768 	sh->cmng.pending_queries--;
5769 }
5770 
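/*
 * Note on the buffer exchange above (a summary of the code, not new
 * behavior): each pool keeps two raw statistic buffers. pool->raw is the
 * buffer readers consult and pool->raw_hw is the one the HW batch query
 * fills. On successful completion the two are swapped under pool->sl and
 * the old read buffer returns to sh->cmng.free_stat_raws; on error the
 * in-flight buffer is returned instead and the query generation counter
 * is rolled back.
 */
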
5771 /**
5772  * Translate the rte_flow group index to HW table value.
5773  *
5774  * @param[in] attributes
5775  *   Pointer to flow attributes.
5776  * @param[in] external
5777  *   Whether the flow rule is created by a request external to the PMD.
5778  * @param[in] group
5779  *   rte_flow group index value.
5780  * @param[in] fdb_def_rule
5781  *   Whether the FDB jump to table 1 is configured.
5782  * @param[out] table
5783  *   HW table value.
5784  * @param[out] error
5785  *   Pointer to error structure.
5786  *
5787  * @return
5788  *   0 on success, a negative errno value otherwise and rte_errno is set.
5789  */
5790 int
5791 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
5792 			 uint32_t group, bool fdb_def_rule, uint32_t *table,
5793 			 struct rte_flow_error *error)
5794 {
5795 	if (attributes->transfer && external && fdb_def_rule) {
5796 		if (group == UINT32_MAX)
5797 			return rte_flow_error_set
5798 						(error, EINVAL,
5799 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5800 						 NULL,
5801 						 "group index not supported");
5802 		*table = group + 1;
5803 	} else {
5804 		*table = group;
5805 	}
5806 	return 0;
5807 }
5808 
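/*
 * Worked example of the translation above: with transfer set, an external
 * rule and the FDB default rule enabled, group 0 maps to table 1 and
 * group 5 to table 6, while group UINT32_MAX is rejected because it
 * cannot be shifted; in every other case the group index is used as the
 * table id unchanged.
 */
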
5809 /**
5810  * Discover availability of metadata reg_c's.
5811  *
5812  * Iteratively use test flows to check availability.
5813  *
5814  * @param[in] dev
5815  *   Pointer to the Ethernet device structure.
5816  *
5817  * @return
5818  *   0 on success, a negative errno value otherwise and rte_errno is set.
5819  */
5820 int
5821 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
5822 {
5823 	struct mlx5_priv *priv = dev->data->dev_private;
5824 	struct mlx5_dev_config *config = &priv->config;
5825 	enum modify_reg idx;
5826 	int n = 0;
5827 
5828 	/* reg_c[0] and reg_c[1] are reserved. */
5829 	config->flow_mreg_c[n++] = REG_C_0;
5830 	config->flow_mreg_c[n++] = REG_C_1;
5831 	/* Discover availability of other reg_c's. */
5832 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
5833 		struct rte_flow_attr attr = {
5834 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5835 			.priority = MLX5_FLOW_PRIO_RSVD,
5836 			.ingress = 1,
5837 		};
5838 		struct rte_flow_item items[] = {
5839 			[0] = {
5840 				.type = RTE_FLOW_ITEM_TYPE_END,
5841 			},
5842 		};
5843 		struct rte_flow_action actions[] = {
5844 			[0] = {
5845 				.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5846 				.conf = &(struct mlx5_flow_action_copy_mreg){
5847 					.src = REG_C_1,
5848 					.dst = idx,
5849 				},
5850 			},
5851 			[1] = {
5852 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
5853 				.conf = &(struct rte_flow_action_jump){
5854 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5855 				},
5856 			},
5857 			[2] = {
5858 				.type = RTE_FLOW_ACTION_TYPE_END,
5859 			},
5860 		};
5861 		struct rte_flow *flow;
5862 		struct rte_flow_error error;
5863 
5864 		if (!config->dv_flow_en)
5865 			break;
5866 		/* Create internal flow, validation skips copy action. */
5867 		flow = flow_list_create(dev, NULL, &attr, items,
5868 					actions, false, &error);
5869 		if (!flow)
5870 			continue;
5871 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
5872 			config->flow_mreg_c[n++] = idx;
5873 		flow_list_destroy(dev, NULL, flow);
5874 	}
5875 	for (; n < MLX5_MREG_C_NUM; ++n)
5876 		config->flow_mreg_c[n] = REG_NONE;
5877 	return 0;
5878 }
5879 
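/*
 * Example outcome (hypothetical hardware): if only reg_c[2] and reg_c[3]
 * pass the test-flow probe above, config->flow_mreg_c ends up as
 * { REG_C_0, REG_C_1, REG_C_2, REG_C_3, REG_NONE, ... }, so later code
 * can pick the first usable register for metadata copies.
 */
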
5880 /**
5881  * Dump flow raw HW data to a file.
5882  *
5883  * @param[in] dev
5884  *   The pointer to Ethernet device.
5885  * @param[in] file
5886  *   A pointer to a file for output.
5887  * @param[out] error
5888  *   Perform verbose error reporting if not NULL. PMDs initialize this
5889  *   structure in case of error only.
5890  * @return
5891  *   0 on success, a negative value otherwise.
5892  */
5893 int
5894 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
5895 		   FILE *file,
5896 		   struct rte_flow_error *error __rte_unused)
5897 {
5898 	struct mlx5_priv *priv = dev->data->dev_private;
5899 	struct mlx5_ibv_shared *sh = priv->sh;
5900 
5901 	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
5902 				       sh->tx_domain, file);
5903 }
5904
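/*
 * Caller-side sketch (hypothetical): the public entry point reaching the
 * callback above is rte_flow_dev_dump() from rte_flow.h.
 */
static __rte_unused int
flow_dump_sketch(uint16_t port_id)
{
	struct rte_flow_error error;

	/* Dumps the FDB/Rx/Tx domains of the port's shared context. */
	return rte_flow_dev_dump(port_id, stdout, &error);
}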