/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};

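/*
 * Illustrative sketch (not part of the driver): how the expansion graph
 * above is meant to be consumed.  Given the pattern "eth / ipv4 / end" and
 * RSS types covering UDP/TCP, rte_flow_expand_rss() walks the graph from
 * MLX5_EXPANSION_ROOT and appends the missing tail items, yielding extra
 * entries such as "eth / ipv4 / udp" and "eth / ipv4 / tcp" next to the
 * original pattern.  The helper name flow_example_expand() and the buffer
 * sizing are hypothetical; the call shape assumes the rte_flow_expand_rss()
 * prototype used elsewhere by this driver.
 */
static __rte_unused void
flow_example_expand(void)
{
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	union {
		struct rte_flow_expand_rss buf;
		uint8_t buffer[2048];
	} expand_buffer;
	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
	uint64_t types = ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV4_TCP;
	uint32_t i;
	int ret;

	ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
				  pattern, types, mlx5_support_expansion,
				  MLX5_EXPANSION_ROOT);
	if (ret < 0)
		return;
	for (i = 0; i < buf->entries; ++i) {
		/* Each entry carries an expanded pattern plus a priority
		 * offset, later fed to mlx5_flow_adjust_priority(). */
		(void)buf->entry[i].pattern;
		(void)buf->entry[i].priority;
	}
}
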
static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

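/*
 * Illustrative sketch (hypothetical helper, not used by the driver): how
 * tunnels_info[] translates the layer bits of a device flow into a single
 * RTE_PTYPE_TUNNEL_* value.  The table is ordered so that the more specific
 * MPLS-over-UDP entry matches before plain MPLS; the same masked comparison
 * is used by the Rx-queue flag helpers further below.
 */
static __rte_unused uint32_t
flow_example_tunnel_ptype(uint64_t layers)
{
	unsigned int i;

	for (i = 0; i != RTE_DIM(tunnels_info); ++i)
		if ((tunnels_info[i].tunnel & layers) ==
		    tunnels_info[i].tunnel)
			return tunnels_info[i].ptype;
	return 0; /* No tunnel layer present. */
}
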
/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is only used in the meter
		 * suffix sub-flow when a meter is present, so it is safe to
		 * share the same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages a register for both color
		 * match and flow match. If the meter color match does not use
		 * REG_C_2, the REG_C_x used by the meter color match must be
		 * skipped. If the meter is disabled, all available registers
		 * are free to use.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than
		 * REG_C_2. Take care not to conflict with the meter color
		 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
		 * the color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NONE)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}

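/*
 * Illustrative sketch (hypothetical, not part of the driver): requesting
 * the metadata register backing TAG index 0 for an application tag match.
 * Real callers are the DV translation routines.
 */
static __rte_unused int
flow_example_tag_reg(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);

	if (reg < 0)
		return reg; /* rte_errno and *error are already set. */
	/* reg now holds one of REG_C_2..REG_C_7, skipping the register
	 * reserved for meter color when a meter is enabled. */
	return reg;
}
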
/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having an available reg_c can be regarded as supporting extensive
	 * flow metadata registers, which means:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

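/*
 * Illustrative sketch of the expected call site (hypothetical; the real one
 * presumably lives in the device spawn path): the probed value is cached in
 * priv->config.flow_prio and later indexes the priority maps above.
 */
static __rte_unused int
flow_example_cache_prio(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int prio = mlx5_flow_discover_priorities(dev);

	if (prio < 0)
		return prio; /* rte_errno is set by the probe. */
	priv->config.flow_prio = prio; /* 3 or 5 sub-priority groups. */
	return 0;
}
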
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

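/*
 * Worked example (hypothetical helper): with 16 verbs priorities
 * (flow_prio == 5), a rule at base priority 1 whose matcher reached item
 * sub-priority 2 lands on verbs priority priority_map_5[1][2] == 5.
 */
static __rte_unused void
flow_example_adjust_prio(struct rte_eth_dev *dev)
{
	uint32_t res = mlx5_flow_adjust_priority(dev, 1, 2);

	(void)res;
}
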
/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Size of the bit-masks in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}

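/*
 * Illustrative sketch (hypothetical): checking a user's Ethernet mask
 * against a NIC capability mask, exactly as the item validators below do.
 * A mask bit set outside nic_mask yields -ENOTSUP.
 */
static __rte_unused int
flow_example_check_eth(const struct rte_flow_item *item,
		       struct rte_flow_error *error)
{
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	const struct rte_flow_item_eth *mask =
		item->mask ? item->mask : &rte_flow_item_eth_mask;

	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					 (const uint8_t *)&nic_mask,
					 sizeof(nic_mask), error);
}
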
/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}

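/*
 * Illustrative sketch (hypothetical values): for an inner-IPv4 match in a
 * tunnel flow, the verbs IPv4 hash bits survive only if the RSS request
 * asked for IPv4 hashing and, with HAVE_IBV_DEVICE_TUNNEL_SUPPORT, level
 * >= 2; otherwise 0 is returned and the field is not hashed.
 */
static __rte_unused uint64_t
flow_example_hashfields(struct mlx5_flow *dev_flow)
{
	return mlx5_flow_hashfields_adjust(dev_flow, 1, /* Tunnel item. */
					   ETH_RSS_IPV4 |
					   ETH_RSS_NONFRAG_IPV4_OTHER,
					   IBV_RX_HASH_SRC_IPV4 |
					   IBV_RX_HASH_DST_IPV4);
}
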
/**
 * Look up and set the ptype in the Rx data part. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype
 * will be cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the
 * device flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback, this must
		 * always be enabled (metadata may arrive from another port,
		 * not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_trim(dev, dev_flow);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

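/*
 * Illustrative sketch (hypothetical): the mark/tunnel flags above are
 * reference counted per Rx queue - set on flow creation, trimmed on flow
 * destruction (only while the device is started, per the assertion above),
 * and cleared wholesale when all flows are flushed.
 */
static __rte_unused void
flow_example_rxq_flags(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_rxq_flags_set(dev, flow);	/* On flow creation. */
	flow_rxq_flags_trim(dev, flow);	/* On flow destruction. */
	flow_rxq_flags_clear(dev);	/* On flow flush/stop. */
}
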
/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

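/*
 * Illustrative sketch (hypothetical): picking the QUEUE action out of an
 * action list, e.g. to inspect its target Rx queue before translation.
 */
static __rte_unused int
flow_example_find_queue(const struct rte_flow_action *actions)
{
	const struct rte_flow_action *act =
		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_QUEUE);

	if (!act)
		return -1;
	return ((const struct rte_flow_action_queue *)act->conf)->index;
}
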
/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}

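/*
 * Illustrative sketch (hypothetical): a minimal RSS action configuration
 * that satisfies the checks above - Toeplitz hashing, outer level, a
 * default key (key == NULL with key_len == 0) and configured queues.
 */
static __rte_unused void
flow_example_rss_conf(void)
{
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
		.level = 1,		/* Outer headers only. */
		.types = ETH_RSS_IP,
		.key = NULL,		/* Use the default key... */
		.key_len = 0,		/* ...so the length must be 0. */
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};

	(void)rss;
}
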
/*
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups are not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

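/*
 * Illustrative sketch (hypothetical): the only attribute combination
 * accepted by the checks above - ingress, group 0, no transfer, and a
 * priority below flow_prio (or the MLX5_FLOW_PRIO_RSVD placeholder).
 */
static __rte_unused const struct rte_flow_attr flow_example_attr = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
	.egress = 0,
	.transfer = 0,
};
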
/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow "
					  "L3 layers");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow VLAN");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we have a virtual machine
			 * and SR-IOV enabled. We have to create a VLAN
			 * interface to make the hypervisor set up the
			 * E-Switch vport context correctly. We avoid creating
			 * multiple VLAN interfaces, so we cannot support a
			 * VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From verbs perspective an empty VLAN is equivalent
	 * to a packet without VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV4)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 cannot follow an L2/VLAN layer "
					  "whose ether type is not IPv4");
	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv6 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 cannot follow an L2/VLAN layer "
					  "whose ether type is not IPv6");
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
		if (mask && spec)
			next_proto = mask->hdr.proto & spec->hdr.proto;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

1703 /**
1704  * Validate UDP item.
1705  *
1706  * @param[in] item
1707  *   Item specification.
1708  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1710  * @param[in] target_protocol
1711  *   The next protocol in the previous item.
1714  * @param[out] error
1715  *   Pointer to error structure.
1716  *
1717  * @return
1718  *   0 on success, a negative errno value otherwise and rte_errno is set.
1719  */
1720 int
1721 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1722 			    uint64_t item_flags,
1723 			    uint8_t target_protocol,
1724 			    struct rte_flow_error *error)
1725 {
1726 	const struct rte_flow_item_udp *mask = item->mask;
1727 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1728 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1729 				      MLX5_FLOW_LAYER_OUTER_L3;
1730 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1731 				      MLX5_FLOW_LAYER_OUTER_L4;
1732 	int ret;
1733 
1734 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
1735 		return rte_flow_error_set(error, EINVAL,
1736 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1737 					  "protocol filtering not compatible"
1738 					  " with UDP layer");
1739 	if (!(item_flags & l3m))
1740 		return rte_flow_error_set(error, EINVAL,
1741 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1742 					  "L3 is mandatory to filter on L4");
1743 	if (item_flags & l4m)
1744 		return rte_flow_error_set(error, EINVAL,
1745 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1746 					  "multiple L4 layers not supported");
1747 	if (!mask)
1748 		mask = &rte_flow_item_udp_mask;
1749 	ret = mlx5_flow_item_acceptable
1750 		(item, (const uint8_t *)mask,
1751 		 (const uint8_t *)&rte_flow_item_udp_mask,
1752 		 sizeof(struct rte_flow_item_udp), error);
1753 	if (ret < 0)
1754 		return ret;
1755 	return 0;
1756 }
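
/*
 * Usage sketch (illustrative only): UDP is accepted only when an L3 layer
 * has already been seen and no other L4 layer is present, so callers pass
 * the layer bits accumulated so far while walking the pattern.
 *
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.mask = &rte_flow_item_udp_mask,
 *	};
 *	uint64_t flags = MLX5_FLOW_LAYER_OUTER_L2 |
 *			 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *	struct rte_flow_error err;
 *	// 0xff means the previous item did not constrain the next protocol.
 *	int rc = mlx5_flow_validate_item_udp(&item, flags, 0xff, &err);
 */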
1757 
1758 /**
1759  * Validate TCP item.
1760  *
1761  * @param[in] item
1762  *   Item specification.
1763  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1765  * @param[in] target_protocol
1766  *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
1768  *   Pointer to error structure.
1769  *
1770  * @return
1771  *   0 on success, a negative errno value otherwise and rte_errno is set.
1772  */
1773 int
1774 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
1775 			    uint64_t item_flags,
1776 			    uint8_t target_protocol,
1777 			    const struct rte_flow_item_tcp *flow_mask,
1778 			    struct rte_flow_error *error)
1779 {
1780 	const struct rte_flow_item_tcp *mask = item->mask;
1781 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1782 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1783 				      MLX5_FLOW_LAYER_OUTER_L3;
1784 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1785 				      MLX5_FLOW_LAYER_OUTER_L4;
1786 	int ret;
1787 
1788 	MLX5_ASSERT(flow_mask);
1789 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
1790 		return rte_flow_error_set(error, EINVAL,
1791 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1792 					  "protocol filtering not compatible"
1793 					  " with TCP layer");
1794 	if (!(item_flags & l3m))
1795 		return rte_flow_error_set(error, EINVAL,
1796 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1797 					  "L3 is mandatory to filter on L4");
1798 	if (item_flags & l4m)
1799 		return rte_flow_error_set(error, EINVAL,
1800 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1801 					  "multiple L4 layers not supported");
1802 	if (!mask)
1803 		mask = &rte_flow_item_tcp_mask;
1804 	ret = mlx5_flow_item_acceptable
1805 		(item, (const uint8_t *)mask,
1806 		 (const uint8_t *)flow_mask,
1807 		 sizeof(struct rte_flow_item_tcp), error);
1808 	if (ret < 0)
1809 		return ret;
1810 	return 0;
1811 }
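
/*
 * Usage sketch (illustrative only): driver-specific callers pass the header
 * fields they can actually match as @p flow_mask; the generic
 * rte_flow_item_tcp_mask stands in for it here.
 *
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TCP,
 *		.mask = &rte_flow_item_tcp_mask,
 *	};
 *	struct rte_flow_error err;
 *	int rc = mlx5_flow_validate_item_tcp(&item,
 *					     MLX5_FLOW_LAYER_OUTER_L3_IPV6,
 *					     IPPROTO_TCP,
 *					     &rte_flow_item_tcp_mask,
 *					     &err);
 */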
1812 
1813 /**
1814  * Validate VXLAN item.
1815  *
1816  * @param[in] item
1817  *   Item specification.
1818  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1822  * @param[out] error
1823  *   Pointer to error structure.
1824  *
1825  * @return
1826  *   0 on success, a negative errno value otherwise and rte_errno is set.
1827  */
1828 int
1829 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1830 			      uint64_t item_flags,
1831 			      struct rte_flow_error *error)
1832 {
1833 	const struct rte_flow_item_vxlan *spec = item->spec;
1834 	const struct rte_flow_item_vxlan *mask = item->mask;
1835 	int ret;
1836 	union vni {
1837 		uint32_t vlan_id;
1838 		uint8_t vni[4];
1839 	} id = { .vlan_id = 0, };
1840 	uint32_t vlan_id = 0;
1843 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1844 		return rte_flow_error_set(error, ENOTSUP,
1845 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1846 					  "multiple tunnel layers not"
1847 					  " supported");
1848 	/*
1849 	 * Verify only UDPv4 is present as defined in
1850 	 * https://tools.ietf.org/html/rfc7348
1851 	 */
1852 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1853 		return rte_flow_error_set(error, EINVAL,
1854 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1855 					  "no outer UDP layer found");
1856 	if (!mask)
1857 		mask = &rte_flow_item_vxlan_mask;
1858 	ret = mlx5_flow_item_acceptable
1859 		(item, (const uint8_t *)mask,
1860 		 (const uint8_t *)&rte_flow_item_vxlan_mask,
1861 		 sizeof(struct rte_flow_item_vxlan),
1862 		 error);
1863 	if (ret < 0)
1864 		return ret;
1865 	if (spec) {
1866 		memcpy(&id.vni[1], spec->vni, 3);
1867 		vlan_id = id.vlan_id;
1868 		memcpy(&id.vni[1], mask->vni, 3);
1869 		vlan_id &= id.vlan_id;
1870 	}
	/*
	 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only
	 * this layer is defined in the Verbs specification it is
	 * interpreted as a wildcard and all packets will match this
	 * rule; if it follows a full stack layer (ex: eth / ipv4 /
	 * udp), all packets matching the preceding layers will also
	 * match this rule. To avoid such a situation, VNI 0 is
	 * currently refused.
	 */
1880 	if (!vlan_id)
1881 		return rte_flow_error_set(error, ENOTSUP,
1882 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1883 					  "VXLAN vni cannot be 0");
1884 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1885 		return rte_flow_error_set(error, ENOTSUP,
1886 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1887 					  "VXLAN tunnel must be fully defined");
1888 	return 0;
1889 }
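
/*
 * Usage sketch (illustrative only): a VXLAN item passes validation when it
 * carries a non-zero VNI and follows a fully specified outer stack.
 *
 *	const struct rte_flow_item_vxlan vx_spec = { .vni = "\x00\x00\x2a" };
 *	const struct rte_flow_item vx = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &vx_spec,
 *		.mask = &rte_flow_item_vxlan_mask,
 *	};
 *	uint64_t flags = MLX5_FLOW_LAYER_OUTER_L2 |
 *			 MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *			 MLX5_FLOW_LAYER_OUTER_L4_UDP;
 *	struct rte_flow_error err;
 *	int rc = mlx5_flow_validate_item_vxlan(&vx, flags, &err);
 */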
1890 
1891 /**
1892  * Validate VXLAN_GPE item.
1893  *
1894  * @param[in] item
1895  *   Item specification.
1896  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
1902  * @param[out] error
1903  *   Pointer to error structure.
1904  *
1905  * @return
1906  *   0 on success, a negative errno value otherwise and rte_errno is set.
1907  */
1908 int
1909 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1910 				  uint64_t item_flags,
1911 				  struct rte_eth_dev *dev,
1912 				  struct rte_flow_error *error)
1913 {
1914 	struct mlx5_priv *priv = dev->data->dev_private;
1915 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1916 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1917 	int ret;
1918 	union vni {
1919 		uint32_t vlan_id;
1920 		uint8_t vni[4];
1921 	} id = { .vlan_id = 0, };
1922 	uint32_t vlan_id = 0;
1923 
1924 	if (!priv->config.l3_vxlan_en)
1925 		return rte_flow_error_set(error, ENOTSUP,
1926 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1927 					  "L3 VXLAN is not enabled by device"
1928 					  " parameter and/or not configured in"
1929 					  " firmware");
1930 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1931 		return rte_flow_error_set(error, ENOTSUP,
1932 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1933 					  "multiple tunnel layers not"
1934 					  " supported");
	/*
	 * Verify only UDPv4 is present, as required for VXLAN-GPE
	 * (see draft-ietf-nvo3-vxlan-gpe).
	 */
1939 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1940 		return rte_flow_error_set(error, EINVAL,
1941 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1942 					  "no outer UDP layer found");
1943 	if (!mask)
1944 		mask = &rte_flow_item_vxlan_gpe_mask;
1945 	ret = mlx5_flow_item_acceptable
1946 		(item, (const uint8_t *)mask,
1947 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1948 		 sizeof(struct rte_flow_item_vxlan_gpe),
1949 		 error);
1950 	if (ret < 0)
1951 		return ret;
1952 	if (spec) {
1953 		if (spec->protocol)
1954 			return rte_flow_error_set(error, ENOTSUP,
1955 						  RTE_FLOW_ERROR_TYPE_ITEM,
1956 						  item,
						  "VXLAN-GPE protocol"
1958 						  " not supported");
1959 		memcpy(&id.vni[1], spec->vni, 3);
1960 		vlan_id = id.vlan_id;
1961 		memcpy(&id.vni[1], mask->vni, 3);
1962 		vlan_id &= id.vlan_id;
1963 	}
	/*
	 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
	 * layer is defined in the Verbs specification it is interpreted as a
	 * wildcard and all packets will match this rule; if it follows a full
	 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
	 * before will also match this rule. To avoid such a situation, VNI 0
	 * is currently refused.
	 */
1972 	if (!vlan_id)
1973 		return rte_flow_error_set(error, ENOTSUP,
1974 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1975 					  "VXLAN-GPE vni cannot be 0");
1976 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1977 		return rte_flow_error_set(error, ENOTSUP,
1978 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1979 					  "VXLAN-GPE tunnel must be fully"
1980 					  " defined");
1981 	return 0;
1982 }

/**
1984  * Validate GRE Key item.
1985  *
1986  * @param[in] item
1987  *   Item specification.
1988  * @param[in] item_flags
1989  *   Bit flags to mark detected items.
1990  * @param[in] gre_item
 *   Pointer to the GRE item.
1992  * @param[out] error
1993  *   Pointer to error structure.
1994  *
1995  * @return
1996  *   0 on success, a negative errno value otherwise and rte_errno is set.
1997  */
1998 int
1999 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2000 				uint64_t item_flags,
2001 				const struct rte_flow_item *gre_item,
2002 				struct rte_flow_error *error)
2003 {
2004 	const rte_be32_t *mask = item->mask;
2005 	int ret = 0;
2006 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2007 	const struct rte_flow_item_gre *gre_spec;
2008 	const struct rte_flow_item_gre *gre_mask;
2009 
2010 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2011 		return rte_flow_error_set(error, ENOTSUP,
2012 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Multiple GRE keys not supported");
2014 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2015 		return rte_flow_error_set(error, ENOTSUP,
2016 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2017 					  "No preceding GRE header");
2018 	if (item_flags & MLX5_FLOW_LAYER_INNER)
2019 		return rte_flow_error_set(error, ENOTSUP,
2020 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2021 					  "GRE key following a wrong item");
2022 	gre_mask = gre_item->mask;
2023 	if (!gre_mask)
2024 		gre_mask = &rte_flow_item_gre_mask;
2025 	gre_spec = gre_item->spec;
2026 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2027 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2028 		return rte_flow_error_set(error, EINVAL,
2029 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2030 					  "Key bit must be on");
2031 
2032 	if (!mask)
2033 		mask = &gre_key_default_mask;
2034 	ret = mlx5_flow_item_acceptable
2035 		(item, (const uint8_t *)mask,
2036 		 (const uint8_t *)&gre_key_default_mask,
2037 		 sizeof(rte_be32_t), error);
2038 	return ret;
2039 }
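
/*
 * Usage sketch (illustrative only): a GRE key item is only accepted after a
 * GRE item whose C/K/S word has the key-present bit (RTE_BE16(0x2000)) set
 * in both spec and mask.
 *
 *	const struct rte_flow_item_gre gre_spec = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),	// key present
 *	};
 *	const struct rte_flow_item_gre gre_mask = {
 *		.c_rsvd0_ver = RTE_BE16(0x2000),
 *	};
 *	const struct rte_flow_item gre = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE,
 *		.spec = &gre_spec,
 *		.mask = &gre_mask,
 *	};
 *	rte_be32_t key = RTE_BE32(100);
 *	const struct rte_flow_item gre_key = {
 *		.type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
 *		.spec = &key,
 *	};
 *	struct rte_flow_error err;
 *	int rc = mlx5_flow_validate_item_gre_key(&gre_key,
 *						 MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *						 MLX5_FLOW_LAYER_GRE,
 *						 &gre, &err);
 */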
2040 
2041 /**
2042  * Validate GRE item.
2043  *
2044  * @param[in] item
2045  *   Item specification.
2046  * @param[in] item_flags
2047  *   Bit flags to mark detected items.
2048  * @param[in] target_protocol
2049  *   The next protocol in the previous item.
2050  * @param[out] error
2051  *   Pointer to error structure.
2052  *
2053  * @return
2054  *   0 on success, a negative errno value otherwise and rte_errno is set.
2055  */
2056 int
2057 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2058 			    uint64_t item_flags,
2059 			    uint8_t target_protocol,
2060 			    struct rte_flow_error *error)
2061 {
2062 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2063 	const struct rte_flow_item_gre *mask = item->mask;
2064 	int ret;
2065 	const struct rte_flow_item_gre nic_mask = {
2066 		.c_rsvd0_ver = RTE_BE16(0xB000),
2067 		.protocol = RTE_BE16(UINT16_MAX),
2068 	};
2069 
2070 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2071 		return rte_flow_error_set(error, EINVAL,
2072 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2073 					  "protocol filtering not compatible"
2074 					  " with this GRE layer");
2075 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2076 		return rte_flow_error_set(error, ENOTSUP,
2077 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2078 					  "multiple tunnel layers not"
2079 					  " supported");
2080 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2081 		return rte_flow_error_set(error, ENOTSUP,
2082 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2083 					  "L3 Layer is missing");
2084 	if (!mask)
2085 		mask = &rte_flow_item_gre_mask;
2086 	ret = mlx5_flow_item_acceptable
2087 		(item, (const uint8_t *)mask,
2088 		 (const uint8_t *)&nic_mask,
2089 		 sizeof(struct rte_flow_item_gre), error);
2090 	if (ret < 0)
2091 		return ret;
2092 #ifndef HAVE_MLX5DV_DR
2093 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2094 	if (spec && (spec->protocol & mask->protocol))
2095 		return rte_flow_error_set(error, ENOTSUP,
2096 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2097 					  "without MPLS support the"
2098 					  " specification cannot be used for"
2099 					  " filtering");
2100 #endif
2101 #endif
2102 	return 0;
2103 }
2104 
2105 /**
2106  * Validate Geneve item.
2107  *
2108  * @param[in] item
2109  *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
2114  * @param[out] error
2115  *   Pointer to error structure.
2116  *
2117  * @return
2118  *   0 on success, a negative errno value otherwise and rte_errno is set.
2119  */
2121 int
2122 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2123 			       uint64_t item_flags,
2124 			       struct rte_eth_dev *dev,
2125 			       struct rte_flow_error *error)
2126 {
2127 	struct mlx5_priv *priv = dev->data->dev_private;
2128 	const struct rte_flow_item_geneve *spec = item->spec;
2129 	const struct rte_flow_item_geneve *mask = item->mask;
2130 	int ret;
2131 	uint16_t gbhdr;
2132 	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2133 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2134 	const struct rte_flow_item_geneve nic_mask = {
2135 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2136 		.vni = "\xff\xff\xff",
2137 		.protocol = RTE_BE16(UINT16_MAX),
2138 	};
2139 
2140 	if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2141 		return rte_flow_error_set(error, ENOTSUP,
2142 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2143 					  "L3 Geneve is not enabled by device"
2144 					  " parameter and/or not configured in"
2145 					  " firmware");
2146 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2147 		return rte_flow_error_set(error, ENOTSUP,
2148 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2149 					  "multiple tunnel layers not"
2150 					  " supported");
	/*
	 * Verify only UDPv4 is present, as required by the Geneve
	 * specification (draft-ietf-nvo3-geneve).
	 */
2155 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2156 		return rte_flow_error_set(error, EINVAL,
2157 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2158 					  "no outer UDP layer found");
2159 	if (!mask)
2160 		mask = &rte_flow_item_geneve_mask;
2161 	ret = mlx5_flow_item_acceptable
2162 				  (item, (const uint8_t *)mask,
2163 				   (const uint8_t *)&nic_mask,
2164 				   sizeof(struct rte_flow_item_geneve), error);
2165 	if (ret)
2166 		return ret;
2167 	if (spec) {
2168 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2169 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2170 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2171 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2172 			return rte_flow_error_set(error, ENOTSUP,
2173 						  RTE_FLOW_ERROR_TYPE_ITEM,
2174 						  item,
2175 						  "Geneve protocol unsupported"
2176 						  " fields are being used");
2177 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2178 			return rte_flow_error_set
2179 					(error, ENOTSUP,
2180 					 RTE_FLOW_ERROR_TYPE_ITEM,
2181 					 item,
2182 					 "Unsupported Geneve options length");
2183 	}
2184 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2185 		return rte_flow_error_set
2186 				    (error, ENOTSUP,
2187 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
2188 				     "Geneve tunnel must be fully defined");
2189 	return 0;
2190 }
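
/*
 * Layout sketch (illustrative only): per the Geneve draft,
 * ver_opt_len_o_c_rsvd0 packs, from the most significant bit, the 2-bit
 * version, the 6-bit option length (in 4-byte words) and the O/C flags,
 * which is what the checks above decode:
 *
 *	uint16_t gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
 *	// Version must be 0 and the option length within the FW limit.
 *	if (MLX5_GENEVE_VER_VAL(gbhdr) ||
 *	    MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
 *		return -ENOTSUP;
 */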
2191 
2192 /**
2193  * Validate MPLS item.
2194  *
2195  * @param[in] dev
2196  *   Pointer to the rte_eth_dev structure.
2197  * @param[in] item
2198  *   Item specification.
2199  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
2201  * @param[in] prev_layer
 *   The protocol layer indicated in the previous item.
2203  * @param[out] error
2204  *   Pointer to error structure.
2205  *
2206  * @return
2207  *   0 on success, a negative errno value otherwise and rte_errno is set.
2208  */
2209 int
2210 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2211 			     const struct rte_flow_item *item __rte_unused,
2212 			     uint64_t item_flags __rte_unused,
2213 			     uint64_t prev_layer __rte_unused,
2214 			     struct rte_flow_error *error)
2215 {
2216 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2217 	const struct rte_flow_item_mpls *mask = item->mask;
2218 	struct mlx5_priv *priv = dev->data->dev_private;
2219 	int ret;
2220 
2221 	if (!priv->config.mpls_en)
2222 		return rte_flow_error_set(error, ENOTSUP,
2223 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2224 					  "MPLS not supported or"
2225 					  " disabled in firmware"
2226 					  " configuration.");
	/* MPLS over IP, UDP or GRE is allowed. */
2228 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2229 			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
2230 			    MLX5_FLOW_LAYER_GRE)))
2231 		return rte_flow_error_set(error, EINVAL,
2232 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2233 					  "protocol filtering not compatible"
2234 					  " with MPLS layer");
2235 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2236 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2237 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
2238 		return rte_flow_error_set(error, ENOTSUP,
2239 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2240 					  "multiple tunnel layers not"
2241 					  " supported");
2242 	if (!mask)
2243 		mask = &rte_flow_item_mpls_mask;
2244 	ret = mlx5_flow_item_acceptable
2245 		(item, (const uint8_t *)mask,
2246 		 (const uint8_t *)&rte_flow_item_mpls_mask,
2247 		 sizeof(struct rte_flow_item_mpls), error);
2248 	if (ret < 0)
2249 		return ret;
2250 	return 0;
2251 #endif
2252 	return rte_flow_error_set(error, ENOTSUP,
2253 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
2254 				  "MPLS is not supported by Verbs, please"
2255 				  " update.");
2256 }
2257 
2258 /**
2259  * Validate NVGRE item.
2260  *
2261  * @param[in] item
2262  *   Item specification.
2263  * @param[in] item_flags
2264  *   Bit flags to mark detected items.
2265  * @param[in] target_protocol
2266  *   The next protocol in the previous item.
2267  * @param[out] error
2268  *   Pointer to error structure.
2269  *
2270  * @return
2271  *   0 on success, a negative errno value otherwise and rte_errno is set.
2272  */
2273 int
2274 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2275 			      uint64_t item_flags,
2276 			      uint8_t target_protocol,
2277 			      struct rte_flow_error *error)
2278 {
2279 	const struct rte_flow_item_nvgre *mask = item->mask;
2280 	int ret;
2281 
2282 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2283 		return rte_flow_error_set(error, EINVAL,
2284 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2285 					  "protocol filtering not compatible"
2286 					  " with this GRE layer");
2287 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2288 		return rte_flow_error_set(error, ENOTSUP,
2289 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2290 					  "multiple tunnel layers not"
2291 					  " supported");
2292 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2293 		return rte_flow_error_set(error, ENOTSUP,
2294 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2295 					  "L3 Layer is missing");
2296 	if (!mask)
2297 		mask = &rte_flow_item_nvgre_mask;
2298 	ret = mlx5_flow_item_acceptable
2299 		(item, (const uint8_t *)mask,
2300 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
2301 		 sizeof(struct rte_flow_item_nvgre), error);
2302 	if (ret < 0)
2303 		return ret;
2304 	return 0;
2305 }
2306 
2307 /* Allocate unique ID for the split Q/RSS subflows. */
2308 static uint32_t
2309 flow_qrss_get_id(struct rte_eth_dev *dev)
2310 {
2311 	struct mlx5_priv *priv = dev->data->dev_private;
2312 	uint32_t qrss_id, ret;
2313 
2314 	ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
2315 	if (ret)
2316 		return 0;
2317 	MLX5_ASSERT(qrss_id);
2318 	return qrss_id;
2319 }
2320 
2321 /* Free unique ID for the split Q/RSS subflows. */
2322 static void
flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
2324 {
2325 	struct mlx5_priv *priv = dev->data->dev_private;
2326 
2327 	if (qrss_id)
2328 		mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
2329 }
2330 
2331 /**
 * Release resources related to the QUEUE/RSS action split.
2333  *
2334  * @param dev
2335  *   Pointer to Ethernet device.
2336  * @param flow
 *   Flow to release IDs from.
2338  */
2339 static void
2340 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2341 			     struct rte_flow *flow)
2342 {
2343 	struct mlx5_flow *dev_flow;
2344 
2345 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
2346 		if (dev_flow->qrss_id)
2347 			flow_qrss_free_id(dev, dev_flow->qrss_id);
2348 }
2349 
2350 static int
2351 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2352 		   const struct rte_flow_attr *attr __rte_unused,
2353 		   const struct rte_flow_item items[] __rte_unused,
2354 		   const struct rte_flow_action actions[] __rte_unused,
2355 		   bool external __rte_unused,
2356 		   struct rte_flow_error *error)
2357 {
2358 	return rte_flow_error_set(error, ENOTSUP,
2359 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2360 }
2361 
2362 static struct mlx5_flow *
2363 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
2364 		  const struct rte_flow_item items[] __rte_unused,
2365 		  const struct rte_flow_action actions[] __rte_unused,
2366 		  struct rte_flow_error *error)
2367 {
2368 	rte_flow_error_set(error, ENOTSUP,
2369 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2370 	return NULL;
2371 }
2372 
2373 static int
2374 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2375 		    struct mlx5_flow *dev_flow __rte_unused,
2376 		    const struct rte_flow_attr *attr __rte_unused,
2377 		    const struct rte_flow_item items[] __rte_unused,
2378 		    const struct rte_flow_action actions[] __rte_unused,
2379 		    struct rte_flow_error *error)
2380 {
2381 	return rte_flow_error_set(error, ENOTSUP,
2382 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2383 }
2384 
2385 static int
2386 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
2387 		struct rte_flow *flow __rte_unused,
2388 		struct rte_flow_error *error)
2389 {
2390 	return rte_flow_error_set(error, ENOTSUP,
2391 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2392 }
2393 
2394 static void
2395 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
2396 		 struct rte_flow *flow __rte_unused)
2397 {
2398 }
2399 
2400 static void
2401 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
2402 		  struct rte_flow *flow __rte_unused)
2403 {
2404 }
2405 
2406 static int
2407 flow_null_query(struct rte_eth_dev *dev __rte_unused,
2408 		struct rte_flow *flow __rte_unused,
2409 		const struct rte_flow_action *actions __rte_unused,
2410 		void *data __rte_unused,
2411 		struct rte_flow_error *error)
2412 {
2413 	return rte_flow_error_set(error, ENOTSUP,
2414 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2415 }
2416 
2417 /* Void driver to protect from null pointer reference. */
2418 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
2419 	.validate = flow_null_validate,
2420 	.prepare = flow_null_prepare,
2421 	.translate = flow_null_translate,
2422 	.apply = flow_null_apply,
2423 	.remove = flow_null_remove,
2424 	.destroy = flow_null_destroy,
2425 	.query = flow_null_query,
2426 };
2427 
2428 /**
2429  * Select flow driver type according to flow attributes and device
2430  * configuration.
2431  *
2432  * @param[in] dev
2433  *   Pointer to the dev structure.
2434  * @param[in] attr
2435  *   Pointer to the flow attributes.
2436  *
2437  * @return
 *   The selected flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
2439  */
2440 static enum mlx5_flow_drv_type
2441 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
2442 {
2443 	struct mlx5_priv *priv = dev->data->dev_private;
2444 	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
2445 
2446 	if (attr->transfer && priv->config.dv_esw_en)
2447 		type = MLX5_FLOW_TYPE_DV;
2448 	if (!attr->transfer)
2449 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
2450 						 MLX5_FLOW_TYPE_VERBS;
2451 	return type;
2452 }
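
/*
 * Selection summary (illustrative only): transfer (E-Switch) rules require
 * DV with dv_esw_en set, otherwise the dv_flow_en devarg picks the engine:
 *
 *	attr->transfer	dv_esw_en	dv_flow_en	resulting type
 *	1		1		-		MLX5_FLOW_TYPE_DV
 *	1		0		-		MLX5_FLOW_TYPE_MAX
 *	0		-		1		MLX5_FLOW_TYPE_DV
 *	0		-		0		MLX5_FLOW_TYPE_VERBS
 */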
2453 
2454 #define flow_get_drv_ops(type) flow_drv_ops[type]
2455 
2456 /**
2457  * Flow driver validation API. This abstracts calling driver specific functions.
2458  * The type of flow driver is determined according to flow attributes.
2459  *
2460  * @param[in] dev
2461  *   Pointer to the dev structure.
2462  * @param[in] attr
2463  *   Pointer to the flow attributes.
2464  * @param[in] items
2465  *   Pointer to the list of items.
2466  * @param[in] actions
2467  *   Pointer to the list of actions.
2468  * @param[in] external
 *   This flow rule is created by a request external to the PMD.
2470  * @param[out] error
2471  *   Pointer to the error structure.
2472  *
2473  * @return
2474  *   0 on success, a negative errno value otherwise and rte_errno is set.
2475  */
2476 static inline int
2477 flow_drv_validate(struct rte_eth_dev *dev,
2478 		  const struct rte_flow_attr *attr,
2479 		  const struct rte_flow_item items[],
2480 		  const struct rte_flow_action actions[],
2481 		  bool external, struct rte_flow_error *error)
2482 {
2483 	const struct mlx5_flow_driver_ops *fops;
2484 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2485 
2486 	fops = flow_get_drv_ops(type);
2487 	return fops->validate(dev, attr, items, actions, external, error);
2488 }
2489 
2490 /**
2491  * Flow driver preparation API. This abstracts calling driver specific
2492  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2493  * calculates the size of memory required for device flow, allocates the memory,
2494  * initializes the device flow and returns the pointer.
2495  *
2496  * @note
2497  *   This function initializes device flow structure such as dv or verbs in
2498  *   struct mlx5_flow. However, it is caller's responsibility to initialize the
 *   rest. For example, adding the returned device flow to the flow->dev_flows
 *   list and setting the backward reference to the flow should be done outside
 *   of this function. The layers field is not filled either.
2502  *
2503  * @param[in] attr
2504  *   Pointer to the flow attributes.
2505  * @param[in] items
2506  *   Pointer to the list of items.
2507  * @param[in] actions
2508  *   Pointer to the list of actions.
2509  * @param[out] error
2510  *   Pointer to the error structure.
2511  *
2512  * @return
2513  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
2514  */
2515 static inline struct mlx5_flow *
2516 flow_drv_prepare(const struct rte_flow *flow,
2517 		 const struct rte_flow_attr *attr,
2518 		 const struct rte_flow_item items[],
2519 		 const struct rte_flow_action actions[],
2520 		 struct rte_flow_error *error)
2521 {
2522 	const struct mlx5_flow_driver_ops *fops;
2523 	enum mlx5_flow_drv_type type = flow->drv_type;
2524 
2525 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2526 	fops = flow_get_drv_ops(type);
2527 	return fops->prepare(attr, items, actions, error);
2528 }
2529 
2530 /**
2531  * Flow driver translation API. This abstracts calling driver specific
2532  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2533  * translates a generic flow into a driver flow. flow_drv_prepare() must
2534  * precede.
2535  *
2536  * @note
2537  *   dev_flow->layers could be filled as a result of parsing during translation
2538  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
2539  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
2540  *   flow->actions could be overwritten even though all the expanded dev_flows
2541  *   have the same actions.
2542  *
2543  * @param[in] dev
2544  *   Pointer to the rte dev structure.
2545  * @param[in, out] dev_flow
2546  *   Pointer to the mlx5 flow.
2547  * @param[in] attr
2548  *   Pointer to the flow attributes.
2549  * @param[in] items
2550  *   Pointer to the list of items.
2551  * @param[in] actions
2552  *   Pointer to the list of actions.
2553  * @param[out] error
2554  *   Pointer to the error structure.
2555  *
2556  * @return
2557  *   0 on success, a negative errno value otherwise and rte_errno is set.
2558  */
2559 static inline int
2560 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
2561 		   const struct rte_flow_attr *attr,
2562 		   const struct rte_flow_item items[],
2563 		   const struct rte_flow_action actions[],
2564 		   struct rte_flow_error *error)
2565 {
2566 	const struct mlx5_flow_driver_ops *fops;
2567 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
2568 
2569 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2570 	fops = flow_get_drv_ops(type);
2571 	return fops->translate(dev, dev_flow, attr, items, actions, error);
2572 }
2573 
2574 /**
2575  * Flow driver apply API. This abstracts calling driver specific functions.
2576  * Parent flow (rte_flow) should have driver type (drv_type). It applies
2577  * translated driver flows on to device. flow_drv_translate() must precede.
2578  *
2579  * @param[in] dev
2580  *   Pointer to Ethernet device structure.
2581  * @param[in, out] flow
2582  *   Pointer to flow structure.
2583  * @param[out] error
2584  *   Pointer to error structure.
2585  *
2586  * @return
2587  *   0 on success, a negative errno value otherwise and rte_errno is set.
2588  */
2589 static inline int
2590 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2591 	       struct rte_flow_error *error)
2592 {
2593 	const struct mlx5_flow_driver_ops *fops;
2594 	enum mlx5_flow_drv_type type = flow->drv_type;
2595 
2596 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2597 	fops = flow_get_drv_ops(type);
2598 	return fops->apply(dev, flow, error);
2599 }
2600 
2601 /**
2602  * Flow driver remove API. This abstracts calling driver specific functions.
2603  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2604  * on device. All the resources of the flow should be freed by calling
2605  * flow_drv_destroy().
2606  *
2607  * @param[in] dev
2608  *   Pointer to Ethernet device.
2609  * @param[in, out] flow
2610  *   Pointer to flow structure.
2611  */
2612 static inline void
2613 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2614 {
2615 	const struct mlx5_flow_driver_ops *fops;
2616 	enum mlx5_flow_drv_type type = flow->drv_type;
2617 
2618 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2619 	fops = flow_get_drv_ops(type);
2620 	fops->remove(dev, flow);
2621 }
2622 
2623 /**
2624  * Flow driver destroy API. This abstracts calling driver specific functions.
2625  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2626  * on device and releases resources of the flow.
2627  *
2628  * @param[in] dev
2629  *   Pointer to Ethernet device.
2630  * @param[in, out] flow
2631  *   Pointer to flow structure.
2632  */
2633 static inline void
2634 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2635 {
2636 	const struct mlx5_flow_driver_ops *fops;
2637 	enum mlx5_flow_drv_type type = flow->drv_type;
2638 
2639 	flow_mreg_split_qrss_release(dev, flow);
2640 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2641 	fops = flow_get_drv_ops(type);
2642 	fops->destroy(dev, flow);
2643 }
2644 
2645 /**
2646  * Validate a flow supported by the NIC.
2647  *
2648  * @see rte_flow_validate()
2649  * @see rte_flow_ops
2650  */
2651 int
2652 mlx5_flow_validate(struct rte_eth_dev *dev,
2653 		   const struct rte_flow_attr *attr,
2654 		   const struct rte_flow_item items[],
2655 		   const struct rte_flow_action actions[],
2656 		   struct rte_flow_error *error)
2657 {
2658 	int ret;
2659 
2660 	ret = flow_drv_validate(dev, attr, items, actions, true, error);
2661 	if (ret < 0)
2662 		return ret;
2663 	return 0;
2664 }
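
/*
 * Application-level usage sketch (illustrative only): this callback is
 * reached through the generic rte_flow API, e.g. to pre-check an ingress
 * eth/ipv4 rule steering to Rx queue 0 ("port_id" is hypothetical):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */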
2665 
2666 /**
2667  * Get port id item from the item list.
2668  *
2669  * @param[in] item
2670  *   Pointer to the list of items.
2671  *
2672  * @return
 *   Pointer to the port id item if it exists, NULL otherwise.
2674  */
2675 static const struct rte_flow_item *
2676 find_port_id_item(const struct rte_flow_item *item)
2677 {
2678 	MLX5_ASSERT(item);
2679 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2680 		if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
2681 			return item;
2682 	}
2683 	return NULL;
2684 }
2685 
2686 /**
2687  * Get RSS action from the action list.
2688  *
2689  * @param[in] actions
2690  *   Pointer to the list of actions.
2691  *
2692  * @return
 *   Pointer to the RSS action if it exists, NULL otherwise.
2694  */
static const struct rte_flow_action_rss *
2696 flow_get_rss_action(const struct rte_flow_action actions[])
2697 {
2698 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2699 		switch (actions->type) {
2700 		case RTE_FLOW_ACTION_TYPE_RSS:
2701 			return (const struct rte_flow_action_rss *)
2702 			       actions->conf;
2703 		default:
2704 			break;
2705 		}
2706 	}
2707 	return NULL;
2708 }
2709 
2710 static unsigned int
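/**
 * Select the root node for RSS flow expansion.
 *
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] rss_level
 *   RSS level from the RSS action, 2 and above requests inner (tunneled)
 *   RSS and selects an outer-prefixed expansion root.
 *
 * @return
 *   Index of the root node in mlx5_support_expansion[], for example
 *   MLX5_EXPANSION_ROOT_ETH_VLAN when the pattern holds a VLAN item and
 *   rss_level is below 2.
 */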
2711 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
2712 {
2713 	const struct rte_flow_item *item;
2714 	unsigned int has_vlan = 0;
2715 
2716 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2717 		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2718 			has_vlan = 1;
2719 			break;
2720 		}
2721 	}
2722 	if (has_vlan)
2723 		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2724 				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2725 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2726 			       MLX5_EXPANSION_ROOT_OUTER;
2727 }
2728 
2729 /**
2730  *  Get layer flags from the prefix flow.
2731  *
 *  Some flows may be split into several subflows; the prefix subflow gets the
 *  match items and the suffix subflow gets the actions.
 *  Some actions need the user defined match item flags to get the detail for
 *  the action.
 *  This function helps the suffix flow to get the item layer flags from the
 *  prefix subflow.
 *
 * @param[in] dev_flow
 *   Pointer to the created prefix subflow.
 *
 * @return
 *   The layers obtained from the prefix subflow.
2744  */
2745 static inline uint64_t
2746 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
2747 {
2748 	uint64_t layers = 0;
2749 
2750 	/* If no decap actions, use the layers directly. */
2751 	if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP))
2752 		return dev_flow->layers;
2753 	/* Convert L3 layers with decap action. */
2754 	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2755 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2756 	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2757 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2758 	/* Convert L4 layers with decap action.  */
2759 	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
2760 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
2761 	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
2762 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
2763 	return layers;
2764 }
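
/*
 * Conversion sketch (illustrative only): with a decap action the inner
 * layers matched by the prefix subflow become outer layers for the suffix
 * subflow, e.g.:
 *
 *	dev_flow->actions = MLX5_FLOW_ACTION_DECAP;
 *	dev_flow->layers = MLX5_FLOW_LAYER_INNER_L3_IPV4 |
 *			   MLX5_FLOW_LAYER_INNER_L4_UDP;
 *	// flow_get_prefix_layer_flags(dev_flow) then yields
 *	// MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP.
 */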
2765 
2766 /**
2767  * Get QUEUE/RSS action from the action list.
2768  *
2769  * @param[in] actions
2770  *   Pointer to the list of actions.
 * @param[out] qrss
 *   Pointer to the return pointer, set to the last QUEUE/RSS action found,
 *   left untouched if there is none.
2776  *
2777  * @return
2778  *   Total number of actions.
2779  */
2780 static int
2781 flow_parse_qrss_action(const struct rte_flow_action actions[],
2782 		       const struct rte_flow_action **qrss)
2783 {
2784 	int actions_n = 0;
2785 
2786 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2787 		switch (actions->type) {
2788 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2789 		case RTE_FLOW_ACTION_TYPE_RSS:
2790 			*qrss = actions;
2791 			break;
2792 		default:
2793 			break;
2794 		}
2795 		actions_n++;
2796 	}
2797 	/* Count RTE_FLOW_ACTION_TYPE_END. */
2798 	return actions_n + 1;
2799 }
2800 
2801 /**
2802  * Check meter action from the action list.
2803  *
2804  * @param[in] actions
2805  *   Pointer to the list of actions.
2806  * @param[out] mtr
 *   Pointer to the meter existence flag.
2808  *
2809  * @return
2810  *   Total number of actions.
2811  */
2812 static int
2813 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
2814 {
2815 	int actions_n = 0;
2816 
2817 	MLX5_ASSERT(mtr);
2818 	*mtr = 0;
2819 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2820 		switch (actions->type) {
2821 		case RTE_FLOW_ACTION_TYPE_METER:
2822 			*mtr = 1;
2823 			break;
2824 		default:
2825 			break;
2826 		}
2827 		actions_n++;
2828 	}
2829 	/* Count RTE_FLOW_ACTION_TYPE_END. */
2830 	return actions_n + 1;
2831 }
2832 
2833 /**
 * Check if the flow should be split due to hairpin.
 * The reason for the split is that in current HW we can't
 * support encap on Rx, so if a flow has encap we move it
 * to Tx.
2838  *
2839  * @param dev
2840  *   Pointer to Ethernet device.
2841  * @param[in] attr
2842  *   Flow rule attributes.
2843  * @param[in] actions
2844  *   Associated actions (list terminated by the END action).
2845  *
2846  * @return
2847  *   > 0 the number of actions and the flow should be split,
 *   0 when no split is required.
2849  */
2850 static int
2851 flow_check_hairpin_split(struct rte_eth_dev *dev,
2852 			 const struct rte_flow_attr *attr,
2853 			 const struct rte_flow_action actions[])
2854 {
2855 	int queue_action = 0;
2856 	int action_n = 0;
2857 	int encap = 0;
2858 	const struct rte_flow_action_queue *queue;
2859 	const struct rte_flow_action_rss *rss;
2860 	const struct rte_flow_action_raw_encap *raw_encap;
2861 
2862 	if (!attr->ingress)
2863 		return 0;
2864 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2865 		switch (actions->type) {
2866 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2867 			queue = actions->conf;
2868 			if (queue == NULL)
2869 				return 0;
2870 			if (mlx5_rxq_get_type(dev, queue->index) !=
2871 			    MLX5_RXQ_TYPE_HAIRPIN)
2872 				return 0;
2873 			queue_action = 1;
2874 			action_n++;
2875 			break;
2876 		case RTE_FLOW_ACTION_TYPE_RSS:
2877 			rss = actions->conf;
2878 			if (rss == NULL || rss->queue_num == 0)
2879 				return 0;
2880 			if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
2881 			    MLX5_RXQ_TYPE_HAIRPIN)
2882 				return 0;
2883 			queue_action = 1;
2884 			action_n++;
2885 			break;
2886 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2887 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2888 			encap = 1;
2889 			action_n++;
2890 			break;
2891 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2892 			raw_encap = actions->conf;
2893 			if (raw_encap->size >
2894 			    (sizeof(struct rte_flow_item_eth) +
2895 			     sizeof(struct rte_flow_item_ipv4)))
2896 				encap = 1;
2897 			action_n++;
2898 			break;
2899 		default:
2900 			action_n++;
2901 			break;
2902 		}
2903 	}
2904 	if (encap == 1 && queue_action)
2905 		return action_n;
2906 	return 0;
2907 }
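
/*
 * Trigger sketch (illustrative only): an ingress rule that both encaps and
 * targets a hairpin queue is reported for splitting, e.g.:
 *
 *	actions = VXLAN_ENCAP / QUEUE(index of a hairpin Rx queue) / END
 *
 * returns 2 (VXLAN_ENCAP and QUEUE, END is not counted), while the same
 * rule targeting a regular Rx queue returns 0.
 */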
2908 
2909 /* Declare flow create/destroy prototype in advance. */
2910 static struct rte_flow *
2911 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2912 		 const struct rte_flow_attr *attr,
2913 		 const struct rte_flow_item items[],
2914 		 const struct rte_flow_action actions[],
2915 		 bool external, struct rte_flow_error *error);
2916 
2917 static void
2918 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2919 		  struct rte_flow *flow);
2920 
2921 /**
2922  * Add a flow of copying flow metadata registers in RX_CP_TBL.
2923  *
2924  * As mark_id is unique, if there's already a registered flow for the mark_id,
2925  * return by increasing the reference counter of the resource. Otherwise, create
2926  * the resource (mcp_res) and flow.
2927  *
2928  * Flow looks like,
2929  *   - If ingress port is ANY and reg_c[1] is mark_id,
2930  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
2931  *
 * For the default flow (zero mark_id), the flow looks like,
2933  *   - If ingress port is ANY,
2934  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
2935  *
2936  * @param dev
2937  *   Pointer to Ethernet device.
2938  * @param mark_id
2939  *   ID of MARK action, zero means default flow for META.
2940  * @param[out] error
2941  *   Perform verbose error reporting if not NULL.
2942  *
2943  * @return
2944  *   Associated resource on success, NULL otherwise and rte_errno is set.
2945  */
2946 static struct mlx5_flow_mreg_copy_resource *
2947 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
2948 			  struct rte_flow_error *error)
2949 {
2950 	struct mlx5_priv *priv = dev->data->dev_private;
2951 	struct rte_flow_attr attr = {
2952 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
2953 		.ingress = 1,
2954 	};
2955 	struct mlx5_rte_flow_item_tag tag_spec = {
2956 		.data = mark_id,
2957 	};
2958 	struct rte_flow_item items[] = {
2959 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
2960 	};
2961 	struct rte_flow_action_mark ftag = {
2962 		.id = mark_id,
2963 	};
2964 	struct mlx5_flow_action_copy_mreg cp_mreg = {
2965 		.dst = REG_B,
2966 		.src = 0,
2967 	};
2968 	struct rte_flow_action_jump jump = {
2969 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
2970 	};
2971 	struct rte_flow_action actions[] = {
2972 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
2973 	};
2974 	struct mlx5_flow_mreg_copy_resource *mcp_res;
2975 	int ret;
2976 
	/* Fill the register fields in the flow. */
2978 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2979 	if (ret < 0)
2980 		return NULL;
2981 	tag_spec.id = ret;
2982 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
2983 	if (ret < 0)
2984 		return NULL;
2985 	cp_mreg.src = ret;
2986 	/* Check if already registered. */
2987 	MLX5_ASSERT(priv->mreg_cp_tbl);
2988 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
2989 	if (mcp_res) {
2990 		/* For non-default rule. */
2991 		if (mark_id != MLX5_DEFAULT_COPY_ID)
2992 			mcp_res->refcnt++;
2993 		MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
2994 			    mcp_res->refcnt == 1);
2995 		return mcp_res;
2996 	}
2997 	/* Provide the full width of FLAG specific value. */
2998 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
2999 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3000 	/* Build a new flow. */
3001 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
3002 		items[0] = (struct rte_flow_item){
3003 			.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3004 			.spec = &tag_spec,
3005 		};
3006 		items[1] = (struct rte_flow_item){
3007 			.type = RTE_FLOW_ITEM_TYPE_END,
3008 		};
3009 		actions[0] = (struct rte_flow_action){
3010 			.type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3011 			.conf = &ftag,
3012 		};
3013 		actions[1] = (struct rte_flow_action){
3014 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3015 			.conf = &cp_mreg,
3016 		};
3017 		actions[2] = (struct rte_flow_action){
3018 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3019 			.conf = &jump,
3020 		};
3021 		actions[3] = (struct rte_flow_action){
3022 			.type = RTE_FLOW_ACTION_TYPE_END,
3023 		};
3024 	} else {
3025 		/* Default rule, wildcard match. */
3026 		attr.priority = MLX5_FLOW_PRIO_RSVD;
3027 		items[0] = (struct rte_flow_item){
3028 			.type = RTE_FLOW_ITEM_TYPE_END,
3029 		};
3030 		actions[0] = (struct rte_flow_action){
3031 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3032 			.conf = &cp_mreg,
3033 		};
3034 		actions[1] = (struct rte_flow_action){
3035 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3036 			.conf = &jump,
3037 		};
3038 		actions[2] = (struct rte_flow_action){
3039 			.type = RTE_FLOW_ACTION_TYPE_END,
3040 		};
3041 	}
3042 	/* Build a new entry. */
3043 	mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
3044 	if (!mcp_res) {
3045 		rte_errno = ENOMEM;
3046 		return NULL;
3047 	}
	/*
	 * The copy flows are not included in any list. They are
	 * referenced from other flows and cannot be applied,
	 * removed or deleted in arbitrary order by list traversal.
	 */
3054 	mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
3055 					 actions, false, error);
3056 	if (!mcp_res->flow)
3057 		goto error;
3058 	mcp_res->refcnt++;
3059 	mcp_res->hlist_ent.key = mark_id;
3060 	ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
3061 				&mcp_res->hlist_ent);
3062 	MLX5_ASSERT(!ret);
3063 	if (ret)
3064 		goto error;
3065 	return mcp_res;
3066 error:
3067 	if (mcp_res->flow)
3068 		flow_list_destroy(dev, NULL, mcp_res->flow);
3069 	rte_free(mcp_res);
3070 	return NULL;
3071 }
3072 
3073 /**
3074  * Release flow in RX_CP_TBL.
3075  *
3076  * @param dev
3077  *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3080  */
3081 static void
3082 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3083 			  struct rte_flow *flow)
3084 {
3085 	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
3086 	struct mlx5_priv *priv = dev->data->dev_private;
3087 
3088 	if (!mcp_res || !priv->mreg_cp_tbl)
3089 		return;
3090 	if (flow->copy_applied) {
3091 		MLX5_ASSERT(mcp_res->appcnt);
3092 		flow->copy_applied = 0;
3093 		--mcp_res->appcnt;
3094 		if (!mcp_res->appcnt)
3095 			flow_drv_remove(dev, mcp_res->flow);
3096 	}
3097 	/*
3098 	 * We do not check availability of metadata registers here,
3099 	 * because copy resources are not allocated in this case.
3100 	 */
3101 	if (--mcp_res->refcnt)
3102 		return;
3103 	MLX5_ASSERT(mcp_res->flow);
3104 	flow_list_destroy(dev, NULL, mcp_res->flow);
3105 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3106 	rte_free(mcp_res);
3107 	flow->mreg_copy = NULL;
3108 }
3109 
3110 /**
3111  * Start flow in RX_CP_TBL.
3112  *
3113  * @param dev
3114  *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3117  *
3118  * @return
3119  *   0 on success, a negative errno value otherwise and rte_errno is set.
3120  */
3121 static int
3122 flow_mreg_start_copy_action(struct rte_eth_dev *dev,
3123 			    struct rte_flow *flow)
3124 {
3125 	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
3126 	int ret;
3127 
3128 	if (!mcp_res || flow->copy_applied)
3129 		return 0;
3130 	if (!mcp_res->appcnt) {
3131 		ret = flow_drv_apply(dev, mcp_res->flow, NULL);
3132 		if (ret)
3133 			return ret;
3134 	}
3135 	++mcp_res->appcnt;
3136 	flow->copy_applied = 1;
3137 	return 0;
3138 }
3139 
3140 /**
3141  * Stop flow in RX_CP_TBL.
3142  *
3143  * @param dev
3144  *   Pointer to Ethernet device.
 * @param flow
 *   Parent flow for which copying is provided.
3147  */
3148 static void
3149 flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
3150 			   struct rte_flow *flow)
3151 {
3152 	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
3153 
3154 	if (!mcp_res || !flow->copy_applied)
3155 		return;
3156 	MLX5_ASSERT(mcp_res->appcnt);
3157 	--mcp_res->appcnt;
3158 	flow->copy_applied = 0;
3159 	if (!mcp_res->appcnt)
3160 		flow_drv_remove(dev, mcp_res->flow);
3161 }
3162 
3163 /**
3164  * Remove the default copy action from RX_CP_TBL.
3165  *
3166  * @param dev
3167  *   Pointer to Ethernet device.
3168  */
3169 static void
3170 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3171 {
3172 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3173 	struct mlx5_priv *priv = dev->data->dev_private;
3174 
3175 	/* Check if default flow is registered. */
3176 	if (!priv->mreg_cp_tbl)
3177 		return;
3178 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
3179 					    MLX5_DEFAULT_COPY_ID);
3180 	if (!mcp_res)
3181 		return;
3182 	MLX5_ASSERT(mcp_res->flow);
3183 	flow_list_destroy(dev, NULL, mcp_res->flow);
3184 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3185 	rte_free(mcp_res);
3186 }
3187 
3188 /**
 * Add the default copy action in RX_CP_TBL.
3190  *
3191  * @param dev
3192  *   Pointer to Ethernet device.
3193  * @param[out] error
3194  *   Perform verbose error reporting if not NULL.
3195  *
3196  * @return
3197  *   0 for success, negative value otherwise and rte_errno is set.
3198  */
3199 static int
3200 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3201 				  struct rte_flow_error *error)
3202 {
3203 	struct mlx5_priv *priv = dev->data->dev_private;
3204 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3205 
3206 	/* Check whether extensive metadata feature is engaged. */
3207 	if (!priv->config.dv_flow_en ||
3208 	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3209 	    !mlx5_flow_ext_mreg_supported(dev) ||
3210 	    !priv->sh->dv_regc0_mask)
3211 		return 0;
3212 	mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3213 	if (!mcp_res)
3214 		return -rte_errno;
3215 	return 0;
3216 }
3217 
3218 /**
3219  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3220  *
 * All the flows having a Q/RSS action should be split by
 * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in the
 * RX_CP_TBL performs the following,
3224  *   - CQE->flow_tag := reg_c[1] (MARK)
3225  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3226  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
 * but there should be a flow for each MARK ID set by the MARK action.
3228  *
3229  * For the aforementioned reason, if there's a MARK action in flow's action
3230  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3231  * the MARK ID to CQE's flow_tag like,
3232  *   - If reg_c[1] is mark_id,
3233  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3234  *
3235  * For SET_META action which stores value in reg_c[0], as the destination is
3236  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3237  * MARK ID means the default flow. The default flow looks like,
3238  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3239  *
3240  * @param dev
3241  *   Pointer to Ethernet device.
3242  * @param flow
3243  *   Pointer to flow structure.
3244  * @param[in] actions
3245  *   Pointer to the list of actions.
3246  * @param[out] error
3247  *   Perform verbose error reporting if not NULL.
3248  *
3249  * @return
3250  *   0 on success, negative value otherwise and rte_errno is set.
3251  */
3252 static int
3253 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3254 			    struct rte_flow *flow,
3255 			    const struct rte_flow_action *actions,
3256 			    struct rte_flow_error *error)
3257 {
3258 	struct mlx5_priv *priv = dev->data->dev_private;
3259 	struct mlx5_dev_config *config = &priv->config;
3260 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3261 	const struct rte_flow_action_mark *mark;
3262 
3263 	/* Check whether extensive metadata feature is engaged. */
3264 	if (!config->dv_flow_en ||
3265 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3266 	    !mlx5_flow_ext_mreg_supported(dev) ||
3267 	    !priv->sh->dv_regc0_mask)
3268 		return 0;
3269 	/* Find MARK action. */
3270 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3271 		switch (actions->type) {
3272 		case RTE_FLOW_ACTION_TYPE_FLAG:
3273 			mcp_res = flow_mreg_add_copy_action
3274 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
3275 			if (!mcp_res)
3276 				return -rte_errno;
3277 			flow->mreg_copy = mcp_res;
3278 			if (dev->data->dev_started) {
3279 				mcp_res->appcnt++;
3280 				flow->copy_applied = 1;
3281 			}
3282 			return 0;
3283 		case RTE_FLOW_ACTION_TYPE_MARK:
3284 			mark = (const struct rte_flow_action_mark *)
3285 				actions->conf;
3286 			mcp_res =
3287 				flow_mreg_add_copy_action(dev, mark->id, error);
3288 			if (!mcp_res)
3289 				return -rte_errno;
3290 			flow->mreg_copy = mcp_res;
3291 			if (dev->data->dev_started) {
3292 				mcp_res->appcnt++;
3293 				flow->copy_applied = 1;
3294 			}
3295 			return 0;
3296 		default:
3297 			break;
3298 		}
3299 	}
3300 	return 0;
3301 }
3302 
3303 #define MLX5_MAX_SPLIT_ACTIONS 24
3304 #define MLX5_MAX_SPLIT_ITEMS 24
3305 
3306 /**
3307  * Split the hairpin flow.
3308  * Since HW can't support encap on Rx we move the encap to Tx.
3309  * If the count action is after the encap then we also
3310  * move the count action. in this case the count will also measure
 * move the count action. In this case the count will also measure
3312  *
3313  * @param dev
3314  *   Pointer to Ethernet device.
3315  * @param[in] actions
3316  *   Associated actions (list terminated by the END action).
3317  * @param[out] actions_rx
3318  *   Rx flow actions.
3319  * @param[out] actions_tx
 *   Tx flow actions.
3321  * @param[out] pattern_tx
3322  *   The pattern items for the Tx flow.
3323  * @param[out] flow_id
3324  *   The flow ID connected to this flow.
3325  *
3326  * @return
3327  *   0 on success.
3328  */
3329 static int
3330 flow_hairpin_split(struct rte_eth_dev *dev,
3331 		   const struct rte_flow_action actions[],
3332 		   struct rte_flow_action actions_rx[],
3333 		   struct rte_flow_action actions_tx[],
3334 		   struct rte_flow_item pattern_tx[],
3335 		   uint32_t *flow_id)
3336 {
3337 	struct mlx5_priv *priv = dev->data->dev_private;
3338 	const struct rte_flow_action_raw_encap *raw_encap;
3339 	const struct rte_flow_action_raw_decap *raw_decap;
3340 	struct mlx5_rte_flow_action_set_tag *set_tag;
3341 	struct rte_flow_action *tag_action;
3342 	struct mlx5_rte_flow_item_tag *tag_item;
3343 	struct rte_flow_item *item;
3344 	char *addr;
3345 	int encap = 0;
3346 
3347 	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
3348 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3349 		switch (actions->type) {
3350 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3351 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3352 			rte_memcpy(actions_tx, actions,
3353 			       sizeof(struct rte_flow_action));
3354 			actions_tx++;
3355 			break;
3356 		case RTE_FLOW_ACTION_TYPE_COUNT:
3357 			if (encap) {
3358 				rte_memcpy(actions_tx, actions,
3359 					   sizeof(struct rte_flow_action));
3360 				actions_tx++;
3361 			} else {
3362 				rte_memcpy(actions_rx, actions,
3363 					   sizeof(struct rte_flow_action));
3364 				actions_rx++;
3365 			}
3366 			break;
3367 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3368 			raw_encap = actions->conf;
3369 			if (raw_encap->size >
3370 			    (sizeof(struct rte_flow_item_eth) +
3371 			     sizeof(struct rte_flow_item_ipv4))) {
3372 				memcpy(actions_tx, actions,
3373 				       sizeof(struct rte_flow_action));
3374 				actions_tx++;
3375 				encap = 1;
3376 			} else {
3377 				rte_memcpy(actions_rx, actions,
3378 					   sizeof(struct rte_flow_action));
3379 				actions_rx++;
3380 			}
3381 			break;
3382 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3383 			raw_decap = actions->conf;
3384 			if (raw_decap->size <
3385 			    (sizeof(struct rte_flow_item_eth) +
3386 			     sizeof(struct rte_flow_item_ipv4))) {
3387 				memcpy(actions_tx, actions,
3388 				       sizeof(struct rte_flow_action));
3389 				actions_tx++;
3390 			} else {
3391 				rte_memcpy(actions_rx, actions,
3392 					   sizeof(struct rte_flow_action));
3393 				actions_rx++;
3394 			}
3395 			break;
3396 		default:
3397 			rte_memcpy(actions_rx, actions,
3398 				   sizeof(struct rte_flow_action));
3399 			actions_rx++;
3400 			break;
3401 		}
3402 	}
3403 	/* Add set tag action and end action for the Rx flow. */
3404 	tag_action = actions_rx;
3405 	tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3406 	actions_rx++;
3407 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
3408 	actions_rx++;
3409 	set_tag = (void *)actions_rx;
3410 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
3411 	MLX5_ASSERT(set_tag->id > REG_NONE);
3412 	set_tag->data = *flow_id;
3413 	tag_action->conf = set_tag;
3414 	/* Create Tx item list. */
3415 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
3416 	addr = (void *)&pattern_tx[2];
3417 	item = pattern_tx;
3418 	item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3419 	tag_item = (void *)addr;
3420 	tag_item->data = *flow_id;
3421 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
3422 	MLX5_ASSERT(tag_item->id > REG_NONE);
3423 	item->spec = tag_item;
3424 	addr += sizeof(struct mlx5_rte_flow_item_tag);
3425 	tag_item = (void *)addr;
3426 	tag_item->data = UINT32_MAX;
3427 	tag_item->id = UINT16_MAX;
3428 	item->mask = tag_item;
3429 	addr += sizeof(struct mlx5_rte_flow_item_tag);
3430 	item->last = NULL;
3431 	item++;
3432 	item->type = RTE_FLOW_ITEM_TYPE_END;
3433 	return 0;
3434 }
3435 
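/*
 * Illustrative sketch (not part of the driver; values hypothetical): for a
 * hairpin flow created with the action list
 *
 *   actions    = { RAW_ENCAP(size > ETH+IPv4), COUNT, QUEUE, END }
 *
 * flow_hairpin_split() would roughly produce
 *
 *   actions_tx = { RAW_ENCAP, COUNT, END }
 *   actions_rx = { QUEUE, TAG(reg = flow_id), END }
 *   pattern_tx = { TAG(spec = flow_id, mask = UINT32_MAX), END }
 *
 * i.e. the encap (and the count following it) moves to the Tx flow, and
 * the two halves are tied together by the internal tag set on Rx and
 * matched on Tx.
 */
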
3436 /**
3437  * The last stage of the splitting chain; it just creates the subflow
3438  * without any modification.
3439  *
3440  * @param dev
3441  *   Pointer to Ethernet device.
3442  * @param[in] flow
3443  *   Parent flow structure pointer.
3444  * @param[in, out] sub_flow
3445  *   Pointer to return the created subflow, may be NULL.
3446  * @param[in] prefix_layers
3447  *   Prefix subflow layers, may be 0.
3448  * @param[in] attr
3449  *   Flow rule attributes.
3450  * @param[in] items
3451  *   Pattern specification (list terminated by the END pattern item).
3452  * @param[in] actions
3453  *   Associated actions (list terminated by the END action).
3454  * @param[in] external
3455  *   This flow rule is created by a request external to the PMD.
3456  * @param[out] error
3457  *   Perform verbose error reporting if not NULL.
3458  * @return
3459  *   0 on success, negative value otherwise
3460  */
3461 static int
3462 flow_create_split_inner(struct rte_eth_dev *dev,
3463 			struct rte_flow *flow,
3464 			struct mlx5_flow **sub_flow,
3465 			uint64_t prefix_layers,
3466 			const struct rte_flow_attr *attr,
3467 			const struct rte_flow_item items[],
3468 			const struct rte_flow_action actions[],
3469 			bool external, struct rte_flow_error *error)
3470 {
3471 	struct mlx5_flow *dev_flow;
3472 
3473 	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
3474 	if (!dev_flow)
3475 		return -rte_errno;
3476 	dev_flow->flow = flow;
3477 	dev_flow->external = external;
3478 	/* Subflow object was created, we must include one in the list. */
3479 	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
3480 	/*
3481 	 * If dev_flow is one of the suffix flows, some actions in the suffix
3482 	 * flow may need the item layer flags inherited from the prefix flow.
3483 	 */
3484 	if (prefix_layers)
3485 		dev_flow->layers = prefix_layers;
3486 	if (sub_flow)
3487 		*sub_flow = dev_flow;
3488 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
3489 }
3490 
3491 /**
3492  * Split the meter flow.
3493  *
3494  * As the meter flow is split into prefix and suffix sub flows,
3495  * the actions other than the meter action only make sense if the
3496  * meter accepts the packet. If the packet is to be dropped, no
3497  * additional actions should be taken.
3498  *
3499  * One kind of special action which decapsulates the L3 tunnel
3500  * header will be put in the prefix sub flow, so as not to take
3501  * the L3 tunnel header into account.
3502  *
3503  * @param dev
3504  *   Pointer to Ethernet device.
3505  * @param[in] actions
3506  *   Associated actions (list terminated by the END action).
3507  * @param[out] actions_sfx
3508  *   Suffix flow actions.
3509  * @param[out] actions_pre
3510  *   Prefix flow actions.
3515  *
3516  * @return
3517  *   The preallocated suffix-flow tag ID on success, 0 otherwise.
3518  */
3519 static int
3520 flow_meter_split_prep(struct rte_eth_dev *dev,
3521 		 const struct rte_flow_action actions[],
3522 		 struct rte_flow_action actions_sfx[],
3523 		 struct rte_flow_action actions_pre[])
3524 {
3525 	struct rte_flow_action *tag_action = NULL;
3526 	struct mlx5_rte_flow_action_set_tag *set_tag;
3527 	struct rte_flow_error error;
3528 	const struct rte_flow_action_raw_encap *raw_encap;
3529 	const struct rte_flow_action_raw_decap *raw_decap;
3530 	uint32_t tag_id;
3531 
3532 	/* Prepare the actions for prefix and suffix flow. */
3533 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3534 		switch (actions->type) {
3535 		case RTE_FLOW_ACTION_TYPE_METER:
3536 			/* Add the extra tag action first. */
3537 			tag_action = actions_pre;
3538 			tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3539 			actions_pre++;
3540 			memcpy(actions_pre, actions,
3541 			       sizeof(struct rte_flow_action));
3542 			actions_pre++;
3543 			break;
3544 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3545 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3546 			memcpy(actions_pre, actions,
3547 			       sizeof(struct rte_flow_action));
3548 			actions_pre++;
3549 			break;
3550 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3551 			raw_encap = actions->conf;
3552 			if (raw_encap->size >
3553 			    (sizeof(struct rte_flow_item_eth) +
3554 			     sizeof(struct rte_flow_item_ipv4))) {
3555 				memcpy(actions_sfx, actions,
3556 				       sizeof(struct rte_flow_action));
3557 				actions_sfx++;
3558 			} else {
3559 				rte_memcpy(actions_pre, actions,
3560 					   sizeof(struct rte_flow_action));
3561 				actions_pre++;
3562 			}
3563 			break;
3564 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3565 			raw_decap = actions->conf;
3566 			/* A size 0 raw decap means 50 bytes, i.e. a VXLAN decap. */
3567 			if (raw_decap->size && (raw_decap->size <
3568 			    (sizeof(struct rte_flow_item_eth) +
3569 			     sizeof(struct rte_flow_item_ipv4)))) {
3570 				memcpy(actions_sfx, actions,
3571 				       sizeof(struct rte_flow_action));
3572 				actions_sfx++;
3573 			} else {
3574 				rte_memcpy(actions_pre, actions,
3575 					   sizeof(struct rte_flow_action));
3576 				actions_pre++;
3577 			}
3578 			break;
3579 		default:
3580 			memcpy(actions_sfx, actions,
3581 				sizeof(struct rte_flow_action));
3582 			actions_sfx++;
3583 			break;
3584 		}
3585 	}
3586 	/* Add end actions to both the prefix and suffix action lists. */
3587 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
3588 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
3589 	actions_pre++;
3590 	/* Set the tag. */
3591 	set_tag = (void *)actions_pre;
3592 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3593 	/*
3594 	 * Get the id from the qrss_pool to make qrss share the id with meter.
3595 	 */
3596 	tag_id = flow_qrss_get_id(dev);
3597 	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
3598 	MLX5_ASSERT(tag_action);
3599 	tag_action->conf = set_tag;
3600 	return tag_id;
3601 }
3602 
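/*
 * Illustrative sketch (not part of the driver; values hypothetical): for a
 * metered flow with
 *
 *   actions     = { VXLAN_DECAP, METER, QUEUE, END }
 *
 * flow_meter_split_prep() would roughly yield
 *
 *   actions_pre = { VXLAN_DECAP, TAG(data = tag_id << color bits),
 *                   METER, END }
 *   actions_sfx = { QUEUE, END }
 *
 * so the suffix flow can match the tag written by the prefix flow and
 * apply the remaining actions only to packets the meter accepted.
 */
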
3603 /**
3604  * Split action list having QUEUE/RSS for metadata register copy.
3605  *
3606  * Once a Q/RSS action is detected in the user's action list, the flow actions
3607  * should be split in order to copy metadata registers, which will happen in
3608  * RX_CP_TBL like,
3609  *   - CQE->flow_tag := reg_c[1] (MARK)
3610  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3611  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
3612  * This is because the last action of each flow must be a terminal action
3613  * (QUEUE, RSS or DROP).
3614  *
3615  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
3616  * stored and kept in the mlx5_flow structure for each sub_flow.
3617  *
3618  * The Q/RSS action is replaced with,
3619  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
3620  * And the following JUMP action is added at the end,
3621  *   - JUMP, to RX_CP_TBL.
3622  *
3623  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL
3624  * by the flow_create_split_metadata() routine. The flow will look like,
3625  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
3626  *
3627  * @param dev
3628  *   Pointer to Ethernet device.
3629  * @param[out] split_actions
3630  *   Pointer to store split actions to jump to CP_TBL.
3631  * @param[in] actions
3632  *   Pointer to the list of original flow actions.
3633  * @param[in] qrss
3634  *   Pointer to the Q/RSS action.
3635  * @param[in] actions_n
3636  *   Number of original actions.
3637  * @param[out] error
3638  *   Perform verbose error reporting if not NULL.
3639  *
3640  * @return
3641  *   non-zero unique flow_id on success, otherwise 0 and
3642  *   error/rte_errno are set.
3643  */
3644 static uint32_t
3645 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
3646 			  struct rte_flow_action *split_actions,
3647 			  const struct rte_flow_action *actions,
3648 			  const struct rte_flow_action *qrss,
3649 			  int actions_n, struct rte_flow_error *error)
3650 {
3651 	struct mlx5_rte_flow_action_set_tag *set_tag;
3652 	struct rte_flow_action_jump *jump;
3653 	const int qrss_idx = qrss - actions;
3654 	uint32_t flow_id = 0;
3655 	int ret = 0;
3656 
3657 	/*
3658 	 * Given actions will be split
3659 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
3660 	 * - Add jump to mreg CP_TBL.
3661 	 * As a result, there will be one more action.
3662 	 */
3663 	++actions_n;
3664 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
3665 	set_tag = (void *)(split_actions + actions_n);
3666 	/*
3667 	 * If the tag action is not set to void (i.e. we are not the meter
3668 	 * suffix flow), add the tag action; the meter suffix flow already
3669 	 * has the tag added.
3670 	 */
3671 	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
3672 		/*
3673 		 * Allocate the new subflow ID. This one is unique within
3674 		 * device and not shared with representors. Otherwise,
3675 		 * we would have to resolve multi-thread access synch
3676 		 * issue. Each flow on the shared device is appended
3677 		 * with source vport identifier, so the resulting
3678 		 * flows will be unique in the shared (by master and
3679 		 * representors) domain even if they have coinciding
3680 		 * IDs.
3681 		 */
3682 		flow_id = flow_qrss_get_id(dev);
3683 		if (!flow_id)
3684 			return rte_flow_error_set(error, ENOMEM,
3685 						  RTE_FLOW_ERROR_TYPE_ACTION,
3686 						  NULL, "can't allocate id "
3687 						  "for split Q/RSS subflow");
3688 		/* Internal SET_TAG action to set flow ID. */
3689 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
3690 			.data = flow_id,
3691 		};
3692 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
3693 		if (ret < 0)
3694 			return 0;
3695 		set_tag->id = ret;
3696 		/* Construct new actions array. */
3697 		/* Replace QUEUE/RSS action. */
3698 		split_actions[qrss_idx] = (struct rte_flow_action){
3699 			.type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
3700 			.conf = set_tag,
3701 		};
3702 	}
3703 	/* JUMP action to jump to mreg copy table (CP_TBL). */
3704 	jump = (void *)(set_tag + 1);
3705 	*jump = (struct rte_flow_action_jump){
3706 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3707 	};
3708 	split_actions[actions_n - 2] = (struct rte_flow_action){
3709 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
3710 		.conf = jump,
3711 	};
3712 	split_actions[actions_n - 1] = (struct rte_flow_action){
3713 		.type = RTE_FLOW_ACTION_TYPE_END,
3714 	};
3715 	return flow_id;
3716 }
3717 
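/*
 * Illustrative sketch (not part of the driver; values hypothetical): with
 * the input
 *
 *   actions       = { MARK, RSS, END }        (actions_n = 3)
 *
 * flow_mreg_split_qrss_prep() builds
 *
 *   split_actions = { MARK, TAG(reg_c[2] = flow_id), JUMP(RX_CP_TBL),
 *                     END }
 *
 * while the original RSS action is re-created later in RX_ACT_TBL by
 * flow_create_split_metadata(), matching on the same flow_id.
 */
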
3718 /**
3719  * Extend the given action list for Tx metadata copy.
3720  *
3721  * Copy the given action list to the ext_actions and add flow metadata register
3722  * copy action in order to copy reg_a set by WQE to reg_c[0].
3723  *
 * @param dev
 *   Pointer to Ethernet device.
3724  * @param[out] ext_actions
3725  *   Pointer to the extended action list.
3726  * @param[in] actions
3727  *   Pointer to the list of actions.
3728  * @param[in] actions_n
3729  *   Number of actions in the list.
3730  * @param[out] error
3731  *   Perform verbose error reporting if not NULL.
3732  *
3733  * @return
3734  *   0 on success, negative value otherwise
3735  */
3736 static int
3737 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
3738 		       struct rte_flow_action *ext_actions,
3739 		       const struct rte_flow_action *actions,
3740 		       int actions_n, struct rte_flow_error *error)
3741 {
3742 	struct mlx5_flow_action_copy_mreg *cp_mreg =
3743 		(struct mlx5_flow_action_copy_mreg *)
3744 			(ext_actions + actions_n + 1);
3745 	int ret;
3746 
3747 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3748 	if (ret < 0)
3749 		return ret;
3750 	cp_mreg->dst = ret;
3751 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
3752 	if (ret < 0)
3753 		return ret;
3754 	cp_mreg->src = ret;
3755 	memcpy(ext_actions, actions,
3756 			sizeof(*ext_actions) * actions_n);
3757 	ext_actions[actions_n - 1] = (struct rte_flow_action){
3758 		.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3759 		.conf = cp_mreg,
3760 	};
3761 	ext_actions[actions_n] = (struct rte_flow_action){
3762 		.type = RTE_FLOW_ACTION_TYPE_END,
3763 	};
3764 	return 0;
3765 }
3766 
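/*
 * Illustrative sketch (not part of the driver; values hypothetical): for an
 * egress flow with
 *
 *   actions     = { SET_META, END }           (actions_n = 2)
 *
 * flow_mreg_tx_copy_prep() yields
 *
 *   ext_actions = { SET_META, COPY_MREG(src = reg_a, dst = reg_c[0]),
 *                   END }
 *
 * so the metadata written to reg_a by the WQE is preserved in reg_c[0]
 * and can be recovered in the Rx domain.
 */
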
3767 /**
3768  * The splitting for metadata feature.
3769  *
3770  * - Q/RSS action on NIC Rx should be split in order to pass by
3771  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
3772  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
3773  *
3774  * - All the actions on NIC Tx should have a mreg copy action to
3775  *   copy reg_a from WQE to reg_c[0].
3776  *
3777  * @param dev
3778  *   Pointer to Ethernet device.
3779  * @param[in] flow
3780  *   Parent flow structure pointer.
3781  * @param[in] attr
3782  *   Flow rule attributes.
3783  * @param[in] items
3784  *   Pattern specification (list terminated by the END pattern item).
3785  * @param[in] actions
3786  *   Associated actions (list terminated by the END action).
3787  * @param[in] external
3788  *   This flow rule is created by a request external to the PMD.
3789  * @param[out] error
3790  *   Perform verbose error reporting if not NULL.
3791  * @return
3792  *   0 on success, negative value otherwise
3793  */
3794 static int
3795 flow_create_split_metadata(struct rte_eth_dev *dev,
3796 			   struct rte_flow *flow,
3797 			   const struct rte_flow_attr *attr,
3798 			   const struct rte_flow_item items[],
3799 			   const struct rte_flow_action actions[],
3800 			   bool external, struct rte_flow_error *error)
3801 {
3802 	struct mlx5_priv *priv = dev->data->dev_private;
3803 	struct mlx5_dev_config *config = &priv->config;
3804 	const struct rte_flow_action *qrss = NULL;
3805 	struct rte_flow_action *ext_actions = NULL;
3806 	struct mlx5_flow *dev_flow = NULL;
3807 	uint32_t qrss_id = 0;
3808 	int mtr_sfx = 0;
3809 	size_t act_size;
3810 	int actions_n;
3811 	int ret;
3812 
3813 	/* Check whether extensive metadata feature is engaged. */
3814 	if (!config->dv_flow_en ||
3815 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3816 	    !mlx5_flow_ext_mreg_supported(dev))
3817 		return flow_create_split_inner(dev, flow, NULL, 0,
3818 					       attr, items, actions, external,
3819 					       error);
3820 	actions_n = flow_parse_qrss_action(actions, &qrss);
3821 	if (qrss) {
3822 		/* Exclude hairpin flows from splitting. */
3823 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3824 			const struct rte_flow_action_queue *queue;
3825 
3826 			queue = qrss->conf;
3827 			if (mlx5_rxq_get_type(dev, queue->index) ==
3828 			    MLX5_RXQ_TYPE_HAIRPIN)
3829 				qrss = NULL;
3830 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
3831 			const struct rte_flow_action_rss *rss;
3832 
3833 			rss = qrss->conf;
3834 			if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
3835 			    MLX5_RXQ_TYPE_HAIRPIN)
3836 				qrss = NULL;
3837 		}
3838 	}
3839 	if (qrss) {
3840 		/* Check if it is in meter suffix table. */
3841 		mtr_sfx = attr->group == (attr->transfer ?
3842 			  (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
3843 			  MLX5_FLOW_TABLE_LEVEL_SUFFIX);
3844 		/*
3845 		 * Q/RSS action on NIC Rx should be split in order to pass by
3846 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
3847 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
3848 		 */
3849 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3850 			   sizeof(struct rte_flow_action_set_tag) +
3851 			   sizeof(struct rte_flow_action_jump);
3852 		ext_actions = rte_zmalloc(__func__, act_size, 0);
3853 		if (!ext_actions)
3854 			return rte_flow_error_set(error, ENOMEM,
3855 						  RTE_FLOW_ERROR_TYPE_ACTION,
3856 						  NULL, "no memory to split "
3857 						  "metadata flow");
3858 		/*
3859 		 * If we are the suffix flow of a meter, the tag already exists.
3860 		 * Set the tag action to void.
3861 		 */
3862 		if (mtr_sfx)
3863 			ext_actions[qrss - actions].type =
3864 						RTE_FLOW_ACTION_TYPE_VOID;
3865 		else
3866 			ext_actions[qrss - actions].type =
3867 						MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3868 		/*
3869 		 * Create the new action list with the Q/RSS action removed,
3870 		 * and a set tag action plus a jump to the register copy table
3871 		 * (RX_CP_TBL) appended. The unique tag ID is preallocated
3872 		 * in advance because it is needed for the set tag action.
3873 		 */
3874 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
3875 						    qrss, actions_n, error);
3876 		if (!mtr_sfx && !qrss_id) {
3877 			ret = -rte_errno;
3878 			goto exit;
3879 		}
3880 	} else if (attr->egress && !attr->transfer) {
3881 		/*
3882 		 * All the actions on NIC Tx should have a metadata register
3883 		 * copy action to copy reg_a from WQE to reg_c[meta]
3884 		 */
3885 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3886 			   sizeof(struct mlx5_flow_action_copy_mreg);
3887 		ext_actions = rte_zmalloc(__func__, act_size, 0);
3888 		if (!ext_actions)
3889 			return rte_flow_error_set(error, ENOMEM,
3890 						  RTE_FLOW_ERROR_TYPE_ACTION,
3891 						  NULL, "no memory to split "
3892 						  "metadata flow");
3893 		/* Create the action list appended with copy register. */
3894 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
3895 					     actions_n, error);
3896 		if (ret < 0)
3897 			goto exit;
3898 	}
3899 	/* Add the unmodified original or prefix subflow. */
3900 	ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
3901 				      items, ext_actions ? ext_actions :
3902 				      actions, external, error);
3903 	if (ret < 0)
3904 		goto exit;
3905 	MLX5_ASSERT(dev_flow);
3906 	if (qrss) {
3907 		const struct rte_flow_attr q_attr = {
3908 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3909 			.ingress = 1,
3910 		};
3911 		/* Internal PMD action to set register. */
3912 		struct mlx5_rte_flow_item_tag q_tag_spec = {
3913 			.data = qrss_id,
3914 			.id = 0,
3915 		};
3916 		struct rte_flow_item q_items[] = {
3917 			{
3918 				.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3919 				.spec = &q_tag_spec,
3920 				.last = NULL,
3921 				.mask = NULL,
3922 			},
3923 			{
3924 				.type = RTE_FLOW_ITEM_TYPE_END,
3925 			},
3926 		};
3927 		struct rte_flow_action q_actions[] = {
3928 			{
3929 				.type = qrss->type,
3930 				.conf = qrss->conf,
3931 			},
3932 			{
3933 				.type = RTE_FLOW_ACTION_TYPE_END,
3934 			},
3935 		};
3936 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
3937 
3938 		/*
3939 		 * Configure the tag item only if there is no meter subflow.
3940 		 * Since the tag is already set in the meter suffix subflow,
3941 		 * we can just use the meter suffix items as is.
3942 		 */
3943 		if (qrss_id) {
3944 			/* Not meter subflow. */
3945 			MLX5_ASSERT(!mtr_sfx);
3946 			/*
3947 			 * Put the unique ID in the prefix flow because it is
3948 			 * destroyed after the suffix flow. The ID is freed
3949 			 * only when no actual flow uses it anymore, at which
3950 			 * point identifier reallocation becomes possible (for
3951 			 * example, for other flows in other threads).
3952 			 */
3953 			dev_flow->qrss_id = qrss_id;
3954 			qrss_id = 0;
3955 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
3956 						   error);
3957 			if (ret < 0)
3958 				goto exit;
3959 			q_tag_spec.id = ret;
3960 		}
3961 		dev_flow = NULL;
3962 		/* Add suffix subflow to execute Q/RSS. */
3963 		ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
3964 					      &q_attr, mtr_sfx ? items :
3965 					      q_items, q_actions,
3966 					      external, error);
3967 		if (ret < 0)
3968 			goto exit;
3969 		MLX5_ASSERT(dev_flow);
3970 	}
3971 
3972 exit:
3973 	/*
3974 	 * We do not destroy the partially created sub_flows in case of error.
3975 	 * These ones are included into parent flow list and will be destroyed
3976 	 * by flow_drv_destroy.
3977 	 */
3978 	flow_qrss_free_id(dev, qrss_id);
3979 	rte_free(ext_actions);
3980 	return ret;
3981 }
3982 
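/*
 * Illustrative sketch (not part of the driver; values hypothetical): with
 * extensive metadata enabled, an ingress flow "pattern -> MARK, QUEUE"
 * ends up as two subflows:
 *
 *   prefix: pattern          -> MARK, TAG(flow_id), JUMP(RX_CP_TBL)
 *   suffix: TAG == flow_id   -> QUEUE   (in RX_ACT_TBL)
 *
 * RX_CP_TBL copies the metadata registers back to the CQE fields before
 * jumping to RX_ACT_TBL, as described above.
 */
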
3983 /**
3984  * The splitting for meter feature.
3985  *
3986  * - The meter flow will be split into two flows: a prefix and a
3987  *   suffix flow. The packets are processed further only if they
3988  *   pass the prefix meter action.
3989  *
3990  * - Reg_C_5 is used for the packet to match between the prefix
3991  *   and suffix flows.
3992  *
3993  * @param dev
3994  *   Pointer to Ethernet device.
3995  * @param[in] flow
3996  *   Parent flow structure pointer.
3997  * @param[in] attr
3998  *   Flow rule attributes.
3999  * @param[in] items
4000  *   Pattern specification (list terminated by the END pattern item).
4001  * @param[in] actions
4002  *   Associated actions (list terminated by the END action).
4003  * @param[in] external
4004  *   This flow rule is created by a request external to the PMD.
4005  * @param[out] error
4006  *   Perform verbose error reporting if not NULL.
4007  * @return
4008  *   0 on success, negative value otherwise
4009  */
4010 static int
4011 flow_create_split_meter(struct rte_eth_dev *dev,
4012 			   struct rte_flow *flow,
4013 			   const struct rte_flow_attr *attr,
4014 			   const struct rte_flow_item items[],
4015 			   const struct rte_flow_action actions[],
4016 			   bool external, struct rte_flow_error *error)
4017 {
4018 	struct mlx5_priv *priv = dev->data->dev_private;
4019 	struct rte_flow_action *sfx_actions = NULL;
4020 	struct rte_flow_action *pre_actions = NULL;
4021 	struct rte_flow_item *sfx_items = NULL;
4022 	const struct rte_flow_item *sfx_port_id_item;
4023 	struct mlx5_flow *dev_flow = NULL;
4024 	struct rte_flow_attr sfx_attr = *attr;
4025 	uint32_t mtr = 0;
4026 	uint32_t mtr_tag_id = 0;
4027 	size_t act_size;
4028 	size_t item_size;
4029 	int actions_n = 0;
4030 	int ret;
4031 
4032 	if (priv->mtr_en)
4033 		actions_n = flow_check_meter_action(actions, &mtr);
4034 	if (mtr) {
4035 		struct mlx5_rte_flow_item_tag *tag_spec;
4036 		struct mlx5_rte_flow_item_tag *tag_mask;
4037 		/* The five prefix actions: meter, decap, encap, tag, end. */
4038 		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
4039 			   sizeof(struct rte_flow_action_set_tag);
4040 		/* The three suffix items: tag, optional port id, end. */
4041 #define METER_SUFFIX_ITEM 3
4042 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
4043 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
4044 		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4045 		if (!sfx_actions)
4046 			return rte_flow_error_set(error, ENOMEM,
4047 						  RTE_FLOW_ERROR_TYPE_ACTION,
4048 						  NULL, "no memory to split "
4049 						  "meter flow");
4050 		pre_actions = sfx_actions + actions_n;
4051 		mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
4052 						     pre_actions);
4053 		if (!mtr_tag_id) {
4054 			ret = -rte_errno;
4055 			goto exit;
4056 		}
4057 		/* Add the prefix subflow. */
4058 		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
4059 					      items, pre_actions, external,
4060 					      error);
4061 		if (ret) {
4062 			ret = -rte_errno;
4063 			goto exit;
4064 		}
4065 		dev_flow->mtr_flow_id = mtr_tag_id;
4066 		/* Prepare the suffix flow match pattern. */
4067 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4068 			     act_size);
4069 		tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
4070 			    METER_SUFFIX_ITEM);
4071 		tag_spec->data = dev_flow->mtr_flow_id << MLX5_MTR_COLOR_BITS;
4072 		tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
4073 						    error);
4074 		tag_mask = tag_spec + 1;
4075 		tag_mask->data = 0xffffff00;
4076 		sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4077 		sfx_items->spec = tag_spec;
4078 		sfx_items->last = NULL;
4079 		sfx_items->mask = tag_mask;
4080 		sfx_items++;
4081 		sfx_port_id_item = find_port_id_item(items);
4082 		if (sfx_port_id_item) {
4083 			memcpy(sfx_items, sfx_port_id_item,
4084 			       sizeof(*sfx_items));
4085 			sfx_items++;
4086 		}
4087 		sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4088 		sfx_items -= sfx_port_id_item ? 2 : 1;
4089 		/* Setting the sfx group attr. */
4090 		sfx_attr.group = sfx_attr.transfer ?
4091 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4092 				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4093 	}
4094 	/* Add the suffix subflow, or the original flow if no meter. */
4095 	ret = flow_create_split_metadata(dev, flow, &sfx_attr,
4096 					 sfx_items ? sfx_items : items,
4097 					 sfx_actions ? sfx_actions : actions,
4098 					 external, error);
4099 exit:
4100 	if (sfx_actions)
4101 		rte_free(sfx_actions);
4102 	return ret;
4103 }
4104 
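/*
 * Illustrative sketch (not part of the driver; values hypothetical): a
 * metered flow is therefore layered as
 *
 *   prefix (original group): items -> [decap,] TAG(tag_id), METER
 *   suffix (suffix table):   TAG == tag_id [, PORT_ID] -> other actions
 *
 * and the suffix part is handed on to flow_create_split_metadata().
 */
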
4105 /**
4106  * Split the flow to subflow set. The splitters might be linked
4107  * in the chain, like this:
4108  * flow_create_split_outer() calls:
4109  *   flow_create_split_meter() calls:
4110  *     flow_create_split_metadata(meter_subflow_0) calls:
4111  *       flow_create_split_inner(metadata_subflow_0)
4112  *       flow_create_split_inner(metadata_subflow_1)
4113  *       flow_create_split_inner(metadata_subflow_2)
4114  *     flow_create_split_metadata(meter_subflow_1) calls:
4115  *       flow_create_split_inner(metadata_subflow_0)
4116  *       flow_create_split_inner(metadata_subflow_1)
4117  *       flow_create_split_inner(metadata_subflow_2)
4118  *
4119  * This provides a flexible way to add new levels of flow splitting.
4120  * All successfully created subflows are included in the parent
4121  * flow dev_flow list.
4122  *
4123  * @param dev
4124  *   Pointer to Ethernet device.
4125  * @param[in] flow
4126  *   Parent flow structure pointer.
4127  * @param[in] attr
4128  *   Flow rule attributes.
4129  * @param[in] items
4130  *   Pattern specification (list terminated by the END pattern item).
4131  * @param[in] actions
4132  *   Associated actions (list terminated by the END action).
4133  * @param[in] external
4134  *   This flow rule is created by a request external to the PMD.
4135  * @param[out] error
4136  *   Perform verbose error reporting if not NULL.
4137  * @return
4138  *   0 on success, negative value otherwise
4139  */
4140 static int
4141 flow_create_split_outer(struct rte_eth_dev *dev,
4142 			struct rte_flow *flow,
4143 			const struct rte_flow_attr *attr,
4144 			const struct rte_flow_item items[],
4145 			const struct rte_flow_action actions[],
4146 			bool external, struct rte_flow_error *error)
4147 {
4148 	int ret;
4149 
4150 	ret = flow_create_split_meter(dev, flow, attr, items,
4151 					 actions, external, error);
4152 	MLX5_ASSERT(ret <= 0);
4153 	return ret;
4154 }
4155 
4156 /**
4157  * Create a flow and add it to @p list.
4158  *
4159  * @param dev
4160  *   Pointer to Ethernet device.
4161  * @param list
4162  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4163  *   no list insertion occurs; the flow is just created and
4164  *   it is the caller's responsibility to track the
4165  *   created flow.
4166  * @param[in] attr
4167  *   Flow rule attributes.
4168  * @param[in] items
4169  *   Pattern specification (list terminated by the END pattern item).
4170  * @param[in] actions
4171  *   Associated actions (list terminated by the END action).
4172  * @param[in] external
4173  *   This flow rule is created by a request external to the PMD.
4174  * @param[out] error
4175  *   Perform verbose error reporting if not NULL.
4176  *
4177  * @return
4178  *   A flow on success, NULL otherwise and rte_errno is set.
4179  */
4180 static struct rte_flow *
4181 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
4182 		 const struct rte_flow_attr *attr,
4183 		 const struct rte_flow_item items[],
4184 		 const struct rte_flow_action actions[],
4185 		 bool external, struct rte_flow_error *error)
4186 {
4187 	struct mlx5_priv *priv = dev->data->dev_private;
4188 	struct rte_flow *flow = NULL;
4189 	struct mlx5_flow *dev_flow;
4190 	const struct rte_flow_action_rss *rss;
4191 	union {
4192 		struct rte_flow_expand_rss buf;
4193 		uint8_t buffer[2048];
4194 	} expand_buffer;
4195 	union {
4196 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4197 		uint8_t buffer[2048];
4198 	} actions_rx;
4199 	union {
4200 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4201 		uint8_t buffer[2048];
4202 	} actions_hairpin_tx;
4203 	union {
4204 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
4205 		uint8_t buffer[2048];
4206 	} items_tx;
4207 	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
4208 	const struct rte_flow_action *p_actions_rx = actions;
4209 	uint32_t i;
4210 	uint32_t flow_size;
4211 	int hairpin_flow = 0;
4212 	uint32_t hairpin_id = 0;
4213 	struct rte_flow_attr attr_tx = { .priority = 0 };
4214 	int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
4215 				    error);
4216 
4217 	if (ret < 0)
4218 		return NULL;
4219 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
4220 	if (hairpin_flow > 0) {
4221 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
4222 			rte_errno = EINVAL;
4223 			return NULL;
4224 		}
4225 		flow_hairpin_split(dev, actions, actions_rx.actions,
4226 				   actions_hairpin_tx.actions, items_tx.items,
4227 				   &hairpin_id);
4228 		p_actions_rx = actions_rx.actions;
4229 	}
4230 	flow_size = sizeof(struct rte_flow);
4231 	rss = flow_get_rss_action(p_actions_rx);
4232 	if (rss)
4233 		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
4234 					    sizeof(void *));
4235 	else
4236 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
4237 	flow = rte_calloc(__func__, 1, flow_size, 0);
4238 	if (!flow) {
4239 		rte_errno = ENOMEM;
4240 		goto error_before_flow;
4241 	}
4242 	flow->drv_type = flow_get_drv_type(dev, attr);
4243 	if (hairpin_id != 0)
4244 		flow->hairpin_flow_id = hairpin_id;
4245 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
4246 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
4247 	flow->rss.queue = (void *)(flow + 1);
4248 	if (rss) {
4249 		/*
4250 		 * The following information is required by
4251 		 * mlx5_flow_hashfields_adjust() in advance.
4252 		 */
4253 		flow->rss.level = rss->level;
4254 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
4255 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4256 	}
4257 	LIST_INIT(&flow->dev_flows);
4258 	if (rss && rss->types) {
4259 		unsigned int graph_root;
4260 
4261 		graph_root = find_graph_root(items, rss->level);
4262 		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
4263 					  items, rss->types,
4264 					  mlx5_support_expansion,
4265 					  graph_root);
4266 		MLX5_ASSERT(ret > 0 &&
4267 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
4268 	} else {
4269 		buf->entries = 1;
4270 		buf->entry[0].pattern = (void *)(uintptr_t)items;
4271 	}
4272 	for (i = 0; i < buf->entries; ++i) {
4273 		/*
4274 		 * The splitter may create multiple dev_flows,
4275 		 * depending on configuration. In the simplest
4276 		 * case it just creates unmodified original flow.
4277 		 */
4278 		ret = flow_create_split_outer(dev, flow, attr,
4279 					      buf->entry[i].pattern,
4280 					      p_actions_rx, external,
4281 					      error);
4282 		if (ret < 0)
4283 			goto error;
4284 	}
4285 	/* Create the tx flow. */
4286 	if (hairpin_flow) {
4287 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
4288 		attr_tx.ingress = 0;
4289 		attr_tx.egress = 1;
4290 		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
4291 					    actions_hairpin_tx.actions, error);
4292 		if (!dev_flow)
4293 			goto error;
4294 		dev_flow->flow = flow;
4295 		dev_flow->external = 0;
4296 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
4297 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
4298 					 items_tx.items,
4299 					 actions_hairpin_tx.actions, error);
4300 		if (ret < 0)
4301 			goto error;
4302 	}
4303 	/*
4304 	 * Update the metadata register copy table. If extensive
4305 	 * metadata feature is enabled and registers are supported
4306 	 * we might create the extra rte_flow for each unique
4307 	 * MARK/FLAG action ID.
4308 	 *
4309 	 * The table is updated for ingress Flows only, because
4310 	 * the egress Flows belong to the different device and
4311 	 * copy table should be updated in peer NIC Rx domain.
4312 	 */
4313 	if (attr->ingress &&
4314 	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
4315 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
4316 		if (ret)
4317 			goto error;
4318 	}
4319 	if (dev->data->dev_started) {
4320 		ret = flow_drv_apply(dev, flow, error);
4321 		if (ret < 0)
4322 			goto error;
4323 	}
4324 	if (list)
4325 		TAILQ_INSERT_TAIL(list, flow, next);
4326 	flow_rxq_flags_set(dev, flow);
4327 	return flow;
4328 error_before_flow:
4329 	if (hairpin_id)
4330 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4331 				     hairpin_id);
4332 	return NULL;
4333 error:
4334 	MLX5_ASSERT(flow);
4335 	flow_mreg_del_copy_action(dev, flow);
4336 	ret = rte_errno; /* Save rte_errno before cleanup. */
4337 	if (flow->hairpin_flow_id)
4338 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4339 				     flow->hairpin_flow_id);
4340 	MLX5_ASSERT(flow);
4341 	flow_drv_destroy(dev, flow);
4342 	rte_free(flow);
4343 	rte_errno = ret; /* Restore rte_errno. */
4344 	return NULL;
4345 }
4346 
4347 /**
4348  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
4349  * incoming packets to table 1.
4350  *
4351  * Other flow rules, requested for group n, will be created in
4352  * e-switch table n+1.
4353  * A jump action to e-switch group n will jump to table n+1 instead.
4354  *
4355  * Used when working in switchdev mode, to utilise advantages of table 1
4356  * and above.
4357  *
4358  * @param dev
4359  *   Pointer to Ethernet device.
4360  *
4361  * @return
4362  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
4363  */
4364 struct rte_flow *
4365 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
4366 {
4367 	const struct rte_flow_attr attr = {
4368 		.group = 0,
4369 		.priority = 0,
4370 		.ingress = 1,
4371 		.egress = 0,
4372 		.transfer = 1,
4373 	};
4374 	const struct rte_flow_item pattern = {
4375 		.type = RTE_FLOW_ITEM_TYPE_END,
4376 	};
4377 	struct rte_flow_action_jump jump = {
4378 		.group = 1,
4379 	};
4380 	const struct rte_flow_action actions[] = {
4381 		{
4382 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4383 			.conf = &jump,
4384 		},
4385 		{
4386 			.type = RTE_FLOW_ACTION_TYPE_END,
4387 		},
4388 	};
4389 	struct mlx5_priv *priv = dev->data->dev_private;
4390 	struct rte_flow_error error;
4391 
4392 	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
4393 				actions, false, &error);
4394 }
4395 
4396 /**
4397  * Create a flow.
4398  *
4399  * @see rte_flow_create()
4400  * @see rte_flow_ops
4401  */
4402 struct rte_flow *
4403 mlx5_flow_create(struct rte_eth_dev *dev,
4404 		 const struct rte_flow_attr *attr,
4405 		 const struct rte_flow_item items[],
4406 		 const struct rte_flow_action actions[],
4407 		 struct rte_flow_error *error)
4408 {
4409 	struct mlx5_priv *priv = dev->data->dev_private;
4410 
4411 	return flow_list_create(dev, &priv->flows,
4412 				attr, items, actions, true, error);
4413 }
4414 
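/*
 * Usage sketch (not part of the driver; values hypothetical): applications
 * reach mlx5_flow_create() through the generic rte_flow API, e.g.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 *
 * rte_flow_create() dispatches here through the rte_flow_ops table.
 */
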
4415 /**
4416  * Destroy a flow in a list.
4417  *
4418  * @param dev
4419  *   Pointer to Ethernet device.
4420  * @param list
4421  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4422  *   the flow is not removed from any list.
4423  * @param[in] flow
4424  *   Flow to destroy.
4425  */
4426 static void
4427 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
4428 		  struct rte_flow *flow)
4429 {
4430 	struct mlx5_priv *priv = dev->data->dev_private;
4431 
4432 	/*
4433 	 * Update RX queue flags only if port is started, otherwise it is
4434 	 * already clean.
4435 	 */
4436 	if (dev->data->dev_started)
4437 		flow_rxq_flags_trim(dev, flow);
4438 	if (flow->hairpin_flow_id)
4439 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4440 				     flow->hairpin_flow_id);
4441 	flow_drv_destroy(dev, flow);
4442 	if (list)
4443 		TAILQ_REMOVE(list, flow, next);
4444 	flow_mreg_del_copy_action(dev, flow);
4445 	rte_free(flow->fdir);
4446 	rte_free(flow);
4447 }
4448 
4449 /**
4450  * Destroy all flows.
4451  *
4452  * @param dev
4453  *   Pointer to Ethernet device.
4454  * @param list
4455  *   Pointer to a TAILQ flow list.
4456  */
4457 void
4458 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
4459 {
4460 	while (!TAILQ_EMPTY(list)) {
4461 		struct rte_flow *flow;
4462 
4463 		flow = TAILQ_FIRST(list);
4464 		flow_list_destroy(dev, list, flow);
4465 	}
4466 }
4467 
4468 /**
4469  * Remove all flows.
4470  *
4471  * @param dev
4472  *   Pointer to Ethernet device.
4473  * @param list
4474  *   Pointer to a TAILQ flow list.
4475  */
4476 void
4477 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
4478 {
4479 	struct rte_flow *flow;
4480 
4481 	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
4482 		flow_drv_remove(dev, flow);
4483 		flow_mreg_stop_copy_action(dev, flow);
4484 	}
4485 	flow_mreg_del_default_copy_action(dev);
4486 	flow_rxq_flags_clear(dev);
4487 }
4488 
4489 /**
4490  * Add all flows.
4491  *
4492  * @param dev
4493  *   Pointer to Ethernet device.
4494  * @param list
4495  *   Pointer to a TAILQ flow list.
4496  *
4497  * @return
4498  *   0 on success, a negative errno value otherwise and rte_errno is set.
4499  */
4500 int
4501 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
4502 {
4503 	struct rte_flow *flow;
4504 	struct rte_flow_error error;
4505 	int ret = 0;
4506 
4507 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
4508 	ret = flow_mreg_add_default_copy_action(dev, &error);
4509 	if (ret < 0)
4510 		return -rte_errno;
4511 	/* Apply Flows created by application. */
4512 	TAILQ_FOREACH(flow, list, next) {
4513 		ret = flow_mreg_start_copy_action(dev, flow);
4514 		if (ret < 0)
4515 			goto error;
4516 		ret = flow_drv_apply(dev, flow, &error);
4517 		if (ret < 0)
4518 			goto error;
4519 		flow_rxq_flags_set(dev, flow);
4520 	}
4521 	return 0;
4522 error:
4523 	ret = rte_errno; /* Save rte_errno before cleanup. */
4524 	mlx5_flow_stop(dev, list);
4525 	rte_errno = ret; /* Restore rte_errno. */
4526 	return -rte_errno;
4527 }
4528 
4529 /**
4530  * Verify the flow list is empty.
4531  *
4532  * @param dev
4533  *  Pointer to Ethernet device.
4534  *
4535  * @return the number of flows not released.
4536  */
4537 int
4538 mlx5_flow_verify(struct rte_eth_dev *dev)
4539 {
4540 	struct mlx5_priv *priv = dev->data->dev_private;
4541 	struct rte_flow *flow;
4542 	int ret = 0;
4543 
4544 	TAILQ_FOREACH(flow, &priv->flows, next) {
4545 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
4546 			dev->data->port_id, (void *)flow);
4547 		++ret;
4548 	}
4549 	return ret;
4550 }
4551 
4552 /**
4553  * Enable default hairpin egress flow.
4554  *
4555  * @param dev
4556  *   Pointer to Ethernet device.
4557  * @param queue
4558  *   The queue index.
4559  *
4560  * @return
4561  *   0 on success, a negative errno value otherwise and rte_errno is set.
4562  */
4563 int
4564 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
4565 			    uint32_t queue)
4566 {
4567 	struct mlx5_priv *priv = dev->data->dev_private;
4568 	const struct rte_flow_attr attr = {
4569 		.egress = 1,
4570 		.priority = 0,
4571 	};
4572 	struct mlx5_rte_flow_item_tx_queue queue_spec = {
4573 		.queue = queue,
4574 	};
4575 	struct mlx5_rte_flow_item_tx_queue queue_mask = {
4576 		.queue = UINT32_MAX,
4577 	};
4578 	struct rte_flow_item items[] = {
4579 		{
4580 			.type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
4581 			.spec = &queue_spec,
4582 			.last = NULL,
4583 			.mask = &queue_mask,
4584 		},
4585 		{
4586 			.type = RTE_FLOW_ITEM_TYPE_END,
4587 		},
4588 	};
4589 	struct rte_flow_action_jump jump = {
4590 		.group = MLX5_HAIRPIN_TX_TABLE,
4591 	};
4592 	struct rte_flow_action actions[2];
4593 	struct rte_flow *flow;
4594 	struct rte_flow_error error;
4595 
4596 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
4597 	actions[0].conf = &jump;
4598 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
4599 	flow = flow_list_create(dev, &priv->ctrl_flows,
4600 				&attr, items, actions, false, &error);
4601 	if (!flow) {
4602 		DRV_LOG(DEBUG,
4603 			"Failed to create ctrl flow: rte_errno(%d),"
4604 			" type(%d), message(%s)",
4605 			rte_errno, error.type,
4606 			error.message ? error.message : " (no stated reason)");
4607 		return -rte_errno;
4608 	}
4609 	return 0;
4610 }
4611 
4612 /**
4613  * Enable a control flow configured from the control plane.
4614  *
4615  * @param dev
4616  *   Pointer to Ethernet device.
4617  * @param eth_spec
4618  *   An Ethernet flow spec to apply.
4619  * @param eth_mask
4620  *   An Ethernet flow mask to apply.
4621  * @param vlan_spec
4622  *   A VLAN flow spec to apply.
4623  * @param vlan_mask
4624  *   A VLAN flow mask to apply.
4625  *
4626  * @return
4627  *   0 on success, a negative errno value otherwise and rte_errno is set.
4628  */
4629 int
4630 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
4631 		    struct rte_flow_item_eth *eth_spec,
4632 		    struct rte_flow_item_eth *eth_mask,
4633 		    struct rte_flow_item_vlan *vlan_spec,
4634 		    struct rte_flow_item_vlan *vlan_mask)
4635 {
4636 	struct mlx5_priv *priv = dev->data->dev_private;
4637 	const struct rte_flow_attr attr = {
4638 		.ingress = 1,
4639 		.priority = MLX5_FLOW_PRIO_RSVD,
4640 	};
4641 	struct rte_flow_item items[] = {
4642 		{
4643 			.type = RTE_FLOW_ITEM_TYPE_ETH,
4644 			.spec = eth_spec,
4645 			.last = NULL,
4646 			.mask = eth_mask,
4647 		},
4648 		{
4649 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
4650 					      RTE_FLOW_ITEM_TYPE_END,
4651 			.spec = vlan_spec,
4652 			.last = NULL,
4653 			.mask = vlan_mask,
4654 		},
4655 		{
4656 			.type = RTE_FLOW_ITEM_TYPE_END,
4657 		},
4658 	};
4659 	uint16_t queue[priv->reta_idx_n];
4660 	struct rte_flow_action_rss action_rss = {
4661 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4662 		.level = 0,
4663 		.types = priv->rss_conf.rss_hf,
4664 		.key_len = priv->rss_conf.rss_key_len,
4665 		.queue_num = priv->reta_idx_n,
4666 		.key = priv->rss_conf.rss_key,
4667 		.queue = queue,
4668 	};
4669 	struct rte_flow_action actions[] = {
4670 		{
4671 			.type = RTE_FLOW_ACTION_TYPE_RSS,
4672 			.conf = &action_rss,
4673 		},
4674 		{
4675 			.type = RTE_FLOW_ACTION_TYPE_END,
4676 		},
4677 	};
4678 	struct rte_flow *flow;
4679 	struct rte_flow_error error;
4680 	unsigned int i;
4681 
4682 	if (!priv->reta_idx_n || !priv->rxqs_n)
4683 		return 0;
4685 	for (i = 0; i != priv->reta_idx_n; ++i)
4686 		queue[i] = (*priv->reta_idx)[i];
4687 	flow = flow_list_create(dev, &priv->ctrl_flows,
4688 				&attr, items, actions, false, &error);
4689 	if (!flow)
4690 		return -rte_errno;
4691 	return 0;
4692 }
4693 
4694 /**
4695  * Enable a control flow configured from the control plane.
4696  *
4697  * @param dev
4698  *   Pointer to Ethernet device.
4699  * @param eth_spec
4700  *   An Ethernet flow spec to apply.
4701  * @param eth_mask
4702  *   An Ethernet flow mask to apply.
4703  *
4704  * @return
4705  *   0 on success, a negative errno value otherwise and rte_errno is set.
4706  */
4707 int
4708 mlx5_ctrl_flow(struct rte_eth_dev *dev,
4709 	       struct rte_flow_item_eth *eth_spec,
4710 	       struct rte_flow_item_eth *eth_mask)
4711 {
4712 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
4713 }
4714 
4715 /**
4716  * Destroy a flow.
4717  *
4718  * @see rte_flow_destroy()
4719  * @see rte_flow_ops
4720  */
4721 int
4722 mlx5_flow_destroy(struct rte_eth_dev *dev,
4723 		  struct rte_flow *flow,
4724 		  struct rte_flow_error *error __rte_unused)
4725 {
4726 	struct mlx5_priv *priv = dev->data->dev_private;
4727 
4728 	flow_list_destroy(dev, &priv->flows, flow);
4729 	return 0;
4730 }
4731 
4732 /**
4733  * Destroy all flows.
4734  *
4735  * @see rte_flow_flush()
4736  * @see rte_flow_ops
4737  */
4738 int
4739 mlx5_flow_flush(struct rte_eth_dev *dev,
4740 		struct rte_flow_error *error __rte_unused)
4741 {
4742 	struct mlx5_priv *priv = dev->data->dev_private;
4743 
4744 	mlx5_flow_list_flush(dev, &priv->flows);
4745 	return 0;
4746 }
4747 
4748 /**
4749  * Isolated mode.
4750  *
4751  * @see rte_flow_isolate()
4752  * @see rte_flow_ops
4753  */
4754 int
4755 mlx5_flow_isolate(struct rte_eth_dev *dev,
4756 		  int enable,
4757 		  struct rte_flow_error *error)
4758 {
4759 	struct mlx5_priv *priv = dev->data->dev_private;
4760 
4761 	if (dev->data->dev_started) {
4762 		rte_flow_error_set(error, EBUSY,
4763 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4764 				   NULL,
4765 				   "port must be stopped first");
4766 		return -rte_errno;
4767 	}
4768 	priv->isolated = !!enable;
4769 	if (enable)
4770 		dev->dev_ops = &mlx5_dev_ops_isolate;
4771 	else
4772 		dev->dev_ops = &mlx5_dev_ops;
4773 	return 0;
4774 }
4775 
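/*
 * Usage sketch (not part of the driver): isolated mode must be selected
 * while the port is stopped, typically right after rte_eth_dev_configure():
 *
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_isolate(port_id, 1, &err))
 *           printf("isolate failed: %s\n",
 *                  err.message ? err.message : "(no stated reason)");
 *   rte_eth_dev_start(port_id);
 */
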
4776 /**
4777  * Query a flow.
4778  *
4779  * @see rte_flow_query()
4780  * @see rte_flow_ops
4781  */
4782 static int
4783 flow_drv_query(struct rte_eth_dev *dev,
4784 	       struct rte_flow *flow,
4785 	       const struct rte_flow_action *actions,
4786 	       void *data,
4787 	       struct rte_flow_error *error)
4788 {
4789 	const struct mlx5_flow_driver_ops *fops;
4790 	enum mlx5_flow_drv_type ftype = flow->drv_type;
4791 
4792 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
4793 	fops = flow_get_drv_ops(ftype);
4794 
4795 	return fops->query(dev, flow, actions, data, error);
4796 }
4797 
4798 /**
4799  * Query a flow.
4800  *
4801  * @see rte_flow_query()
4802  * @see rte_flow_ops
4803  */
4804 int
4805 mlx5_flow_query(struct rte_eth_dev *dev,
4806 		struct rte_flow *flow,
4807 		const struct rte_flow_action *actions,
4808 		void *data,
4809 		struct rte_flow_error *error)
4810 {
4811 	int ret;
4812 
4813 	ret = flow_drv_query(dev, flow, actions, data, error);
4814 	if (ret < 0)
4815 		return ret;
4816 	return 0;
4817 }
4818 
4819 /**
4820  * Convert a flow director filter to a generic flow.
4821  *
4822  * @param dev
4823  *   Pointer to Ethernet device.
4824  * @param fdir_filter
4825  *   Flow director filter to add.
4826  * @param attributes
4827  *   Generic flow parameters structure.
4828  *
4829  * @return
4830  *   0 on success, a negative errno value otherwise and rte_errno is set.
4831  */
4832 static int
4833 flow_fdir_filter_convert(struct rte_eth_dev *dev,
4834 			 const struct rte_eth_fdir_filter *fdir_filter,
4835 			 struct mlx5_fdir *attributes)
4836 {
4837 	struct mlx5_priv *priv = dev->data->dev_private;
4838 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
4839 	const struct rte_eth_fdir_masks *mask =
4840 		&dev->data->dev_conf.fdir_conf.mask;
4841 
4842 	/* Validate queue number. */
4843 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
4844 		DRV_LOG(ERR, "port %u invalid queue number %d",
4845 			dev->data->port_id, fdir_filter->action.rx_queue);
4846 		rte_errno = EINVAL;
4847 		return -rte_errno;
4848 	}
4849 	attributes->attr.ingress = 1;
4850 	attributes->items[0] = (struct rte_flow_item) {
4851 		.type = RTE_FLOW_ITEM_TYPE_ETH,
4852 		.spec = &attributes->l2,
4853 		.mask = &attributes->l2_mask,
4854 	};
4855 	switch (fdir_filter->action.behavior) {
4856 	case RTE_ETH_FDIR_ACCEPT:
4857 		attributes->actions[0] = (struct rte_flow_action){
4858 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
4859 			.conf = &attributes->queue,
4860 		};
4861 		break;
4862 	case RTE_ETH_FDIR_REJECT:
4863 		attributes->actions[0] = (struct rte_flow_action){
4864 			.type = RTE_FLOW_ACTION_TYPE_DROP,
4865 		};
4866 		break;
4867 	default:
4868 		DRV_LOG(ERR, "port %u invalid behavior %d",
4869 			dev->data->port_id,
4870 			fdir_filter->action.behavior);
4871 		rte_errno = ENOTSUP;
4872 		return -rte_errno;
4873 	}
4874 	attributes->queue.index = fdir_filter->action.rx_queue;
4875 	/* Handle L3. */
4876 	switch (fdir_filter->input.flow_type) {
4877 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
4878 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
4879 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
4880 		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
4881 			.src_addr = input->flow.ip4_flow.src_ip,
4882 			.dst_addr = input->flow.ip4_flow.dst_ip,
4883 			.time_to_live = input->flow.ip4_flow.ttl,
4884 			.type_of_service = input->flow.ip4_flow.tos,
4885 		};
4886 		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
4887 			.src_addr = mask->ipv4_mask.src_ip,
4888 			.dst_addr = mask->ipv4_mask.dst_ip,
4889 			.time_to_live = mask->ipv4_mask.ttl,
4890 			.type_of_service = mask->ipv4_mask.tos,
4891 			.next_proto_id = mask->ipv4_mask.proto,
4892 		};
4893 		attributes->items[1] = (struct rte_flow_item){
4894 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
4895 			.spec = &attributes->l3,
4896 			.mask = &attributes->l3_mask,
4897 		};
4898 		break;
4899 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
4900 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
4901 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
4902 		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
4903 			.hop_limits = input->flow.ipv6_flow.hop_limits,
4904 			.proto = input->flow.ipv6_flow.proto,
4905 		};
4906 
4907 		memcpy(attributes->l3.ipv6.hdr.src_addr,
4908 		       input->flow.ipv6_flow.src_ip,
4909 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
4910 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
4911 		       input->flow.ipv6_flow.dst_ip,
4912 		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
4913 		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
4914 		       mask->ipv6_mask.src_ip,
4915 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
4916 		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
4917 		       mask->ipv6_mask.dst_ip,
4918 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
4919 		attributes->items[1] = (struct rte_flow_item){
4920 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
4921 			.spec = &attributes->l3,
4922 			.mask = &attributes->l3_mask,
4923 		};
4924 		break;
4925 	default:
4926 		DRV_LOG(ERR, "port %u invalid flow type %d",
4927 			dev->data->port_id, fdir_filter->input.flow_type);
4928 		rte_errno = ENOTSUP;
4929 		return -rte_errno;
4930 	}
4931 	/* Handle L4. */
4932 	switch (fdir_filter->input.flow_type) {
4933 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
4934 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
4935 			.src_port = input->flow.udp4_flow.src_port,
4936 			.dst_port = input->flow.udp4_flow.dst_port,
4937 		};
4938 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
4939 			.src_port = mask->src_port_mask,
4940 			.dst_port = mask->dst_port_mask,
4941 		};
4942 		attributes->items[2] = (struct rte_flow_item){
4943 			.type = RTE_FLOW_ITEM_TYPE_UDP,
4944 			.spec = &attributes->l4,
4945 			.mask = &attributes->l4_mask,
4946 		};
4947 		break;
4948 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
4949 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
4950 			.src_port = input->flow.tcp4_flow.src_port,
4951 			.dst_port = input->flow.tcp4_flow.dst_port,
4952 		};
4953 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
4954 			.src_port = mask->src_port_mask,
4955 			.dst_port = mask->dst_port_mask,
4956 		};
4957 		attributes->items[2] = (struct rte_flow_item){
4958 			.type = RTE_FLOW_ITEM_TYPE_TCP,
4959 			.spec = &attributes->l4,
4960 			.mask = &attributes->l4_mask,
4961 		};
4962 		break;
4963 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
4964 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
4965 			.src_port = input->flow.udp6_flow.src_port,
4966 			.dst_port = input->flow.udp6_flow.dst_port,
4967 		};
4968 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
4969 			.src_port = mask->src_port_mask,
4970 			.dst_port = mask->dst_port_mask,
4971 		};
4972 		attributes->items[2] = (struct rte_flow_item){
4973 			.type = RTE_FLOW_ITEM_TYPE_UDP,
4974 			.spec = &attributes->l4,
4975 			.mask = &attributes->l4_mask,
4976 		};
4977 		break;
4978 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
4979 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
4980 			.src_port = input->flow.tcp6_flow.src_port,
4981 			.dst_port = input->flow.tcp6_flow.dst_port,
4982 		};
4983 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
4984 			.src_port = mask->src_port_mask,
4985 			.dst_port = mask->dst_port_mask,
4986 		};
4987 		attributes->items[2] = (struct rte_flow_item){
4988 			.type = RTE_FLOW_ITEM_TYPE_TCP,
4989 			.spec = &attributes->l4,
4990 			.mask = &attributes->l4_mask,
4991 		};
4992 		break;
4993 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
4994 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
4995 		break;
4996 	default:
4997 		DRV_LOG(ERR, "port %u invalid flow type %d",
4998 			dev->data->port_id, fdir_filter->input.flow_type);
4999 		rte_errno = ENOTSUP;
5000 		return -rte_errno;
5001 	}
5002 	return 0;
5003 }
5004 
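/*
 * Illustrative sketch (not part of the driver; values hypothetical): an
 * FDIR filter for RTE_ETH_FLOW_NONFRAG_IPV4_UDP with behavior
 * RTE_ETH_FDIR_ACCEPT and rx_queue = 3 converts to the equivalent
 * rte_flow
 *
 *   pattern: ETH / IPV4(src, dst, ttl, tos) / UDP(sport, dport)
 *   actions: QUEUE(index = 3) / END
 *
 * with the masks taken from dev_conf.fdir_conf.mask.
 */
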
5005 #define FLOW_FDIR_CMP(f1, f2, fld) \
5006 	memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
5007 
5008 /**
5009  * Compare two FDIR flows. If items and actions are identical, the two flows are
5010  * regarded as the same.
5011  *
5012  * @param dev
5013  *   Pointer to Ethernet device.
5014  * @param f1
5015  *   FDIR flow to compare.
5016  * @param f2
5017  *   FDIR flow to compare.
5018  *
5019  * @return
5020  *   Zero on match, 1 otherwise.
5021  */
5022 static int
5023 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
5024 {
5025 	if (FLOW_FDIR_CMP(f1, f2, attr) ||
5026 	    FLOW_FDIR_CMP(f1, f2, l2) ||
5027 	    FLOW_FDIR_CMP(f1, f2, l2_mask) ||
5028 	    FLOW_FDIR_CMP(f1, f2, l3) ||
5029 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
5030 	    FLOW_FDIR_CMP(f1, f2, l4) ||
5031 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
5032 	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
5033 		return 1;
5034 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
5035 	    FLOW_FDIR_CMP(f1, f2, queue))
5036 		return 1;
5037 	return 0;
5038 }
5039 
5040 /**
5041  * Search the device flow list to find a matching FDIR flow.
5042  *
5043  * @param dev
5044  *   Pointer to Ethernet device.
5045  * @param fdir_flow
5046  *   FDIR flow to lookup.
5047  *
5048  * @return
5049  *   Pointer of flow if found, NULL otherwise.
5050  */
5051 static struct rte_flow *
5052 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
5053 {
5054 	struct mlx5_priv *priv = dev->data->dev_private;
5055 	struct rte_flow *flow = NULL;
5056 
5057 	MLX5_ASSERT(fdir_flow);
5058 	TAILQ_FOREACH(flow, &priv->flows, next) {
5059 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
5060 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
5061 				dev->data->port_id, (void *)flow);
5062 			break;
5063 		}
5064 	}
5065 	return flow;
5066 }
5067 
5068 /**
5069  * Add new flow director filter and store it in list.
5070  *
5071  * @param dev
5072  *   Pointer to Ethernet device.
5073  * @param fdir_filter
5074  *   Flow director filter to add.
5075  *
5076  * @return
5077  *   0 on success, a negative errno value otherwise and rte_errno is set.
5078  */
5079 static int
5080 flow_fdir_filter_add(struct rte_eth_dev *dev,
5081 		     const struct rte_eth_fdir_filter *fdir_filter)
5082 {
5083 	struct mlx5_priv *priv = dev->data->dev_private;
5084 	struct mlx5_fdir *fdir_flow;
5085 	struct rte_flow *flow;
5086 	int ret;
5087 
5088 	fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
5089 	if (!fdir_flow) {
5090 		rte_errno = ENOMEM;
5091 		return -rte_errno;
5092 	}
5093 	ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
5094 	if (ret)
5095 		goto error;
5096 	flow = flow_fdir_filter_lookup(dev, fdir_flow);
5097 	if (flow) {
5098 		rte_errno = EEXIST;
5099 		goto error;
5100 	}
5101 	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
5102 				fdir_flow->items, fdir_flow->actions, true,
5103 				NULL);
5104 	if (!flow)
5105 		goto error;
5106 	MLX5_ASSERT(!flow->fdir);
5107 	flow->fdir = fdir_flow;
5108 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
5109 		dev->data->port_id, (void *)flow);
5110 	return 0;
5111 error:
5112 	rte_free(fdir_flow);
5113 	return -rte_errno;
5114 }
5115 
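/*
 * Usage sketch (hypothetical application code, legacy filter API):
 * steer IPv4/UDP packets from 192.0.2.1 to Rx queue 3. Field values
 * are examples only; port_id and ret are assumed to be in scope.
 *
 *     struct rte_eth_fdir_filter f = {
 *         .input = {
 *             .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *             .flow.udp4_flow.ip.src_ip =
 *                 rte_cpu_to_be_32(RTE_IPV4(192, 0, 2, 1)),
 *         },
 *         .action = {
 *             .rx_queue = 3,
 *             .behavior = RTE_ETH_FDIR_ACCEPT,
 *         },
 *     };
 *
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                   RTE_ETH_FILTER_ADD, &f);
 */
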
5116 /**
5117  * Delete a specific filter.
5118  *
5119  * @param dev
5120  *   Pointer to Ethernet device.
5121  * @param fdir_filter
5122  *   Filter to be deleted.
5123  *
5124  * @return
5125  *   0 on success, a negative errno value otherwise and rte_errno is set.
5126  */
5127 static int
5128 flow_fdir_filter_delete(struct rte_eth_dev *dev,
5129 			const struct rte_eth_fdir_filter *fdir_filter)
5130 {
5131 	struct mlx5_priv *priv = dev->data->dev_private;
5132 	struct rte_flow *flow;
5133 	struct mlx5_fdir fdir_flow = {
5134 		.attr.group = 0,
5135 	};
5136 	int ret;
5137 
5138 	ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
5139 	if (ret)
5140 		return -rte_errno;
5141 	flow = flow_fdir_filter_lookup(dev, &fdir_flow);
5142 	if (!flow) {
5143 		rte_errno = ENOENT;
5144 		return -rte_errno;
5145 	}
5146 	flow_list_destroy(dev, &priv->flows, flow);
5147 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
5148 		dev->data->port_id, (void *)flow);
5149 	return 0;
5150 }
5151 
5152 /**
5153  * Update the queue for a specific filter.
5154  *
5155  * @param dev
5156  *   Pointer to Ethernet device.
5157  * @param fdir_filter
5158  *   Filter to be updated.
5159  *
5160  * @return
5161  *   0 on success, a negative errno value otherwise and rte_errno is set.
5162  */
5163 static int
5164 flow_fdir_filter_update(struct rte_eth_dev *dev,
5165 			const struct rte_eth_fdir_filter *fdir_filter)
5166 {
5167 	int ret;
5168 
5169 	ret = flow_fdir_filter_delete(dev, fdir_filter);
5170 	if (ret)
5171 		return ret;
5172 	return flow_fdir_filter_add(dev, fdir_filter);
5173 }
5174 
5175 /**
5176  * Flush all filters.
5177  *
5178  * @param dev
5179  *   Pointer to Ethernet device.
5180  */
5181 static void
5182 flow_fdir_filter_flush(struct rte_eth_dev *dev)
5183 {
5184 	struct mlx5_priv *priv = dev->data->dev_private;
5185 
5186 	mlx5_flow_list_flush(dev, &priv->flows);
5187 }
5188 
5189 /**
5190  * Get flow director information.
5191  *
5192  * @param dev
5193  *   Pointer to Ethernet device.
5194  * @param[out] fdir_info
5195  *   Resulting flow director information.
5196  */
5197 static void
5198 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
5199 {
5200 	struct rte_eth_fdir_masks *mask =
5201 		&dev->data->dev_conf.fdir_conf.mask;
5202 
5203 	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
5204 	fdir_info->guarant_spc = 0;
5205 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
5206 	fdir_info->max_flexpayload = 0;
5207 	fdir_info->flow_types_mask[0] = 0;
5208 	fdir_info->flex_payload_unit = 0;
5209 	fdir_info->max_flex_payload_segment_num = 0;
5210 	fdir_info->flex_payload_limit = 0;
5211 	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
5212 }
5213 
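/*
 * Usage sketch (hypothetical): the information filled in above is
 * reached through the legacy filter API, e.g.
 *
 *     struct rte_eth_fdir_info info;
 *
 *     if (!rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                  RTE_ETH_FILTER_INFO, &info))
 *         printf("FDIR mode: %d\n", info.mode);
 */
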
5214 /**
5215  * Deal with flow director operations.
5216  *
5217  * @param dev
5218  *   Pointer to Ethernet device.
5219  * @param filter_op
5220  *   Operation to perform.
5221  * @param arg
5222  *   Pointer to operation-specific structure.
5223  *
5224  * @return
5225  *   0 on success, a negative errno value otherwise and rte_errno is set.
5226  */
5227 static int
5228 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5229 		    void *arg)
5230 {
5231 	enum rte_fdir_mode fdir_mode =
5232 		dev->data->dev_conf.fdir_conf.mode;
5233 
5234 	if (filter_op == RTE_ETH_FILTER_NOP)
5235 		return 0;
5236 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
5237 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
5238 		DRV_LOG(ERR, "port %u flow director mode %d not supported",
5239 			dev->data->port_id, fdir_mode);
5240 		rte_errno = EINVAL;
5241 		return -rte_errno;
5242 	}
5243 	switch (filter_op) {
5244 	case RTE_ETH_FILTER_ADD:
5245 		return flow_fdir_filter_add(dev, arg);
5246 	case RTE_ETH_FILTER_UPDATE:
5247 		return flow_fdir_filter_update(dev, arg);
5248 	case RTE_ETH_FILTER_DELETE:
5249 		return flow_fdir_filter_delete(dev, arg);
5250 	case RTE_ETH_FILTER_FLUSH:
5251 		flow_fdir_filter_flush(dev);
5252 		break;
5253 	case RTE_ETH_FILTER_INFO:
5254 		flow_fdir_info_get(dev, arg);
5255 		break;
5256 	default:
5257 		DRV_LOG(DEBUG, "port %u unknown operation %u",
5258 			dev->data->port_id, filter_op);
5259 		rte_errno = EINVAL;
5260 		return -rte_errno;
5261 	}
5262 	return 0;
5263 }
5264 
5265 /**
5266  * Manage filter operations.
5267  *
5268  * @param dev
5269  *   Pointer to Ethernet device structure.
5270  * @param filter_type
5271  *   Filter type.
5272  * @param filter_op
5273  *   Operation to perform.
5274  * @param arg
5275  *   Pointer to operation-specific structure.
5276  *
5277  * @return
5278  *   0 on success, a negative errno value otherwise and rte_errno is set.
5279  */
5280 int
5281 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
5282 		     enum rte_filter_type filter_type,
5283 		     enum rte_filter_op filter_op,
5284 		     void *arg)
5285 {
5286 	switch (filter_type) {
5287 	case RTE_ETH_FILTER_GENERIC:
5288 		if (filter_op != RTE_ETH_FILTER_GET) {
5289 			rte_errno = EINVAL;
5290 			return -rte_errno;
5291 		}
5292 		*(const void **)arg = &mlx5_flow_ops;
5293 		return 0;
5294 	case RTE_ETH_FILTER_FDIR:
5295 		return flow_fdir_ctrl_func(dev, filter_op, arg);
5296 	default:
5297 		DRV_LOG(ERR, "port %u filter type (%d) not supported",
5298 			dev->data->port_id, filter_type);
5299 		rte_errno = ENOTSUP;
5300 		return -rte_errno;
5301 	}
5302 	return 0;
5303 }
5304 
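/*
 * The RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET path above is how the
 * rte_flow layer obtains the driver callbacks; conceptually:
 *
 *     const struct rte_flow_ops *ops = NULL;
 *
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                             RTE_ETH_FILTER_GET, &ops);
 *     // ops now points to mlx5_flow_ops.
 */
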
5305 /**
5306  * Create the needed meter and suffix tables.
5307  *
5308  * @param[in] dev
5309  *   Pointer to Ethernet device.
5310  * @param[in] fm
5311  *   Pointer to the flow meter.
5312  *
5313  * @return
5314  *   Pointer to table set on success, NULL otherwise.
5315  */
5316 struct mlx5_meter_domains_infos *
5317 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
5318 			  const struct mlx5_flow_meter *fm)
5319 {
5320 	const struct mlx5_flow_driver_ops *fops;
5321 
5322 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5323 	return fops->create_mtr_tbls(dev, fm);
5324 }
5325 
5326 /**
5327  * Destroy the meter table set.
5328  *
5329  * @param[in] dev
5330  *   Pointer to Ethernet device.
5331  * @param[in] tbl
5332  *   Pointer to the meter table set.
5333  *
5334  * @return
5335  *   0 on success.
5336  */
5337 int
5338 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
5339 			   struct mlx5_meter_domains_infos *tbls)
5340 {
5341 	const struct mlx5_flow_driver_ops *fops;
5342 
5343 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5344 	return fops->destroy_mtr_tbls(dev, tbls);
5345 }
5346 
5347 /**
5348  * Create policer rules.
5349  *
5350  * @param[in] dev
5351  *   Pointer to Ethernet device.
5352  * @param[in] fm
5353  *   Pointer to flow meter structure.
5354  * @param[in] attr
5355  *   Pointer to flow attributes.
5356  *
5357  * @return
5358  *   0 on success, -1 otherwise.
5359  */
5360 int
5361 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
5362 			       struct mlx5_flow_meter *fm,
5363 			       const struct rte_flow_attr *attr)
5364 {
5365 	const struct mlx5_flow_driver_ops *fops;
5366 
5367 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5368 	return fops->create_policer_rules(dev, fm, attr);
5369 }
5370 
5371 /**
5372  * Destroy policer rules.
5373  *
 * @param[in] dev
 *   Pointer to Ethernet device.
5374  * @param[in] fm
5375  *   Pointer to flow meter structure.
5376  * @param[in] attr
5377  *   Pointer to flow attributes.
5378  *
5379  * @return
5380  *   0 on success, -1 otherwise.
5381  */
5382 int
5383 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
5384 				struct mlx5_flow_meter *fm,
5385 				const struct rte_flow_attr *attr)
5386 {
5387 	const struct mlx5_flow_driver_ops *fops;
5388 
5389 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5390 	return fops->destroy_policer_rules(dev, fm, attr);
5391 }
5392 
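/*
 * Note: the meter and policer entry points above dispatch to the DV
 * driver unconditionally, unlike the counter entry points below which
 * check flow_get_drv_type() first. A caller-side guard could look like
 * this (sketch only; priv and fm assumed in scope):
 *
 *     if (priv->config.dv_flow_en)
 *         mtd = mlx5_flow_create_mtr_tbls(dev, fm);
 */
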
5393 /**
5394  * Allocate a counter.
5395  *
5396  * @param[in] dev
5397  *   Pointer to Ethernet device structure.
5398  *
5399  * @return
5400  *   Pointer to the allocated counter on success, NULL otherwise.
5401  */
5402 struct mlx5_flow_counter *
5403 mlx5_counter_alloc(struct rte_eth_dev *dev)
5404 {
5405 	const struct mlx5_flow_driver_ops *fops;
5406 	struct rte_flow_attr attr = { .transfer = 0 };
5407 
5408 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5409 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5410 		return fops->counter_alloc(dev);
5411 	}
5412 	DRV_LOG(ERR,
5413 		"port %u counter allocation is not supported.",
5414 		dev->data->port_id);
5415 	return NULL;
5416 }
5417 
5418 /**
5419  * Free a counter.
5420  *
5421  * @param[in] dev
5422  *   Pointer to Ethernet device structure.
5423  * @param[in] cnt
5424  *   Pointer to the counter to be freed.
5425  */
5426 void
5427 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
5428 {
5429 	const struct mlx5_flow_driver_ops *fops;
5430 	struct rte_flow_attr attr = { .transfer = 0 };
5431 
5432 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5433 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5434 		fops->counter_free(dev, cnt);
5435 		return;
5436 	}
5437 	DRV_LOG(ERR,
5438 		"port %u counter freeing is not supported.",
5439 		dev->data->port_id);
5440 }
5441 
5442 /**
5443  * Query counter statistics.
5444  *
5445  * @param[in] dev
5446  *   Pointer to Ethernet device structure.
5447  * @param[in] cnt
5448  *   Pointer to counter to query.
5449  * @param[in] clear
5450  *   Set to clear counter statistics.
5451  * @param[out] pkts
5452  *   Where to store the number of packets the counter matched.
5453  * @param[out] bytes
5454  *   Where to store the number of bytes the counter matched.
5455  *
5456  * @return
5457  *   0 on success, a negative errno value otherwise.
5458  */
5459 int
5460 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt,
5461 		   bool clear, uint64_t *pkts, uint64_t *bytes)
5462 {
5463 	const struct mlx5_flow_driver_ops *fops;
5464 	struct rte_flow_attr attr = { .transfer = 0 };
5465 
5466 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5467 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5468 		return fops->counter_query(dev, cnt, clear, pkts, bytes);
5469 	}
5470 	DRV_LOG(ERR,
5471 		"port %u counter query is not supported.",
5472 		dev->data->port_id);
5473 	return -ENOTSUP;
5474 }
5475 
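/*
 * Lifecycle sketch (hypothetical, DV mode): allocate a counter, read
 * and clear its statistics, then release it.
 *
 *     uint64_t pkts, bytes;
 *     struct mlx5_flow_counter *cnt = mlx5_counter_alloc(dev);
 *
 *     if (cnt) {
 *         if (!mlx5_counter_query(dev, cnt, true, &pkts, &bytes))
 *             DRV_LOG(DEBUG, "%" PRIu64 " packets, %" PRIu64 " bytes",
 *                     pkts, bytes);
 *         mlx5_counter_free(dev, cnt);
 *     }
 */
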
5476 #define MLX5_POOL_QUERY_FREQ_US 1000000
5477 
5478 /**
5479  * Set the periodic procedure for triggering asynchronous batch queries for all
5480  * the counter pools.
5481  *
5482  * @param[in] sh
5483  *   Pointer to mlx5_ibv_shared object.
5484  */
5485 void
5486 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
5487 {
5488 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
5489 	uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
5490 	uint32_t us;
5491 
5492 	cont = MLX5_CNT_CONTAINER(sh, 1, 0);
5493 	pools_n += rte_atomic16_read(&cont->n_valid);
5494 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
5495 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
5496 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
5497 		sh->cmng.query_thread_on = 0;
5498 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
5499 	} else {
5500 		sh->cmng.query_thread_on = 1;
5501 	}
5502 }
5503 
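/*
 * Pacing example: with MLX5_POOL_QUERY_FREQ_US at 1000000 and, say,
 * four valid pools across both containers, the alarm fires every
 * 1000000 / 4 = 250000 us, so each pool is queried roughly once per
 * second regardless of the pool count.
 */
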
5504 /**
5505  * The periodic procedure for triggering asynchronous batch queries for all the
5506  * counter pools. This function is called from the host thread.
5507  *
5508  * @param[in] arg
5509  *   The parameter for the alarm process.
5510  */
5511 void
5512 mlx5_flow_query_alarm(void *arg)
5513 {
5514 	struct mlx5_ibv_shared *sh = arg;
5515 	struct mlx5_devx_obj *dcs;
5516 	uint16_t offset;
5517 	int ret;
5518 	uint8_t batch = sh->cmng.batch;
5519 	uint16_t pool_index = sh->cmng.pool_index;
5520 	struct mlx5_pools_container *cont;
5521 	struct mlx5_pools_container *mcont;
5522 	struct mlx5_flow_counter_pool *pool;
5523 
5524 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
5525 		goto set_alarm;
5526 next_container:
5527 	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
5528 	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
5529 	/* Check if a resize was done and the container needs to be flipped. */
5530 	if (cont != mcont) {
5531 		if (cont->pools) {
5532 			/* Clean the old container. */
5533 			rte_free(cont->pools);
5534 			memset(cont, 0, sizeof(*cont));
5535 		}
5536 		rte_cio_wmb();
5537 		/* Flip the host container. */
5538 		sh->cmng.mhi[batch] ^= (uint8_t)2;
5539 		cont = mcont;
5540 	}
5541 	if (!cont->pools) {
5542 		/* Two empty containers is an unexpected case. */
5543 		if (unlikely(batch != sh->cmng.batch))
5544 			goto set_alarm;
5545 		batch ^= 0x1;
5546 		pool_index = 0;
5547 		goto next_container;
5548 	}
5549 	pool = cont->pools[pool_index];
5550 	if (pool->raw_hw)
5551 		/* There is a pool query in progress. */
5552 		goto set_alarm;
5553 	pool->raw_hw =
5554 		LIST_FIRST(&sh->cmng.free_stat_raws);
5555 	if (!pool->raw_hw)
5556 		/* No free counter statistics raw memory. */
5557 		goto set_alarm;
5558 	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
5559 							      (&pool->a64_dcs);
5560 	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
5561 	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
5562 					       offset, NULL, NULL,
5563 					       pool->raw_hw->mem_mng->dm->id,
5564 					       (void *)(uintptr_t)
5565 					       (pool->raw_hw->data + offset),
5566 					       sh->devx_comp,
5567 					       (uint64_t)(uintptr_t)pool);
5568 	if (ret) {
5569 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
5570 			" %d", pool->min_dcs->id);
5571 		pool->raw_hw = NULL;
5572 		goto set_alarm;
5573 	}
5574 	pool->raw_hw->min_dcs_id = dcs->id;
5575 	LIST_REMOVE(pool->raw_hw, next);
5576 	sh->cmng.pending_queries++;
5577 	pool_index++;
5578 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
5579 		batch ^= 0x1;
5580 		pool_index = 0;
5581 	}
5582 set_alarm:
5583 	sh->cmng.batch = batch;
5584 	sh->cmng.pool_index = pool_index;
5585 	mlx5_set_query_alarm(sh);
5586 }
5587 
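/*
 * Iteration sketch: each alarm tick queries at most one pool, walking
 * pool_index from 0 to n_valid - 1 within the current container and
 * toggling batch (0 <-> 1) at the end. With two pools per container
 * the visit order is:
 *
 *     (batch, pool_index) = (0,0) (0,1) (1,0) (1,1) (0,0) ...
 */
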
5588 /**
5589  * Handler for the HW response with the ready values of an asynchronous batch
5590  * query. This function is called from the host thread.
5591  *
5592  * @param[in] sh
5593  *   The pointer to the shared IB device context.
5594  * @param[in] async_id
5595  *   The Devx async ID.
5596  * @param[in] status
5597  *   The status of the completion.
5598  */
5599 void
5600 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
5601 				  uint64_t async_id, int status)
5602 {
5603 	struct mlx5_flow_counter_pool *pool =
5604 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
5605 	struct mlx5_counter_stats_raw *raw_to_free;
5606 
5607 	if (unlikely(status)) {
5608 		raw_to_free = pool->raw_hw;
5609 	} else {
5610 		raw_to_free = pool->raw;
5611 		rte_spinlock_lock(&pool->sl);
5612 		pool->raw = pool->raw_hw;
5613 		rte_spinlock_unlock(&pool->sl);
5614 		rte_atomic64_add(&pool->query_gen, 1);
5615 		/* Be sure the new raw counters data is updated in memory. */
5616 		rte_cio_wmb();
5617 	}
5618 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
5619 	pool->raw_hw = NULL;
5620 	sh->cmng.pending_queries--;
5621 }
5622 
5623 /**
5624  * Translate the rte_flow group index to HW table value.
5625  *
5626  * @param[in] attributes
5627  *   Pointer to flow attributes
5628  * @param[in] external
5629  *   Whether the flow rule was created by a request external to the PMD.
5630  * @param[in] group
5631  *   rte_flow group index value.
5632  * @param[in] fdb_def_rule
5633  *   Whether the FDB default rule (jump to table 1) is configured.
5634  * @param[out] table
5635  *   HW table value.
5636  * @param[out] error
5637  *   Pointer to error structure.
5638  *
5639  * @return
5640  *   0 on success, a negative errno value otherwise and rte_errno is set.
5641  */
5642 int
5643 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
5644 			 uint32_t group, bool fdb_def_rule, uint32_t *table,
5645 			 struct rte_flow_error *error)
5646 {
5647 	if (attributes->transfer && external && fdb_def_rule) {
5648 		if (group == UINT32_MAX)
5649 			return rte_flow_error_set
5650 						(error, EINVAL,
5651 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5652 						 NULL,
5653 						 "group index not supported");
5654 		*table = group + 1;
5655 	} else {
5656 		*table = group;
5657 	}
5658 	return 0;
5659 }
5660 
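/*
 * Worked example: for a transfer rule from an external request with
 * the FDB default rule enabled, group N maps to table N + 1, reserving
 * table 0 for the default jump; in all other cases the mapping is the
 * identity (attr and error assumed in scope):
 *
 *     uint32_t table;
 *
 *     mlx5_flow_group_to_table(&attr, true, 0, true, &table, &error);
 *     // table == 1 if attr.transfer is set, 0 otherwise.
 */
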
5661 /**
5662  * Discover availability of metadata reg_c's.
5663  *
5664  * Iteratively use test flows to check availability.
5665  *
5666  * @param[in] dev
5667  *   Pointer to the Ethernet device structure.
5668  *
5669  * @return
5670  *   0 on success, a negative errno value otherwise and rte_errno is set.
5671  */
5672 int
5673 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
5674 {
5675 	struct mlx5_priv *priv = dev->data->dev_private;
5676 	struct mlx5_dev_config *config = &priv->config;
5677 	enum modify_reg idx;
5678 	int n = 0;
5679 
5680 	/* reg_c[0] and reg_c[1] are reserved. */
5681 	config->flow_mreg_c[n++] = REG_C_0;
5682 	config->flow_mreg_c[n++] = REG_C_1;
5683 	/* Discover availability of other reg_c's. */
5684 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
5685 		struct rte_flow_attr attr = {
5686 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5687 			.priority = MLX5_FLOW_PRIO_RSVD,
5688 			.ingress = 1,
5689 		};
5690 		struct rte_flow_item items[] = {
5691 			[0] = {
5692 				.type = RTE_FLOW_ITEM_TYPE_END,
5693 			},
5694 		};
5695 		struct rte_flow_action actions[] = {
5696 			[0] = {
5697 				.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5698 				.conf = &(struct mlx5_flow_action_copy_mreg){
5699 					.src = REG_C_1,
5700 					.dst = idx,
5701 				},
5702 			},
5703 			[1] = {
5704 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
5705 				.conf = &(struct rte_flow_action_jump){
5706 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5707 				},
5708 			},
5709 			[2] = {
5710 				.type = RTE_FLOW_ACTION_TYPE_END,
5711 			},
5712 		};
5713 		struct rte_flow *flow;
5714 		struct rte_flow_error error;
5715 
5716 		if (!config->dv_flow_en)
5717 			break;
5718 		/* Create internal flow, validation skips copy action. */
5719 		flow = flow_list_create(dev, NULL, &attr, items,
5720 					actions, false, &error);
5721 		if (!flow)
5722 			continue;
5723 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
5724 			config->flow_mreg_c[n++] = idx;
5725 		flow_list_destroy(dev, NULL, flow);
5726 	}
5727 	for (; n < MLX5_MREG_C_NUM; ++n)
5728 		config->flow_mreg_c[n] = REG_NONE;
5729 	return 0;
5730 }
5731 
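/*
 * Consumption sketch: unused entries are set to REG_NONE above, so a
 * caller can count the usable registers like this (hypothetical
 * snippet, priv assumed in scope):
 *
 *     unsigned int avail = 0;
 *
 *     while (avail < MLX5_MREG_C_NUM &&
 *            priv->config.flow_mreg_c[avail] != REG_NONE)
 *         avail++;
 */
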
5732 /**
5733  * Dump flow raw HW data to a file.
5734  *
5735  * @param[in] dev
5736  *   Pointer to Ethernet device.
5737  * @param[in] file
5738  *   A pointer to a file for output.
5739  * @param[out] error
5740  *   Perform verbose error reporting if not NULL. PMDs initialize this
5741  *   structure in case of error only.
5742  * @return
5743  *   0 on success, a negative value otherwise.
5744  */
5745 int
5746 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
5747 		   FILE *file,
5748 		   struct rte_flow_error *error __rte_unused)
5749 {
5750 	struct mlx5_priv *priv = dev->data->dev_private;
5751 	struct mlx5_ibv_shared *sh = priv->sh;
5752 
5753 	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
5754 				       sh->tx_domain, file);
5755 }
5756
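/*
 * Usage sketch (hypothetical application code): the callback above is
 * reached through the generic rte_flow API, e.g.
 *
 *     struct rte_flow_error error;
 *     FILE *f = fopen("/tmp/mlx5_flows.txt", "w");
 *
 *     if (f) {
 *         rte_flow_dev_dump(port_id, f, &error);
 *         fclose(f);
 *     }
 */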