/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						 MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6,
						 MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						 MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						 MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						 MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						 MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
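
/*
 * Illustrative note (editor's sketch, not part of the original code):
 * the table above is walked by rte_flow_expand_rss() to derive extra
 * patterns for RSS. For example, given a request such as:
 *
 *     pattern: ETH / IPV4 / END,  rss.types = ETH_RSS_NONFRAG_IPV4_UDP
 *
 * the walk MLX5_EXPANSION_ETH -> MLX5_EXPANSION_IPV4 ->
 * MLX5_EXPANSION_IPV4_UDP matches the requested rss_types, so the
 * pattern is expanded into ETH / IPV4 / END plus ETH / IPV4 / UDP / END,
 * each later translated into its own device flow.
 */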

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NONE;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is only used in the meter
		 * suffix sub-flow when a meter is present, so it is safe to
		 * share the register with the meter color match.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If meter is enabled, it engages registers for both color
		 * match and flow match. If meter color match does not use
		 * REG_C_2, the REG_C_x used by the meter color match must
		 * be skipped.
		 * If meter is disabled, all available registers can be used.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means meter is using a REG_C_x greater than
		 * REG_C_2. Take care not to conflict with the meter color
		 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
		 * the color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NONE)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
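
/*
 * Illustrative usage sketch (editor's example, not in the original code):
 * fetch the register consumed by a TAG item with index 0 during
 * validation, e.g.
 *
 *     int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
 *     if (reg < 0)
 *         return reg; // rte_errno already set by the helper
 *
 * The int return is negative on failure, so it must be checked before
 * being used as a register index.
 */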

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having available reg_c's can be regarded as supporting extensive
	 * flow metadata registers, which means:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->ibv_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	mlx5_hrxq_drop_release(dev);
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
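
/*
 * Worked example (editor's illustration, not in the original code): with
 * 16 Verbs priorities discovered, priv->config.flow_prio equals
 * RTE_DIM(priority_map_5) == 5, so a rule with base priority 1 and item
 * sub-priority 2 resolves to priority_map_5[1][2] == 5 at the Verbs
 * level:
 *
 *     uint32_t prio = mlx5_flow_adjust_priority(dev, 1, 2); // == 5
 */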

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
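
/*
 * Illustrative example (editor's note, not in the original code): for a
 * TCP item with spec->hdr.src_port = 100, last->hdr.src_port = 200 and
 * mask->hdr.src_port = 0xffff, the loop above compares (spec & mask)
 * against (last & mask); they differ, so a real range is requested and
 * "range is not valid" is returned. With mask->hdr.src_port = 0 both
 * sides mask to zero, compare equal and the item is accepted as a
 * degenerate (empty) range.
 */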

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = flow->rss.level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(flow->rss.types & layer_types))
		return 0;
	return hash_fields;
}
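
/*
 * Illustrative example (editor's sketch, assuming a flow created with
 * rss.level = 2 and rss.types including ETH_RSS_NONFRAG_IPV4_UDP): for
 * the inner part of a VXLAN flow, a call such as
 *
 *     fields = mlx5_flow_hashfields_adjust(dev_flow, 1,
 *                                          ETH_RSS_NONFRAG_IPV4_UDP,
 *                                          IBV_RX_HASH_SRC_PORT_UDP |
 *                                          IBV_RX_HASH_DST_PORT_UDP);
 *
 * returns the UDP hash fields with IBV_RX_HASH_INNER set, while the
 * same call for the outer part (tunnel == 0) returns 0 because inner
 * RSS was requested.
 */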

/**
 * Look up and set the tunnel ptype in the Rx queue data. Only a single
 * ptype can be used; if several tunnel rules are mapped to this queue,
 * the tunnel ptype is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
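
/*
 * Example of the rule above (editor's illustration): if a queue is
 * referenced only by VXLAN flows, rxq.tunnel becomes
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP. Once a GRE flow is added
 * to the same queue, two counters are non-zero, the loop resets
 * tunnel_ptype to 0 and the Rx burst can no longer report a specific
 * tunnel ptype for that queue.
 */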

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the
 * device flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must always be enabled (metadata may arrive
		 * from another port - not from local flows only).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	const int mark = !!(dev_flow->actions &
			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->rss.queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
		flow_drv_rxq_flags_trim(dev, dev_flow);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}
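
/*
 * Illustrative usage (editor's sketch, not in the original code): check
 * whether a flow requests RSS before expanding its pattern, e.g.
 *
 *     const struct rte_flow_action *rss_act =
 *             mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 *     if (rss_act != NULL)
 *             rss_conf = rss_act->conf;
 */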

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}
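
/*
 * Minimal action that passes the checks above (editor's illustration,
 * assuming Rx queue 0 is configured):
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */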

/*
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* Allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	return 0;
}
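
/*
 * Minimal RSS action accepted by the checks above (editor's
 * illustration, assuming two configured Rx queues and relying on the
 * default key when rss.key is NULL):
 *
 *     uint16_t queues[] = { 0, 1 };
 *     struct rte_flow_action_rss rss = {
 *             .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *             .level = 1,            (outer RSS only)
 *             .types = ETH_RSS_IP | ETH_RSS_UDP,
 *             .key_len = 0,          (NULL key -> default key)
 *             .key = NULL,
 *             .queue_num = 2,
 *             .queue = queues,
 *     };
 */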

/*
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups are not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}
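
/*
 * Attributes accepted by this validator (editor's illustration):
 * group 0, in-range priority, ingress only, e.g.
 *
 *     struct rte_flow_attr attr = {
 *             .group = 0,
 *             .priority = 0,
 *             .ingress = 1,
 *     };
 */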

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp_mask,
		 sizeof(struct rte_flow_item_icmp), error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow "
					  "L3 layers");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow VLAN");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we have a virtual machine
			 * and SR-IOV enabled. We have to create a VLAN
			 * interface to make the hypervisor set up the
			 * E-Switch vport context correctly. We avoid creating
			 * multiple VLAN interfaces, so we cannot support a
			 * VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From verbs perspective an empty VLAN is equivalent
	 * to a packet without VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv4 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV4)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 cannot follow L2/VLAN layer "
					  "whose ether type is not IPv4");
	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
		if (mask && spec)
			next_proto = mask->hdr.next_proto_id &
				     spec->hdr.next_proto_id;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv6 specified "
					  "but IPv4 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	else if (mask->hdr.next_proto_id != 0 &&
		 mask->hdr.next_proto_id != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
					  "partial mask is not supported"
					  " for protocol");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv4),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint64_t last_item,
			     uint16_t ether_type,
			     const struct rte_flow_item_ipv6 *acc_mask,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;
	uint8_t next_proto = 0xFF;
	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
				  MLX5_FLOW_LAYER_OUTER_VLAN |
				  MLX5_FLOW_LAYER_INNER_VLAN);

	if ((last_item & l2_vlan) && ether_type &&
	    ether_type != RTE_ETHER_TYPE_IPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 cannot follow L2/VLAN layer "
					  "whose ether type is not IPv6");
	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
		if (mask && spec)
			next_proto = mask->hdr.proto & spec->hdr.proto;
		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "multiple tunnel "
						  "not supported");
	}
	if (item_flags & MLX5_FLOW_LAYER_IPIP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "wrong tunnel type - IPv4 specified "
					  "but IPv6 item provided");
	if (item_flags & l3m)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L3 layers not supported");
	else if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an L4 layer.");
	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 cannot follow an NVGRE layer.");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					acc_mask ? (const uint8_t *)acc_mask
						 : (const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_ipv6),
					error);
	if (ret < 0)
		return ret;
	return 0;
}
1702 
1703 /**
1704  * Validate UDP item.
1705  *
1706  * @param[in] item
1707  *   Item specification.
1708  * @param[in] item_flags
1709  *   Bit-fields that hold the items detected until now.
1710  * @param[in] target_protocol
1711  *   The next protocol in the previous item.
1714  * @param[out] error
1715  *   Pointer to error structure.
1716  *
1717  * @return
1718  *   0 on success, a negative errno value otherwise and rte_errno is set.
1719  */
1720 int
1721 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1722 			    uint64_t item_flags,
1723 			    uint8_t target_protocol,
1724 			    struct rte_flow_error *error)
1725 {
1726 	const struct rte_flow_item_udp *mask = item->mask;
1727 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1728 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1729 				      MLX5_FLOW_LAYER_OUTER_L3;
1730 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1731 				      MLX5_FLOW_LAYER_OUTER_L4;
1732 	int ret;
1733 
1734 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
1735 		return rte_flow_error_set(error, EINVAL,
1736 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1737 					  "protocol filtering not compatible"
1738 					  " with UDP layer");
1739 	if (!(item_flags & l3m))
1740 		return rte_flow_error_set(error, EINVAL,
1741 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1742 					  "L3 is mandatory to filter on L4");
1743 	if (item_flags & l4m)
1744 		return rte_flow_error_set(error, EINVAL,
1745 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1746 					  "multiple L4 layers not supported");
1747 	if (!mask)
1748 		mask = &rte_flow_item_udp_mask;
1749 	ret = mlx5_flow_item_acceptable
1750 		(item, (const uint8_t *)mask,
1751 		 (const uint8_t *)&rte_flow_item_udp_mask,
1752 		 sizeof(struct rte_flow_item_udp), error);
1753 	if (ret < 0)
1754 		return ret;
1755 	return 0;
1756 }
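
/*
 * Illustrative sketch (not part of the driver): how the UDP validator
 * above is typically driven from a pattern walk. The "example_" name is
 * hypothetical; real callers (the DV/Verbs validate callbacks) track
 * item_flags and the previous L3 next-protocol field the same way.
 */
static int __rte_unused
example_validate_ipv4_udp(const struct rte_flow_item items[],
			  struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint8_t next_protocol = 0xff; /* 0xff means no protocol filtering. */
	int ret;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		const struct rte_flow_item_ipv4 *ipv4s = items->spec;
		const struct rte_flow_item_ipv4 *ipv4m = items->mask;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* Assumes the IPv4 item itself was validated. */
			item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (ipv4s && ipv4m)
				next_protocol = ipv4s->hdr.next_proto_id &
						ipv4m->hdr.next_proto_id;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		default:
			break;
		}
	}
	return 0;
}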
1757 
1758 /**
1759  * Validate TCP item.
1760  *
1761  * @param[in] item
1762  *   Item specification.
1763  * @param[in] item_flags
1764  *   Bit-fields that hold the items detected until now.
1765  * @param[in] target_protocol
1766  *   The next protocol in the previous item.
      * @param[in] flow_mask
      *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
1767  * @param[out] error
1768  *   Pointer to error structure.
1769  *
1770  * @return
1771  *   0 on success, a negative errno value otherwise and rte_errno is set.
1772  */
1773 int
1774 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
1775 			    uint64_t item_flags,
1776 			    uint8_t target_protocol,
1777 			    const struct rte_flow_item_tcp *flow_mask,
1778 			    struct rte_flow_error *error)
1779 {
1780 	const struct rte_flow_item_tcp *mask = item->mask;
1781 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1782 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1783 				      MLX5_FLOW_LAYER_OUTER_L3;
1784 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1785 				      MLX5_FLOW_LAYER_OUTER_L4;
1786 	int ret;
1787 
1788 	MLX5_ASSERT(flow_mask);
1789 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
1790 		return rte_flow_error_set(error, EINVAL,
1791 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1792 					  "protocol filtering not compatible"
1793 					  " with TCP layer");
1794 	if (!(item_flags & l3m))
1795 		return rte_flow_error_set(error, EINVAL,
1796 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1797 					  "L3 is mandatory to filter on L4");
1798 	if (item_flags & l4m)
1799 		return rte_flow_error_set(error, EINVAL,
1800 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1801 					  "multiple L4 layers not supported");
1802 	if (!mask)
1803 		mask = &rte_flow_item_tcp_mask;
1804 	ret = mlx5_flow_item_acceptable
1805 		(item, (const uint8_t *)mask,
1806 		 (const uint8_t *)flow_mask,
1807 		 sizeof(struct rte_flow_item_tcp), error);
1808 	if (ret < 0)
1809 		return ret;
1810 	return 0;
1811 }
1812 
1813 /**
1814  * Validate VXLAN item.
1815  *
1816  * @param[in] item
1817  *   Item specification.
1818  * @param[in] item_flags
1819  *   Bit-fields that hold the items detected until now.
1822  * @param[out] error
1823  *   Pointer to error structure.
1824  *
1825  * @return
1826  *   0 on success, a negative errno value otherwise and rte_errno is set.
1827  */
1828 int
1829 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1830 			      uint64_t item_flags,
1831 			      struct rte_flow_error *error)
1832 {
1833 	const struct rte_flow_item_vxlan *spec = item->spec;
1834 	const struct rte_flow_item_vxlan *mask = item->mask;
1835 	int ret;
1836 	union vni {
1837 		uint32_t vlan_id;
1838 		uint8_t vni[4];
1839 	} id = { .vlan_id = 0, };
1840 	uint32_t vlan_id = 0;
1841 
1843 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1844 		return rte_flow_error_set(error, ENOTSUP,
1845 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1846 					  "multiple tunnel layers not"
1847 					  " supported");
1848 	/*
1849 	 * Verify only UDPv4 is present as defined in
1850 	 * https://tools.ietf.org/html/rfc7348
1851 	 */
1852 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1853 		return rte_flow_error_set(error, EINVAL,
1854 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1855 					  "no outer UDP layer found");
1856 	if (!mask)
1857 		mask = &rte_flow_item_vxlan_mask;
1858 	ret = mlx5_flow_item_acceptable
1859 		(item, (const uint8_t *)mask,
1860 		 (const uint8_t *)&rte_flow_item_vxlan_mask,
1861 		 sizeof(struct rte_flow_item_vxlan),
1862 		 error);
1863 	if (ret < 0)
1864 		return ret;
1865 	if (spec) {
1866 		memcpy(&id.vni[1], spec->vni, 3);
1867 		vlan_id = id.vlan_id;
1868 		memcpy(&id.vni[1], mask->vni, 3);
1869 		vlan_id &= id.vlan_id;
1870 	}
1871 	/*
1872 	 * Tunnel id 0 is equivalent to not adding a VXLAN layer. If
1873 	 * only this layer is defined in the Verbs specification, it is
1874 	 * interpreted as a wildcard and all packets will match this
1875 	 * rule. If it follows a full stack layer (ex: eth / ipv4 /
1876 	 * udp), all packets matching the layers before will also
1877 	 * match this rule. To avoid such a situation, VNI 0 is
1878 	 * currently refused.
1879 	 */
1880 	if (!vlan_id)
1881 		return rte_flow_error_set(error, ENOTSUP,
1882 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1883 					  "VXLAN vni cannot be 0");
1884 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1885 		return rte_flow_error_set(error, ENOTSUP,
1886 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1887 					  "VXLAN tunnel must be fully defined");
1888 	return 0;
1889 }
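
/*
 * Worked example for the VNI check above (derived from the code): with
 * spec->vni = {0x00, 0x00, 0x01} and the default all-ones mask, bytes
 * 1..3 of the union are filled, so id.vlan_id is non-zero and the item
 * is accepted; an all-zero VNI keeps vlan_id == 0 and is rejected as a
 * wildcard. Only this zero test is relied upon, so the host byte order
 * of the union does not matter here.
 */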
1890 
1891 /**
1892  * Validate VXLAN_GPE item.
1893  *
1894  * @param[in] item
1895  *   Item specification.
1896  * @param[in] item_flags
1897  *   Bit-fields that hold the items detected until now.
1898  * @param[in] dev
1899  *   Pointer to the rte_eth_dev structure.
1902  * @param[out] error
1903  *   Pointer to error structure.
1904  *
1905  * @return
1906  *   0 on success, a negative errno value otherwise and rte_errno is set.
1907  */
1908 int
1909 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1910 				  uint64_t item_flags,
1911 				  struct rte_eth_dev *dev,
1912 				  struct rte_flow_error *error)
1913 {
1914 	struct mlx5_priv *priv = dev->data->dev_private;
1915 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1916 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1917 	int ret;
1918 	union vni {
1919 		uint32_t vlan_id;
1920 		uint8_t vni[4];
1921 	} id = { .vlan_id = 0, };
1922 	uint32_t vlan_id = 0;
1923 
1924 	if (!priv->config.l3_vxlan_en)
1925 		return rte_flow_error_set(error, ENOTSUP,
1926 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1927 					  "L3 VXLAN is not enabled by device"
1928 					  " parameter and/or not configured in"
1929 					  " firmware");
1930 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1931 		return rte_flow_error_set(error, ENOTSUP,
1932 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1933 					  "multiple tunnel layers not"
1934 					  " supported");
1935 	/*
1936 	 * Verify only UDP is present, as required by the VXLAN-GPE
1937 	 * specification (draft-ietf-nvo3-vxlan-gpe).
1938 	 */
1939 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1940 		return rte_flow_error_set(error, EINVAL,
1941 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1942 					  "no outer UDP layer found");
1943 	if (!mask)
1944 		mask = &rte_flow_item_vxlan_gpe_mask;
1945 	ret = mlx5_flow_item_acceptable
1946 		(item, (const uint8_t *)mask,
1947 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1948 		 sizeof(struct rte_flow_item_vxlan_gpe),
1949 		 error);
1950 	if (ret < 0)
1951 		return ret;
1952 	if (spec) {
1953 		if (spec->protocol)
1954 			return rte_flow_error_set(error, ENOTSUP,
1955 						  RTE_FLOW_ERROR_TYPE_ITEM,
1956 						  item,
1957 						  "VxLAN-GPE protocol"
1958 						  " not supported");
1959 		memcpy(&id.vni[1], spec->vni, 3);
1960 		vlan_id = id.vlan_id;
1961 		memcpy(&id.vni[1], mask->vni, 3);
1962 		vlan_id &= id.vlan_id;
1963 	}
1964 	/*
1965 	 * Tunnel id 0 is equivalent to not adding a VXLAN layer. If only this
1966 	 * layer is defined in the Verbs specification, it is interpreted as a
1967 	 * wildcard and all packets will match this rule. If it follows a full
1968 	 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
1969 	 * before will also match this rule. To avoid such a situation, VNI 0
1970 	 * is currently refused.
1971 	 */
1972 	if (!vlan_id)
1973 		return rte_flow_error_set(error, ENOTSUP,
1974 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1975 					  "VXLAN-GPE vni cannot be 0");
1976 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1977 		return rte_flow_error_set(error, ENOTSUP,
1978 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1979 					  "VXLAN-GPE tunnel must be fully"
1980 					  " defined");
1981 	return 0;
1982 }
1983 /**
1984  * Validate GRE Key item.
1985  *
1986  * @param[in] item
1987  *   Item specification.
1988  * @param[in] item_flags
1989  *   Bit flags to mark detected items.
1990  * @param[in] gre_item
1991  *   Pointer to gre_item
1992  * @param[out] error
1993  *   Pointer to error structure.
1994  *
1995  * @return
1996  *   0 on success, a negative errno value otherwise and rte_errno is set.
1997  */
1998 int
1999 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2000 				uint64_t item_flags,
2001 				const struct rte_flow_item *gre_item,
2002 				struct rte_flow_error *error)
2003 {
2004 	const rte_be32_t *mask = item->mask;
2005 	int ret = 0;
2006 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2007 	const struct rte_flow_item_gre *gre_spec;
2008 	const struct rte_flow_item_gre *gre_mask;
2009 
2010 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2011 		return rte_flow_error_set(error, ENOTSUP,
2012 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2013 					  "Multiple GRE keys not supported");
2014 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2015 		return rte_flow_error_set(error, ENOTSUP,
2016 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2017 					  "No preceding GRE header");
2018 	if (item_flags & MLX5_FLOW_LAYER_INNER)
2019 		return rte_flow_error_set(error, ENOTSUP,
2020 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2021 					  "GRE key following a wrong item");
2022 	gre_mask = gre_item->mask;
2023 	if (!gre_mask)
2024 		gre_mask = &rte_flow_item_gre_mask;
2025 	gre_spec = gre_item->spec;
2026 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2027 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2028 		return rte_flow_error_set(error, EINVAL,
2029 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2030 					  "Key bit must be on");
2031 
2032 	if (!mask)
2033 		mask = &gre_key_default_mask;
2034 	ret = mlx5_flow_item_acceptable
2035 		(item, (const uint8_t *)mask,
2036 		 (const uint8_t *)&gre_key_default_mask,
2037 		 sizeof(rte_be32_t), error);
2038 	return ret;
2039 }
2040 
2041 /**
2042  * Validate GRE item.
2043  *
2044  * @param[in] item
2045  *   Item specification.
2046  * @param[in] item_flags
2047  *   Bit flags to mark detected items.
2048  * @param[in] target_protocol
2049  *   The next protocol in the previous item.
2050  * @param[out] error
2051  *   Pointer to error structure.
2052  *
2053  * @return
2054  *   0 on success, a negative errno value otherwise and rte_errno is set.
2055  */
2056 int
2057 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2058 			    uint64_t item_flags,
2059 			    uint8_t target_protocol,
2060 			    struct rte_flow_error *error)
2061 {
2062 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2063 	const struct rte_flow_item_gre *mask = item->mask;
2064 	int ret;
2065 	const struct rte_flow_item_gre nic_mask = {
2066 		.c_rsvd0_ver = RTE_BE16(0xB000),
2067 		.protocol = RTE_BE16(UINT16_MAX),
2068 	};
2069 
2070 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2071 		return rte_flow_error_set(error, EINVAL,
2072 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2073 					  "protocol filtering not compatible"
2074 					  " with this GRE layer");
2075 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2076 		return rte_flow_error_set(error, ENOTSUP,
2077 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2078 					  "multiple tunnel layers not"
2079 					  " supported");
2080 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2081 		return rte_flow_error_set(error, ENOTSUP,
2082 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2083 					  "L3 Layer is missing");
2084 	if (!mask)
2085 		mask = &rte_flow_item_gre_mask;
2086 	ret = mlx5_flow_item_acceptable
2087 		(item, (const uint8_t *)mask,
2088 		 (const uint8_t *)&nic_mask,
2089 		 sizeof(struct rte_flow_item_gre), error);
2090 	if (ret < 0)
2091 		return ret;
2092 #ifndef HAVE_MLX5DV_DR
2093 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2094 	if (spec && (spec->protocol & mask->protocol))
2095 		return rte_flow_error_set(error, ENOTSUP,
2096 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2097 					  "without MPLS support the"
2098 					  " specification cannot be used for"
2099 					  " filtering");
2100 #endif
2101 #endif
2102 	return 0;
2103 }
2104 
2105 /**
2106  * Validate Geneve item.
2107  *
2108  * @param[in] item
2109  *   Item specification.
2110  * @param[in] item_flags
2111  *   Bit-fields that hold the items detected until now.
2112  * @param[in] dev
2113  *   Pointer to the rte_eth_dev structure.
2114  * @param[out] error
2115  *   Pointer to error structure.
2116  *
2117  * @return
2118  *   0 on success, a negative errno value otherwise and rte_errno is set.
2119  */
2120 
2121 int
2122 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2123 			       uint64_t item_flags,
2124 			       struct rte_eth_dev *dev,
2125 			       struct rte_flow_error *error)
2126 {
2127 	struct mlx5_priv *priv = dev->data->dev_private;
2128 	const struct rte_flow_item_geneve *spec = item->spec;
2129 	const struct rte_flow_item_geneve *mask = item->mask;
2130 	int ret;
2131 	uint16_t gbhdr;
2132 	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2133 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2134 	const struct rte_flow_item_geneve nic_mask = {
2135 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2136 		.vni = "\xff\xff\xff",
2137 		.protocol = RTE_BE16(UINT16_MAX),
2138 	};
2139 
2140 	if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2141 		return rte_flow_error_set(error, ENOTSUP,
2142 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2143 					  "L3 Geneve is not enabled by device"
2144 					  " parameter and/or not configured in"
2145 					  " firmware");
2146 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2147 		return rte_flow_error_set(error, ENOTSUP,
2148 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2149 					  "multiple tunnel layers not"
2150 					  " supported");
2151 	/*
2152 	 * Verify only UDP is present, as Geneve is defined as a
2153 	 * UDP-based encapsulation.
2154 	 */
2155 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2156 		return rte_flow_error_set(error, EINVAL,
2157 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2158 					  "no outer UDP layer found");
2159 	if (!mask)
2160 		mask = &rte_flow_item_geneve_mask;
2161 	ret = mlx5_flow_item_acceptable
2162 				  (item, (const uint8_t *)mask,
2163 				   (const uint8_t *)&nic_mask,
2164 				   sizeof(struct rte_flow_item_geneve), error);
2165 	if (ret)
2166 		return ret;
2167 	if (spec) {
2168 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2169 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2170 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2171 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2172 			return rte_flow_error_set(error, ENOTSUP,
2173 						  RTE_FLOW_ERROR_TYPE_ITEM,
2174 						  item,
2175 						  "Geneve protocol unsupported"
2176 						  " fields are being used");
2177 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2178 			return rte_flow_error_set
2179 					(error, ENOTSUP,
2180 					 RTE_FLOW_ERROR_TYPE_ITEM,
2181 					 item,
2182 					 "Unsupported Geneve options length");
2183 	}
2184 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2185 		return rte_flow_error_set
2186 				    (error, ENOTSUP,
2187 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
2188 				     "Geneve tunnel must be fully defined");
2189 	return 0;
2190 }
2191 
2192 /**
2193  * Validate MPLS item.
2194  *
2195  * @param[in] dev
2196  *   Pointer to the rte_eth_dev structure.
2197  * @param[in] item
2198  *   Item specification.
2199  * @param[in] item_flags
2200  *   Bit-fields that hold the items detected until now.
2201  * @param[in] prev_layer
2202  *   The protocol layer indicated in previous item.
2203  * @param[out] error
2204  *   Pointer to error structure.
2205  *
2206  * @return
2207  *   0 on success, a negative errno value otherwise and rte_errno is set.
2208  */
2209 int
2210 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2211 			     const struct rte_flow_item *item __rte_unused,
2212 			     uint64_t item_flags __rte_unused,
2213 			     uint64_t prev_layer __rte_unused,
2214 			     struct rte_flow_error *error)
2215 {
2216 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2217 	const struct rte_flow_item_mpls *mask = item->mask;
2218 	struct mlx5_priv *priv = dev->data->dev_private;
2219 	int ret;
2220 
2221 	if (!priv->config.mpls_en)
2222 		return rte_flow_error_set(error, ENOTSUP,
2223 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2224 					  "MPLS not supported or"
2225 					  " disabled in firmware"
2226 					  " configuration.");
2227 	/* MPLS over IP, UDP, GRE is allowed */
2228 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2229 			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
2230 			    MLX5_FLOW_LAYER_GRE)))
2231 		return rte_flow_error_set(error, EINVAL,
2232 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2233 					  "protocol filtering not compatible"
2234 					  " with MPLS layer");
2235 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2236 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2237 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
2238 		return rte_flow_error_set(error, ENOTSUP,
2239 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2240 					  "multiple tunnel layers not"
2241 					  " supported");
2242 	if (!mask)
2243 		mask = &rte_flow_item_mpls_mask;
2244 	ret = mlx5_flow_item_acceptable
2245 		(item, (const uint8_t *)mask,
2246 		 (const uint8_t *)&rte_flow_item_mpls_mask,
2247 		 sizeof(struct rte_flow_item_mpls), error);
2248 	if (ret < 0)
2249 		return ret;
2250 	return 0;
2251 #endif
2252 	return rte_flow_error_set(error, ENOTSUP,
2253 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
2254 				  "MPLS is not supported by Verbs, please"
2255 				  " update.");
2256 }
2257 
2258 /**
2259  * Validate NVGRE item.
2260  *
2261  * @param[in] item
2262  *   Item specification.
2263  * @param[in] item_flags
2264  *   Bit flags to mark detected items.
2265  * @param[in] target_protocol
2266  *   The next protocol in the previous item.
2267  * @param[out] error
2268  *   Pointer to error structure.
2269  *
2270  * @return
2271  *   0 on success, a negative errno value otherwise and rte_errno is set.
2272  */
2273 int
2274 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2275 			      uint64_t item_flags,
2276 			      uint8_t target_protocol,
2277 			      struct rte_flow_error *error)
2278 {
2279 	const struct rte_flow_item_nvgre *mask = item->mask;
2280 	int ret;
2281 
2282 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2283 		return rte_flow_error_set(error, EINVAL,
2284 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2285 					  "protocol filtering not compatible"
2286 					  " with this GRE layer");
2287 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2288 		return rte_flow_error_set(error, ENOTSUP,
2289 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2290 					  "multiple tunnel layers not"
2291 					  " supported");
2292 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2293 		return rte_flow_error_set(error, ENOTSUP,
2294 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2295 					  "L3 Layer is missing");
2296 	if (!mask)
2297 		mask = &rte_flow_item_nvgre_mask;
2298 	ret = mlx5_flow_item_acceptable
2299 		(item, (const uint8_t *)mask,
2300 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
2301 		 sizeof(struct rte_flow_item_nvgre), error);
2302 	if (ret < 0)
2303 		return ret;
2304 	return 0;
2305 }
2306 
2307 /* Allocate unique ID for the split Q/RSS subflows. */
2308 static uint32_t
2309 flow_qrss_get_id(struct rte_eth_dev *dev)
2310 {
2311 	struct mlx5_priv *priv = dev->data->dev_private;
2312 	uint32_t qrss_id, ret;
2313 
2314 	ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
2315 	if (ret)
2316 		return 0;
2317 	MLX5_ASSERT(qrss_id);
2318 	return qrss_id;
2319 }
2320 
2321 /* Free unique ID for the split Q/RSS subflows. */
2322 static void
2323 flow_qrss_free_id(struct rte_eth_dev *dev,  uint32_t qrss_id)
2324 {
2325 	struct mlx5_priv *priv = dev->data->dev_private;
2326 
2327 	if (qrss_id)
2328 		mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
2329 }
2330 
2331 /**
2332  * Release resources related to the QUEUE/RSS action split.
2333  *
2334  * @param dev
2335  *   Pointer to Ethernet device.
2336  * @param flow
2337  *   Flow to release IDs from.
2338  */
2339 static void
2340 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2341 			     struct rte_flow *flow)
2342 {
2343 	struct mlx5_flow *dev_flow;
2344 
2345 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
2346 		if (dev_flow->qrss_id)
2347 			flow_qrss_free_id(dev, dev_flow->qrss_id);
2348 }
2349 
2350 static int
2351 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2352 		   const struct rte_flow_attr *attr __rte_unused,
2353 		   const struct rte_flow_item items[] __rte_unused,
2354 		   const struct rte_flow_action actions[] __rte_unused,
2355 		   bool external __rte_unused,
2356 		   struct rte_flow_error *error)
2357 {
2358 	return rte_flow_error_set(error, ENOTSUP,
2359 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2360 }
2361 
2362 static struct mlx5_flow *
2363 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
2364 		  const struct rte_flow_item items[] __rte_unused,
2365 		  const struct rte_flow_action actions[] __rte_unused,
2366 		  struct rte_flow_error *error)
2367 {
2368 	rte_flow_error_set(error, ENOTSUP,
2369 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2370 	return NULL;
2371 }
2372 
2373 static int
2374 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2375 		    struct mlx5_flow *dev_flow __rte_unused,
2376 		    const struct rte_flow_attr *attr __rte_unused,
2377 		    const struct rte_flow_item items[] __rte_unused,
2378 		    const struct rte_flow_action actions[] __rte_unused,
2379 		    struct rte_flow_error *error)
2380 {
2381 	return rte_flow_error_set(error, ENOTSUP,
2382 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2383 }
2384 
2385 static int
2386 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
2387 		struct rte_flow *flow __rte_unused,
2388 		struct rte_flow_error *error)
2389 {
2390 	return rte_flow_error_set(error, ENOTSUP,
2391 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2392 }
2393 
2394 static void
2395 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
2396 		 struct rte_flow *flow __rte_unused)
2397 {
2398 }
2399 
2400 static void
2401 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
2402 		  struct rte_flow *flow __rte_unused)
2403 {
2404 }
2405 
2406 static int
2407 flow_null_query(struct rte_eth_dev *dev __rte_unused,
2408 		struct rte_flow *flow __rte_unused,
2409 		const struct rte_flow_action *actions __rte_unused,
2410 		void *data __rte_unused,
2411 		struct rte_flow_error *error)
2412 {
2413 	return rte_flow_error_set(error, ENOTSUP,
2414 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2415 }
2416 
2417 /* Void driver to protect from null pointer reference. */
2418 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
2419 	.validate = flow_null_validate,
2420 	.prepare = flow_null_prepare,
2421 	.translate = flow_null_translate,
2422 	.apply = flow_null_apply,
2423 	.remove = flow_null_remove,
2424 	.destroy = flow_null_destroy,
2425 	.query = flow_null_query,
2426 };
2427 
2428 /**
2429  * Select flow driver type according to flow attributes and device
2430  * configuration.
2431  *
2432  * @param[in] dev
2433  *   Pointer to the dev structure.
2434  * @param[in] attr
2435  *   Pointer to the flow attributes.
2436  *
2437  * @return
2438  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
2439  */
2440 static enum mlx5_flow_drv_type
2441 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
2442 {
2443 	struct mlx5_priv *priv = dev->data->dev_private;
2444 	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
2445 
2446 	if (attr->transfer && priv->config.dv_esw_en)
2447 		type = MLX5_FLOW_TYPE_DV;
2448 	if (!attr->transfer)
2449 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
2450 						 MLX5_FLOW_TYPE_VERBS;
2451 	return type;
2452 }
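
/*
 * Selection summary for the helper above (derived from the code):
 *   attr->transfer && dv_esw_en  -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer              -> dv_flow_en ? DV : VERBS
 *   attr->transfer && !dv_esw_en -> MLX5_FLOW_TYPE_MAX (null ops,
 *                                   every callback fails with ENOTSUP)
 */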
2453 
2454 #define flow_get_drv_ops(type) flow_drv_ops[type]
2455 
2456 /**
2457  * Flow driver validation API. This abstracts calling driver specific functions.
2458  * The type of flow driver is determined according to flow attributes.
2459  *
2460  * @param[in] dev
2461  *   Pointer to the dev structure.
2462  * @param[in] attr
2463  *   Pointer to the flow attributes.
2464  * @param[in] items
2465  *   Pointer to the list of items.
2466  * @param[in] actions
2467  *   Pointer to the list of actions.
2468  * @param[in] external
2469  *   This flow rule is created by a request external to the PMD.
2470  * @param[out] error
2471  *   Pointer to the error structure.
2472  *
2473  * @return
2474  *   0 on success, a negative errno value otherwise and rte_errno is set.
2475  */
2476 static inline int
2477 flow_drv_validate(struct rte_eth_dev *dev,
2478 		  const struct rte_flow_attr *attr,
2479 		  const struct rte_flow_item items[],
2480 		  const struct rte_flow_action actions[],
2481 		  bool external, struct rte_flow_error *error)
2482 {
2483 	const struct mlx5_flow_driver_ops *fops;
2484 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2485 
2486 	fops = flow_get_drv_ops(type);
2487 	return fops->validate(dev, attr, items, actions, external, error);
2488 }
2489 
2490 /**
2491  * Flow driver preparation API. This abstracts calling driver specific
2492  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2493  * calculates the size of memory required for device flow, allocates the memory,
2494  * initializes the device flow and returns the pointer.
2495  *
2496  * @note
2497  *   This function initializes the device flow structure such as dv or verbs
2498  *   in struct mlx5_flow. However, it is the caller's responsibility to
2499  *   initialize the rest. For example, adding the returned device flow to the
2500  *   flow->dev_flows list and setting the backward reference to the flow should
2501  *   be done out of this function. The layers field is not filled either.
2502  *
2503  * @param[in] attr
2504  *   Pointer to the flow attributes.
2505  * @param[in] items
2506  *   Pointer to the list of items.
2507  * @param[in] actions
2508  *   Pointer to the list of actions.
2509  * @param[out] error
2510  *   Pointer to the error structure.
2511  *
2512  * @return
2513  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
2514  */
2515 static inline struct mlx5_flow *
2516 flow_drv_prepare(const struct rte_flow *flow,
2517 		 const struct rte_flow_attr *attr,
2518 		 const struct rte_flow_item items[],
2519 		 const struct rte_flow_action actions[],
2520 		 struct rte_flow_error *error)
2521 {
2522 	const struct mlx5_flow_driver_ops *fops;
2523 	enum mlx5_flow_drv_type type = flow->drv_type;
2524 
2525 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2526 	fops = flow_get_drv_ops(type);
2527 	return fops->prepare(attr, items, actions, error);
2528 }
2529 
2530 /**
2531  * Flow driver translation API. This abstracts calling driver specific
2532  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2533  * translates a generic flow into a driver flow. flow_drv_prepare() must
2534  * precede.
2535  *
2536  * @note
2537  *   dev_flow->layers could be filled as a result of parsing during translation
2538  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
2539  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
2540  *   flow->actions could be overwritten even though all the expanded dev_flows
2541  *   have the same actions.
2542  *
2543  * @param[in] dev
2544  *   Pointer to the rte dev structure.
2545  * @param[in, out] dev_flow
2546  *   Pointer to the mlx5 flow.
2547  * @param[in] attr
2548  *   Pointer to the flow attributes.
2549  * @param[in] items
2550  *   Pointer to the list of items.
2551  * @param[in] actions
2552  *   Pointer to the list of actions.
2553  * @param[out] error
2554  *   Pointer to the error structure.
2555  *
2556  * @return
2557  *   0 on success, a negative errno value otherwise and rte_errno is set.
2558  */
2559 static inline int
2560 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
2561 		   const struct rte_flow_attr *attr,
2562 		   const struct rte_flow_item items[],
2563 		   const struct rte_flow_action actions[],
2564 		   struct rte_flow_error *error)
2565 {
2566 	const struct mlx5_flow_driver_ops *fops;
2567 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
2568 
2569 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2570 	fops = flow_get_drv_ops(type);
2571 	return fops->translate(dev, dev_flow, attr, items, actions, error);
2572 }
2573 
2574 /**
2575  * Flow driver apply API. This abstracts calling driver specific functions.
2576  * Parent flow (rte_flow) should have driver type (drv_type). It applies
2577  * translated driver flows on to device. flow_drv_translate() must precede.
2578  *
2579  * @param[in] dev
2580  *   Pointer to Ethernet device structure.
2581  * @param[in, out] flow
2582  *   Pointer to flow structure.
2583  * @param[out] error
2584  *   Pointer to error structure.
2585  *
2586  * @return
2587  *   0 on success, a negative errno value otherwise and rte_errno is set.
2588  */
2589 static inline int
2590 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2591 	       struct rte_flow_error *error)
2592 {
2593 	const struct mlx5_flow_driver_ops *fops;
2594 	enum mlx5_flow_drv_type type = flow->drv_type;
2595 
2596 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2597 	fops = flow_get_drv_ops(type);
2598 	return fops->apply(dev, flow, error);
2599 }
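
/*
 * Call-order summary (derived from the comments above): for each flow
 * the driver chain is flow_drv_validate() -> flow_drv_prepare() ->
 * flow_drv_translate() -> flow_drv_apply(), with flow_drv_remove() and
 * flow_drv_destroy() unwinding in the reverse direction.
 */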
2600 
2601 /**
2602  * Flow driver remove API. This abstracts calling driver specific functions.
2603  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2604  * on device. All the resources of the flow should be freed by calling
2605  * flow_drv_destroy().
2606  *
2607  * @param[in] dev
2608  *   Pointer to Ethernet device.
2609  * @param[in, out] flow
2610  *   Pointer to flow structure.
2611  */
2612 static inline void
2613 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2614 {
2615 	const struct mlx5_flow_driver_ops *fops;
2616 	enum mlx5_flow_drv_type type = flow->drv_type;
2617 
2618 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2619 	fops = flow_get_drv_ops(type);
2620 	fops->remove(dev, flow);
2621 }
2622 
2623 /**
2624  * Flow driver destroy API. This abstracts calling driver specific functions.
2625  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2626  * on device and releases resources of the flow.
2627  *
2628  * @param[in] dev
2629  *   Pointer to Ethernet device.
2630  * @param[in, out] flow
2631  *   Pointer to flow structure.
2632  */
2633 static inline void
2634 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2635 {
2636 	const struct mlx5_flow_driver_ops *fops;
2637 	enum mlx5_flow_drv_type type = flow->drv_type;
2638 
2639 	flow_mreg_split_qrss_release(dev, flow);
2640 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2641 	fops = flow_get_drv_ops(type);
2642 	fops->destroy(dev, flow);
2643 }
2644 
2645 /**
2646  * Validate a flow supported by the NIC.
2647  *
2648  * @see rte_flow_validate()
2649  * @see rte_flow_ops
2650  */
2651 int
2652 mlx5_flow_validate(struct rte_eth_dev *dev,
2653 		   const struct rte_flow_attr *attr,
2654 		   const struct rte_flow_item items[],
2655 		   const struct rte_flow_action actions[],
2656 		   struct rte_flow_error *error)
2657 {
2658 	int ret;
2659 
2660 	ret = flow_drv_validate(dev, attr, items, actions, true, error);
2661 	if (ret < 0)
2662 		return ret;
2663 	return 0;
2664 }
2665 
2666 /**
2667  * Get RSS action from the action list.
2668  *
2669  * @param[in] actions
2670  *   Pointer to the list of actions.
2671  *
2672  * @return
2673  *   Pointer to the RSS action if it exists, NULL otherwise.
2674  */
2675 static const struct rte_flow_action_rss *
2676 flow_get_rss_action(const struct rte_flow_action actions[])
2677 {
2678 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2679 		switch (actions->type) {
2680 		case RTE_FLOW_ACTION_TYPE_RSS:
2681 			return (const struct rte_flow_action_rss *)
2682 			       actions->conf;
2683 		default:
2684 			break;
2685 		}
2686 	}
2687 	return NULL;
2688 }
2689 
2690 static unsigned int
2691 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
2692 {
2693 	const struct rte_flow_item *item;
2694 	unsigned int has_vlan = 0;
2695 
2696 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2697 		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2698 			has_vlan = 1;
2699 			break;
2700 		}
2701 	}
2702 	if (has_vlan)
2703 		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2704 				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2705 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2706 			       MLX5_EXPANSION_ROOT_OUTER;
2707 }
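
/*
 * Mapping summary (derived from the code): a VLAN item in the pattern
 * selects MLX5_EXPANSION_ROOT_ETH_VLAN (outer RSS, level < 2) or
 * MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN (inner RSS); without a VLAN item
 * the plain MLX5_EXPANSION_ROOT/MLX5_EXPANSION_ROOT_OUTER roots are
 * used instead.
 */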
2708 
2709 /**
2710  *  Get layer flags from the prefix flow.
2711  *
2712  *  Some flows may be split into several subflows: the prefix subflow gets
2713  *  the match items and the suffix subflow gets the actions.
2714  *  Some actions need the user-defined match item flags to get the details
2715  *  for the action.
2716  *  This function helps the suffix flow to get the item layer flags from the
2717  *  prefix subflow.
2718  *
2719  * @param[in] dev_flow
2720  *   Pointer to the created prefix subflow.
2721  *
2722  * @return
2723  *   The layers obtained from the prefix subflow.
2724  */
2725 static inline uint64_t
2726 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
2727 {
2728 	uint64_t layers = 0;
2729 
2730 	/* If no decap actions, use the layers directly. */
2731 	if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP))
2732 		return dev_flow->layers;
2733 	/* Convert L3 layers with decap action. */
2734 	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2735 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2736 	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2737 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2738 	/* Convert L4 layers with decap action.  */
2739 	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
2740 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
2741 	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
2742 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
2743 	return layers;
2744 }
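
/*
 * Worked example (illustrative): a prefix subflow matching
 * eth / ipv4 / udp / vxlan / eth / ipv4 / tcp with a decap action has
 * MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L4_TCP set;
 * after decap those become the outer layers seen by the suffix subflow,
 * so the helper above returns MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_TCP.
 */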
2745 
2746 /**
2747  * Get QUEUE/RSS action from the action list.
2748  *
2749  * @param[in] actions
2750  *   Pointer to the list of actions.
2751  * @param[out] qrss
2752  *   Pointer to the return pointer; set to the QUEUE/RSS action if one is
2753  *   found, left untouched otherwise.
2756  *
2757  * @return
2758  *   Total number of actions.
2759  */
2760 static int
2761 flow_parse_qrss_action(const struct rte_flow_action actions[],
2762 		       const struct rte_flow_action **qrss)
2763 {
2764 	int actions_n = 0;
2765 
2766 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2767 		switch (actions->type) {
2768 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2769 		case RTE_FLOW_ACTION_TYPE_RSS:
2770 			*qrss = actions;
2771 			break;
2772 		default:
2773 			break;
2774 		}
2775 		actions_n++;
2776 	}
2777 	/* Count RTE_FLOW_ACTION_TYPE_END. */
2778 	return actions_n + 1;
2779 }
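
/*
 * Usage sketch (illustrative): the returned count includes the END
 * action, so it can directly size a working copy of the action list.
 *
 *	const struct rte_flow_action *qrss = NULL;
 *	int actions_n = flow_parse_qrss_action(actions, &qrss);
 *	struct rte_flow_action *ext_actions =
 *		rte_zmalloc(__func__, sizeof(*ext_actions) * actions_n, 0);
 */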
2780 
2781 /**
2782  * Check meter action from the action list.
2783  *
2784  * @param[in] actions
2785  *   Pointer to the list of actions.
2786  * @param[out] mtr
2787  *   Pointer to the meter existence flag.
2788  *
2789  * @return
2790  *   Total number of actions.
2791  */
2792 static int
2793 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
2794 {
2795 	int actions_n = 0;
2796 
2797 	MLX5_ASSERT(mtr);
2798 	*mtr = 0;
2799 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2800 		switch (actions->type) {
2801 		case RTE_FLOW_ACTION_TYPE_METER:
2802 			*mtr = 1;
2803 			break;
2804 		default:
2805 			break;
2806 		}
2807 		actions_n++;
2808 	}
2809 	/* Count RTE_FLOW_ACTION_TYPE_END. */
2810 	return actions_n + 1;
2811 }
2812 
2813 /**
2814  * Check if the flow should be split due to hairpin.
2815  * The reason for the split is that current HW can't
2816  * support encap on Rx, so if a flow has encap we move it
2817  * to Tx.
2818  *
2819  * @param dev
2820  *   Pointer to Ethernet device.
2821  * @param[in] attr
2822  *   Flow rule attributes.
2823  * @param[in] actions
2824  *   Associated actions (list terminated by the END action).
2825  *
2826  * @return
2827  *   > 0 the number of actions and the flow should be split,
2828  *   0 when no split required.
2829  */
2830 static int
2831 flow_check_hairpin_split(struct rte_eth_dev *dev,
2832 			 const struct rte_flow_attr *attr,
2833 			 const struct rte_flow_action actions[])
2834 {
2835 	int queue_action = 0;
2836 	int action_n = 0;
2837 	int encap = 0;
2838 	const struct rte_flow_action_queue *queue;
2839 	const struct rte_flow_action_rss *rss;
2840 	const struct rte_flow_action_raw_encap *raw_encap;
2841 
2842 	if (!attr->ingress)
2843 		return 0;
2844 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2845 		switch (actions->type) {
2846 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2847 			queue = actions->conf;
2848 			if (queue == NULL)
2849 				return 0;
2850 			if (mlx5_rxq_get_type(dev, queue->index) !=
2851 			    MLX5_RXQ_TYPE_HAIRPIN)
2852 				return 0;
2853 			queue_action = 1;
2854 			action_n++;
2855 			break;
2856 		case RTE_FLOW_ACTION_TYPE_RSS:
2857 			rss = actions->conf;
2858 			if (rss == NULL || rss->queue_num == 0)
2859 				return 0;
2860 			if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
2861 			    MLX5_RXQ_TYPE_HAIRPIN)
2862 				return 0;
2863 			queue_action = 1;
2864 			action_n++;
2865 			break;
2866 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2867 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2868 			encap = 1;
2869 			action_n++;
2870 			break;
2871 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2872 			raw_encap = actions->conf;
2873 			if (raw_encap->size >
2874 			    (sizeof(struct rte_flow_item_eth) +
2875 			     sizeof(struct rte_flow_item_ipv4)))
2876 				encap = 1;
2877 			action_n++;
2878 			break;
2879 		default:
2880 			action_n++;
2881 			break;
2882 		}
2883 	}
2884 	if (encap == 1 && queue_action)
2885 		return action_n;
2886 	return 0;
2887 }
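
/*
 * Example (illustrative): an ingress rule with "queue (hairpin) +
 * vxlan_encap" contains both a hairpin queue action and an encap
 * action, so the helper above returns the action count and the rule is
 * split into Rx and Tx parts by flow_hairpin_split() below.
 */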
2888 
2889 /* Declare flow create/destroy prototype in advance. */
2890 static struct rte_flow *
2891 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2892 		 const struct rte_flow_attr *attr,
2893 		 const struct rte_flow_item items[],
2894 		 const struct rte_flow_action actions[],
2895 		 bool external, struct rte_flow_error *error);
2896 
2897 static void
2898 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2899 		  struct rte_flow *flow);
2900 
2901 /**
2902  * Add a flow of copying flow metadata registers in RX_CP_TBL.
2903  *
2904  * As mark_id is unique, if there's already a registered flow for the mark_id,
2905  * return by increasing the reference counter of the resource. Otherwise, create
2906  * the resource (mcp_res) and flow.
2907  *
2908  * Flow looks like,
2909  *   - If ingress port is ANY and reg_c[1] is mark_id,
2910  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
2911  *
2912  * For default flow (zero mark_id), flow is like,
2913  *   - If ingress port is ANY,
2914  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
2915  *
2916  * @param dev
2917  *   Pointer to Ethernet device.
2918  * @param mark_id
2919  *   ID of MARK action, zero means default flow for META.
2920  * @param[out] error
2921  *   Perform verbose error reporting if not NULL.
2922  *
2923  * @return
2924  *   Associated resource on success, NULL otherwise and rte_errno is set.
2925  */
2926 static struct mlx5_flow_mreg_copy_resource *
2927 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
2928 			  struct rte_flow_error *error)
2929 {
2930 	struct mlx5_priv *priv = dev->data->dev_private;
2931 	struct rte_flow_attr attr = {
2932 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
2933 		.ingress = 1,
2934 	};
2935 	struct mlx5_rte_flow_item_tag tag_spec = {
2936 		.data = mark_id,
2937 	};
2938 	struct rte_flow_item items[] = {
2939 		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
2940 	};
2941 	struct rte_flow_action_mark ftag = {
2942 		.id = mark_id,
2943 	};
2944 	struct mlx5_flow_action_copy_mreg cp_mreg = {
2945 		.dst = REG_B,
2946 		.src = 0,
2947 	};
2948 	struct rte_flow_action_jump jump = {
2949 		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
2950 	};
2951 	struct rte_flow_action actions[] = {
2952 		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
2953 	};
2954 	struct mlx5_flow_mreg_copy_resource *mcp_res;
2955 	int ret;
2956 
2957 	/* Fill the register fields in the flow. */
2958 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2959 	if (ret < 0)
2960 		return NULL;
2961 	tag_spec.id = ret;
2962 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
2963 	if (ret < 0)
2964 		return NULL;
2965 	cp_mreg.src = ret;
2966 	/* Check if already registered. */
2967 	MLX5_ASSERT(priv->mreg_cp_tbl);
2968 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
2969 	if (mcp_res) {
2970 		/* For non-default rule. */
2971 		if (mark_id != MLX5_DEFAULT_COPY_ID)
2972 			mcp_res->refcnt++;
2973 		MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
2974 			    mcp_res->refcnt == 1);
2975 		return mcp_res;
2976 	}
2977 	/* Provide the full width of FLAG specific value. */
2978 	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
2979 		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
2980 	/* Build a new flow. */
2981 	if (mark_id != MLX5_DEFAULT_COPY_ID) {
2982 		items[0] = (struct rte_flow_item){
2983 			.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
2984 			.spec = &tag_spec,
2985 		};
2986 		items[1] = (struct rte_flow_item){
2987 			.type = RTE_FLOW_ITEM_TYPE_END,
2988 		};
2989 		actions[0] = (struct rte_flow_action){
2990 			.type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
2991 			.conf = &ftag,
2992 		};
2993 		actions[1] = (struct rte_flow_action){
2994 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
2995 			.conf = &cp_mreg,
2996 		};
2997 		actions[2] = (struct rte_flow_action){
2998 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
2999 			.conf = &jump,
3000 		};
3001 		actions[3] = (struct rte_flow_action){
3002 			.type = RTE_FLOW_ACTION_TYPE_END,
3003 		};
3004 	} else {
3005 		/* Default rule, wildcard match. */
3006 		attr.priority = MLX5_FLOW_PRIO_RSVD;
3007 		items[0] = (struct rte_flow_item){
3008 			.type = RTE_FLOW_ITEM_TYPE_END,
3009 		};
3010 		actions[0] = (struct rte_flow_action){
3011 			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3012 			.conf = &cp_mreg,
3013 		};
3014 		actions[1] = (struct rte_flow_action){
3015 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
3016 			.conf = &jump,
3017 		};
3018 		actions[2] = (struct rte_flow_action){
3019 			.type = RTE_FLOW_ACTION_TYPE_END,
3020 		};
3021 	}
3022 	/* Build a new entry. */
3023 	mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
3024 	if (!mcp_res) {
3025 		rte_errno = ENOMEM;
3026 		return NULL;
3027 	}
3028 	/*
3029 	 * The copy flows are not included in any list. They
3030 	 * are referenced from other flows and cannot be
3031 	 * applied, removed or deleted in arbitrary order
3032 	 * by list traversing.
3033 	 */
3034 	mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
3035 					 actions, false, error);
3036 	if (!mcp_res->flow)
3037 		goto error;
3038 	mcp_res->refcnt++;
3039 	mcp_res->hlist_ent.key = mark_id;
3040 	ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
3041 				&mcp_res->hlist_ent);
3042 	MLX5_ASSERT(!ret);
3043 	if (ret)
3044 		goto error;
3045 	return mcp_res;
3046 error:
3047 	if (mcp_res->flow)
3048 		flow_list_destroy(dev, NULL, mcp_res->flow);
3049 	rte_free(mcp_res);
3050 	return NULL;
3051 }
3052 
3053 /**
3054  * Release flow in RX_CP_TBL.
3055  *
3056  * @param dev
3057  *   Pointer to Ethernet device.
3058  * @param flow
3059  *   Parent flow for which copying is provided.
3060  */
3061 static void
3062 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3063 			  struct rte_flow *flow)
3064 {
3065 	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
3066 	struct mlx5_priv *priv = dev->data->dev_private;
3067 
3068 	if (!mcp_res || !priv->mreg_cp_tbl)
3069 		return;
3070 	if (flow->copy_applied) {
3071 		MLX5_ASSERT(mcp_res->appcnt);
3072 		flow->copy_applied = 0;
3073 		--mcp_res->appcnt;
3074 		if (!mcp_res->appcnt)
3075 			flow_drv_remove(dev, mcp_res->flow);
3076 	}
3077 	/*
3078 	 * We do not check availability of metadata registers here,
3079 	 * because copy resources are not allocated in this case.
3080 	 */
3081 	if (--mcp_res->refcnt)
3082 		return;
3083 	MLX5_ASSERT(mcp_res->flow);
3084 	flow_list_destroy(dev, NULL, mcp_res->flow);
3085 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3086 	rte_free(mcp_res);
3087 	flow->mreg_copy = NULL;
3088 }
3089 
3090 /**
3091  * Start flow in RX_CP_TBL.
3092  *
3093  * @param dev
3094  *   Pointer to Ethernet device.
3095  * @param flow
3096  *   Parent flow for which copying is provided.
3097  *
3098  * @return
3099  *   0 on success, a negative errno value otherwise and rte_errno is set.
3100  */
3101 static int
3102 flow_mreg_start_copy_action(struct rte_eth_dev *dev,
3103 			    struct rte_flow *flow)
3104 {
3105 	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
3106 	int ret;
3107 
3108 	if (!mcp_res || flow->copy_applied)
3109 		return 0;
3110 	if (!mcp_res->appcnt) {
3111 		ret = flow_drv_apply(dev, mcp_res->flow, NULL);
3112 		if (ret)
3113 			return ret;
3114 	}
3115 	++mcp_res->appcnt;
3116 	flow->copy_applied = 1;
3117 	return 0;
3118 }
3119 
3120 /**
3121  * Stop flow in RX_CP_TBL.
3122  *
3123  * @param dev
3124  *   Pointer to Ethernet device.
3125  * @param flow
3126  *   Parent flow for which copying is provided.
3127  */
3128 static void
3129 flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
3130 			   struct rte_flow *flow)
3131 {
3132 	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
3133 
3134 	if (!mcp_res || !flow->copy_applied)
3135 		return;
3136 	MLX5_ASSERT(mcp_res->appcnt);
3137 	--mcp_res->appcnt;
3138 	flow->copy_applied = 0;
3139 	if (!mcp_res->appcnt)
3140 		flow_drv_remove(dev, mcp_res->flow);
3141 }
3142 
3143 /**
3144  * Remove the default copy action from RX_CP_TBL.
3145  *
3146  * @param dev
3147  *   Pointer to Ethernet device.
3148  */
3149 static void
3150 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3151 {
3152 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3153 	struct mlx5_priv *priv = dev->data->dev_private;
3154 
3155 	/* Check if default flow is registered. */
3156 	if (!priv->mreg_cp_tbl)
3157 		return;
3158 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
3159 					    MLX5_DEFAULT_COPY_ID);
3160 	if (!mcp_res)
3161 		return;
3162 	MLX5_ASSERT(mcp_res->flow);
3163 	flow_list_destroy(dev, NULL, mcp_res->flow);
3164 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3165 	rte_free(mcp_res);
3166 }
3167 
3168 /**
3169  * Add the default copy action in RX_CP_TBL.
3170  *
3171  * @param dev
3172  *   Pointer to Ethernet device.
3173  * @param[out] error
3174  *   Perform verbose error reporting if not NULL.
3175  *
3176  * @return
3177  *   0 for success, negative value otherwise and rte_errno is set.
3178  */
3179 static int
3180 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3181 				  struct rte_flow_error *error)
3182 {
3183 	struct mlx5_priv *priv = dev->data->dev_private;
3184 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3185 
3186 	/* Check whether extensive metadata feature is engaged. */
3187 	if (!priv->config.dv_flow_en ||
3188 	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3189 	    !mlx5_flow_ext_mreg_supported(dev) ||
3190 	    !priv->sh->dv_regc0_mask)
3191 		return 0;
3192 	mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3193 	if (!mcp_res)
3194 		return -rte_errno;
3195 	return 0;
3196 }
3197 
3198 /**
3199  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3200  *
3201  * All the flows having a Q/RSS action should be split by
3202  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
3203  * performs the following,
3204  *   - CQE->flow_tag := reg_c[1] (MARK)
3205  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3206  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
3207  * but there should be a flow per each MARK ID set by MARK action.
3208  *
3209  * For the aforementioned reason, if there's a MARK action in flow's action
3210  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3211  * the MARK ID to CQE's flow_tag like,
3212  *   - If reg_c[1] is mark_id,
3213  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3214  *
3215  * For SET_META action which stores value in reg_c[0], as the destination is
3216  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3217  * MARK ID means the default flow. The default flow looks like,
3218  *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3219  *
3220  * @param dev
3221  *   Pointer to Ethernet device.
3222  * @param flow
3223  *   Pointer to flow structure.
3224  * @param[in] actions
3225  *   Pointer to the list of actions.
3226  * @param[out] error
3227  *   Perform verbose error reporting if not NULL.
3228  *
3229  * @return
3230  *   0 on success, negative value otherwise and rte_errno is set.
3231  */
3232 static int
3233 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3234 			    struct rte_flow *flow,
3235 			    const struct rte_flow_action *actions,
3236 			    struct rte_flow_error *error)
3237 {
3238 	struct mlx5_priv *priv = dev->data->dev_private;
3239 	struct mlx5_dev_config *config = &priv->config;
3240 	struct mlx5_flow_mreg_copy_resource *mcp_res;
3241 	const struct rte_flow_action_mark *mark;
3242 
3243 	/* Check whether extensive metadata feature is engaged. */
3244 	if (!config->dv_flow_en ||
3245 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3246 	    !mlx5_flow_ext_mreg_supported(dev) ||
3247 	    !priv->sh->dv_regc0_mask)
3248 		return 0;
3249 	/* Find MARK action. */
3250 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3251 		switch (actions->type) {
3252 		case RTE_FLOW_ACTION_TYPE_FLAG:
3253 			mcp_res = flow_mreg_add_copy_action
3254 				(dev, MLX5_FLOW_MARK_DEFAULT, error);
3255 			if (!mcp_res)
3256 				return -rte_errno;
3257 			flow->mreg_copy = mcp_res;
3258 			if (dev->data->dev_started) {
3259 				mcp_res->appcnt++;
3260 				flow->copy_applied = 1;
3261 			}
3262 			return 0;
3263 		case RTE_FLOW_ACTION_TYPE_MARK:
3264 			mark = (const struct rte_flow_action_mark *)
3265 				actions->conf;
3266 			mcp_res =
3267 				flow_mreg_add_copy_action(dev, mark->id, error);
3268 			if (!mcp_res)
3269 				return -rte_errno;
3270 			flow->mreg_copy = mcp_res;
3271 			if (dev->data->dev_started) {
3272 				mcp_res->appcnt++;
3273 				flow->copy_applied = 1;
3274 			}
3275 			return 0;
3276 		default:
3277 			break;
3278 		}
3279 	}
3280 	return 0;
3281 }
3282 
3283 #define MLX5_MAX_SPLIT_ACTIONS 24
3284 #define MLX5_MAX_SPLIT_ITEMS 24
3285 
3286 /**
3287  * Split the hairpin flow.
3288  * Since HW can't support encap on Rx we move the encap to Tx.
3289  * If the count action is after the encap then we also
3290  * move the count action. In this case the count will also measure
3291  * the outer bytes.
3292  *
3293  * @param dev
3294  *   Pointer to Ethernet device.
3295  * @param[in] actions
3296  *   Associated actions (list terminated by the END action).
3297  * @param[out] actions_rx
3298  *   Rx flow actions.
3299  * @param[out] actions_tx
3300  *   Tx flow actions.
3301  * @param[out] pattern_tx
3302  *   The pattern items for the Tx flow.
3303  * @param[out] flow_id
3304  *   The flow ID connected to this flow.
3305  *
3306  * @return
3307  *   0 on success.
3308  */
3309 static int
3310 flow_hairpin_split(struct rte_eth_dev *dev,
3311 		   const struct rte_flow_action actions[],
3312 		   struct rte_flow_action actions_rx[],
3313 		   struct rte_flow_action actions_tx[],
3314 		   struct rte_flow_item pattern_tx[],
3315 		   uint32_t *flow_id)
3316 {
3317 	struct mlx5_priv *priv = dev->data->dev_private;
3318 	const struct rte_flow_action_raw_encap *raw_encap;
3319 	const struct rte_flow_action_raw_decap *raw_decap;
3320 	struct mlx5_rte_flow_action_set_tag *set_tag;
3321 	struct rte_flow_action *tag_action;
3322 	struct mlx5_rte_flow_item_tag *tag_item;
3323 	struct rte_flow_item *item;
3324 	char *addr;
3325 	int encap = 0;
3326 
3327 	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
3328 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3329 		switch (actions->type) {
3330 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3331 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3332 			rte_memcpy(actions_tx, actions,
3333 			       sizeof(struct rte_flow_action));
3334 			actions_tx++;
3335 			break;
3336 		case RTE_FLOW_ACTION_TYPE_COUNT:
3337 			if (encap) {
3338 				rte_memcpy(actions_tx, actions,
3339 					   sizeof(struct rte_flow_action));
3340 				actions_tx++;
3341 			} else {
3342 				rte_memcpy(actions_rx, actions,
3343 					   sizeof(struct rte_flow_action));
3344 				actions_rx++;
3345 			}
3346 			break;
3347 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3348 			raw_encap = actions->conf;
3349 			if (raw_encap->size >
3350 			    (sizeof(struct rte_flow_item_eth) +
3351 			     sizeof(struct rte_flow_item_ipv4))) {
3352 				memcpy(actions_tx, actions,
3353 				       sizeof(struct rte_flow_action));
3354 				actions_tx++;
3355 				encap = 1;
3356 			} else {
3357 				rte_memcpy(actions_rx, actions,
3358 					   sizeof(struct rte_flow_action));
3359 				actions_rx++;
3360 			}
3361 			break;
3362 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3363 			raw_decap = actions->conf;
3364 			if (raw_decap->size <
3365 			    (sizeof(struct rte_flow_item_eth) +
3366 			     sizeof(struct rte_flow_item_ipv4))) {
3367 				memcpy(actions_tx, actions,
3368 				       sizeof(struct rte_flow_action));
3369 				actions_tx++;
3370 			} else {
3371 				rte_memcpy(actions_rx, actions,
3372 					   sizeof(struct rte_flow_action));
3373 				actions_rx++;
3374 			}
3375 			break;
3376 		default:
3377 			rte_memcpy(actions_rx, actions,
3378 				   sizeof(struct rte_flow_action));
3379 			actions_rx++;
3380 			break;
3381 		}
3382 	}
3383 	/* Add set meta action and end action for the Rx flow. */
3384 	tag_action = actions_rx;
3385 	tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3386 	actions_rx++;
3387 	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
3388 	actions_rx++;
3389 	set_tag = (void *)actions_rx;
3390 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
3391 	MLX5_ASSERT(set_tag->id > REG_NONE);
3392 	set_tag->data = *flow_id;
3393 	tag_action->conf = set_tag;
3394 	/* Add the END action to the Tx flow and create the Tx item list. */
3395 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
3396 	addr = (void *)&pattern_tx[2];
3397 	item = pattern_tx;
3398 	item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3399 	tag_item = (void *)addr;
3400 	tag_item->data = *flow_id;
3401 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
3402 	MLX5_ASSERT(tag_item->id > REG_NONE);
3403 	item->spec = tag_item;
3404 	addr += sizeof(struct mlx5_rte_flow_item_tag);
3405 	tag_item = (void *)addr;
3406 	tag_item->data = UINT32_MAX;
3407 	tag_item->id = UINT16_MAX;
3408 	item->mask = tag_item;
3409 	addr += sizeof(struct mlx5_rte_flow_item_tag);
3410 	item->last = NULL;
3411 	item++;
3412 	item->type = RTE_FLOW_ITEM_TYPE_END;
3413 	return 0;
3414 }
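
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard and
 * the example_* name are hypothetical): how the flow creation path drives
 * flow_hairpin_split(). The caller pre-allocates the Rx/Tx action arrays
 * and the Tx pattern as unions over raw buffers, because the internal
 * SET_TAG/TAG configurations are stored past the action/item arrays in
 * the same memory.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static void
example_hairpin_split(struct rte_eth_dev *dev,
		      const struct rte_flow_action actions[])
{
	union {
		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
		uint8_t buffer[2048];
	} actions_rx, actions_tx;
	union {
		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
		uint8_t buffer[2048];
	} items_tx;
	uint32_t flow_id = 0;

	/* Rx keeps most actions; encap (and a count after it) go to Tx. */
	flow_hairpin_split(dev, actions, actions_rx.actions,
			   actions_tx.actions, items_tx.items, &flow_id);
	/*
	 * actions_rx now ends with an internal SET_TAG carrying flow_id
	 * and items_tx matches the same tag on the Tx side.
	 */
}
#endif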
3415 
3416 /**
3417  * The last stage of the splitting chain. It just creates the subflow
3418  * without any modification.
3419  *
3420  * @param dev
3421  *   Pointer to Ethernet device.
3422  * @param[in] flow
3423  *   Parent flow structure pointer.
3424  * @param[in, out] sub_flow
3425  *   Pointer to return the created subflow, may be NULL.
3426  * @param[in] prefix_layers
3427  *   Prefix subflow layers, may be 0.
3428  * @param[in] attr
3429  *   Flow rule attributes.
3430  * @param[in] items
3431  *   Pattern specification (list terminated by the END pattern item).
3432  * @param[in] actions
3433  *   Associated actions (list terminated by the END action).
3434  * @param[in] external
3435  *   This flow rule is created by a request external to the PMD.
3436  * @param[out] error
3437  *   Perform verbose error reporting if not NULL.
3438  * @return
3439  *   0 on success, negative value otherwise
3440  */
3441 static int
3442 flow_create_split_inner(struct rte_eth_dev *dev,
3443 			struct rte_flow *flow,
3444 			struct mlx5_flow **sub_flow,
3445 			uint64_t prefix_layers,
3446 			const struct rte_flow_attr *attr,
3447 			const struct rte_flow_item items[],
3448 			const struct rte_flow_action actions[],
3449 			bool external, struct rte_flow_error *error)
3450 {
3451 	struct mlx5_flow *dev_flow;
3452 
3453 	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
3454 	if (!dev_flow)
3455 		return -rte_errno;
3456 	dev_flow->flow = flow;
3457 	dev_flow->external = external;
3458 	/* Subflow object was created, we must include one in the list. */
3459 	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
3460 	/*
3461 	 * If dev_flow is one of the suffix flows, some actions in the
3462 	 * suffix flow may need user-defined item layer flags.
3463 	 */
3464 	if (prefix_layers)
3465 		dev_flow->layers = prefix_layers;
3466 	if (sub_flow)
3467 		*sub_flow = dev_flow;
3468 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
3469 }
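
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard and
 * the example_* name are hypothetical, and flow_get_prefix_layer_flags()
 * is assumed to be declared by this point): chaining two
 * flow_create_split_inner() calls so the suffix subflow inherits the
 * item layers detected while translating the prefix subflow, mirroring
 * the meter/metadata splitters below.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_split_inner_chain(struct rte_eth_dev *dev, struct rte_flow *flow,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item items[],
			  const struct rte_flow_action pre_actions[],
			  const struct rte_flow_item sfx_items[],
			  const struct rte_flow_action sfx_actions[],
			  struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow = NULL;
	int ret;

	/* Prefix subflow: no inherited layers. */
	ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
				      items, pre_actions, true, error);
	if (ret)
		return ret;
	/* Suffix subflow: inherit the prefix layers for translation. */
	return flow_create_split_inner(dev, flow, NULL,
				       flow_get_prefix_layer_flags(dev_flow),
				       attr, sfx_items, sfx_actions, true,
				       error);
}
#endif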
3470 
3471 /**
3472  * Split the meter flow.
3473  *
3474  * As the meter flow will be split into sub flows, the actions other
3475  * than the meter action make sense only when the meter accepts the
3476  * packet. If the packet needs to be dropped, no additional actions
3477  * should be taken.
3478  *
3479  * One kind of special action which decapsulates the L3 tunnel
3480  * header will be in the prefix sub flow, so as not to take the
3481  * L3 tunnel header into account.
3482  *
3483  * @param dev
3484  *   Pointer to Ethernet device.
3485  * @param[in] items
3486  *   Pattern specification (list terminated by the END pattern item).
3487  * @param[out] sfx_items
3488  *   Suffix flow match items (list terminated by the END pattern item).
3489  * @param[in] actions
3490  *   Associated actions (list terminated by the END action).
3491  * @param[out] actions_sfx
3492  *   Suffix flow actions.
3493  * @param[out] actions_pre
3494  *   Prefix flow actions.
3495  *
3496  * The internal TAG item matching the allocated tag ID is placed at
3497  * the head of the suffix items, followed by the copied user items.
3498  *
3499  * @return
3500  *   The suffix flow tag ID on success, 0 on failure.
3502  */
3503 static int
3504 flow_meter_split_prep(struct rte_eth_dev *dev,
3505 		 const struct rte_flow_item items[],
3506 		 struct rte_flow_item sfx_items[],
3507 		 const struct rte_flow_action actions[],
3508 		 struct rte_flow_action actions_sfx[],
3509 		 struct rte_flow_action actions_pre[])
3510 {
3511 	struct rte_flow_action *tag_action = NULL;
3512 	struct rte_flow_item *tag_item;
3513 	struct mlx5_rte_flow_action_set_tag *set_tag;
3514 	struct rte_flow_error error;
3515 	const struct rte_flow_action_raw_encap *raw_encap;
3516 	const struct rte_flow_action_raw_decap *raw_decap;
3517 	struct mlx5_rte_flow_item_tag *tag_spec;
3518 	struct mlx5_rte_flow_item_tag *tag_mask;
3519 	uint32_t tag_id;
3520 	bool copy_vlan = false;
3521 
3522 	/* Prepare the actions for prefix and suffix flow. */
3523 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3524 		struct rte_flow_action **action_cur = NULL;
3525 
3526 		switch (actions->type) {
3527 		case RTE_FLOW_ACTION_TYPE_METER:
3528 			/* Add the extra tag action first. */
3529 			tag_action = actions_pre;
3530 			tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3531 			actions_pre++;
3532 			action_cur = &actions_pre;
3533 			break;
3534 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3535 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3536 			action_cur = &actions_pre;
3537 			break;
3538 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3539 			raw_encap = actions->conf;
3540 			if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
3541 				action_cur = &actions_pre;
3542 			break;
3543 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3544 			raw_decap = actions->conf;
3545 			if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3546 				action_cur = &actions_pre;
3547 			break;
3548 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3549 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3550 			copy_vlan = true;
3551 			break;
3552 		default:
3553 			break;
3554 		}
3555 		if (!action_cur)
3556 			action_cur = &actions_sfx;
3557 		memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
3558 		(*action_cur)++;
3559 	}
3560 	/* Add the END action to both action lists. */
3561 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
3562 	actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
3563 	actions_pre++;
3564 	/* Set the tag. */
3565 	set_tag = (void *)actions_pre;
3566 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3567 	/*
3568 	 * Get the id from the qrss_pool to make qrss share the id with meter.
3569 	 */
3570 	tag_id = flow_qrss_get_id(dev);
3571 	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
3572 	MLX5_ASSERT(tag_action);
3573 	tag_action->conf = set_tag;
3574 	/* Prepare the suffix subflow items. */
3575 	tag_item = sfx_items++;
3576 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3577 		int item_type = items->type;
3578 
3579 		switch (item_type) {
3580 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
3581 			memcpy(sfx_items, items, sizeof(*sfx_items));
3582 			sfx_items++;
3583 			break;
3584 		case RTE_FLOW_ITEM_TYPE_VLAN:
3585 			if (copy_vlan) {
3586 				memcpy(sfx_items, items, sizeof(*sfx_items));
3587 				/*
3588 				 * Convert to internal match item, it is used
3589 				 * for vlan push and set vid.
3590 				 */
3591 				sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
3592 				sfx_items++;
3593 			}
3594 			break;
3595 		default:
3596 			break;
3597 		}
3598 	}
3599 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
3600 	sfx_items++;
3601 	tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
3602 	tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
3603 	tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3604 	tag_mask = tag_spec + 1;
3605 	tag_mask->data = 0xffffff00;
3606 	tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3607 	tag_item->spec = tag_spec;
3608 	tag_item->last = NULL;
3609 	tag_item->mask = tag_mask;
3610 	return tag_id;
3611 }
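
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard and
 * the example_* name are hypothetical): the single-allocation layout that
 * flow_meter_split_prep() expects, as built by flow_create_split_meter()
 * below. Suffix actions, prefix actions and suffix items live back to
 * back in one buffer.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static void
example_meter_split_layout(struct rte_eth_dev *dev,
			   const struct rte_flow_item items[],
			   const struct rte_flow_action actions[],
			   int actions_n)
{
	/* Room for meter, decap, encap, tag and end on top of the
	 * original actions, plus the SET_TAG configuration.
	 */
	size_t act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
			  sizeof(struct mlx5_rte_flow_action_set_tag);
	/* Room for tag, vlan, port id and end items, plus the internal
	 * tag spec/mask pair (4 == METER_SUFFIX_ITEM below).
	 */
	size_t item_size = sizeof(struct rte_flow_item) * 4 +
			   sizeof(struct mlx5_rte_flow_item_tag) * 2;
	struct rte_flow_action *sfx_actions =
		rte_zmalloc(__func__, act_size + item_size, 0);
	struct rte_flow_item *sfx_items;
	struct rte_flow_action *pre_actions;
	uint32_t tag_id;

	if (!sfx_actions)
		return;
	sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size);
	pre_actions = sfx_actions + actions_n;
	tag_id = flow_meter_split_prep(dev, items, sfx_items, actions,
				       sfx_actions, pre_actions);
	/*
	 * tag_id != 0 on success: the prefix flow ends with
	 * SET_TAG(tag_id) and the suffix flow matches the same tag.
	 */
	(void)tag_id;
	rte_free(sfx_actions);
}
#endif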
3612 
3613 /**
3614  * Split action list having QUEUE/RSS for metadata register copy.
3615  *
3616  * Once Q/RSS action is detected in user's action list, the flow action
3617  * should be split in order to copy metadata registers, which will happen in
3618  * RX_CP_TBL like,
3619  *   - CQE->flow_tag := reg_c[1] (MARK)
3620  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3621  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
3622  * This is because the last action of each flow must be a terminal action
3623  * (QUEUE, RSS or DROP).
3624  *
3625  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
3626  * stored and kept in the mlx5_flow structure per each sub_flow.
3627  *
3628  * The Q/RSS action is replaced with,
3629  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
3630  * And the following JUMP action is added at the end,
3631  *   - JUMP, to RX_CP_TBL.
3632  *
3633  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
3634  * flow_create_split_metadata() routine. The flow will look like,
3635  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
3636  *
3637  * @param dev
3638  *   Pointer to Ethernet device.
3639  * @param[out] split_actions
3640  *   Pointer to store split actions to jump to CP_TBL.
3641  * @param[in] actions
3642  *   Pointer to the list of original flow actions.
3643  * @param[in] qrss
3644  *   Pointer to the Q/RSS action.
3645  * @param[in] actions_n
3646  *   Number of original actions.
3647  * @param[out] error
3648  *   Perform verbose error reporting if not NULL.
3649  *
3650  * @return
3651  *   Non-zero unique flow_id on success, otherwise 0 and
3652  *   error/rte_errno are set.
3653  */
3654 static uint32_t
3655 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
3656 			  struct rte_flow_action *split_actions,
3657 			  const struct rte_flow_action *actions,
3658 			  const struct rte_flow_action *qrss,
3659 			  int actions_n, struct rte_flow_error *error)
3660 {
3661 	struct mlx5_rte_flow_action_set_tag *set_tag;
3662 	struct rte_flow_action_jump *jump;
3663 	const int qrss_idx = qrss - actions;
3664 	uint32_t flow_id = 0;
3665 	int ret = 0;
3666 
3667 	/*
3668 	 * Given actions will be split
3669 	 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
3670 	 * - Add jump to mreg CP_TBL.
3671 	 * As a result, there will be one more action.
3672 	 */
3673 	++actions_n;
3674 	memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
3675 	set_tag = (void *)(split_actions + actions_n);
3676 	/*
3677 	 * If the tag action is not set to void (i.e. we are not the meter
3678 	 * suffix flow), add the tag action, since the meter suffix flow
3679 	 * already has the tag added.
3680 	 */
3681 	if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
3682 		/*
3683 		 * Allocate the new subflow ID. This one is unique within
3684 		 * device and not shared with representors. Otherwise,
3685 		 * we would have to resolve multi-thread access synch
3686 		 * issue. Each flow on the shared device is appended
3687 		 * with source vport identifier, so the resulting
3688 		 * flows will be unique in the shared (by master and
3689 		 * representors) domain even if they have coinciding
3690 		 * IDs.
3691 		 */
3692 		flow_id = flow_qrss_get_id(dev);
3693 		if (!flow_id)
3694 			return rte_flow_error_set(error, ENOMEM,
3695 						  RTE_FLOW_ERROR_TYPE_ACTION,
3696 						  NULL, "can't allocate id "
3697 						  "for split Q/RSS subflow");
3698 		/* Internal SET_TAG action to set flow ID. */
3699 		*set_tag = (struct mlx5_rte_flow_action_set_tag){
3700 			.data = flow_id,
3701 		};
3702 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
3703 		if (ret < 0)
3704 			return ret;
3705 		set_tag->id = ret;
3706 		/* Construct new actions array. */
3707 		/* Replace QUEUE/RSS action. */
3708 		split_actions[qrss_idx] = (struct rte_flow_action){
3709 			.type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
3710 			.conf = set_tag,
3711 		};
3712 	}
3713 	/* JUMP action to jump to mreg copy table (CP_TBL). */
3714 	jump = (void *)(set_tag + 1);
3715 	*jump = (struct rte_flow_action_jump){
3716 		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3717 	};
3718 	split_actions[actions_n - 2] = (struct rte_flow_action){
3719 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
3720 		.conf = jump,
3721 	};
3722 	split_actions[actions_n - 1] = (struct rte_flow_action){
3723 		.type = RTE_FLOW_ACTION_TYPE_END,
3724 	};
3725 	return flow_id;
3726 }
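
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard,
 * the example_* name and the mark/queue values are hypothetical): the
 * result of flow_mreg_split_qrss_prep() for "MARK / QUEUE / END". The
 * SET_TAG and JUMP configurations are stored past the action array in
 * the same buffer.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static void
example_qrss_split_result(struct rte_eth_dev *dev,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action_mark mark = { .id = 1 };
	const struct rte_flow_action_queue queue = { .index = 3 };
	/* One spare zero-initialized (END) entry, as the helper copies
	 * actions_n + 1 entries from the source list.
	 */
	const struct rte_flow_action actions[4] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	union {
		struct rte_flow_action actions[4];
		uint8_t raw[sizeof(struct rte_flow_action) * 4 +
			    sizeof(struct mlx5_rte_flow_action_set_tag) +
			    sizeof(struct rte_flow_action_jump)];
	} split_buf;
	uint32_t flow_id;

	flow_id = flow_mreg_split_qrss_prep(dev, split_buf.actions, actions,
					    &actions[1], 3, error);
	/*
	 * On success (flow_id != 0) the split list holds:
	 *   MARK / SET_TAG(reg_c[2] := flow_id) / JUMP(RX_CP_TBL) / END
	 * and the QUEUE action is re-applied later in RX_ACT_TBL.
	 */
	(void)flow_id;
}
#endif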
3727 
3728 /**
3729  * Extend the given action list for Tx metadata copy.
3730  *
3731  * Copy the given action list to the ext_actions and add flow metadata register
3732  * copy action in order to copy reg_a set by WQE to reg_c[0].
3733  *
3734  * @param[out] ext_actions
3735  *   Pointer to the extended action list.
3736  * @param[in] actions
3737  *   Pointer to the list of actions.
3738  * @param[in] actions_n
3739  *   Number of actions in the list.
3740  * @param[out] error
3741  *   Perform verbose error reporting if not NULL.
3742  *
3743  * @return
3744  *   0 on success, negative value otherwise
3745  */
3746 static int
3747 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
3748 		       struct rte_flow_action *ext_actions,
3749 		       const struct rte_flow_action *actions,
3750 		       int actions_n, struct rte_flow_error *error)
3751 {
3752 	struct mlx5_flow_action_copy_mreg *cp_mreg =
3753 		(struct mlx5_flow_action_copy_mreg *)
3754 			(ext_actions + actions_n + 1);
3755 	int ret;
3756 
3757 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3758 	if (ret < 0)
3759 		return ret;
3760 	cp_mreg->dst = ret;
3761 	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
3762 	if (ret < 0)
3763 		return ret;
3764 	cp_mreg->src = ret;
3765 	memcpy(ext_actions, actions,
3766 			sizeof(*ext_actions) * actions_n);
3767 	ext_actions[actions_n - 1] = (struct rte_flow_action){
3768 		.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3769 		.conf = cp_mreg,
3770 	};
3771 	ext_actions[actions_n] = (struct rte_flow_action){
3772 		.type = RTE_FLOW_ACTION_TYPE_END,
3773 	};
3774 	return 0;
3775 }
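
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard and
 * the example_* name are hypothetical): buffer sizing for
 * flow_mreg_tx_copy_prep(). The copy_mreg configuration lands one slot
 * past the new END action in the same allocation, as in
 * flow_create_split_metadata() below.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_tx_copy_prep(struct rte_eth_dev *dev,
		     const struct rte_flow_action *actions,
		     int actions_n, struct rte_flow_error *error)
{
	size_t act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
			  sizeof(struct mlx5_flow_action_copy_mreg);
	struct rte_flow_action *ext_actions =
		rte_zmalloc(__func__, act_size, 0);
	int ret;

	if (!ext_actions)
		return -ENOMEM;
	/* Result: actions[0..n-2] / COPY_MREG(reg_a -> reg_c[0]) / END. */
	ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, actions_n,
				     error);
	rte_free(ext_actions);
	return ret;
}
#endif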
3776 
3777 /**
3778  * The splitting for metadata feature.
3779  *
3780  * - Q/RSS action on NIC Rx should be split in order to pass by
3781  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
3782  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
3783  *
3784  * - All the actions on NIC Tx should have a mreg copy action to
3785  *   copy reg_a from WQE to reg_c[0].
3786  *
3787  * @param dev
3788  *   Pointer to Ethernet device.
3789  * @param[in] flow
3790  *   Parent flow structure pointer.
3791  * @param[in] prefix_layers
3792  *   Prefix flow layer flags.
3793  * @param[in] attr
3794  *   Flow rule attributes.
3795  * @param[in] items
3796  *   Pattern specification (list terminated by the END pattern item).
3797  * @param[in] actions
3798  *   Associated actions (list terminated by the END action).
3799  * @param[in] external
3800  *   This flow rule is created by a request external to the PMD.
3801  * @param[out] error
3802  *   Perform verbose error reporting if not NULL.
3803  * @return
3804  *   0 on success, negative value otherwise
3805  */
3806 static int
3807 flow_create_split_metadata(struct rte_eth_dev *dev,
3808 			   struct rte_flow *flow,
3809 			   uint64_t prefix_layers,
3810 			   const struct rte_flow_attr *attr,
3811 			   const struct rte_flow_item items[],
3812 			   const struct rte_flow_action actions[],
3813 			   bool external, struct rte_flow_error *error)
3814 {
3815 	struct mlx5_priv *priv = dev->data->dev_private;
3816 	struct mlx5_dev_config *config = &priv->config;
3817 	const struct rte_flow_action *qrss = NULL;
3818 	struct rte_flow_action *ext_actions = NULL;
3819 	struct mlx5_flow *dev_flow = NULL;
3820 	uint32_t qrss_id = 0;
3821 	int mtr_sfx = 0;
3822 	size_t act_size;
3823 	int actions_n;
3824 	int ret;
3825 
3826 	/* Check whether extensive metadata feature is engaged. */
3827 	if (!config->dv_flow_en ||
3828 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3829 	    !mlx5_flow_ext_mreg_supported(dev))
3830 		return flow_create_split_inner(dev, flow, NULL, prefix_layers,
3831 					       attr, items, actions, external,
3832 					       error);
3833 	actions_n = flow_parse_qrss_action(actions, &qrss);
3834 	if (qrss) {
3835 		/* Exclude hairpin flows from splitting. */
3836 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3837 			const struct rte_flow_action_queue *queue;
3838 
3839 			queue = qrss->conf;
3840 			if (mlx5_rxq_get_type(dev, queue->index) ==
3841 			    MLX5_RXQ_TYPE_HAIRPIN)
3842 				qrss = NULL;
3843 		} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
3844 			const struct rte_flow_action_rss *rss;
3845 
3846 			rss = qrss->conf;
3847 			if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
3848 			    MLX5_RXQ_TYPE_HAIRPIN)
3849 				qrss = NULL;
3850 		}
3851 	}
3852 	if (qrss) {
3853 		/* Check if it is in meter suffix table. */
3854 		mtr_sfx = attr->group == (attr->transfer ?
3855 			  (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
3856 			  MLX5_FLOW_TABLE_LEVEL_SUFFIX);
3857 		/*
3858 		 * Q/RSS action on NIC Rx should be split in order to pass by
3859 		 * the mreg copy table (RX_CP_TBL) and then it jumps to the
3860 		 * action table (RX_ACT_TBL) which has the split Q/RSS action.
3861 		 */
3862 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3863 			   sizeof(struct mlx5_rte_flow_action_set_tag) +
3864 			   sizeof(struct rte_flow_action_jump);
3865 		ext_actions = rte_zmalloc(__func__, act_size, 0);
3866 		if (!ext_actions)
3867 			return rte_flow_error_set(error, ENOMEM,
3868 						  RTE_FLOW_ERROR_TYPE_ACTION,
3869 						  NULL, "no memory to split "
3870 						  "metadata flow");
3871 		/*
3872 		 * If we are the meter suffix flow, the tag already exists.
3873 		 * Set the tag action to void.
3874 		 */
3875 		if (mtr_sfx)
3876 			ext_actions[qrss - actions].type =
3877 						RTE_FLOW_ACTION_TYPE_VOID;
3878 		else
3879 			ext_actions[qrss - actions].type =
3880 						MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3881 		/*
3882 		 * Create the new actions list with removed Q/RSS action
3883 		 * and appended set tag and jump to register copy table
3884 		 * (RX_CP_TBL). We should preallocate unique tag ID here
3885 		 * in advance, because it is needed for set tag action.
3886 		 */
3887 		qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
3888 						    qrss, actions_n, error);
3889 		if (!mtr_sfx && !qrss_id) {
3890 			ret = -rte_errno;
3891 			goto exit;
3892 		}
3893 	} else if (attr->egress && !attr->transfer) {
3894 		/*
3895 		 * All the actions on NIC Tx should have a metadata register
3896 		 * copy action to copy reg_a from WQE to reg_c[meta]
3897 		 */
3898 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3899 			   sizeof(struct mlx5_flow_action_copy_mreg);
3900 		ext_actions = rte_zmalloc(__func__, act_size, 0);
3901 		if (!ext_actions)
3902 			return rte_flow_error_set(error, ENOMEM,
3903 						  RTE_FLOW_ERROR_TYPE_ACTION,
3904 						  NULL, "no memory to split "
3905 						  "metadata flow");
3906 		/* Create the action list appended with copy register. */
3907 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
3908 					     actions_n, error);
3909 		if (ret < 0)
3910 			goto exit;
3911 	}
3912 	/* Add the unmodified original or prefix subflow. */
3913 	ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
3914 				      items, ext_actions ? ext_actions :
3915 				      actions, external, error);
3916 	if (ret < 0)
3917 		goto exit;
3918 	MLX5_ASSERT(dev_flow);
3919 	if (qrss) {
3920 		const struct rte_flow_attr q_attr = {
3921 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3922 			.ingress = 1,
3923 		};
3924 		/* Internal PMD action to set register. */
3925 		struct mlx5_rte_flow_item_tag q_tag_spec = {
3926 			.data = qrss_id,
3927 			.id = 0,
3928 		};
3929 		struct rte_flow_item q_items[] = {
3930 			{
3931 				.type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3932 				.spec = &q_tag_spec,
3933 				.last = NULL,
3934 				.mask = NULL,
3935 			},
3936 			{
3937 				.type = RTE_FLOW_ITEM_TYPE_END,
3938 			},
3939 		};
3940 		struct rte_flow_action q_actions[] = {
3941 			{
3942 				.type = qrss->type,
3943 				.conf = qrss->conf,
3944 			},
3945 			{
3946 				.type = RTE_FLOW_ACTION_TYPE_END,
3947 			},
3948 		};
3949 		uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
3950 
3951 		/*
3952 		 * Configure the tag item only if there is no meter subflow.
3953 		 * Since tag is already marked in the meter suffix subflow
3954 		 * we can just use the meter suffix items as is.
3955 		 */
3956 		if (qrss_id) {
3957 			/* Not meter subflow. */
3958 			MLX5_ASSERT(!mtr_sfx);
3959 			/*
3960 			 * Put the unique id in the prefix flow because it
3961 			 * is destroyed after the suffix flow. The id is
3962 			 * freed once no actual flows use it, and only then
3963 			 * does identifier reallocation become possible (for
3964 			 * example, for other flows in other threads).
3965 			 */
3966 			dev_flow->qrss_id = qrss_id;
3967 			qrss_id = 0;
3968 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
3969 						   error);
3970 			if (ret < 0)
3971 				goto exit;
3972 			q_tag_spec.id = ret;
3973 		}
3974 		dev_flow = NULL;
3975 		/* Add suffix subflow to execute Q/RSS. */
3976 		ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
3977 					      &q_attr, mtr_sfx ? items :
3978 					      q_items, q_actions,
3979 					      external, error);
3980 		if (ret < 0)
3981 			goto exit;
3982 		MLX5_ASSERT(dev_flow);
3983 	}
3984 
3985 exit:
3986 	/*
3987 	 * We do not destroy the partially created sub_flows in case of error.
3988 	 * These ones are included into parent flow list and will be destroyed
3989 	 * by flow_drv_destroy.
3990 	 */
3991 	flow_qrss_free_id(dev, qrss_id);
3992 	rte_free(ext_actions);
3993 	return ret;
3994 }
3995 
3996 /**
3997  * The splitting for meter feature.
3998  *
3999  * - The meter flow will be split into two flows: a prefix flow and
4000  *   a suffix flow. The packets make sense only if they pass the
4001  *   prefix meter action.
4002  *
4003  * - Reg_C_5 is used for the packet to match between the prefix and
4004  *   suffix flows.
4005  *
4006  * @param dev
4007  *   Pointer to Ethernet device.
4008  * @param[in] flow
4009  *   Parent flow structure pointer.
4010  * @param[in] attr
4011  *   Flow rule attributes.
4012  * @param[in] items
4013  *   Pattern specification (list terminated by the END pattern item).
4014  * @param[in] actions
4015  *   Associated actions (list terminated by the END action).
4016  * @param[in] external
4017  *   This flow rule is created by a request external to the PMD.
4018  * @param[out] error
4019  *   Perform verbose error reporting if not NULL.
4020  * @return
4021  *   0 on success, negative value otherwise
4022  */
4023 static int
4024 flow_create_split_meter(struct rte_eth_dev *dev,
4025 			   struct rte_flow *flow,
4026 			   const struct rte_flow_attr *attr,
4027 			   const struct rte_flow_item items[],
4028 			   const struct rte_flow_action actions[],
4029 			   bool external, struct rte_flow_error *error)
4030 {
4031 	struct mlx5_priv *priv = dev->data->dev_private;
4032 	struct rte_flow_action *sfx_actions = NULL;
4033 	struct rte_flow_action *pre_actions = NULL;
4034 	struct rte_flow_item *sfx_items = NULL;
4035 	struct mlx5_flow *dev_flow = NULL;
4036 	struct rte_flow_attr sfx_attr = *attr;
4037 	uint32_t mtr = 0;
4038 	uint32_t mtr_tag_id = 0;
4039 	size_t act_size;
4040 	size_t item_size;
4041 	int actions_n = 0;
4042 	int ret;
4043 
4044 	if (priv->mtr_en)
4045 		actions_n = flow_check_meter_action(actions, &mtr);
4046 	if (mtr) {
4047 		/* The five prefix actions: meter, decap, encap, tag, end. */
4048 		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
4049 			   sizeof(struct mlx5_rte_flow_action_set_tag);
4050 		/* tag, vlan, port id, end. */
4051 #define METER_SUFFIX_ITEM 4
4052 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
4053 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
4054 		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4055 		if (!sfx_actions)
4056 			return rte_flow_error_set(error, ENOMEM,
4057 						  RTE_FLOW_ERROR_TYPE_ACTION,
4058 						  NULL, "no memory to split "
4059 						  "meter flow");
4060 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4061 			     act_size);
4062 		pre_actions = sfx_actions + actions_n;
4063 		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
4064 						   actions, sfx_actions,
4065 						   pre_actions);
4066 		if (!mtr_tag_id) {
4067 			ret = -rte_errno;
4068 			goto exit;
4069 		}
4070 		/* Add the prefix subflow. */
4071 		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
4072 					      items, pre_actions, external,
4073 					      error);
4074 		if (ret) {
4075 			ret = -rte_errno;
4076 			goto exit;
4077 		}
4078 		dev_flow->mtr_flow_id = mtr_tag_id;
4079 		/* Setting the sfx group attr. */
4080 		sfx_attr.group = sfx_attr.transfer ?
4081 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4082 				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4083 	}
4084 	/* Add the suffix subflow (the original flow if there is no meter). */
4085 	ret = flow_create_split_metadata(dev, flow, dev_flow ?
4086 					 flow_get_prefix_layer_flags(dev_flow) :
4087 					 0, &sfx_attr,
4088 					 sfx_items ? sfx_items : items,
4089 					 sfx_actions ? sfx_actions : actions,
4090 					 external, error);
4091 exit:
4092 	if (sfx_actions)
4093 		rte_free(sfx_actions);
4094 	return ret;
4095 }
4096 
4097 /**
4098  * Split the flow to subflow set. The splitters might be linked
4099  * in the chain, like this:
4100  * flow_create_split_outer() calls:
4101  *   flow_create_split_meter() calls:
4102  *     flow_create_split_metadata(meter_subflow_0) calls:
4103  *       flow_create_split_inner(metadata_subflow_0)
4104  *       flow_create_split_inner(metadata_subflow_1)
4105  *       flow_create_split_inner(metadata_subflow_2)
4106  *     flow_create_split_metadata(meter_subflow_1) calls:
4107  *       flow_create_split_inner(metadata_subflow_0)
4108  *       flow_create_split_inner(metadata_subflow_1)
4109  *       flow_create_split_inner(metadata_subflow_2)
4110  *
4111  * This provides a flexible way to add new levels of flow splitting.
4112  * All successfully created subflows are included in the parent
4113  * flow dev_flow list.
4114  *
4115  * @param dev
4116  *   Pointer to Ethernet device.
4117  * @param[in] flow
4118  *   Parent flow structure pointer.
4119  * @param[in] attr
4120  *   Flow rule attributes.
4121  * @param[in] items
4122  *   Pattern specification (list terminated by the END pattern item).
4123  * @param[in] actions
4124  *   Associated actions (list terminated by the END action).
4125  * @param[in] external
4126  *   This flow rule is created by a request external to the PMD.
4127  * @param[out] error
4128  *   Perform verbose error reporting if not NULL.
4129  * @return
4130  *   0 on success, negative value otherwise
4131  */
4132 static int
4133 flow_create_split_outer(struct rte_eth_dev *dev,
4134 			struct rte_flow *flow,
4135 			const struct rte_flow_attr *attr,
4136 			const struct rte_flow_item items[],
4137 			const struct rte_flow_action actions[],
4138 			bool external, struct rte_flow_error *error)
4139 {
4140 	int ret;
4141 
4142 	ret = flow_create_split_meter(dev, flow, attr, items,
4143 					 actions, external, error);
4144 	MLX5_ASSERT(ret <= 0);
4145 	return ret;
4146 }
4147 
4148 /**
4149  * Create a flow and add it to @p list.
4150  *
4151  * @param dev
4152  *   Pointer to Ethernet device.
4153  * @param list
4154  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4155  *   no list insertion occurs; the flow is just created and
4156  *   it is the caller's responsibility to track the
4157  *   created flow.
4158  * @param[in] attr
4159  *   Flow rule attributes.
4160  * @param[in] items
4161  *   Pattern specification (list terminated by the END pattern item).
4162  * @param[in] actions
4163  *   Associated actions (list terminated by the END action).
4164  * @param[in] external
4165  *   This flow rule is created by a request external to the PMD.
4166  * @param[out] error
4167  *   Perform verbose error reporting if not NULL.
4168  *
4169  * @return
4170  *   A flow on success, NULL otherwise and rte_errno is set.
4171  */
4172 static struct rte_flow *
4173 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
4174 		 const struct rte_flow_attr *attr,
4175 		 const struct rte_flow_item items[],
4176 		 const struct rte_flow_action actions[],
4177 		 bool external, struct rte_flow_error *error)
4178 {
4179 	struct mlx5_priv *priv = dev->data->dev_private;
4180 	struct rte_flow *flow = NULL;
4181 	struct mlx5_flow *dev_flow;
4182 	const struct rte_flow_action_rss *rss;
4183 	union {
4184 		struct rte_flow_expand_rss buf;
4185 		uint8_t buffer[2048];
4186 	} expand_buffer;
4187 	union {
4188 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4189 		uint8_t buffer[2048];
4190 	} actions_rx;
4191 	union {
4192 		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4193 		uint8_t buffer[2048];
4194 	} actions_hairpin_tx;
4195 	union {
4196 		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
4197 		uint8_t buffer[2048];
4198 	} items_tx;
4199 	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
4200 	const struct rte_flow_action *p_actions_rx = actions;
4201 	uint32_t i;
4202 	uint32_t flow_size;
4203 	int hairpin_flow = 0;
4204 	uint32_t hairpin_id = 0;
4205 	struct rte_flow_attr attr_tx = { .priority = 0 };
4206 	int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
4207 				    error);
4208 
4209 	if (ret < 0)
4210 		return NULL;
4211 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
4212 	if (hairpin_flow > 0) {
4213 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
4214 			rte_errno = EINVAL;
4215 			return NULL;
4216 		}
4217 		flow_hairpin_split(dev, actions, actions_rx.actions,
4218 				   actions_hairpin_tx.actions, items_tx.items,
4219 				   &hairpin_id);
4220 		p_actions_rx = actions_rx.actions;
4221 	}
4222 	flow_size = sizeof(struct rte_flow);
4223 	rss = flow_get_rss_action(p_actions_rx);
4224 	if (rss)
4225 		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
4226 					    sizeof(void *));
4227 	else
4228 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
4229 	flow = rte_calloc(__func__, 1, flow_size, 0);
4230 	if (!flow) {
4231 		rte_errno = ENOMEM;
4232 		goto error_before_flow;
4233 	}
4234 	flow->drv_type = flow_get_drv_type(dev, attr);
4235 	if (hairpin_id != 0)
4236 		flow->hairpin_flow_id = hairpin_id;
4237 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
4238 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
4239 	flow->rss.queue = (void *)(flow + 1);
4240 	if (rss) {
4241 		/*
4242 		 * The following information is required by
4243 		 * mlx5_flow_hashfields_adjust() in advance.
4244 		 */
4245 		flow->rss.level = rss->level;
4246 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
4247 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4248 	}
4249 	LIST_INIT(&flow->dev_flows);
4250 	if (rss && rss->types) {
4251 		unsigned int graph_root;
4252 
4253 		graph_root = find_graph_root(items, rss->level);
4254 		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
4255 					  items, rss->types,
4256 					  mlx5_support_expansion,
4257 					  graph_root);
4258 		MLX5_ASSERT(ret > 0 &&
4259 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
4260 	} else {
4261 		buf->entries = 1;
4262 		buf->entry[0].pattern = (void *)(uintptr_t)items;
4263 	}
4264 	for (i = 0; i < buf->entries; ++i) {
4265 		/*
4266 		 * The splitter may create multiple dev_flows,
4267 		 * depending on configuration. In the simplest
4268 		 * case it just creates unmodified original flow.
4269 		 */
4270 		ret = flow_create_split_outer(dev, flow, attr,
4271 					      buf->entry[i].pattern,
4272 					      p_actions_rx, external,
4273 					      error);
4274 		if (ret < 0)
4275 			goto error;
4276 	}
4277 	/* Create the tx flow. */
4278 	if (hairpin_flow) {
4279 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
4280 		attr_tx.ingress = 0;
4281 		attr_tx.egress = 1;
4282 		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
4283 					    actions_hairpin_tx.actions, error);
4284 		if (!dev_flow)
4285 			goto error;
4286 		dev_flow->flow = flow;
4287 		dev_flow->external = 0;
4288 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
4289 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
4290 					 items_tx.items,
4291 					 actions_hairpin_tx.actions, error);
4292 		if (ret < 0)
4293 			goto error;
4294 	}
4295 	/*
4296 	 * Update the metadata register copy table. If extensive
4297 	 * metadata feature is enabled and registers are supported
4298 	 * we might create the extra rte_flow for each unique
4299 	 * MARK/FLAG action ID.
4300 	 *
4301 	 * The table is updated for ingress Flows only, because
4302 	 * the egress Flows belong to the different device and
4303 	 * copy table should be updated in peer NIC Rx domain.
4304 	 */
4305 	if (attr->ingress &&
4306 	    (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
4307 		ret = flow_mreg_update_copy_table(dev, flow, actions, error);
4308 		if (ret)
4309 			goto error;
4310 	}
4311 	if (dev->data->dev_started) {
4312 		ret = flow_drv_apply(dev, flow, error);
4313 		if (ret < 0)
4314 			goto error;
4315 	}
4316 	if (list)
4317 		TAILQ_INSERT_TAIL(list, flow, next);
4318 	flow_rxq_flags_set(dev, flow);
4319 	return flow;
4320 error_before_flow:
4321 	if (hairpin_id)
4322 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4323 				     hairpin_id);
4324 	return NULL;
4325 error:
4326 	MLX5_ASSERT(flow);
4327 	flow_mreg_del_copy_action(dev, flow);
4328 	ret = rte_errno; /* Save rte_errno before cleanup. */
4329 	if (flow->hairpin_flow_id)
4330 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4331 				     flow->hairpin_flow_id);
4332 	MLX5_ASSERT(flow);
4333 	flow_drv_destroy(dev, flow);
4334 	rte_free(flow);
4335 	rte_errno = ret; /* Restore rte_errno. */
4336 	return NULL;
4337 }
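
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard and
 * the example_* name are hypothetical): the RSS expansion step used by
 * flow_list_create(). For "eth / ipv4 / end" with ETH_RSS_UDP the buffer
 * typically ends up with both the original pattern and
 * "eth / ipv4 / udp", each becoming its own subflow.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static void
example_rss_expand(const struct rte_flow_item items[])
{
	union {
		struct rte_flow_expand_rss buf;
		uint8_t buffer[2048];
	} expand_buffer;
	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
	uint32_t i;
	int ret;

	ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), items,
				  ETH_RSS_UDP, mlx5_support_expansion,
				  MLX5_EXPANSION_ROOT);
	if (ret <= 0)
		return;
	for (i = 0; i < buf->entries; ++i)
		/* Each expanded pattern becomes one subflow. */
		(void)buf->entry[i].pattern;
}
#endif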
4338 
4339 /**
4340  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
4341  * incoming packets to table 1.
4342  *
4343  * Other flow rules, requested for group n, will be created in
4344  * e-switch table n+1.
4345  * A jump action to e-switch group n will jump to e-switch table n+1.
4346  *
4347  * Used when working in switchdev mode, to utilise advantages of table 1
4348  * and above.
4349  *
4350  * @param dev
4351  *   Pointer to Ethernet device.
4352  *
4353  * @return
4354  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
4355  */
4356 struct rte_flow *
4357 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
4358 {
4359 	const struct rte_flow_attr attr = {
4360 		.group = 0,
4361 		.priority = 0,
4362 		.ingress = 1,
4363 		.egress = 0,
4364 		.transfer = 1,
4365 	};
4366 	const struct rte_flow_item pattern = {
4367 		.type = RTE_FLOW_ITEM_TYPE_END,
4368 	};
4369 	struct rte_flow_action_jump jump = {
4370 		.group = 1,
4371 	};
4372 	const struct rte_flow_action actions[] = {
4373 		{
4374 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
4375 			.conf = &jump,
4376 		},
4377 		{
4378 			.type = RTE_FLOW_ACTION_TYPE_END,
4379 		},
4380 	};
4381 	struct mlx5_priv *priv = dev->data->dev_private;
4382 	struct rte_flow_error error;
4383 
4384 	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
4385 				actions, false, &error);
4386 }
4387 
4388 /**
4389  * Create a flow.
4390  *
4391  * @see rte_flow_create()
4392  * @see rte_flow_ops
4393  */
4394 struct rte_flow *
4395 mlx5_flow_create(struct rte_eth_dev *dev,
4396 		 const struct rte_flow_attr *attr,
4397 		 const struct rte_flow_item items[],
4398 		 const struct rte_flow_action actions[],
4399 		 struct rte_flow_error *error)
4400 {
4401 	struct mlx5_priv *priv = dev->data->dev_private;
4402 
4403 	return flow_list_create(dev, &priv->flows,
4404 				attr, items, actions, true, error);
4405 }
4406 
4407 /**
4408  * Destroy a flow in a list.
4409  *
4410  * @param dev
4411  *   Pointer to Ethernet device.
4412  * @param list
4413  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4414  *   there is no flow removal from the list.
4415  * @param[in] flow
4416  *   Flow to destroy.
4417  */
4418 static void
4419 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
4420 		  struct rte_flow *flow)
4421 {
4422 	struct mlx5_priv *priv = dev->data->dev_private;
4423 
4424 	/*
4425 	 * Update RX queue flags only if port is started, otherwise it is
4426 	 * already clean.
4427 	 */
4428 	if (dev->data->dev_started)
4429 		flow_rxq_flags_trim(dev, flow);
4430 	if (flow->hairpin_flow_id)
4431 		mlx5_flow_id_release(priv->sh->flow_id_pool,
4432 				     flow->hairpin_flow_id);
4433 	flow_drv_destroy(dev, flow);
4434 	if (list)
4435 		TAILQ_REMOVE(list, flow, next);
4436 	flow_mreg_del_copy_action(dev, flow);
4437 	rte_free(flow->fdir);
4438 	rte_free(flow);
4439 }
4440 
4441 /**
4442  * Destroy all flows.
4443  *
4444  * @param dev
4445  *   Pointer to Ethernet device.
4446  * @param list
4447  *   Pointer to a TAILQ flow list.
4448  */
4449 void
4450 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
4451 {
4452 	while (!TAILQ_EMPTY(list)) {
4453 		struct rte_flow *flow;
4454 
4455 		flow = TAILQ_FIRST(list);
4456 		flow_list_destroy(dev, list, flow);
4457 	}
4458 }
4459 
4460 /**
4461  * Remove all flows.
4462  *
4463  * @param dev
4464  *   Pointer to Ethernet device.
4465  * @param list
4466  *   Pointer to a TAILQ flow list.
4467  */
4468 void
4469 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
4470 {
4471 	struct rte_flow *flow;
4472 
4473 	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
4474 		flow_drv_remove(dev, flow);
4475 		flow_mreg_stop_copy_action(dev, flow);
4476 	}
4477 	flow_mreg_del_default_copy_action(dev);
4478 	flow_rxq_flags_clear(dev);
4479 }
4480 
4481 /**
4482  * Add all flows.
4483  *
4484  * @param dev
4485  *   Pointer to Ethernet device.
4486  * @param list
4487  *   Pointer to a TAILQ flow list.
4488  *
4489  * @return
4490  *   0 on success, a negative errno value otherwise and rte_errno is set.
4491  */
4492 int
4493 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
4494 {
4495 	struct rte_flow *flow;
4496 	struct rte_flow_error error;
4497 	int ret = 0;
4498 
4499 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
4500 	ret = flow_mreg_add_default_copy_action(dev, &error);
4501 	if (ret < 0)
4502 		return -rte_errno;
4503 	/* Apply Flows created by application. */
4504 	TAILQ_FOREACH(flow, list, next) {
4505 		ret = flow_mreg_start_copy_action(dev, flow);
4506 		if (ret < 0)
4507 			goto error;
4508 		ret = flow_drv_apply(dev, flow, &error);
4509 		if (ret < 0)
4510 			goto error;
4511 		flow_rxq_flags_set(dev, flow);
4512 	}
4513 	return 0;
4514 error:
4515 	ret = rte_errno; /* Save rte_errno before cleanup. */
4516 	mlx5_flow_stop(dev, list);
4517 	rte_errno = ret; /* Restore rte_errno. */
4518 	return -rte_errno;
4519 }
4520 
4521 /**
4522  * Verify the flow list is empty.
4523  *
4524  * @param dev
4525  *  Pointer to Ethernet device.
4526  *
4527  * @return
4528  *   The number of flows not released.
4528  */
4529 int
4530 mlx5_flow_verify(struct rte_eth_dev *dev)
4531 {
4532 	struct mlx5_priv *priv = dev->data->dev_private;
4533 	struct rte_flow *flow;
4534 	int ret = 0;
4535 
4536 	TAILQ_FOREACH(flow, &priv->flows, next) {
4537 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
4538 			dev->data->port_id, (void *)flow);
4539 		++ret;
4540 	}
4541 	return ret;
4542 }
4543 
4544 /**
4545  * Enable default hairpin egress flow.
4546  *
4547  * @param dev
4548  *   Pointer to Ethernet device.
4549  * @param queue
4550  *   The queue index.
4551  *
4552  * @return
4553  *   0 on success, a negative errno value otherwise and rte_errno is set.
4554  */
4555 int
4556 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
4557 			    uint32_t queue)
4558 {
4559 	struct mlx5_priv *priv = dev->data->dev_private;
4560 	const struct rte_flow_attr attr = {
4561 		.egress = 1,
4562 		.priority = 0,
4563 	};
4564 	struct mlx5_rte_flow_item_tx_queue queue_spec = {
4565 		.queue = queue,
4566 	};
4567 	struct mlx5_rte_flow_item_tx_queue queue_mask = {
4568 		.queue = UINT32_MAX,
4569 	};
4570 	struct rte_flow_item items[] = {
4571 		{
4572 			.type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
4573 			.spec = &queue_spec,
4574 			.last = NULL,
4575 			.mask = &queue_mask,
4576 		},
4577 		{
4578 			.type = RTE_FLOW_ITEM_TYPE_END,
4579 		},
4580 	};
4581 	struct rte_flow_action_jump jump = {
4582 		.group = MLX5_HAIRPIN_TX_TABLE,
4583 	};
4584 	struct rte_flow_action actions[2];
4585 	struct rte_flow *flow;
4586 	struct rte_flow_error error;
4587 
4588 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
4589 	actions[0].conf = &jump;
4590 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
4591 	flow = flow_list_create(dev, &priv->ctrl_flows,
4592 				&attr, items, actions, false, &error);
4593 	if (!flow) {
4594 		DRV_LOG(DEBUG,
4595 			"Failed to create ctrl flow: rte_errno(%d),"
4596 			" type(%d), message(%s)",
4597 			rte_errno, error.type,
4598 			error.message ? error.message : " (no stated reason)");
4599 		return -rte_errno;
4600 	}
4601 	return 0;
4602 }
4603 
4604 /**
4605  * Enable a control flow configured from the control plane.
4606  *
4607  * @param dev
4608  *   Pointer to Ethernet device.
4609  * @param eth_spec
4610  *   An Ethernet flow spec to apply.
4611  * @param eth_mask
4612  *   An Ethernet flow mask to apply.
4613  * @param vlan_spec
4614  *   A VLAN flow spec to apply.
4615  * @param vlan_mask
4616  *   A VLAN flow mask to apply.
4617  *
4618  * @return
4619  *   0 on success, a negative errno value otherwise and rte_errno is set.
4620  */
4621 int
4622 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
4623 		    struct rte_flow_item_eth *eth_spec,
4624 		    struct rte_flow_item_eth *eth_mask,
4625 		    struct rte_flow_item_vlan *vlan_spec,
4626 		    struct rte_flow_item_vlan *vlan_mask)
4627 {
4628 	struct mlx5_priv *priv = dev->data->dev_private;
4629 	const struct rte_flow_attr attr = {
4630 		.ingress = 1,
4631 		.priority = MLX5_FLOW_PRIO_RSVD,
4632 	};
4633 	struct rte_flow_item items[] = {
4634 		{
4635 			.type = RTE_FLOW_ITEM_TYPE_ETH,
4636 			.spec = eth_spec,
4637 			.last = NULL,
4638 			.mask = eth_mask,
4639 		},
4640 		{
4641 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
4642 					      RTE_FLOW_ITEM_TYPE_END,
4643 			.spec = vlan_spec,
4644 			.last = NULL,
4645 			.mask = vlan_mask,
4646 		},
4647 		{
4648 			.type = RTE_FLOW_ITEM_TYPE_END,
4649 		},
4650 	};
4651 	uint16_t queue[priv->reta_idx_n];
4652 	struct rte_flow_action_rss action_rss = {
4653 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4654 		.level = 0,
4655 		.types = priv->rss_conf.rss_hf,
4656 		.key_len = priv->rss_conf.rss_key_len,
4657 		.queue_num = priv->reta_idx_n,
4658 		.key = priv->rss_conf.rss_key,
4659 		.queue = queue,
4660 	};
4661 	struct rte_flow_action actions[] = {
4662 		{
4663 			.type = RTE_FLOW_ACTION_TYPE_RSS,
4664 			.conf = &action_rss,
4665 		},
4666 		{
4667 			.type = RTE_FLOW_ACTION_TYPE_END,
4668 		},
4669 	};
4670 	struct rte_flow *flow;
4671 	struct rte_flow_error error;
4672 	unsigned int i;
4673 
4674 	if (!priv->reta_idx_n || !priv->rxqs_n)
4675 		return 0;
4677 	for (i = 0; i != priv->reta_idx_n; ++i)
4678 		queue[i] = (*priv->reta_idx)[i];
4679 	flow = flow_list_create(dev, &priv->ctrl_flows,
4680 				&attr, items, actions, false, &error);
4681 	if (!flow)
4682 		return -rte_errno;
4683 	return 0;
4684 }
4685 
4686 /**
4687  * Enable a control flow configured from the control plane.
4688  *
4689  * @param dev
4690  *   Pointer to Ethernet device.
4691  * @param eth_spec
4692  *   An Ethernet flow spec to apply.
4693  * @param eth_mask
4694  *   An Ethernet flow mask to apply.
4695  *
4696  * @return
4697  *   0 on success, a negative errno value otherwise and rte_errno is set.
4698  */
4699 int
4700 mlx5_ctrl_flow(struct rte_eth_dev *dev,
4701 	       struct rte_flow_item_eth *eth_spec,
4702 	       struct rte_flow_item_eth *eth_mask)
4703 {
4704 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
4705 }
4706 
4707 /**
4708  * Destroy a flow.
4709  *
4710  * @see rte_flow_destroy()
4711  * @see rte_flow_ops
4712  */
4713 int
4714 mlx5_flow_destroy(struct rte_eth_dev *dev,
4715 		  struct rte_flow *flow,
4716 		  struct rte_flow_error *error __rte_unused)
4717 {
4718 	struct mlx5_priv *priv = dev->data->dev_private;
4719 
4720 	flow_list_destroy(dev, &priv->flows, flow);
4721 	return 0;
4722 }
4723 
4724 /**
4725  * Destroy all flows.
4726  *
4727  * @see rte_flow_flush()
4728  * @see rte_flow_ops
4729  */
4730 int
4731 mlx5_flow_flush(struct rte_eth_dev *dev,
4732 		struct rte_flow_error *error __rte_unused)
4733 {
4734 	struct mlx5_priv *priv = dev->data->dev_private;
4735 
4736 	mlx5_flow_list_flush(dev, &priv->flows);
4737 	return 0;
4738 }
4739 
4740 /**
4741  * Isolated mode.
4742  *
4743  * @see rte_flow_isolate()
4744  * @see rte_flow_ops
4745  */
4746 int
4747 mlx5_flow_isolate(struct rte_eth_dev *dev,
4748 		  int enable,
4749 		  struct rte_flow_error *error)
4750 {
4751 	struct mlx5_priv *priv = dev->data->dev_private;
4752 
4753 	if (dev->data->dev_started) {
4754 		rte_flow_error_set(error, EBUSY,
4755 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4756 				   NULL,
4757 				   "port must be stopped first");
4758 		return -rte_errno;
4759 	}
4760 	priv->isolated = !!enable;
4761 	if (enable)
4762 		dev->dev_ops = &mlx5_dev_ops_isolate;
4763 	else
4764 		dev->dev_ops = &mlx5_dev_ops;
4765 	return 0;
4766 }
4767 
4768 /**
4769  * Query a flow.
4770  *
4771  * @see rte_flow_query()
4772  * @see rte_flow_ops
4773  */
4774 static int
4775 flow_drv_query(struct rte_eth_dev *dev,
4776 	       struct rte_flow *flow,
4777 	       const struct rte_flow_action *actions,
4778 	       void *data,
4779 	       struct rte_flow_error *error)
4780 {
4781 	const struct mlx5_flow_driver_ops *fops;
4782 	enum mlx5_flow_drv_type ftype = flow->drv_type;
4783 
4784 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
4785 	fops = flow_get_drv_ops(ftype);
4786 
4787 	return fops->query(dev, flow, actions, data, error);
4788 }
4789 
4790 /**
4791  * Query a flow.
4792  *
4793  * @see rte_flow_query()
4794  * @see rte_flow_ops
4795  */
4796 int
4797 mlx5_flow_query(struct rte_eth_dev *dev,
4798 		struct rte_flow *flow,
4799 		const struct rte_flow_action *actions,
4800 		void *data,
4801 		struct rte_flow_error *error)
4802 {
4803 	int ret;
4804 
4805 	ret = flow_drv_query(dev, flow, actions, data, error);
4806 	if (ret < 0)
4807 		return ret;
4808 	return 0;
4809 }
4810 
4811 /**
4812  * Convert a flow director filter to a generic flow.
4813  *
4814  * @param dev
4815  *   Pointer to Ethernet device.
4816  * @param fdir_filter
4817  *   Flow director filter to add.
4818  * @param attributes
4819  *   Generic flow parameters structure.
4820  *
4821  * @return
4822  *   0 on success, a negative errno value otherwise and rte_errno is set.
4823  */
4824 static int
4825 flow_fdir_filter_convert(struct rte_eth_dev *dev,
4826 			 const struct rte_eth_fdir_filter *fdir_filter,
4827 			 struct mlx5_fdir *attributes)
4828 {
4829 	struct mlx5_priv *priv = dev->data->dev_private;
4830 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
4831 	const struct rte_eth_fdir_masks *mask =
4832 		&dev->data->dev_conf.fdir_conf.mask;
4833 
4834 	/* Validate queue number. */
4835 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
4836 		DRV_LOG(ERR, "port %u invalid queue number %d",
4837 			dev->data->port_id, fdir_filter->action.rx_queue);
4838 		rte_errno = EINVAL;
4839 		return -rte_errno;
4840 	}
4841 	attributes->attr.ingress = 1;
4842 	attributes->items[0] = (struct rte_flow_item) {
4843 		.type = RTE_FLOW_ITEM_TYPE_ETH,
4844 		.spec = &attributes->l2,
4845 		.mask = &attributes->l2_mask,
4846 	};
4847 	switch (fdir_filter->action.behavior) {
4848 	case RTE_ETH_FDIR_ACCEPT:
4849 		attributes->actions[0] = (struct rte_flow_action){
4850 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
4851 			.conf = &attributes->queue,
4852 		};
4853 		break;
4854 	case RTE_ETH_FDIR_REJECT:
4855 		attributes->actions[0] = (struct rte_flow_action){
4856 			.type = RTE_FLOW_ACTION_TYPE_DROP,
4857 		};
4858 		break;
4859 	default:
4860 		DRV_LOG(ERR, "port %u invalid behavior %d",
4861 			dev->data->port_id,
4862 			fdir_filter->action.behavior);
4863 		rte_errno = ENOTSUP;
4864 		return -rte_errno;
4865 	}
4866 	attributes->queue.index = fdir_filter->action.rx_queue;
4867 	/* Handle L3. */
4868 	switch (fdir_filter->input.flow_type) {
4869 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
4870 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
4871 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
4872 		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
4873 			.src_addr = input->flow.ip4_flow.src_ip,
4874 			.dst_addr = input->flow.ip4_flow.dst_ip,
4875 			.time_to_live = input->flow.ip4_flow.ttl,
4876 			.type_of_service = input->flow.ip4_flow.tos,
4877 		};
4878 		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
4879 			.src_addr = mask->ipv4_mask.src_ip,
4880 			.dst_addr = mask->ipv4_mask.dst_ip,
4881 			.time_to_live = mask->ipv4_mask.ttl,
4882 			.type_of_service = mask->ipv4_mask.tos,
4883 			.next_proto_id = mask->ipv4_mask.proto,
4884 		};
4885 		attributes->items[1] = (struct rte_flow_item){
4886 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
4887 			.spec = &attributes->l3,
4888 			.mask = &attributes->l3_mask,
4889 		};
4890 		break;
4891 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
4892 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
4893 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
4894 		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
4895 			.hop_limits = input->flow.ipv6_flow.hop_limits,
4896 			.proto = input->flow.ipv6_flow.proto,
4897 		};
4898 
4899 		memcpy(attributes->l3.ipv6.hdr.src_addr,
4900 		       input->flow.ipv6_flow.src_ip,
4901 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
4902 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
4903 		       input->flow.ipv6_flow.dst_ip,
4904 		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
4905 		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
4906 		       mask->ipv6_mask.src_ip,
4907 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
4908 		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
4909 		       mask->ipv6_mask.dst_ip,
4910 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
4911 		attributes->items[1] = (struct rte_flow_item){
4912 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
4913 			.spec = &attributes->l3,
4914 			.mask = &attributes->l3_mask,
4915 		};
4916 		break;
4917 	default:
4918 		DRV_LOG(ERR, "port %u invalid flow type %d",
4919 			dev->data->port_id, fdir_filter->input.flow_type);
4920 		rte_errno = ENOTSUP;
4921 		return -rte_errno;
4922 	}
4923 	/* Handle L4. */
4924 	switch (fdir_filter->input.flow_type) {
4925 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
4926 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
4927 			.src_port = input->flow.udp4_flow.src_port,
4928 			.dst_port = input->flow.udp4_flow.dst_port,
4929 		};
4930 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
4931 			.src_port = mask->src_port_mask,
4932 			.dst_port = mask->dst_port_mask,
4933 		};
4934 		attributes->items[2] = (struct rte_flow_item){
4935 			.type = RTE_FLOW_ITEM_TYPE_UDP,
4936 			.spec = &attributes->l4,
4937 			.mask = &attributes->l4_mask,
4938 		};
4939 		break;
4940 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
4941 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
4942 			.src_port = input->flow.tcp4_flow.src_port,
4943 			.dst_port = input->flow.tcp4_flow.dst_port,
4944 		};
4945 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
4946 			.src_port = mask->src_port_mask,
4947 			.dst_port = mask->dst_port_mask,
4948 		};
4949 		attributes->items[2] = (struct rte_flow_item){
4950 			.type = RTE_FLOW_ITEM_TYPE_TCP,
4951 			.spec = &attributes->l4,
4952 			.mask = &attributes->l4_mask,
4953 		};
4954 		break;
4955 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
4956 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
4957 			.src_port = input->flow.udp6_flow.src_port,
4958 			.dst_port = input->flow.udp6_flow.dst_port,
4959 		};
4960 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
4961 			.src_port = mask->src_port_mask,
4962 			.dst_port = mask->dst_port_mask,
4963 		};
4964 		attributes->items[2] = (struct rte_flow_item){
4965 			.type = RTE_FLOW_ITEM_TYPE_UDP,
4966 			.spec = &attributes->l4,
4967 			.mask = &attributes->l4_mask,
4968 		};
4969 		break;
4970 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
4971 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
4972 			.src_port = input->flow.tcp6_flow.src_port,
4973 			.dst_port = input->flow.tcp6_flow.dst_port,
4974 		};
4975 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
4976 			.src_port = mask->src_port_mask,
4977 			.dst_port = mask->dst_port_mask,
4978 		};
4979 		attributes->items[2] = (struct rte_flow_item){
4980 			.type = RTE_FLOW_ITEM_TYPE_TCP,
4981 			.spec = &attributes->l4,
4982 			.mask = &attributes->l4_mask,
4983 		};
4984 		break;
4985 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
4986 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
4987 		break;
4988 	default:
4989 		DRV_LOG(ERR, "port %u invalid flow type %d",
4990 			dev->data->port_id, fdir_filter->input.flow_type);
4991 		rte_errno = ENOTSUP;
4992 		return -rte_errno;
4993 	}
4994 	return 0;
4995 }
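
/*
 * Illustrative sketch (compiled out; the MLX5_FLOW_DOC_EXAMPLES guard,
 * the example_* name and all field values are hypothetical): converting
 * a legacy IPv4/UDP flow director filter into the generic flow
 * representation consumed by flow_list_create().
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_fdir_convert(struct rte_eth_dev *dev)
{
	const struct rte_eth_fdir_filter fdir = {
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				.ip = {
					.src_ip = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
					.dst_ip = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
				},
				.src_port = RTE_BE16(1234),
				.dst_port = RTE_BE16(5678),
			},
		},
		.action = {
			.rx_queue = 0,
			.behavior = RTE_ETH_FDIR_ACCEPT,
		},
	};
	struct mlx5_fdir attributes = { .attr.group = 0 };

	/* Produces ETH / IPV4 / UDP items and a QUEUE action. */
	return flow_fdir_filter_convert(dev, &fdir, &attributes);
}
#endif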
4996 
4997 #define FLOW_FDIR_CMP(f1, f2, fld) \
4998 	memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
4999 
5000 /**
5001  * Compare two FDIR flows. If items and actions are identical, the two flows are
5002  * regarded as the same.
5003  *
5004  * @param dev
5005  *   Pointer to Ethernet device.
5006  * @param f1
5007  *   FDIR flow to compare.
5008  * @param f2
5009  *   FDIR flow to compare.
5010  *
5011  * @return
5012  *   Zero on match, 1 otherwise.
5013  */
5014 static int
5015 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
5016 {
5017 	if (FLOW_FDIR_CMP(f1, f2, attr) ||
5018 	    FLOW_FDIR_CMP(f1, f2, l2) ||
5019 	    FLOW_FDIR_CMP(f1, f2, l2_mask) ||
5020 	    FLOW_FDIR_CMP(f1, f2, l3) ||
5021 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
5022 	    FLOW_FDIR_CMP(f1, f2, l4) ||
5023 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
5024 	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
5025 		return 1;
5026 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
5027 	    FLOW_FDIR_CMP(f1, f2, queue))
5028 		return 1;
5029 	return 0;
5030 }
5031 
5032 /**
5033  * Search the device flow list to find a matching FDIR flow.
5034  *
5035  * @param dev
5036  *   Pointer to Ethernet device.
5037  * @param fdir_flow
5038  *   FDIR flow to lookup.
5039  *
5040  * @return
5041  *   Pointer of flow if found, NULL otherwise.
5042  */
5043 static struct rte_flow *
5044 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
5045 {
5046 	struct mlx5_priv *priv = dev->data->dev_private;
5047 	struct rte_flow *flow = NULL;
5048 
5049 	MLX5_ASSERT(fdir_flow);
5050 	TAILQ_FOREACH(flow, &priv->flows, next) {
5051 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
5052 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
5053 				dev->data->port_id, (void *)flow);
5054 			break;
5055 		}
5056 	}
5057 	return flow;
5058 }
5059 
5060 /**
5061  * Add new flow director filter and store it in list.
5062  *
5063  * @param dev
5064  *   Pointer to Ethernet device.
5065  * @param fdir_filter
5066  *   Flow director filter to add.
5067  *
5068  * @return
5069  *   0 on success, a negative errno value otherwise and rte_errno is set.
5070  */
5071 static int
5072 flow_fdir_filter_add(struct rte_eth_dev *dev,
5073 		     const struct rte_eth_fdir_filter *fdir_filter)
5074 {
5075 	struct mlx5_priv *priv = dev->data->dev_private;
5076 	struct mlx5_fdir *fdir_flow;
5077 	struct rte_flow *flow;
5078 	int ret;
5079 
5080 	fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
5081 	if (!fdir_flow) {
5082 		rte_errno = ENOMEM;
5083 		return -rte_errno;
5084 	}
5085 	ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
5086 	if (ret)
5087 		goto error;
5088 	flow = flow_fdir_filter_lookup(dev, fdir_flow);
5089 	if (flow) {
5090 		rte_errno = EEXIST;
5091 		goto error;
5092 	}
5093 	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
5094 				fdir_flow->items, fdir_flow->actions, true,
5095 				NULL);
5096 	if (!flow)
5097 		goto error;
5098 	MLX5_ASSERT(!flow->fdir);
5099 	flow->fdir = fdir_flow;
5100 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
5101 		dev->data->port_id, (void *)flow);
5102 	return 0;
5103 error:
5104 	rte_free(fdir_flow);
5105 	return -rte_errno;
5106 }
5107 
5108 /**
5109  * Delete specific filter.
5110  *
5111  * @param dev
5112  *   Pointer to Ethernet device.
5113  * @param fdir_filter
5114  *   Filter to be deleted.
5115  *
5116  * @return
5117  *   0 on success, a negative errno value otherwise and rte_errno is set.
5118  */
5119 static int
5120 flow_fdir_filter_delete(struct rte_eth_dev *dev,
5121 			const struct rte_eth_fdir_filter *fdir_filter)
5122 {
5123 	struct mlx5_priv *priv = dev->data->dev_private;
5124 	struct rte_flow *flow;
5125 	struct mlx5_fdir fdir_flow = {
5126 		.attr.group = 0,
5127 	};
5128 	int ret;
5129 
5130 	ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
5131 	if (ret)
5132 		return -rte_errno;
5133 	flow = flow_fdir_filter_lookup(dev, &fdir_flow);
5134 	if (!flow) {
5135 		rte_errno = ENOENT;
5136 		return -rte_errno;
5137 	}
5138 	flow_list_destroy(dev, &priv->flows, flow);
5139 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
5140 		dev->data->port_id, (void *)flow);
5141 	return 0;
5142 }
5143 
5144 /**
 * Update the Rx queue of a specific flow director filter.
5146  *
5147  * @param dev
5148  *   Pointer to Ethernet device.
5149  * @param fdir_filter
5150  *   Filter to be updated.
5151  *
5152  * @return
5153  *   0 on success, a negative errno value otherwise and rte_errno is set.
5154  */
5155 static int
5156 flow_fdir_filter_update(struct rte_eth_dev *dev,
5157 			const struct rte_eth_fdir_filter *fdir_filter)
5158 {
5159 	int ret;
5160 
5161 	ret = flow_fdir_filter_delete(dev, fdir_filter);
5162 	if (ret)
5163 		return ret;
5164 	return flow_fdir_filter_add(dev, fdir_filter);
5165 }
5166 
5167 /**
5168  * Flush all filters.
5169  *
5170  * @param dev
5171  *   Pointer to Ethernet device.
5172  */
5173 static void
5174 flow_fdir_filter_flush(struct rte_eth_dev *dev)
5175 {
5176 	struct mlx5_priv *priv = dev->data->dev_private;
5177 
5178 	mlx5_flow_list_flush(dev, &priv->flows);
5179 }
5180 
5181 /**
5182  * Get flow director information.
5183  *
5184  * @param dev
5185  *   Pointer to Ethernet device.
5186  * @param[out] fdir_info
5187  *   Resulting flow director information.
5188  */
5189 static void
5190 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
5191 {
5192 	struct rte_eth_fdir_masks *mask =
5193 		&dev->data->dev_conf.fdir_conf.mask;
5194 
5195 	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
5196 	fdir_info->guarant_spc = 0;
5197 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
5198 	fdir_info->max_flexpayload = 0;
5199 	fdir_info->flow_types_mask[0] = 0;
5200 	fdir_info->flex_payload_unit = 0;
5201 	fdir_info->max_flex_payload_segment_num = 0;
5202 	fdir_info->flex_payload_limit = 0;
5203 	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
5204 }
5205 
5206 /**
5207  * Deal with flow director operations.
5208  *
5209  * @param dev
5210  *   Pointer to Ethernet device.
5211  * @param filter_op
5212  *   Operation to perform.
5213  * @param arg
5214  *   Pointer to operation-specific structure.
5215  *
5216  * @return
5217  *   0 on success, a negative errno value otherwise and rte_errno is set.
5218  */
5219 static int
5220 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5221 		    void *arg)
5222 {
5223 	enum rte_fdir_mode fdir_mode =
5224 		dev->data->dev_conf.fdir_conf.mode;
5225 
5226 	if (filter_op == RTE_ETH_FILTER_NOP)
5227 		return 0;
5228 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
5229 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
5230 		DRV_LOG(ERR, "port %u flow director mode %d not supported",
5231 			dev->data->port_id, fdir_mode);
5232 		rte_errno = EINVAL;
5233 		return -rte_errno;
5234 	}
5235 	switch (filter_op) {
5236 	case RTE_ETH_FILTER_ADD:
5237 		return flow_fdir_filter_add(dev, arg);
5238 	case RTE_ETH_FILTER_UPDATE:
5239 		return flow_fdir_filter_update(dev, arg);
5240 	case RTE_ETH_FILTER_DELETE:
5241 		return flow_fdir_filter_delete(dev, arg);
5242 	case RTE_ETH_FILTER_FLUSH:
5243 		flow_fdir_filter_flush(dev);
5244 		break;
5245 	case RTE_ETH_FILTER_INFO:
5246 		flow_fdir_info_get(dev, arg);
5247 		break;
5248 	default:
5249 		DRV_LOG(DEBUG, "port %u unknown operation %u",
5250 			dev->data->port_id, filter_op);
5251 		rte_errno = EINVAL;
5252 		return -rte_errno;
5253 	}
5254 	return 0;
5255 }
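
/*
 * Illustrative usage sketch, not part of the driver: applications reach
 * flow_fdir_ctrl_func() through the legacy filter-control API. A
 * hypothetical filter steering IPv4/UDP packets for 192.168.0.1:4789 to
 * Rx queue 1 on port 0 could look like:
 *
 *	struct rte_eth_fdir_filter f = {
 *		.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *		.input.flow.udp4_flow.ip.dst_ip =
 *			rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *		.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789),
 *		.action.behavior = RTE_ETH_FDIR_ACCEPT,
 *		.action.rx_queue = 1,
 *	};
 *
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &f);
 */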
5256 
5257 /**
5258  * Manage filter operations.
5259  *
5260  * @param dev
5261  *   Pointer to Ethernet device structure.
5262  * @param filter_type
5263  *   Filter type.
5264  * @param filter_op
5265  *   Operation to perform.
5266  * @param arg
5267  *   Pointer to operation-specific structure.
5268  *
5269  * @return
5270  *   0 on success, a negative errno value otherwise and rte_errno is set.
5271  */
5272 int
5273 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
5274 		     enum rte_filter_type filter_type,
5275 		     enum rte_filter_op filter_op,
5276 		     void *arg)
5277 {
5278 	switch (filter_type) {
5279 	case RTE_ETH_FILTER_GENERIC:
5280 		if (filter_op != RTE_ETH_FILTER_GET) {
5281 			rte_errno = EINVAL;
5282 			return -rte_errno;
5283 		}
5284 		*(const void **)arg = &mlx5_flow_ops;
5285 		return 0;
5286 	case RTE_ETH_FILTER_FDIR:
5287 		return flow_fdir_ctrl_func(dev, filter_op, arg);
5288 	default:
5289 		DRV_LOG(ERR, "port %u filter type (%d) not supported",
5290 			dev->data->port_id, filter_type);
5291 		rte_errno = ENOTSUP;
5292 		return -rte_errno;
5293 	}
5294 	return 0;
5295 }
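
/*
 * Illustrative sketch, not part of the driver: the rte_flow layer obtains
 * the driver callbacks through the RTE_ETH_FILTER_GENERIC branch above,
 * roughly as follows:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	if (!rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				     RTE_ETH_FILTER_GET, &ops))
 *		... use ops->validate(), ops->create(), ops->destroy() ...
 */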
5296 
5297 /**
5298  * Create the needed meter and suffix tables.
5299  *
5300  * @param[in] dev
5301  *   Pointer to Ethernet device.
5302  * @param[in] fm
5303  *   Pointer to the flow meter.
5304  *
5305  * @return
5306  *   Pointer to table set on success, NULL otherwise.
5307  */
5308 struct mlx5_meter_domains_infos *
5309 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
5310 			  const struct mlx5_flow_meter *fm)
5311 {
5312 	const struct mlx5_flow_driver_ops *fops;
5313 
5314 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5315 	return fops->create_mtr_tbls(dev, fm);
5316 }
5317 
5318 /**
5319  * Destroy the meter table set.
5320  *
5321  * @param[in] dev
5322  *   Pointer to Ethernet device.
 * @param[in] tbls
5324  *   Pointer to the meter table set.
5325  *
5326  * @return
5327  *   0 on success.
5328  */
5329 int
5330 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
5331 			   struct mlx5_meter_domains_infos *tbls)
5332 {
5333 	const struct mlx5_flow_driver_ops *fops;
5334 
5335 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5336 	return fops->destroy_mtr_tbls(dev, tbls);
5337 }
5338 
5339 /**
5340  * Create policer rules.
5341  *
5342  * @param[in] dev
5343  *   Pointer to Ethernet device.
5344  * @param[in] fm
5345  *   Pointer to flow meter structure.
5346  * @param[in] attr
5347  *   Pointer to flow attributes.
5348  *
5349  * @return
5350  *   0 on success, -1 otherwise.
5351  */
5352 int
5353 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
5354 			       struct mlx5_flow_meter *fm,
5355 			       const struct rte_flow_attr *attr)
5356 {
5357 	const struct mlx5_flow_driver_ops *fops;
5358 
5359 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5360 	return fops->create_policer_rules(dev, fm, attr);
5361 }
5362 
5363 /**
5364  * Destroy policer rules.
5365  *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
5367  *   Pointer to flow meter structure.
5368  * @param[in] attr
5369  *   Pointer to flow attributes.
5370  *
5371  * @return
5372  *   0 on success, -1 otherwise.
5373  */
5374 int
5375 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
5376 				struct mlx5_flow_meter *fm,
5377 				const struct rte_flow_attr *attr)
5378 {
5379 	const struct mlx5_flow_driver_ops *fops;
5380 
5381 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5382 	return fops->destroy_policer_rules(dev, fm, attr);
5383 }
5384 
5385 /**
5386  * Allocate a counter.
5387  *
5388  * @param[in] dev
5389  *   Pointer to Ethernet device structure.
5390  *
5391  * @return
 *   Pointer to the allocated counter on success, NULL otherwise.
5393  */
5394 struct mlx5_flow_counter *
5395 mlx5_counter_alloc(struct rte_eth_dev *dev)
5396 {
5397 	const struct mlx5_flow_driver_ops *fops;
5398 	struct rte_flow_attr attr = { .transfer = 0 };
5399 
5400 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5401 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5402 		return fops->counter_alloc(dev);
5403 	}
	DRV_LOG(ERR,
		"port %u counter allocation is not supported.",
		dev->data->port_id);
5407 	return NULL;
5408 }
5409 
5410 /**
5411  * Free a counter.
5412  *
5413  * @param[in] dev
5414  *   Pointer to Ethernet device structure.
5415  * @param[in] cnt
 *   Pointer to the counter to be freed.
5417  */
5418 void
5419 mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
5420 {
5421 	const struct mlx5_flow_driver_ops *fops;
5422 	struct rte_flow_attr attr = { .transfer = 0 };
5423 
5424 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5425 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5426 		fops->counter_free(dev, cnt);
5427 		return;
5428 	}
	DRV_LOG(ERR,
		"port %u counter free is not supported.",
		dev->data->port_id);
5432 }
5433 
5434 /**
5435  * Query counter statistics.
5436  *
5437  * @param[in] dev
5438  *   Pointer to Ethernet device structure.
5439  * @param[in] cnt
5440  *   Pointer to counter to query.
5441  * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   Pointer to store the number of packets the counter matched.
 * @param[out] bytes
 *   Pointer to store the number of bytes the counter matched.
5447  *
5448  * @return
5449  *   0 on success, a negative errno value otherwise.
5450  */
5451 int
5452 mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt,
5453 		   bool clear, uint64_t *pkts, uint64_t *bytes)
5454 {
5455 	const struct mlx5_flow_driver_ops *fops;
5456 	struct rte_flow_attr attr = { .transfer = 0 };
5457 
5458 	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5459 		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5460 		return fops->counter_query(dev, cnt, clear, pkts, bytes);
5461 	}
	DRV_LOG(ERR,
		"port %u counter query is not supported.",
		dev->data->port_id);
5465 	return -ENOTSUP;
5466 }
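
/*
 * A minimal driver-internal usage sketch of the three counter helpers
 * above, assuming a DV-capable port:
 *
 *	struct mlx5_flow_counter *cnt = mlx5_counter_alloc(dev);
 *	uint64_t pkts = 0, bytes = 0;
 *
 *	if (cnt && !mlx5_counter_query(dev, cnt, false, &pkts, &bytes))
 *		DRV_LOG(DEBUG, "%" PRIu64 " packets / %" PRIu64 " bytes",
 *			pkts, bytes);
 *	if (cnt)
 *		mlx5_counter_free(dev, cnt);
 */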
5467 
5468 #define MLX5_POOL_QUERY_FREQ_US 1000000
5469 
5470 /**
5471  * Set the periodic procedure for triggering asynchronous batch queries for all
5472  * the counter pools.
5473  *
5474  * @param[in] sh
5475  *   Pointer to mlx5_ibv_shared object.
5476  */
5477 void
5478 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
5479 {
5480 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
5481 	uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
5482 	uint32_t us;
5483 
5484 	cont = MLX5_CNT_CONTAINER(sh, 1, 0);
5485 	pools_n += rte_atomic16_read(&cont->n_valid);
5486 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
5487 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
5488 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
5489 		sh->cmng.query_thread_on = 0;
5490 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
5491 	} else {
5492 		sh->cmng.query_thread_on = 1;
5493 	}
5494 }
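
/*
 * A worked example of the scheduling above: with MLX5_POOL_QUERY_FREQ_US
 * at 1000000 us and, say, 4 valid pools across both containers, the alarm
 * fires every 1000000 / 4 = 250000 us, so each pool is queried about once
 * per second. Note the division assumes at least one valid pool; this
 * should hold because the alarm is first armed only after a counter pool
 * has been created.
 */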
5495 
5496 /**
 * The periodic procedure for triggering asynchronous batch queries for all
 * the counter pools. This function is expected to be called from the host
 * thread.
5499  *
5500  * @param[in] arg
5501  *   The parameter for the alarm process.
5502  */
5503 void
5504 mlx5_flow_query_alarm(void *arg)
5505 {
5506 	struct mlx5_ibv_shared *sh = arg;
5507 	struct mlx5_devx_obj *dcs;
5508 	uint16_t offset;
5509 	int ret;
5510 	uint8_t batch = sh->cmng.batch;
5511 	uint16_t pool_index = sh->cmng.pool_index;
5512 	struct mlx5_pools_container *cont;
5513 	struct mlx5_pools_container *mcont;
5514 	struct mlx5_flow_counter_pool *pool;
5515 
5516 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
5517 		goto set_alarm;
5518 next_container:
5519 	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
5520 	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
	/* Check if a resize happened and the container must be flipped. */
5522 	if (cont != mcont) {
5523 		if (cont->pools) {
5524 			/* Clean the old container. */
5525 			rte_free(cont->pools);
5526 			memset(cont, 0, sizeof(*cont));
5527 		}
5528 		rte_cio_wmb();
		/* Flip the host container. */
5530 		sh->cmng.mhi[batch] ^= (uint8_t)2;
5531 		cont = mcont;
5532 	}
5533 	if (!cont->pools) {
		/* The case of two empty containers is unexpected. */
5535 		if (unlikely(batch != sh->cmng.batch))
5536 			goto set_alarm;
5537 		batch ^= 0x1;
5538 		pool_index = 0;
5539 		goto next_container;
5540 	}
5541 	pool = cont->pools[pool_index];
5542 	if (pool->raw_hw)
5543 		/* There is a pool query in progress. */
5544 		goto set_alarm;
	pool->raw_hw = LIST_FIRST(&sh->cmng.free_stat_raws);
5547 	if (!pool->raw_hw)
5548 		/* No free counter statistics raw memory. */
5549 		goto set_alarm;
5550 	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
5551 							      (&pool->a64_dcs);
5552 	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
5553 	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
5554 					       offset, NULL, NULL,
5555 					       pool->raw_hw->mem_mng->dm->id,
5556 					       (void *)(uintptr_t)
5557 					       (pool->raw_hw->data + offset),
5558 					       sh->devx_comp,
5559 					       (uint64_t)(uintptr_t)pool);
5560 	if (ret) {
5561 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
5562 			" %d", pool->min_dcs->id);
5563 		pool->raw_hw = NULL;
5564 		goto set_alarm;
5565 	}
5566 	pool->raw_hw->min_dcs_id = dcs->id;
5567 	LIST_REMOVE(pool->raw_hw, next);
5568 	sh->cmng.pending_queries++;
5569 	pool_index++;
5570 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
5571 		batch ^= 0x1;
5572 		pool_index = 0;
5573 	}
5574 set_alarm:
5575 	sh->cmng.batch = batch;
5576 	sh->cmng.pool_index = pool_index;
5577 	mlx5_set_query_alarm(sh);
5578 }
5579 
5580 /**
 * Handler for the HW response carrying the ready values of an asynchronous
 * batch query. This function is expected to be called from the host thread.
5583  *
5584  * @param[in] sh
5585  *   The pointer to the shared IB device context.
5586  * @param[in] async_id
5587  *   The Devx async ID.
5588  * @param[in] status
5589  *   The status of the completion.
5590  */
5591 void
5592 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
5593 				  uint64_t async_id, int status)
5594 {
5595 	struct mlx5_flow_counter_pool *pool =
5596 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
5597 	struct mlx5_counter_stats_raw *raw_to_free;
5598 
5599 	if (unlikely(status)) {
5600 		raw_to_free = pool->raw_hw;
5601 	} else {
5602 		raw_to_free = pool->raw;
5603 		rte_spinlock_lock(&pool->sl);
5604 		pool->raw = pool->raw_hw;
5605 		rte_spinlock_unlock(&pool->sl);
5606 		rte_atomic64_add(&pool->query_gen, 1);
5607 		/* Be sure the new raw counters data is updated in memory. */
5608 		rte_cio_wmb();
5609 	}
5610 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
5611 	pool->raw_hw = NULL;
5612 	sh->cmng.pending_queries--;
5613 }
5614 
5615 /**
5616  * Translate the rte_flow group index to HW table value.
5617  *
5618  * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Set when the flow rule is created by a request external to the PMD.
 * @param[in] group
 *   rte_flow group index value.
 * @param[in] fdb_def_rule
 *   Whether the FDB default rule (jump to table 1) is configured.
5626  * @param[out] table
5627  *   HW table value.
5628  * @param[out] error
5629  *   Pointer to error structure.
5630  *
5631  * @return
5632  *   0 on success, a negative errno value otherwise and rte_errno is set.
5633  */
5634 int
5635 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
5636 			 uint32_t group, bool fdb_def_rule, uint32_t *table,
5637 			 struct rte_flow_error *error)
5638 {
5639 	if (attributes->transfer && external && fdb_def_rule) {
5640 		if (group == UINT32_MAX)
5641 			return rte_flow_error_set
5642 						(error, EINVAL,
5643 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5644 						 NULL,
5645 						 "group index not supported");
5646 		*table = group + 1;
5647 	} else {
5648 		*table = group;
5649 	}
5650 	return 0;
5651 }
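
/*
 * Worked examples of the translation above:
 *
 *	transfer && external && fdb_def_rule:
 *		group 0 -> table 1, group 5 -> table 6,
 *		group UINT32_MAX -> rejected with EINVAL (the +1 shift
 *		would overflow).
 *	any other combination:
 *		group N -> table N (identity mapping).
 */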
5652 
5653 /**
5654  * Discover availability of metadata reg_c's.
5655  *
5656  * Iteratively use test flows to check availability.
5657  *
5658  * @param[in] dev
5659  *   Pointer to the Ethernet device structure.
5660  *
5661  * @return
5662  *   0 on success, a negative errno value otherwise and rte_errno is set.
5663  */
5664 int
5665 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
5666 {
5667 	struct mlx5_priv *priv = dev->data->dev_private;
5668 	struct mlx5_dev_config *config = &priv->config;
5669 	enum modify_reg idx;
5670 	int n = 0;
5671 
5672 	/* reg_c[0] and reg_c[1] are reserved. */
5673 	config->flow_mreg_c[n++] = REG_C_0;
5674 	config->flow_mreg_c[n++] = REG_C_1;
5675 	/* Discover availability of other reg_c's. */
5676 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
5677 		struct rte_flow_attr attr = {
5678 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5679 			.priority = MLX5_FLOW_PRIO_RSVD,
5680 			.ingress = 1,
5681 		};
5682 		struct rte_flow_item items[] = {
5683 			[0] = {
5684 				.type = RTE_FLOW_ITEM_TYPE_END,
5685 			},
5686 		};
5687 		struct rte_flow_action actions[] = {
5688 			[0] = {
5689 				.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5690 				.conf = &(struct mlx5_flow_action_copy_mreg){
5691 					.src = REG_C_1,
5692 					.dst = idx,
5693 				},
5694 			},
5695 			[1] = {
5696 				.type = RTE_FLOW_ACTION_TYPE_JUMP,
5697 				.conf = &(struct rte_flow_action_jump){
5698 					.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5699 				},
5700 			},
5701 			[2] = {
5702 				.type = RTE_FLOW_ACTION_TYPE_END,
5703 			},
5704 		};
5705 		struct rte_flow *flow;
5706 		struct rte_flow_error error;
5707 
5708 		if (!config->dv_flow_en)
5709 			break;
5710 		/* Create internal flow, validation skips copy action. */
5711 		flow = flow_list_create(dev, NULL, &attr, items,
5712 					actions, false, &error);
5713 		if (!flow)
5714 			continue;
5715 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
5716 			config->flow_mreg_c[n++] = idx;
5717 		flow_list_destroy(dev, NULL, flow);
5718 	}
5719 	for (; n < MLX5_MREG_C_NUM; ++n)
5720 		config->flow_mreg_c[n] = REG_NONE;
5721 	return 0;
5722 }
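
/*
 * For illustration, on a (hypothetical) device where only reg_c[2] and
 * reg_c[3] accept the test flows above, the discovery would leave:
 *
 *	config->flow_mreg_c[0..3] = REG_C_0, REG_C_1, REG_C_2, REG_C_3
 *	config->flow_mreg_c[4..MLX5_MREG_C_NUM - 1] = REG_NONE
 */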
5723 
5724 /**
 * Dump flow raw HW data to a file.
 *
 * @param[in] dev
 *   The pointer to Ethernet device.
 * @param[in] file
 *   A pointer to a file for output.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value otherwise.
5736  */
5737 int
5738 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
5739 		   FILE *file,
5740 		   struct rte_flow_error *error __rte_unused)
5741 {
5742 	struct mlx5_priv *priv = dev->data->dev_private;
5743 	struct mlx5_ibv_shared *sh = priv->sh;
5744 
5745 	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
5746 				       sh->tx_domain, file);
5747 }
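
/*
 * Illustrative usage sketch, not part of the driver: an application can
 * trigger this dump through the generic rte_flow API ("/tmp/flows.txt" is
 * a hypothetical path):
 *
 *	struct rte_flow_error err;
 *	FILE *f = fopen("/tmp/flows.txt", "w");
 *
 *	if (f != NULL) {
 *		rte_flow_dev_dump(port_id, f, &err);
 *		fclose(f);
 *	}
 */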
5748