xref: /dpdk/drivers/net/mlx5/mlx5_flow.c (revision 487eec3401b7a1664982f39da139980a4f5b3adc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <netinet/in.h>
7 #include <sys/queue.h>
8 #include <stdalign.h>
9 #include <stdint.h>
10 #include <string.h>
11 
12 /* Verbs header. */
13 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #ifdef PEDANTIC
15 #pragma GCC diagnostic ignored "-Wpedantic"
16 #endif
17 #include <infiniband/verbs.h>
18 #ifdef PEDANTIC
19 #pragma GCC diagnostic error "-Wpedantic"
20 #endif
21 
22 #include <rte_common.h>
23 #include <rte_ether.h>
24 #include <rte_ethdev_driver.h>
25 #include <rte_flow.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
28 #include <rte_ip.h>
29 
30 #include "mlx5.h"
31 #include "mlx5_defs.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_prm.h"
35 #include "mlx5_rxtx.h"
36 
37 /* Dev ops structure defined in mlx5.c */
38 extern const struct eth_dev_ops mlx5_dev_ops;
39 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
40 
41 /** Device flow drivers. */
42 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
43 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
44 #endif
45 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
46 
47 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
48 
49 const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
50 	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
51 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
52 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
53 #endif
54 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
55 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
56 };
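
/*
 * Illustrative sketch, not part of the original file: how a driver type is
 * resolved to its ops through the table above.  The helper name
 * flow_get_drv_ops_example is hypothetical; the real lookup used by the
 * PMD may differ.
 */
static inline const struct mlx5_flow_driver_ops *
flow_get_drv_ops_example(enum mlx5_flow_drv_type type)
{
	/* Out-of-range types fall back to the null driver ops. */
	if (type <= MLX5_FLOW_TYPE_MIN || type >= MLX5_FLOW_TYPE_MAX)
		return &mlx5_flow_null_drv_ops;
	return flow_drv_ops[type];
}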
57 
58 enum mlx5_expansion {
59 	MLX5_EXPANSION_ROOT,
60 	MLX5_EXPANSION_ROOT_OUTER,
61 	MLX5_EXPANSION_ROOT_ETH_VLAN,
62 	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
63 	MLX5_EXPANSION_OUTER_ETH,
64 	MLX5_EXPANSION_OUTER_ETH_VLAN,
65 	MLX5_EXPANSION_OUTER_VLAN,
66 	MLX5_EXPANSION_OUTER_IPV4,
67 	MLX5_EXPANSION_OUTER_IPV4_UDP,
68 	MLX5_EXPANSION_OUTER_IPV4_TCP,
69 	MLX5_EXPANSION_OUTER_IPV6,
70 	MLX5_EXPANSION_OUTER_IPV6_UDP,
71 	MLX5_EXPANSION_OUTER_IPV6_TCP,
72 	MLX5_EXPANSION_VXLAN,
73 	MLX5_EXPANSION_VXLAN_GPE,
74 	MLX5_EXPANSION_GRE,
75 	MLX5_EXPANSION_MPLS,
76 	MLX5_EXPANSION_ETH,
77 	MLX5_EXPANSION_ETH_VLAN,
78 	MLX5_EXPANSION_VLAN,
79 	MLX5_EXPANSION_IPV4,
80 	MLX5_EXPANSION_IPV4_UDP,
81 	MLX5_EXPANSION_IPV4_TCP,
82 	MLX5_EXPANSION_IPV6,
83 	MLX5_EXPANSION_IPV6_UDP,
84 	MLX5_EXPANSION_IPV6_TCP,
85 };
86 
87 /** Supported expansion of items. */
88 static const struct rte_flow_expand_node mlx5_support_expansion[] = {
89 	[MLX5_EXPANSION_ROOT] = {
90 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
91 						 MLX5_EXPANSION_IPV4,
92 						 MLX5_EXPANSION_IPV6),
93 		.type = RTE_FLOW_ITEM_TYPE_END,
94 	},
95 	[MLX5_EXPANSION_ROOT_OUTER] = {
96 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
97 						 MLX5_EXPANSION_OUTER_IPV4,
98 						 MLX5_EXPANSION_OUTER_IPV6),
99 		.type = RTE_FLOW_ITEM_TYPE_END,
100 	},
101 	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
102 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
103 		.type = RTE_FLOW_ITEM_TYPE_END,
104 	},
105 	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
106 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
107 		.type = RTE_FLOW_ITEM_TYPE_END,
108 	},
109 	[MLX5_EXPANSION_OUTER_ETH] = {
110 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
111 						 MLX5_EXPANSION_OUTER_IPV6,
112 						 MLX5_EXPANSION_MPLS),
113 		.type = RTE_FLOW_ITEM_TYPE_ETH,
114 		.rss_types = 0,
115 	},
116 	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
117 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
118 		.type = RTE_FLOW_ITEM_TYPE_ETH,
119 		.rss_types = 0,
120 	},
121 	[MLX5_EXPANSION_OUTER_VLAN] = {
122 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
123 						 MLX5_EXPANSION_OUTER_IPV6),
124 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
125 	},
126 	[MLX5_EXPANSION_OUTER_IPV4] = {
127 		.next = RTE_FLOW_EXPAND_RSS_NEXT
128 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
129 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
130 			 MLX5_EXPANSION_GRE,
131 			 MLX5_EXPANSION_IPV4,
132 			 MLX5_EXPANSION_IPV6),
133 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
134 		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
135 			ETH_RSS_NONFRAG_IPV4_OTHER,
136 	},
137 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
138 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
139 						 MLX5_EXPANSION_VXLAN_GPE),
140 		.type = RTE_FLOW_ITEM_TYPE_UDP,
141 		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
142 	},
143 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
144 		.type = RTE_FLOW_ITEM_TYPE_TCP,
145 		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
146 	},
147 	[MLX5_EXPANSION_OUTER_IPV6] = {
148 		.next = RTE_FLOW_EXPAND_RSS_NEXT
149 			(MLX5_EXPANSION_OUTER_IPV6_UDP,
150 			 MLX5_EXPANSION_OUTER_IPV6_TCP,
151 			 MLX5_EXPANSION_IPV4,
152 			 MLX5_EXPANSION_IPV6),
153 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
154 		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
155 			ETH_RSS_NONFRAG_IPV6_OTHER,
156 	},
157 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
158 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
159 						 MLX5_EXPANSION_VXLAN_GPE),
160 		.type = RTE_FLOW_ITEM_TYPE_UDP,
161 		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
162 	},
163 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
164 		.type = RTE_FLOW_ITEM_TYPE_TCP,
165 		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
166 	},
167 	[MLX5_EXPANSION_VXLAN] = {
168 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
169 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
170 	},
171 	[MLX5_EXPANSION_VXLAN_GPE] = {
172 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
173 						 MLX5_EXPANSION_IPV4,
174 						 MLX5_EXPANSION_IPV6),
175 		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
176 	},
177 	[MLX5_EXPANSION_GRE] = {
178 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
179 		.type = RTE_FLOW_ITEM_TYPE_GRE,
180 	},
181 	[MLX5_EXPANSION_MPLS] = {
182 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
183 						 MLX5_EXPANSION_IPV6),
184 		.type = RTE_FLOW_ITEM_TYPE_MPLS,
185 	},
186 	[MLX5_EXPANSION_ETH] = {
187 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
188 						 MLX5_EXPANSION_IPV6),
189 		.type = RTE_FLOW_ITEM_TYPE_ETH,
190 	},
191 	[MLX5_EXPANSION_ETH_VLAN] = {
192 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
193 		.type = RTE_FLOW_ITEM_TYPE_ETH,
194 	},
195 	[MLX5_EXPANSION_VLAN] = {
196 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
197 						 MLX5_EXPANSION_IPV6),
198 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
199 	},
200 	[MLX5_EXPANSION_IPV4] = {
201 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
202 						 MLX5_EXPANSION_IPV4_TCP),
203 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
204 		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
205 			ETH_RSS_NONFRAG_IPV4_OTHER,
206 	},
207 	[MLX5_EXPANSION_IPV4_UDP] = {
208 		.type = RTE_FLOW_ITEM_TYPE_UDP,
209 		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
210 	},
211 	[MLX5_EXPANSION_IPV4_TCP] = {
212 		.type = RTE_FLOW_ITEM_TYPE_TCP,
213 		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
214 	},
215 	[MLX5_EXPANSION_IPV6] = {
216 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
217 						 MLX5_EXPANSION_IPV6_TCP),
218 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
219 		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
220 			ETH_RSS_NONFRAG_IPV6_OTHER,
221 	},
222 	[MLX5_EXPANSION_IPV6_UDP] = {
223 		.type = RTE_FLOW_ITEM_TYPE_UDP,
224 		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
225 	},
226 	[MLX5_EXPANSION_IPV6_TCP] = {
227 		.type = RTE_FLOW_ITEM_TYPE_TCP,
228 		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
229 	},
230 };
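
/*
 * Illustrative sketch, not part of the original file: for an RSS flow with
 * the pattern below and rss.types covering IPv4 UDP and TCP, expanding it
 * from MLX5_EXPANSION_ROOT through the graph above yields one device flow
 * per sub-pattern:
 *   eth / ipv4
 *   eth / ipv4 / udp
 *   eth / ipv4 / tcp
 */
static const struct rte_flow_item example_rss_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};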
231 
232 static const struct rte_flow_ops mlx5_flow_ops = {
233 	.validate = mlx5_flow_validate,
234 	.create = mlx5_flow_create,
235 	.destroy = mlx5_flow_destroy,
236 	.flush = mlx5_flow_flush,
237 	.isolate = mlx5_flow_isolate,
238 	.query = mlx5_flow_query,
239 };
240 
241 /* Convert FDIR request to Generic flow. */
242 struct mlx5_fdir {
243 	struct rte_flow_attr attr;
244 	struct rte_flow_item items[4];
245 	struct rte_flow_item_eth l2;
246 	struct rte_flow_item_eth l2_mask;
247 	union {
248 		struct rte_flow_item_ipv4 ipv4;
249 		struct rte_flow_item_ipv6 ipv6;
250 	} l3;
251 	union {
252 		struct rte_flow_item_ipv4 ipv4;
253 		struct rte_flow_item_ipv6 ipv6;
254 	} l3_mask;
255 	union {
256 		struct rte_flow_item_udp udp;
257 		struct rte_flow_item_tcp tcp;
258 	} l4;
259 	union {
260 		struct rte_flow_item_udp udp;
261 		struct rte_flow_item_tcp tcp;
262 	} l4_mask;
263 	struct rte_flow_action actions[2];
264 	struct rte_flow_action_queue queue;
265 };
266 
267 /* Map of Verbs to Flow priority with 8 Verbs priorities. */
268 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
269 	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
270 };
271 
272 /* Map of Verbs to Flow priority with 16 Verbs priorities. */
273 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
274 	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
275 	{ 9, 10, 11 }, { 12, 13, 14 },
276 };
277 
278 /* Tunnel information. */
279 struct mlx5_flow_tunnel_info {
280 	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
281 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
282 };
283 
284 static struct mlx5_flow_tunnel_info tunnels_info[] = {
285 	{
286 		.tunnel = MLX5_FLOW_LAYER_VXLAN,
287 		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
288 	},
289 	{
290 		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
291 		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
292 	},
293 	{
294 		.tunnel = MLX5_FLOW_LAYER_GRE,
295 		.ptype = RTE_PTYPE_TUNNEL_GRE,
296 	},
297 	{
298 		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
299 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
300 	},
301 	{
302 		.tunnel = MLX5_FLOW_LAYER_MPLS,
303 		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
304 	},
305 	{
306 		.tunnel = MLX5_FLOW_LAYER_NVGRE,
307 		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
308 	},
309 	{
310 		.tunnel = MLX5_FLOW_LAYER_IPIP,
311 		.ptype = RTE_PTYPE_TUNNEL_IP,
312 	},
313 	{
314 		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
315 		.ptype = RTE_PTYPE_TUNNEL_IP,
316 	},
317 };
318 
319 /**
320  * Discover the maximum number of priorities available.
321  *
322  * @param[in] dev
323  *   Pointer to the Ethernet device structure.
324  *
325  * @return
326  *   number of supported flow priorities on success, a negative errno
327  *   value otherwise and rte_errno is set.
328  */
329 int
330 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
331 {
332 	struct mlx5_priv *priv = dev->data->dev_private;
333 	struct {
334 		struct ibv_flow_attr attr;
335 		struct ibv_flow_spec_eth eth;
336 		struct ibv_flow_spec_action_drop drop;
337 	} flow_attr = {
338 		.attr = {
339 			.num_of_specs = 2,
340 			.port = (uint8_t)priv->ibv_port,
341 		},
342 		.eth = {
343 			.type = IBV_FLOW_SPEC_ETH,
344 			.size = sizeof(struct ibv_flow_spec_eth),
345 		},
346 		.drop = {
347 			.size = sizeof(struct ibv_flow_spec_action_drop),
348 			.type = IBV_FLOW_SPEC_ACTION_DROP,
349 		},
350 	};
351 	struct ibv_flow *flow;
352 	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
353 	uint16_t vprio[] = { 8, 16 };
354 	int i;
355 	int priority = 0;
356 
357 	if (!drop) {
358 		rte_errno = ENOTSUP;
359 		return -rte_errno;
360 	}
361 	for (i = 0; i != RTE_DIM(vprio); i++) {
362 		flow_attr.attr.priority = vprio[i] - 1;
363 		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
364 		if (!flow)
365 			break;
366 		claim_zero(mlx5_glue->destroy_flow(flow));
367 		priority = vprio[i];
368 	}
369 	mlx5_hrxq_drop_release(dev);
370 	switch (priority) {
371 	case 8:
372 		priority = RTE_DIM(priority_map_3);
373 		break;
374 	case 16:
375 		priority = RTE_DIM(priority_map_5);
376 		break;
377 	default:
378 		rte_errno = ENOTSUP;
379 		DRV_LOG(ERR,
380 			"port %u verbs maximum priority: %d expected 8/16",
381 			dev->data->port_id, priority);
382 		return -rte_errno;
383 	}
384 	DRV_LOG(INFO, "port %u flow maximum priority: %d",
385 		dev->data->port_id, priority);
386 	return priority;
387 }
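
/*
 * Illustrative sketch, not part of the original file: how the discovery
 * above is typically consumed once per device.  The function name
 * example_probe_flow_prio is hypothetical; the real caller is in the
 * device probe/spawn path.
 */
static int
example_probe_flow_prio(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int prio = mlx5_flow_discover_priorities(dev);

	if (prio < 0)
		return prio; /* rte_errno is already set. */
	/* Remember how many flow rule priorities can be used later on. */
	priv->config.flow_prio = prio;
	return 0;
}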
388 
389 /**
390  * Adjust flow priority based on the highest layer and the request priority.
391  *
392  * @param[in] dev
393  *   Pointer to the Ethernet device structure.
394  * @param[in] priority
395  *   The rule base priority.
396  * @param[in] subpriority
397  *   The priority based on the items.
398  *
399  * @return
400  *   The new priority.
401  */
402 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
403 				   uint32_t subpriority)
404 {
405 	uint32_t res = 0;
406 	struct mlx5_priv *priv = dev->data->dev_private;
407 
408 	switch (priv->config.flow_prio) {
409 	case RTE_DIM(priority_map_3):
410 		res = priority_map_3[priority][subpriority];
411 		break;
412 	case RTE_DIM(priority_map_5):
413 		res = priority_map_5[priority][subpriority];
414 		break;
415 	}
416 	return res;
417 }
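
/*
 * Illustrative sketch, not part of the original file: a worked example of
 * the mapping above.  The function name example_adjust_priority is
 * hypothetical.
 */
static uint32_t
example_adjust_priority(struct rte_eth_dev *dev)
{
	/*
	 * With 16 Verbs priorities (flow_prio == 5) this returns
	 * priority_map_5[1][2] == 5; with 8 Verbs priorities (flow_prio == 3)
	 * it returns priority_map_3[1][2] == 4.
	 */
	return mlx5_flow_adjust_priority(dev, 1, 2);
}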
418 
419 /**
420  * Verify the @p item specifications (spec, last, mask) are compatible with the
421  * NIC capabilities.
422  *
423  * @param[in] item
424  *   Item specification.
425  * @param[in] mask
426  *   @p item->mask or flow default bit-masks.
427  * @param[in] nic_mask
428  *   Bit-masks covering supported fields by the NIC to compare with user mask.
429  * @param[in] size
430  *   Bit-mask size in bytes.
431  * @param[out] error
432  *   Pointer to error structure.
433  *
434  * @return
435  *   0 on success, a negative errno value otherwise and rte_errno is set.
436  */
437 int
438 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
439 			  const uint8_t *mask,
440 			  const uint8_t *nic_mask,
441 			  unsigned int size,
442 			  struct rte_flow_error *error)
443 {
444 	unsigned int i;
445 
446 	assert(nic_mask);
447 	for (i = 0; i < size; ++i)
448 		if ((nic_mask[i] | mask[i]) != nic_mask[i])
449 			return rte_flow_error_set(error, ENOTSUP,
450 						  RTE_FLOW_ERROR_TYPE_ITEM,
451 						  item,
452 						  "mask enables unsupported"
453 						  " bits");
454 	if (!item->spec && (item->mask || item->last))
455 		return rte_flow_error_set(error, EINVAL,
456 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
457 					  "mask/last without a spec is not"
458 					  " supported");
459 	if (item->spec && item->last) {
460 		uint8_t spec[size];
461 		uint8_t last[size];
462 		unsigned int i;
463 		int ret;
464 
465 		for (i = 0; i < size; ++i) {
466 			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
467 			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
468 		}
469 		ret = memcmp(spec, last, size);
470 		if (ret != 0)
471 			return rte_flow_error_set(error, EINVAL,
472 						  RTE_FLOW_ERROR_TYPE_ITEM,
473 						  item,
474 						  "range is not valid");
475 	}
476 	return 0;
477 }
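
/*
 * Illustrative sketch, not part of the original file: rejecting a user mask
 * that enables bits outside what the NIC supports.  The example assumes a
 * NIC that can only match the destination MAC; names prefixed with
 * example_ are hypothetical.
 */
static int
example_check_eth_mask(const struct rte_flow_item *item,
		       struct rte_flow_error *error)
{
	static const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const struct rte_flow_item_eth *mask =
		item->mask ? item->mask : &rte_flow_item_eth_mask;

	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					 (const uint8_t *)&nic_mask,
					 sizeof(nic_mask), error);
}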
478 
479 /**
480  * Adjust the hash fields according to the @p flow information.
481  *
482  * @param[in] dev_flow
483  *   Pointer to the mlx5_flow.
484  * @param[in] tunnel
485  *   1 when the hash field is for a tunnel item.
486  * @param[in] layer_types
487  *   ETH_RSS_* types.
488  * @param[in] hash_fields
489  *   Item hash fields.
490  *
491  * @return
492  *   The hash fields that should be used.
493  */
494 uint64_t
495 mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
496 			    int tunnel __rte_unused, uint64_t layer_types,
497 			    uint64_t hash_fields)
498 {
499 	struct rte_flow *flow = dev_flow->flow;
500 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
501 	int rss_request_inner = flow->rss.level >= 2;
502 
503 	/* Check RSS hash level for tunnel. */
504 	if (tunnel && rss_request_inner)
505 		hash_fields |= IBV_RX_HASH_INNER;
506 	else if (tunnel || rss_request_inner)
507 		return 0;
508 #endif
509 	/* Check if requested layer matches RSS hash fields. */
510 	if (!(flow->rss.types & layer_types))
511 		return 0;
512 	return hash_fields;
513 }
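
/*
 * Illustrative sketch, not part of the original file: selecting the Verbs
 * hash fields for an IPv4/UDP layer of a device flow.  The wrapper name is
 * hypothetical; the real callers are the per-layer translation functions.
 */
static uint64_t
example_hashfields_ipv4_udp(struct mlx5_flow *dev_flow, int tunnel)
{
	return mlx5_flow_hashfields_adjust
		(dev_flow, tunnel, ETH_RSS_NONFRAG_IPV4_UDP,
		 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP);
}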
514 
515 /**
516  * Look up and set the tunnel ptype in the Rx queue data. A single ptype can
517  * be used; if several tunnel rules are used on this queue, the tunnel ptype
518  * is cleared.
519  *
520  * @param rxq_ctrl
521  *   Rx queue to update.
522  */
523 static void
524 flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
525 {
526 	unsigned int i;
527 	uint32_t tunnel_ptype = 0;
528 
529 	/* Look up for the ptype to use. */
530 	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
531 		if (!rxq_ctrl->flow_tunnels_n[i])
532 			continue;
533 		if (!tunnel_ptype) {
534 			tunnel_ptype = tunnels_info[i].ptype;
535 		} else {
536 			tunnel_ptype = 0;
537 			break;
538 		}
539 	}
540 	rxq_ctrl->rxq.tunnel = tunnel_ptype;
541 }
542 
543 /**
544  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
545  * flow.
546  *
547  * @param[in] dev
548  *   Pointer to the Ethernet device structure.
549  * @param[in] dev_flow
550  *   Pointer to device flow structure.
551  */
552 static void
553 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
554 {
555 	struct mlx5_priv *priv = dev->data->dev_private;
556 	struct rte_flow *flow = dev_flow->flow;
557 	const int mark = !!(flow->actions &
558 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
559 	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
560 	unsigned int i;
561 
562 	for (i = 0; i != flow->rss.queue_num; ++i) {
563 		int idx = (*flow->queue)[i];
564 		struct mlx5_rxq_ctrl *rxq_ctrl =
565 			container_of((*priv->rxqs)[idx],
566 				     struct mlx5_rxq_ctrl, rxq);
567 
568 		if (mark) {
569 			rxq_ctrl->rxq.mark = 1;
570 			rxq_ctrl->flow_mark_n++;
571 		}
572 		if (tunnel) {
573 			unsigned int j;
574 
575 			/* Increase the counter matching the flow. */
576 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
577 				if ((tunnels_info[j].tunnel &
578 				     dev_flow->layers) ==
579 				    tunnels_info[j].tunnel) {
580 					rxq_ctrl->flow_tunnels_n[j]++;
581 					break;
582 				}
583 			}
584 			flow_rxq_tunnel_ptype_update(rxq_ctrl);
585 		}
586 	}
587 }
588 
589 /**
590  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
591  *
592  * @param[in] dev
593  *   Pointer to the Ethernet device structure.
594  * @param[in] flow
595  *   Pointer to flow structure.
596  */
597 static void
598 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
599 {
600 	struct mlx5_flow *dev_flow;
601 
602 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
603 		flow_drv_rxq_flags_set(dev, dev_flow);
604 }
605 
606 /**
607  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
608  * device flow if no other flow uses it with the same kind of request.
609  *
610  * @param dev
611  *   Pointer to Ethernet device.
612  * @param[in] dev_flow
613  *   Pointer to the device flow.
614  */
615 static void
616 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
617 {
618 	struct mlx5_priv *priv = dev->data->dev_private;
619 	struct rte_flow *flow = dev_flow->flow;
620 	const int mark = !!(flow->actions &
621 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
622 	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
623 	unsigned int i;
624 
625 	assert(dev->data->dev_started);
626 	for (i = 0; i != flow->rss.queue_num; ++i) {
627 		int idx = (*flow->queue)[i];
628 		struct mlx5_rxq_ctrl *rxq_ctrl =
629 			container_of((*priv->rxqs)[idx],
630 				     struct mlx5_rxq_ctrl, rxq);
631 
632 		if (mark) {
633 			rxq_ctrl->flow_mark_n--;
634 			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
635 		}
636 		if (tunnel) {
637 			unsigned int j;
638 
639 			/* Decrease the counter matching the flow. */
640 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
641 				if ((tunnels_info[j].tunnel &
642 				     dev_flow->layers) ==
643 				    tunnels_info[j].tunnel) {
644 					rxq_ctrl->flow_tunnels_n[j]--;
645 					break;
646 				}
647 			}
648 			flow_rxq_tunnel_ptype_update(rxq_ctrl);
649 		}
650 	}
651 }
652 
653 /**
654  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
655  * @p flow if no other flow uses it with the same kind of request.
656  *
657  * @param dev
658  *   Pointer to Ethernet device.
659  * @param[in] flow
660  *   Pointer to the flow.
661  */
662 static void
663 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
664 {
665 	struct mlx5_flow *dev_flow;
666 
667 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
668 		flow_drv_rxq_flags_trim(dev, dev_flow);
669 }
670 
671 /**
672  * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
673  *
674  * @param dev
675  *   Pointer to Ethernet device.
676  */
677 static void
678 flow_rxq_flags_clear(struct rte_eth_dev *dev)
679 {
680 	struct mlx5_priv *priv = dev->data->dev_private;
681 	unsigned int i;
682 
683 	for (i = 0; i != priv->rxqs_n; ++i) {
684 		struct mlx5_rxq_ctrl *rxq_ctrl;
685 		unsigned int j;
686 
687 		if (!(*priv->rxqs)[i])
688 			continue;
689 		rxq_ctrl = container_of((*priv->rxqs)[i],
690 					struct mlx5_rxq_ctrl, rxq);
691 		rxq_ctrl->flow_mark_n = 0;
692 		rxq_ctrl->rxq.mark = 0;
693 		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
694 			rxq_ctrl->flow_tunnels_n[j] = 0;
695 		rxq_ctrl->rxq.tunnel = 0;
696 	}
697 }
698 
699 /*
700  * Return a pointer to the desired action in the list of actions.
701  *
702  * @param[in] actions
703  *   The list of actions to search the action in.
704  * @param[in] action
705  *   The action to find.
706  *
707  * @return
708  *   Pointer to the action in the list, if found. NULL otherwise.
709  */
710 const struct rte_flow_action *
711 mlx5_flow_find_action(const struct rte_flow_action *actions,
712 		      enum rte_flow_action_type action)
713 {
714 	if (actions == NULL)
715 		return NULL;
716 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
717 		if (actions->type == action)
718 			return actions;
719 	return NULL;
720 }
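
/*
 * Illustrative sketch, not part of the original file: locating the RSS
 * action configuration in an action list.  The helper name is hypothetical.
 */
static const struct rte_flow_action_rss *
example_find_rss_conf(const struct rte_flow_action *actions)
{
	const struct rte_flow_action *act =
		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);

	return act ? act->conf : NULL;
}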
721 
722 /*
723  * Validate the flag action.
724  *
725  * @param[in] action_flags
726  *   Bit-fields that hold the actions detected until now.
727  * @param[in] attr
728  *   Attributes of flow that includes this action.
729  * @param[out] error
730  *   Pointer to error structure.
731  *
732  * @return
733  *   0 on success, a negative errno value otherwise and rte_errno is set.
734  */
735 int
736 mlx5_flow_validate_action_flag(uint64_t action_flags,
737 			       const struct rte_flow_attr *attr,
738 			       struct rte_flow_error *error)
739 {
741 	if (action_flags & MLX5_FLOW_ACTION_DROP)
742 		return rte_flow_error_set(error, EINVAL,
743 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
744 					  "can't drop and flag in same flow");
745 	if (action_flags & MLX5_FLOW_ACTION_MARK)
746 		return rte_flow_error_set(error, EINVAL,
747 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
748 					  "can't mark and flag in same flow");
749 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
750 		return rte_flow_error_set(error, EINVAL,
751 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
752 					  "can't have 2 flag"
753 					  " actions in same flow");
754 	if (attr->egress)
755 		return rte_flow_error_set(error, ENOTSUP,
756 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
757 					  "flag action not supported for "
758 					  "egress");
759 	return 0;
760 }
761 
762 /*
763  * Validate the mark action.
764  *
765  * @param[in] action
766  *   Pointer to the mark action.
767  * @param[in] action_flags
768  *   Bit-fields that hold the actions detected until now.
769  * @param[in] attr
770  *   Attributes of flow that includes this action.
771  * @param[out] error
772  *   Pointer to error structure.
773  *
774  * @return
775  *   0 on success, a negative errno value otherwise and rte_errno is set.
776  */
777 int
778 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
779 			       uint64_t action_flags,
780 			       const struct rte_flow_attr *attr,
781 			       struct rte_flow_error *error)
782 {
783 	const struct rte_flow_action_mark *mark = action->conf;
784 
785 	if (!mark)
786 		return rte_flow_error_set(error, EINVAL,
787 					  RTE_FLOW_ERROR_TYPE_ACTION,
788 					  action,
789 					  "configuration cannot be null");
790 	if (mark->id >= MLX5_FLOW_MARK_MAX)
791 		return rte_flow_error_set(error, EINVAL,
792 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
793 					  &mark->id,
794 					  "mark id must be in 0 <= id < "
795 					  RTE_STR(MLX5_FLOW_MARK_MAX));
796 	if (action_flags & MLX5_FLOW_ACTION_DROP)
797 		return rte_flow_error_set(error, EINVAL,
798 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
799 					  "can't drop and mark in same flow");
800 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
801 		return rte_flow_error_set(error, EINVAL,
802 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
803 					  "can't flag and mark in same flow");
804 	if (action_flags & MLX5_FLOW_ACTION_MARK)
805 		return rte_flow_error_set(error, EINVAL,
806 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
807 					  "can't have 2 mark actions in same"
808 					  " flow");
809 	if (attr->egress)
810 		return rte_flow_error_set(error, ENOTSUP,
811 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
812 					  "mark action not supported for "
813 					  "egress");
814 	return 0;
815 }
816 
817 /*
818  * Validate the drop action.
819  *
820  * @param[in] action_flags
821  *   Bit-fields that hold the actions detected until now.
822  * @param[in] attr
823  *   Attributes of flow that includes this action.
824  * @param[out] error
825  *   Pointer to error structure.
826  *
827  * @return
828  *   0 on success, a negative errno value otherwise and rte_errno is set.
829  */
830 int
831 mlx5_flow_validate_action_drop(uint64_t action_flags,
832 			       const struct rte_flow_attr *attr,
833 			       struct rte_flow_error *error)
834 {
835 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
836 		return rte_flow_error_set(error, EINVAL,
837 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
838 					  "can't drop and flag in same flow");
839 	if (action_flags & MLX5_FLOW_ACTION_MARK)
840 		return rte_flow_error_set(error, EINVAL,
841 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
842 					  "can't drop and mark in same flow");
843 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
844 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
845 		return rte_flow_error_set(error, EINVAL,
846 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
847 					  "can't have 2 fate actions in"
848 					  " same flow");
849 	if (attr->egress)
850 		return rte_flow_error_set(error, ENOTSUP,
851 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
852 					  "drop action not supported for "
853 					  "egress");
854 	return 0;
855 }
856 
857 /*
858  * Validate the queue action.
859  *
860  * @param[in] action
861  *   Pointer to the queue action.
862  * @param[in] action_flags
863  *   Bit-fields that hold the actions detected until now.
864  * @param[in] dev
865  *   Pointer to the Ethernet device structure.
866  * @param[in] attr
867  *   Attributes of flow that includes this action.
868  * @param[out] error
869  *   Pointer to error structure.
870  *
871  * @return
872  *   0 on success, a negative errno value otherwise and rte_errno is set.
873  */
874 int
875 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
876 				uint64_t action_flags,
877 				struct rte_eth_dev *dev,
878 				const struct rte_flow_attr *attr,
879 				struct rte_flow_error *error)
880 {
881 	struct mlx5_priv *priv = dev->data->dev_private;
882 	const struct rte_flow_action_queue *queue = action->conf;
883 
884 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
885 		return rte_flow_error_set(error, EINVAL,
886 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
887 					  "can't have 2 fate actions in"
888 					  " same flow");
889 	if (!priv->rxqs_n)
890 		return rte_flow_error_set(error, EINVAL,
891 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
892 					  NULL, "No Rx queues configured");
893 	if (queue->index >= priv->rxqs_n)
894 		return rte_flow_error_set(error, EINVAL,
895 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
896 					  &queue->index,
897 					  "queue index out of range");
898 	if (!(*priv->rxqs)[queue->index])
899 		return rte_flow_error_set(error, EINVAL,
900 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
901 					  &queue->index,
902 					  "queue is not configured");
903 	if (attr->egress)
904 		return rte_flow_error_set(error, ENOTSUP,
905 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
906 					  "queue action not supported for "
907 					  "egress");
908 	return 0;
909 }
910 
911 /*
912  * Validate the rss action.
913  *
914  * @param[in] action
915  *   Pointer to the RSS action.
916  * @param[in] action_flags
917  *   Bit-fields that hold the actions detected until now.
918  * @param[in] dev
919  *   Pointer to the Ethernet device structure.
920  * @param[in] attr
921  *   Attributes of flow that includes this action.
922  * @param[in] item_flags
923  *   Items that were detected.
924  * @param[out] error
925  *   Pointer to error structure.
926  *
927  * @return
928  *   0 on success, a negative errno value otherwise and rte_errno is set.
929  */
930 int
931 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
932 			      uint64_t action_flags,
933 			      struct rte_eth_dev *dev,
934 			      const struct rte_flow_attr *attr,
935 			      uint64_t item_flags,
936 			      struct rte_flow_error *error)
937 {
938 	struct mlx5_priv *priv = dev->data->dev_private;
939 	const struct rte_flow_action_rss *rss = action->conf;
940 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
941 	unsigned int i;
942 
943 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
944 		return rte_flow_error_set(error, EINVAL,
945 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
946 					  "can't have 2 fate actions"
947 					  " in same flow");
948 	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
949 	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
950 		return rte_flow_error_set(error, ENOTSUP,
951 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
952 					  &rss->func,
953 					  "RSS hash function not supported");
954 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
955 	if (rss->level > 2)
956 #else
957 	if (rss->level > 1)
958 #endif
959 		return rte_flow_error_set(error, ENOTSUP,
960 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
961 					  &rss->level,
962 					  "tunnel RSS is not supported");
963 	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
964 	if (rss->key_len == 0 && rss->key != NULL)
965 		return rte_flow_error_set(error, ENOTSUP,
966 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
967 					  &rss->key_len,
968 					  "RSS hash key length 0");
969 	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
970 		return rte_flow_error_set(error, ENOTSUP,
971 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
972 					  &rss->key_len,
973 					  "RSS hash key too small");
974 	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
975 		return rte_flow_error_set(error, ENOTSUP,
976 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
977 					  &rss->key_len,
978 					  "RSS hash key too large");
979 	if (rss->queue_num > priv->config.ind_table_max_size)
980 		return rte_flow_error_set(error, ENOTSUP,
981 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
982 					  &rss->queue_num,
983 					  "number of queues too large");
984 	if (rss->types & MLX5_RSS_HF_MASK)
985 		return rte_flow_error_set(error, ENOTSUP,
986 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
987 					  &rss->types,
988 					  "some RSS protocols are not"
989 					  " supported");
990 	if (!priv->rxqs_n)
991 		return rte_flow_error_set(error, EINVAL,
992 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
993 					  NULL, "No Rx queues configured");
994 	if (!rss->queue_num)
995 		return rte_flow_error_set(error, EINVAL,
996 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
997 					  NULL, "No queues configured");
998 	for (i = 0; i != rss->queue_num; ++i) {
999 		if (!(*priv->rxqs)[rss->queue[i]])
1000 			return rte_flow_error_set
1001 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1002 				 &rss->queue[i], "queue is not configured");
1003 	}
1004 	if (attr->egress)
1005 		return rte_flow_error_set(error, ENOTSUP,
1006 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1007 					  "rss action not supported for "
1008 					  "egress");
1009 	if (rss->level > 1 && !tunnel)
1010 		return rte_flow_error_set(error, EINVAL,
1011 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1012 					  "inner RSS is not supported for "
1013 					  "non-tunnel flows");
1014 	return 0;
1015 }
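
/*
 * Illustrative sketch, not part of the original file: how the action
 * validators above are typically chained while walking an action list and
 * accumulating action_flags.  Only FLAG and QUEUE are handled here; the
 * per-driver validators cover the full action set.  Names prefixed with
 * example_ are hypothetical.
 */
static int
example_validate_actions(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action actions[],
			 struct rte_flow_error *error)
{
	uint64_t action_flags = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags,
							      dev, attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		default:
			break;
		}
	}
	return 0;
}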
1016 
1017 /*
1018  * Validate the count action.
1019  *
1020  * @param[in] dev
1021  *   Pointer to the Ethernet device structure.
1022  * @param[in] attr
1023  *   Attributes of flow that includes this action.
1024  * @param[out] error
1025  *   Pointer to error structure.
1026  *
1027  * @return
1028  *   0 on success, a negative errno value otherwise and rte_errno is set.
1029  */
1030 int
1031 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1032 				const struct rte_flow_attr *attr,
1033 				struct rte_flow_error *error)
1034 {
1035 	if (attr->egress)
1036 		return rte_flow_error_set(error, ENOTSUP,
1037 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1038 					  "count action not supported for "
1039 					  "egress");
1040 	return 0;
1041 }
1042 
1043 /**
1044  * Verify the @p attributes will be correctly understood by the NIC and store
1045  * them in the @p flow if everything is correct.
1046  *
1047  * @param[in] dev
1048  *   Pointer to the Ethernet device structure.
1049  * @param[in] attributes
1050  *   Pointer to flow attributes
1051  * @param[out] error
1052  *   Pointer to error structure.
1053  *
1054  * @return
1055  *   0 on success, a negative errno value otherwise and rte_errno is set.
1056  */
1057 int
1058 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1059 			      const struct rte_flow_attr *attributes,
1060 			      struct rte_flow_error *error)
1061 {
1062 	struct mlx5_priv *priv = dev->data->dev_private;
1063 	uint32_t priority_max = priv->config.flow_prio - 1;
1064 
1065 	if (attributes->group)
1066 		return rte_flow_error_set(error, ENOTSUP,
1067 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1068 					  NULL, "groups are not supported");
1069 	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1070 	    attributes->priority >= priority_max)
1071 		return rte_flow_error_set(error, ENOTSUP,
1072 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1073 					  NULL, "priority out of range");
1074 	if (attributes->egress)
1075 		return rte_flow_error_set(error, ENOTSUP,
1076 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1077 					  "egress is not supported");
1078 	if (attributes->transfer && !priv->config.dv_esw_en)
1079 		return rte_flow_error_set(error, ENOTSUP,
1080 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1081 					  NULL, "transfer is not supported");
1082 	if (!attributes->ingress)
1083 		return rte_flow_error_set(error, EINVAL,
1084 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1085 					  NULL,
1086 					  "ingress attribute is mandatory");
1087 	return 0;
1088 }
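
/*
 * Illustrative sketch, not part of the original file: attributes that pass
 * the check above - ingress only, default group and a priority below the
 * discovered maximum.
 */
static const struct rte_flow_attr example_ingress_attr = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
};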
1089 
1090 /**
1091  * Validate ICMP6 item.
1092  *
1093  * @param[in] item
1094  *   Item specification.
1095  * @param[in] item_flags
1096  *   Bit-fields that hold the items detected until now.
1097  * @param[out] error
1098  *   Pointer to error structure.
1099  *
1100  * @return
1101  *   0 on success, a negative errno value otherwise and rte_errno is set.
1102  */
1103 int
1104 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1105 			       uint64_t item_flags,
1106 			       uint8_t target_protocol,
1107 			       struct rte_flow_error *error)
1108 {
1109 	const struct rte_flow_item_icmp6 *mask = item->mask;
1110 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1111 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1112 				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1113 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1114 				      MLX5_FLOW_LAYER_OUTER_L4;
1115 	int ret;
1116 
1117 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1118 		return rte_flow_error_set(error, EINVAL,
1119 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1120 					  "protocol filtering not compatible"
1121 					  " with ICMP6 layer");
1122 	if (!(item_flags & l3m))
1123 		return rte_flow_error_set(error, EINVAL,
1124 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1125 					  "IPv6 is mandatory to filter on"
1126 					  " ICMP6");
1127 	if (item_flags & l4m)
1128 		return rte_flow_error_set(error, EINVAL,
1129 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1130 					  "multiple L4 layers not supported");
1131 	if (!mask)
1132 		mask = &rte_flow_item_icmp6_mask;
1133 	ret = mlx5_flow_item_acceptable
1134 		(item, (const uint8_t *)mask,
1135 		 (const uint8_t *)&rte_flow_item_icmp6_mask,
1136 		 sizeof(struct rte_flow_item_icmp6), error);
1137 	if (ret < 0)
1138 		return ret;
1139 	return 0;
1140 }
1141 
1142 /**
1143  * Validate ICMP item.
1144  *
1145  * @param[in] item
1146  *   Item specification.
1147  * @param[in] item_flags
1148  *   Bit-fields that hold the items detected until now.
1149  * @param[out] error
1150  *   Pointer to error structure.
1151  *
1152  * @return
1153  *   0 on success, a negative errno value otherwise and rte_errno is set.
1154  */
1155 int
1156 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1157 			     uint64_t item_flags,
1158 			     uint8_t target_protocol,
1159 			     struct rte_flow_error *error)
1160 {
1161 	const struct rte_flow_item_icmp *mask = item->mask;
1162 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1163 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1164 				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1165 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1166 				      MLX5_FLOW_LAYER_OUTER_L4;
1167 	int ret;
1168 
1169 	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1170 		return rte_flow_error_set(error, EINVAL,
1171 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1172 					  "protocol filtering not compatible"
1173 					  " with ICMP layer");
1174 	if (!(item_flags & l3m))
1175 		return rte_flow_error_set(error, EINVAL,
1176 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1177 					  "IPv4 is mandatory to filter"
1178 					  " on ICMP");
1179 	if (item_flags & l4m)
1180 		return rte_flow_error_set(error, EINVAL,
1181 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1182 					  "multiple L4 layers not supported");
1183 	if (!mask)
1184 		mask = &rte_flow_item_icmp_mask;
1185 	ret = mlx5_flow_item_acceptable
1186 		(item, (const uint8_t *)mask,
1187 		 (const uint8_t *)&rte_flow_item_icmp_mask,
1188 		 sizeof(struct rte_flow_item_icmp), error);
1189 	if (ret < 0)
1190 		return ret;
1191 	return 0;
1192 }
1193 
1194 /**
1195  * Validate Ethernet item.
1196  *
1197  * @param[in] item
1198  *   Item specification.
1199  * @param[in] item_flags
1200  *   Bit-fields that hold the items detected until now.
1201  * @param[out] error
1202  *   Pointer to error structure.
1203  *
1204  * @return
1205  *   0 on success, a negative errno value otherwise and rte_errno is set.
1206  */
1207 int
1208 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1209 			    uint64_t item_flags,
1210 			    struct rte_flow_error *error)
1211 {
1212 	const struct rte_flow_item_eth *mask = item->mask;
1213 	const struct rte_flow_item_eth nic_mask = {
1214 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1215 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1216 		.type = RTE_BE16(0xffff),
1217 	};
1218 	int ret;
1219 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1220 	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
1221 				       MLX5_FLOW_LAYER_OUTER_L2;
1222 
1223 	if (item_flags & ethm)
1224 		return rte_flow_error_set(error, ENOTSUP,
1225 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1226 					  "multiple L2 layers not supported");
1227 	if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))
1228 		return rte_flow_error_set(error, EINVAL,
1229 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1230 					  "inner L2 layer should not "
1231 					  "follow inner L3 layers");
1232 	if (!mask)
1233 		mask = &rte_flow_item_eth_mask;
1234 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1235 					(const uint8_t *)&nic_mask,
1236 					sizeof(struct rte_flow_item_eth),
1237 					error);
1238 	return ret;
1239 }
1240 
1241 /**
1242  * Validate VLAN item.
1243  *
1244  * @param[in] item
1245  *   Item specification.
1246  * @param[in] item_flags
1247  *   Bit-fields that hold the items detected until now.
1248  * @param[in] dev
1249  *   Ethernet device flow is being created on.
1250  * @param[out] error
1251  *   Pointer to error structure.
1252  *
1253  * @return
1254  *   0 on success, a negative errno value otherwise and rte_errno is set.
1255  */
1256 int
1257 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1258 			     uint64_t item_flags,
1259 			     struct rte_eth_dev *dev,
1260 			     struct rte_flow_error *error)
1261 {
1262 	const struct rte_flow_item_vlan *spec = item->spec;
1263 	const struct rte_flow_item_vlan *mask = item->mask;
1264 	const struct rte_flow_item_vlan nic_mask = {
1265 		.tci = RTE_BE16(UINT16_MAX),
1266 		.inner_type = RTE_BE16(UINT16_MAX),
1267 	};
1268 	uint16_t vlan_tag = 0;
1269 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1270 	int ret;
1271 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1272 					MLX5_FLOW_LAYER_INNER_L4) :
1273 				       (MLX5_FLOW_LAYER_OUTER_L3 |
1274 					MLX5_FLOW_LAYER_OUTER_L4);
1275 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1276 					MLX5_FLOW_LAYER_OUTER_VLAN;
1277 
1278 	const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1279 				      MLX5_FLOW_LAYER_OUTER_L2;
1280 	if (item_flags & vlanm)
1281 		return rte_flow_error_set(error, EINVAL,
1282 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1283 					  "multiple VLAN layers not supported");
1284 	else if ((item_flags & l34m) != 0)
1285 		return rte_flow_error_set(error, EINVAL,
1286 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1287 					  "L2 layer cannot follow L3/L4 layer");
1288 	else if ((item_flags & l2m) == 0)
1289 		return rte_flow_error_set(error, EINVAL,
1290 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1291 					  "no L2 layer before VLAN");
1292 	if (!mask)
1293 		mask = &rte_flow_item_vlan_mask;
1294 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1295 					(const uint8_t *)&nic_mask,
1296 					sizeof(struct rte_flow_item_vlan),
1297 					error);
1298 	if (ret)
1299 		return ret;
1300 	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1301 		struct mlx5_priv *priv = dev->data->dev_private;
1302 
1303 		if (priv->vmwa_context) {
1304 			/*
1305 			 * A non-NULL context means we run in a virtual machine
1306 			 * with SR-IOV enabled, and a VLAN interface has to be
1307 			 * created so the hypervisor sets up the E-Switch vport
1308 			 * context correctly. Creating multiple VLAN interfaces
1309 			 * is avoided, so a VLAN tag mask cannot be supported.
1310 			 */
1311 			return rte_flow_error_set(error, EINVAL,
1312 						  RTE_FLOW_ERROR_TYPE_ITEM,
1313 						  item,
1314 						  "VLAN tag mask is not"
1315 						  " supported in virtual"
1316 						  " environment");
1317 		}
1318 	}
1319 	if (spec) {
1320 		vlan_tag = spec->tci;
1321 		vlan_tag &= mask->tci;
1322 	}
1323 	/*
1324 	 * From the Verbs perspective, an empty VLAN is equivalent
1325 	 * to a packet without a VLAN layer.
1326 	 */
1327 	if (!vlan_tag)
1328 		return rte_flow_error_set(error, EINVAL,
1329 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1330 					  item->spec,
1331 					  "VLAN cannot be empty");
1332 	return 0;
1333 }
1334 
1335 /**
1336  * Validate IPV4 item.
1337  *
1338  * @param[in] item
1339  *   Item specification.
1340  * @param[in] item_flags
1341  *   Bit-fields that hold the items detected until now.
1342  * @param[in] acc_mask
1343  *   Acceptable mask; if NULL, the default internal mask is used to check
1344  *   whether the item fields are supported.
1345  * @param[out] error
1346  *   Pointer to error structure.
1347  *
1348  * @return
1349  *   0 on success, a negative errno value otherwise and rte_errno is set.
1350  */
1351 int
1352 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
1353 			     uint64_t item_flags,
1354 			     const struct rte_flow_item_ipv4 *acc_mask,
1355 			     struct rte_flow_error *error)
1356 {
1357 	const struct rte_flow_item_ipv4 *mask = item->mask;
1358 	const struct rte_flow_item_ipv4 *spec = item->spec;
1359 	const struct rte_flow_item_ipv4 nic_mask = {
1360 		.hdr = {
1361 			.src_addr = RTE_BE32(0xffffffff),
1362 			.dst_addr = RTE_BE32(0xffffffff),
1363 			.type_of_service = 0xff,
1364 			.next_proto_id = 0xff,
1365 		},
1366 	};
1367 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1368 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1369 				      MLX5_FLOW_LAYER_OUTER_L3;
1370 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1371 				      MLX5_FLOW_LAYER_OUTER_L4;
1372 	int ret;
1373 	uint8_t next_proto = 0xFF;
1374 
1375 	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
1376 		if (mask && spec)
1377 			next_proto = mask->hdr.next_proto_id &
1378 				     spec->hdr.next_proto_id;
1379 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
1380 			return rte_flow_error_set(error, EINVAL,
1381 						  RTE_FLOW_ERROR_TYPE_ITEM,
1382 						  item,
1383 						  "multiple tunnel "
1384 						  "not supported");
1385 	}
1386 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
1387 		return rte_flow_error_set(error, EINVAL,
1388 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1389 					  "wrong tunnel type - IPv6 specified "
1390 					  "but IPv4 item provided");
1391 	if (item_flags & l3m)
1392 		return rte_flow_error_set(error, ENOTSUP,
1393 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1394 					  "multiple L3 layers not supported");
1395 	else if (item_flags & l4m)
1396 		return rte_flow_error_set(error, EINVAL,
1397 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1398 					  "L3 cannot follow an L4 layer.");
1399 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
1400 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
1401 		return rte_flow_error_set(error, EINVAL,
1402 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1403 					  "L3 cannot follow an NVGRE layer.");
1404 	else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
1405 		return rte_flow_error_set(error, EINVAL,
1406 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1407 					  "no L2 layer before IPV4");
1408 	if (!mask)
1409 		mask = &rte_flow_item_ipv4_mask;
1410 	else if (mask->hdr.next_proto_id != 0 &&
1411 		 mask->hdr.next_proto_id != 0xff)
1412 		return rte_flow_error_set(error, EINVAL,
1413 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1414 					  "partial mask is not supported"
1415 					  " for protocol");
1416 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1417 					acc_mask ? (const uint8_t *)acc_mask
1418 						 : (const uint8_t *)&nic_mask,
1419 					sizeof(struct rte_flow_item_ipv4),
1420 					error);
1421 	if (ret < 0)
1422 		return ret;
1423 	return 0;
1424 }
1425 
1426 /**
1427  * Validate IPV6 item.
1428  *
1429  * @param[in] item
1430  *   Item specification.
1431  * @param[in] item_flags
1432  *   Bit-fields that hold the items detected until now.
1433  * @param[in] acc_mask
1434  *   Acceptable mask; if NULL, the default internal mask is used to check
1435  *   whether the item fields are supported.
1436  * @param[out] error
1437  *   Pointer to error structure.
1438  *
1439  * @return
1440  *   0 on success, a negative errno value otherwise and rte_errno is set.
1441  */
1442 int
1443 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
1444 			     uint64_t item_flags,
1445 			     const struct rte_flow_item_ipv6 *acc_mask,
1446 			     struct rte_flow_error *error)
1447 {
1448 	const struct rte_flow_item_ipv6 *mask = item->mask;
1449 	const struct rte_flow_item_ipv6 *spec = item->spec;
1450 	const struct rte_flow_item_ipv6 nic_mask = {
1451 		.hdr = {
1452 			.src_addr =
1453 				"\xff\xff\xff\xff\xff\xff\xff\xff"
1454 				"\xff\xff\xff\xff\xff\xff\xff\xff",
1455 			.dst_addr =
1456 				"\xff\xff\xff\xff\xff\xff\xff\xff"
1457 				"\xff\xff\xff\xff\xff\xff\xff\xff",
1458 			.vtc_flow = RTE_BE32(0xffffffff),
1459 			.proto = 0xff,
1460 			.hop_limits = 0xff,
1461 		},
1462 	};
1463 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1464 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1465 				      MLX5_FLOW_LAYER_OUTER_L3;
1466 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1467 				      MLX5_FLOW_LAYER_OUTER_L4;
1468 	int ret;
1469 	uint8_t next_proto = 0xFF;
1470 
1471 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
1472 		if (mask && spec)
1473 			next_proto = mask->hdr.proto & spec->hdr.proto;
1474 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
1475 			return rte_flow_error_set(error, EINVAL,
1476 						  RTE_FLOW_ERROR_TYPE_ITEM,
1477 						  item,
1478 						  "multiple tunnel "
1479 						  "not supported");
1480 	}
1481 	if (item_flags & MLX5_FLOW_LAYER_IPIP)
1482 		return rte_flow_error_set(error, EINVAL,
1483 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1484 					  "wrong tunnel type - IPv4 specified "
1485 					  "but IPv6 item provided");
1486 	if (item_flags & l3m)
1487 		return rte_flow_error_set(error, ENOTSUP,
1488 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1489 					  "multiple L3 layers not supported");
1490 	else if (item_flags & l4m)
1491 		return rte_flow_error_set(error, EINVAL,
1492 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1493 					  "L3 cannot follow an L4 layer.");
1494 	else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
1495 		  !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
1496 		return rte_flow_error_set(error, EINVAL,
1497 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1498 					  "L3 cannot follow an NVGRE layer.");
1499 	else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
1500 		return rte_flow_error_set(error, EINVAL,
1501 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1502 					  "no L2 layer before IPV6");
1503 	if (!mask)
1504 		mask = &rte_flow_item_ipv6_mask;
1505 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1506 					acc_mask ? (const uint8_t *)acc_mask
1507 						 : (const uint8_t *)&nic_mask,
1508 					sizeof(struct rte_flow_item_ipv6),
1509 					error);
1510 	if (ret < 0)
1511 		return ret;
1512 	return 0;
1513 }
1514 
1515 /**
1516  * Validate UDP item.
1517  *
1518  * @param[in] item
1519  *   Item specification.
1520  * @param[in] item_flags
1521  *   Bit-fields that hold the items detected until now.
1522  * @param[in] target_protocol
1523  *   The next protocol in the previous item.
1526  * @param[out] error
1527  *   Pointer to error structure.
1528  *
1529  * @return
1530  *   0 on success, a negative errno value otherwise and rte_errno is set.
1531  */
1532 int
1533 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1534 			    uint64_t item_flags,
1535 			    uint8_t target_protocol,
1536 			    struct rte_flow_error *error)
1537 {
1538 	const struct rte_flow_item_udp *mask = item->mask;
1539 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1540 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1541 				      MLX5_FLOW_LAYER_OUTER_L3;
1542 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1543 				      MLX5_FLOW_LAYER_OUTER_L4;
1544 	int ret;
1545 
1546 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
1547 		return rte_flow_error_set(error, EINVAL,
1548 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1549 					  "protocol filtering not compatible"
1550 					  " with UDP layer");
1551 	if (!(item_flags & l3m))
1552 		return rte_flow_error_set(error, EINVAL,
1553 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1554 					  "L3 is mandatory to filter on L4");
1555 	if (item_flags & l4m)
1556 		return rte_flow_error_set(error, EINVAL,
1557 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1558 					  "multiple L4 layers not supported");
1559 	if (!mask)
1560 		mask = &rte_flow_item_udp_mask;
1561 	ret = mlx5_flow_item_acceptable
1562 		(item, (const uint8_t *)mask,
1563 		 (const uint8_t *)&rte_flow_item_udp_mask,
1564 		 sizeof(struct rte_flow_item_udp), error);
1565 	if (ret < 0)
1566 		return ret;
1567 	return 0;
1568 }
1569 
1570 /**
1571  * Validate TCP item.
1572  *
1573  * @param[in] item
1574  *   Item specification.
1575  * @param[in] item_flags
1576  *   Bit-fields that hold the items detected until now.
1577  * @param[in] target_protocol
1578  *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
1579  * @param[out] error
1580  *   Pointer to error structure.
1581  *
1582  * @return
1583  *   0 on success, a negative errno value otherwise and rte_errno is set.
1584  */
1585 int
1586 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
1587 			    uint64_t item_flags,
1588 			    uint8_t target_protocol,
1589 			    const struct rte_flow_item_tcp *flow_mask,
1590 			    struct rte_flow_error *error)
1591 {
1592 	const struct rte_flow_item_tcp *mask = item->mask;
1593 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1594 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1595 				      MLX5_FLOW_LAYER_OUTER_L3;
1596 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1597 				      MLX5_FLOW_LAYER_OUTER_L4;
1598 	int ret;
1599 
1600 	assert(flow_mask);
1601 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
1602 		return rte_flow_error_set(error, EINVAL,
1603 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1604 					  "protocol filtering not compatible"
1605 					  " with TCP layer");
1606 	if (!(item_flags & l3m))
1607 		return rte_flow_error_set(error, EINVAL,
1608 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1609 					  "L3 is mandatory to filter on L4");
1610 	if (item_flags & l4m)
1611 		return rte_flow_error_set(error, EINVAL,
1612 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1613 					  "multiple L4 layers not supported");
1614 	if (!mask)
1615 		mask = &rte_flow_item_tcp_mask;
1616 	ret = mlx5_flow_item_acceptable
1617 		(item, (const uint8_t *)mask,
1618 		 (const uint8_t *)flow_mask,
1619 		 sizeof(struct rte_flow_item_tcp), error);
1620 	if (ret < 0)
1621 		return ret;
1622 	return 0;
1623 }
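
/*
 * Illustrative sketch, not part of the original file: a driver-side TCP
 * mask limited to the port fields, suitable as the flow_mask argument of
 * the check above.  The exact mask used by the Verbs or DV paths may
 * differ.
 */
static const struct rte_flow_item_tcp example_tcp_flow_mask = {
	.hdr = {
		.src_port = RTE_BE16(UINT16_MAX),
		.dst_port = RTE_BE16(UINT16_MAX),
	},
};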
1624 
1625 /**
1626  * Validate VXLAN item.
1627  *
1628  * @param[in] item
1629  *   Item specification.
1630  * @param[in] item_flags
1631  *   Bit-fields that hold the items detected until now.
1634  * @param[out] error
1635  *   Pointer to error structure.
1636  *
1637  * @return
1638  *   0 on success, a negative errno value otherwise and rte_errno is set.
1639  */
1640 int
1641 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1642 			      uint64_t item_flags,
1643 			      struct rte_flow_error *error)
1644 {
1645 	const struct rte_flow_item_vxlan *spec = item->spec;
1646 	const struct rte_flow_item_vxlan *mask = item->mask;
1647 	int ret;
1648 	union vni {
1649 		uint32_t vlan_id;
1650 		uint8_t vni[4];
1651 	} id = { .vlan_id = 0, };
1652 	uint32_t vlan_id = 0;
1653 
1655 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1656 		return rte_flow_error_set(error, ENOTSUP,
1657 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1658 					  "multiple tunnel layers not"
1659 					  " supported");
1660 	/*
1661 	 * Verify only UDPv4 is present as defined in
1662 	 * https://tools.ietf.org/html/rfc7348
1663 	 */
1664 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1665 		return rte_flow_error_set(error, EINVAL,
1666 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1667 					  "no outer UDP layer found");
1668 	if (!mask)
1669 		mask = &rte_flow_item_vxlan_mask;
1670 	ret = mlx5_flow_item_acceptable
1671 		(item, (const uint8_t *)mask,
1672 		 (const uint8_t *)&rte_flow_item_vxlan_mask,
1673 		 sizeof(struct rte_flow_item_vxlan),
1674 		 error);
1675 	if (ret < 0)
1676 		return ret;
1677 	if (spec) {
1678 		memcpy(&id.vni[1], spec->vni, 3);
1679 		vlan_id = id.vlan_id;
1680 		memcpy(&id.vni[1], mask->vni, 3);
1681 		vlan_id &= id.vlan_id;
1682 	}
1683 	/*
1684 	 * Tunnel id 0 is equivalent to not adding a VXLAN layer, if
1685 	 * only this layer is defined in the Verbs specification it is
1686 	 * interpreted as wildcard and all packets will match this
1687 	 * rule, if it follows a full stack layer (ex: eth / ipv4 /
1688 	 * udp), all packets matching the layers before will also
1689 	 * match this rule.  To avoid such situation, VNI 0 is
1690 	 * currently refused.
1691 	 */
1692 	if (!vlan_id)
1693 		return rte_flow_error_set(error, ENOTSUP,
1694 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1695 					  "VXLAN vni cannot be 0");
1696 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1697 		return rte_flow_error_set(error, ENOTSUP,
1698 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1699 					  "VXLAN tunnel must be fully defined");
1700 	return 0;
1701 }
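
/*
 * Illustrative sketch, not part of the original file: folding the 24-bit
 * VNI bytes of a VXLAN spec into a single 32-bit value, as done with the
 * "union vni" trick above.  The value is only meaningful for the non-zero
 * check; it is not the VNI in host byte order.
 */
static uint32_t
example_vxlan_vni_to_u32(const struct rte_flow_item_vxlan *spec)
{
	union {
		uint32_t val;
		uint8_t vni[4];
	} id = { .val = 0, };

	memcpy(&id.vni[1], spec->vni, 3);
	return id.val;
}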
1702 
1703 /**
1704  * Validate VXLAN_GPE item.
1705  *
1706  * @param[in] item
1707  *   Item specification.
1708  * @param[in] item_flags
1709  *   Bit-fields that hold the items detected until now.
1710  * @param[in] dev
1711  *   Pointer to the Ethernet device structure.
1714  * @param[out] error
1715  *   Pointer to error structure.
1716  *
1717  * @return
1718  *   0 on success, a negative errno value otherwise and rte_errno is set.
1719  */
1720 int
1721 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1722 				  uint64_t item_flags,
1723 				  struct rte_eth_dev *dev,
1724 				  struct rte_flow_error *error)
1725 {
1726 	struct mlx5_priv *priv = dev->data->dev_private;
1727 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1728 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1729 	int ret;
1730 	union vni {
1731 		uint32_t vlan_id;
1732 		uint8_t vni[4];
1733 	} id = { .vlan_id = 0, };
1734 	uint32_t vlan_id = 0;
1735 
1736 	if (!priv->config.l3_vxlan_en)
1737 		return rte_flow_error_set(error, ENOTSUP,
1738 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1739 					  "L3 VXLAN is not enabled by device"
1740 					  " parameter and/or not configured in"
1741 					  " firmware");
1742 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1743 		return rte_flow_error_set(error, ENOTSUP,
1744 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1745 					  "multiple tunnel layers not"
1746 					  " supported");
1747 	/*
1748 	 * Verify an outer UDP layer is present; like VXLAN (RFC 7348,
1749 	 * https://tools.ietf.org/html/rfc7348), VXLAN-GPE is carried over UDP.
1750 	 */
1751 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1752 		return rte_flow_error_set(error, EINVAL,
1753 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1754 					  "no outer UDP layer found");
1755 	if (!mask)
1756 		mask = &rte_flow_item_vxlan_gpe_mask;
1757 	ret = mlx5_flow_item_acceptable
1758 		(item, (const uint8_t *)mask,
1759 		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1760 		 sizeof(struct rte_flow_item_vxlan_gpe),
1761 		 error);
1762 	if (ret < 0)
1763 		return ret;
1764 	if (spec) {
1765 		if (spec->protocol)
1766 			return rte_flow_error_set(error, ENOTSUP,
1767 						  RTE_FLOW_ERROR_TYPE_ITEM,
1768 						  item,
1769 						  "VxLAN-GPE protocol"
1770 						  " not supported");
1771 		memcpy(&id.vni[1], spec->vni, 3);
1772 		vlan_id = id.vlan_id;
1773 		memcpy(&id.vni[1], mask->vni, 3);
1774 		vlan_id &= id.vlan_id;
1775 	}
1776 	/*
1777 	 * Tunnel ID 0 is equivalent to not adding a VXLAN layer: if only this
1778 	 * layer is defined in the Verbs specification, it is interpreted as a
1779 	 * wildcard and all packets will match this rule; if it follows a full
1780 	 * stack layer (e.g. eth / ipv4 / udp), all packets matching the
1781 	 * preceding layers will also match this rule.  To avoid such a
1782 	 * situation, VNI 0 is currently refused.
1783 	 */
1784 	if (!vlan_id)
1785 		return rte_flow_error_set(error, ENOTSUP,
1786 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1787 					  "VXLAN-GPE vni cannot be 0");
1788 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1789 		return rte_flow_error_set(error, ENOTSUP,
1790 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1791 					  "VXLAN-GPE tunnel must be fully"
1792 					  " defined");
1793 	return 0;
1794 }
1795 /**
1796  * Validate GRE Key item.
1797  *
1798  * @param[in] item
1799  *   Item specification.
1800  * @param[in] item_flags
1801  *   Bit flags to mark detected items.
1802  * @param[in] gre_item
1803  *   Pointer to the GRE item.
1804  * @param[out] error
1805  *   Pointer to error structure.
1806  *
1807  * @return
1808  *   0 on success, a negative errno value otherwise and rte_errno is set.
1809  */
1810 int
1811 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
1812 				uint64_t item_flags,
1813 				const struct rte_flow_item *gre_item,
1814 				struct rte_flow_error *error)
1815 {
1816 	const rte_be32_t *mask = item->mask;
1817 	int ret = 0;
1818 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
1819 	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
1820 	const struct rte_flow_item_gre *gre_mask = gre_item->mask;
1821 
1822 	if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
1823 		return rte_flow_error_set(error, ENOTSUP,
1824 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1825 					  "Multiple GRE keys not supported");
1826 	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
1827 		return rte_flow_error_set(error, ENOTSUP,
1828 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1829 					  "No preceding GRE header");
1830 	if (item_flags & MLX5_FLOW_LAYER_INNER)
1831 		return rte_flow_error_set(error, ENOTSUP,
1832 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1833 					  "GRE key following a wrong item");
1834 	if (!gre_mask)
1835 		gre_mask = &rte_flow_item_gre_mask;
1836 	if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
1837 			 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
1838 		return rte_flow_error_set(error, EINVAL,
1839 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1840 					  "Key bit must be on");
1841 
1842 	if (!mask)
1843 		mask = &gre_key_default_mask;
1844 	ret = mlx5_flow_item_acceptable
1845 		(item, (const uint8_t *)mask,
1846 		 (const uint8_t *)&gre_key_default_mask,
1847 		 sizeof(rte_be32_t), error);
1848 	return ret;
1849 }
1850 
1851 /**
1852  * Validate GRE item.
1853  *
1854  * @param[in] item
1855  *   Item specification.
1856  * @param[in] item_flags
1857  *   Bit flags to mark detected items.
1858  * @param[in] target_protocol
1859  *   The next protocol in the previous item.
1860  * @param[out] error
1861  *   Pointer to error structure.
1862  *
1863  * @return
1864  *   0 on success, a negative errno value otherwise and rte_errno is set.
1865  */
1866 int
1867 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
1868 			    uint64_t item_flags,
1869 			    uint8_t target_protocol,
1870 			    struct rte_flow_error *error)
1871 {
1872 	const struct rte_flow_item_gre *spec __rte_unused = item->spec;
1873 	const struct rte_flow_item_gre *mask = item->mask;
1874 	int ret;
1875 	const struct rte_flow_item_gre nic_mask = {
1876 		.c_rsvd0_ver = RTE_BE16(0xB000),
1877 		.protocol = RTE_BE16(UINT16_MAX),
1878 	};
1879 
1880 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
1881 		return rte_flow_error_set(error, EINVAL,
1882 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1883 					  "protocol filtering not compatible"
1884 					  " with this GRE layer");
1885 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1886 		return rte_flow_error_set(error, ENOTSUP,
1887 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1888 					  "multiple tunnel layers not"
1889 					  " supported");
1890 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
1891 		return rte_flow_error_set(error, ENOTSUP,
1892 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1893 					  "L3 Layer is missing");
1894 	if (!mask)
1895 		mask = &rte_flow_item_gre_mask;
1896 	ret = mlx5_flow_item_acceptable
1897 		(item, (const uint8_t *)mask,
1898 		 (const uint8_t *)&nic_mask,
1899 		 sizeof(struct rte_flow_item_gre), error);
1900 	if (ret < 0)
1901 		return ret;
1902 #ifndef HAVE_MLX5DV_DR
1903 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
1904 	if (spec && (spec->protocol & mask->protocol))
1905 		return rte_flow_error_set(error, ENOTSUP,
1906 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1907 					  "without MPLS support the"
1908 					  " specification cannot be used for"
1909 					  " filtering");
1910 #endif
1911 #endif
1912 	return 0;
1913 }
1914 
1915 /**
1916  * Validate Geneve item.
1917  *
1918  * @param[in] item
1919  *   Item specification.
1920  * @param[in] item_flags
1921  *   Bit-fields that hold the items detected until now.
1922  * @param[in] dev
1923  *   Pointer to the Ethernet device structure.
1924  * @param[out] error
1925  *   Pointer to error structure.
1926  *
1927  * @return
1928  *   0 on success, a negative errno value otherwise and rte_errno is set.
1929  */
1931 int
1932 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
1933 			       uint64_t item_flags,
1934 			       struct rte_eth_dev *dev,
1935 			       struct rte_flow_error *error)
1936 {
1937 	struct mlx5_priv *priv = dev->data->dev_private;
1938 	const struct rte_flow_item_geneve *spec = item->spec;
1939 	const struct rte_flow_item_geneve *mask = item->mask;
1940 	int ret;
1941 	uint16_t gbhdr;
1942 	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
1943 			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
1944 	const struct rte_flow_item_geneve nic_mask = {
1945 		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
1946 		.vni = "\xff\xff\xff",
1947 		.protocol = RTE_BE16(UINT16_MAX),
1948 	};
1949 
1950 	if (!(priv->config.hca_attr.flex_parser_protocols &
1951 	      MLX5_HCA_FLEX_GENEVE_ENABLED) ||
1952 	    !priv->config.hca_attr.tunnel_stateless_geneve_rx)
1953 		return rte_flow_error_set(error, ENOTSUP,
1954 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1955 					  "L3 Geneve is not enabled by device"
1956 					  " parameter and/or not configured in"
1957 					  " firmware");
1958 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1959 		return rte_flow_error_set(error, ENOTSUP,
1960 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1961 					  "multiple tunnel layers not"
1962 					  " supported");
1963 	/*
1964 	 * Verify an outer UDP layer is present; like VXLAN, Geneve is
1965 	 * carried over UDP.
1966 	 */
1967 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1968 		return rte_flow_error_set(error, EINVAL,
1969 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1970 					  "no outer UDP layer found");
1971 	if (!mask)
1972 		mask = &rte_flow_item_geneve_mask;
1973 	ret = mlx5_flow_item_acceptable
1974 				  (item, (const uint8_t *)mask,
1975 				   (const uint8_t *)&nic_mask,
1976 				   sizeof(struct rte_flow_item_geneve), error);
1977 	if (ret)
1978 		return ret;
1979 	if (spec) {
1980 		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
1981 		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
1982 		     MLX5_GENEVE_CRITO_VAL(gbhdr) ||
1983 		     MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
1984 			return rte_flow_error_set(error, ENOTSUP,
1985 						  RTE_FLOW_ERROR_TYPE_ITEM,
1986 						  item,
1987 						  "Geneve protocol unsupported"
1988 						  " fields are being used");
1989 		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
1990 			return rte_flow_error_set
1991 					(error, ENOTSUP,
1992 					 RTE_FLOW_ERROR_TYPE_ITEM,
1993 					 item,
1994 					 "Unsupported Geneve options length");
1995 	}
1996 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1997 		return rte_flow_error_set
1998 				    (error, ENOTSUP,
1999 				     RTE_FLOW_ERROR_TYPE_ITEM, item,
2000 				     "Geneve tunnel must be fully defined");
2001 	return 0;
2002 }
2003 
2004 /**
2005  * Validate MPLS item.
2006  *
2007  * @param[in] dev
2008  *   Pointer to the rte_eth_dev structure.
2009  * @param[in] item
2010  *   Item specification.
2011  * @param[in] item_flags
2012  *   Bit-fields that holds the items detected until now.
2013  * @param[in] prev_layer
2014  *   The protocol layer indicated in previous item.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 int
2022 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2023 			     const struct rte_flow_item *item __rte_unused,
2024 			     uint64_t item_flags __rte_unused,
2025 			     uint64_t prev_layer __rte_unused,
2026 			     struct rte_flow_error *error)
2027 {
2028 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2029 	const struct rte_flow_item_mpls *mask = item->mask;
2030 	struct mlx5_priv *priv = dev->data->dev_private;
2031 	int ret;
2032 
2033 	if (!priv->config.mpls_en)
2034 		return rte_flow_error_set(error, ENOTSUP,
2035 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2036 					  "MPLS not supported or"
2037 					  " disabled in firmware"
2038 					  " configuration.");
2039 	/* MPLS over IP, UDP or GRE is allowed. */
2040 	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2041 			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
2042 			    MLX5_FLOW_LAYER_GRE)))
2043 		return rte_flow_error_set(error, EINVAL,
2044 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2045 					  "protocol filtering not compatible"
2046 					  " with MPLS layer");
2047 	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2048 	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2049 	    !(item_flags & MLX5_FLOW_LAYER_GRE))
2050 		return rte_flow_error_set(error, ENOTSUP,
2051 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2052 					  "multiple tunnel layers not"
2053 					  " supported");
2054 	if (!mask)
2055 		mask = &rte_flow_item_mpls_mask;
2056 	ret = mlx5_flow_item_acceptable
2057 		(item, (const uint8_t *)mask,
2058 		 (const uint8_t *)&rte_flow_item_mpls_mask,
2059 		 sizeof(struct rte_flow_item_mpls), error);
2060 	if (ret < 0)
2061 		return ret;
2062 	return 0;
2063 #endif
2064 	return rte_flow_error_set(error, ENOTSUP,
2065 				  RTE_FLOW_ERROR_TYPE_ITEM, item,
2066 				  "MPLS is not supported by Verbs, please"
2067 				  " update.");
2068 }
2069 
2070 /**
2071  * Validate NVGRE item.
2072  *
2073  * @param[in] item
2074  *   Item specification.
2075  * @param[in] item_flags
2076  *   Bit flags to mark detected items.
2077  * @param[in] target_protocol
2078  *   The next protocol in the previous item.
2079  * @param[out] error
2080  *   Pointer to error structure.
2081  *
2082  * @return
2083  *   0 on success, a negative errno value otherwise and rte_errno is set.
2084  */
2085 int
2086 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2087 			      uint64_t item_flags,
2088 			      uint8_t target_protocol,
2089 			      struct rte_flow_error *error)
2090 {
2091 	const struct rte_flow_item_nvgre *mask = item->mask;
2092 	int ret;
2093 
2094 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2095 		return rte_flow_error_set(error, EINVAL,
2096 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2097 					  "protocol filtering not compatible"
2098 					  " with this GRE layer");
2099 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2100 		return rte_flow_error_set(error, ENOTSUP,
2101 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2102 					  "multiple tunnel layers not"
2103 					  " supported");
2104 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2105 		return rte_flow_error_set(error, ENOTSUP,
2106 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2107 					  "L3 Layer is missing");
2108 	if (!mask)
2109 		mask = &rte_flow_item_nvgre_mask;
2110 	ret = mlx5_flow_item_acceptable
2111 		(item, (const uint8_t *)mask,
2112 		 (const uint8_t *)&rte_flow_item_nvgre_mask,
2113 		 sizeof(struct rte_flow_item_nvgre), error);
2114 	if (ret < 0)
2115 		return ret;
2116 	return 0;
2117 }
2118 
2119 static int
2120 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2121 		   const struct rte_flow_attr *attr __rte_unused,
2122 		   const struct rte_flow_item items[] __rte_unused,
2123 		   const struct rte_flow_action actions[] __rte_unused,
2124 		   bool external __rte_unused,
2125 		   struct rte_flow_error *error)
2126 {
2127 	return rte_flow_error_set(error, ENOTSUP,
2128 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2129 }
2130 
2131 static struct mlx5_flow *
2132 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
2133 		  const struct rte_flow_item items[] __rte_unused,
2134 		  const struct rte_flow_action actions[] __rte_unused,
2135 		  struct rte_flow_error *error)
2136 {
2137 	rte_flow_error_set(error, ENOTSUP,
2138 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2139 	return NULL;
2140 }
2141 
2142 static int
2143 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2144 		    struct mlx5_flow *dev_flow __rte_unused,
2145 		    const struct rte_flow_attr *attr __rte_unused,
2146 		    const struct rte_flow_item items[] __rte_unused,
2147 		    const struct rte_flow_action actions[] __rte_unused,
2148 		    struct rte_flow_error *error)
2149 {
2150 	return rte_flow_error_set(error, ENOTSUP,
2151 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2152 }
2153 
2154 static int
2155 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
2156 		struct rte_flow *flow __rte_unused,
2157 		struct rte_flow_error *error)
2158 {
2159 	return rte_flow_error_set(error, ENOTSUP,
2160 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2161 }
2162 
2163 static void
2164 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
2165 		 struct rte_flow *flow __rte_unused)
2166 {
2167 }
2168 
2169 static void
2170 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
2171 		  struct rte_flow *flow __rte_unused)
2172 {
2173 }
2174 
2175 static int
2176 flow_null_query(struct rte_eth_dev *dev __rte_unused,
2177 		struct rte_flow *flow __rte_unused,
2178 		const struct rte_flow_action *actions __rte_unused,
2179 		void *data __rte_unused,
2180 		struct rte_flow_error *error)
2181 {
2182 	return rte_flow_error_set(error, ENOTSUP,
2183 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2184 }
2185 
2186 /* Void driver to protect from null pointer reference. */
2187 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
2188 	.validate = flow_null_validate,
2189 	.prepare = flow_null_prepare,
2190 	.translate = flow_null_translate,
2191 	.apply = flow_null_apply,
2192 	.remove = flow_null_remove,
2193 	.destroy = flow_null_destroy,
2194 	.query = flow_null_query,
2195 };
2196 
2197 /**
2198  * Select flow driver type according to flow attributes and device
2199  * configuration.
2200  *
2201  * @param[in] dev
2202  *   Pointer to the dev structure.
2203  * @param[in] attr
2204  *   Pointer to the flow attributes.
2205  *
2206  * @return
2207  *   The selected flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
2208  */
2209 static enum mlx5_flow_drv_type
2210 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
2211 {
2212 	struct mlx5_priv *priv = dev->data->dev_private;
2213 	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
2214 
2215 	if (attr->transfer && priv->config.dv_esw_en)
2216 		type = MLX5_FLOW_TYPE_DV;
2217 	if (!attr->transfer)
2218 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
2219 						 MLX5_FLOW_TYPE_VERBS;
2220 	return type;
2221 }
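
/*
 * Illustrative sketch (not part of the driver logic): how the selection
 * above behaves for two common attribute sets, assuming both dv_esw_en
 * and dv_flow_en are enabled in the device configuration.
 *
 *   struct rte_flow_attr transfer_attr = { .transfer = 1 };
 *   struct rte_flow_attr ingress_attr = { .ingress = 1 };
 *
 *   flow_get_drv_type(dev, &transfer_attr); -> MLX5_FLOW_TYPE_DV
 *   flow_get_drv_type(dev, &ingress_attr);  -> MLX5_FLOW_TYPE_DV
 *
 * With dv_flow_en disabled, the non-transfer case falls back to
 * MLX5_FLOW_TYPE_VERBS instead.
 */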
2222 
2223 #define flow_get_drv_ops(type) flow_drv_ops[type]
2224 
2225 /**
2226  * Flow driver validation API. This abstracts calling driver specific functions.
2227  * The type of flow driver is determined according to flow attributes.
2228  *
2229  * @param[in] dev
2230  *   Pointer to the dev structure.
2231  * @param[in] attr
2232  *   Pointer to the flow attributes.
2233  * @param[in] items
2234  *   Pointer to the list of items.
2235  * @param[in] actions
2236  *   Pointer to the list of actions.
2237  * @param[in] external
2238  *   This flow rule is created by a request external to the PMD.
2239  * @param[out] error
2240  *   Pointer to the error structure.
2241  *
2242  * @return
2243  *   0 on success, a negative errno value otherwise and rte_errno is set.
2244  */
2245 static inline int
2246 flow_drv_validate(struct rte_eth_dev *dev,
2247 		  const struct rte_flow_attr *attr,
2248 		  const struct rte_flow_item items[],
2249 		  const struct rte_flow_action actions[],
2250 		  bool external, struct rte_flow_error *error)
2251 {
2252 	const struct mlx5_flow_driver_ops *fops;
2253 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2254 
2255 	fops = flow_get_drv_ops(type);
2256 	return fops->validate(dev, attr, items, actions, external, error);
2257 }
2258 
2259 /**
2260  * Flow driver preparation API. This abstracts calling driver specific
2261  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2262  * calculates the size of memory required for device flow, allocates the memory,
2263  * initializes the device flow and returns the pointer.
2264  *
2265  * @note
2266  *   This function initializes the device flow structure (dv or verbs) in
2267  *   struct mlx5_flow. However, it is the caller's responsibility to initialize
2268  *   the rest. For example, adding the returned device flow to the
2269  *   flow->dev_flows list and setting the backward reference to the flow must be
2270  *   done outside this function. The layers field is not filled either.
2271  *
2272  * @param[in] attr
2273  *   Pointer to the flow attributes.
2274  * @param[in] items
2275  *   Pointer to the list of items.
2276  * @param[in] actions
2277  *   Pointer to the list of actions.
2278  * @param[out] error
2279  *   Pointer to the error structure.
2280  *
2281  * @return
2282  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
2283  */
2284 static inline struct mlx5_flow *
2285 flow_drv_prepare(const struct rte_flow *flow,
2286 		 const struct rte_flow_attr *attr,
2287 		 const struct rte_flow_item items[],
2288 		 const struct rte_flow_action actions[],
2289 		 struct rte_flow_error *error)
2290 {
2291 	const struct mlx5_flow_driver_ops *fops;
2292 	enum mlx5_flow_drv_type type = flow->drv_type;
2293 
2294 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2295 	fops = flow_get_drv_ops(type);
2296 	return fops->prepare(attr, items, actions, error);
2297 }
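
/*
 * Illustrative caller sequence (a sketch only; see flow_list_create()
 * below for the canonical usage): the caller owns the steps the note
 * above leaves out, i.e. linking the returned device flow to its parent
 * and setting the backward reference before translation.
 *
 *   dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 *   if (!dev_flow)
 *           goto error;
 *   dev_flow->flow = flow;
 *   LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 *   ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
 */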
2298 
2299 /**
2300  * Flow driver translation API. This abstracts calling driver specific
2301  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2302  * translates a generic flow into a driver flow. flow_drv_prepare() must
2303  * precede.
2304  *
2305  * @note
2306  *   dev_flow->layers could be filled as a result of parsing during translation
2307  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
2308  *   if necessary. As a flow can have multiple dev_flows due to RSS expansion,
2309  *   flow->actions could be overwritten even though all the expanded dev_flows
2310  *   have the same actions.
2311  *
2312  * @param[in] dev
2313  *   Pointer to the rte_eth_dev structure.
2314  * @param[in, out] dev_flow
2315  *   Pointer to the mlx5 flow.
2316  * @param[in] attr
2317  *   Pointer to the flow attributes.
2318  * @param[in] items
2319  *   Pointer to the list of items.
2320  * @param[in] actions
2321  *   Pointer to the list of actions.
2322  * @param[out] error
2323  *   Pointer to the error structure.
2324  *
2325  * @return
2326  *   0 on success, a negative errno value otherwise and rte_errno is set.
2327  */
2328 static inline int
2329 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
2330 		   const struct rte_flow_attr *attr,
2331 		   const struct rte_flow_item items[],
2332 		   const struct rte_flow_action actions[],
2333 		   struct rte_flow_error *error)
2334 {
2335 	const struct mlx5_flow_driver_ops *fops;
2336 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
2337 
2338 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2339 	fops = flow_get_drv_ops(type);
2340 	return fops->translate(dev, dev_flow, attr, items, actions, error);
2341 }
2342 
2343 /**
2344  * Flow driver apply API. This abstracts calling driver specific functions.
2345  * Parent flow (rte_flow) should have driver type (drv_type). It applies
2346  * translated driver flows onto the device. flow_drv_translate() must precede.
2347  *
2348  * @param[in] dev
2349  *   Pointer to Ethernet device structure.
2350  * @param[in, out] flow
2351  *   Pointer to flow structure.
2352  * @param[out] error
2353  *   Pointer to error structure.
2354  *
2355  * @return
2356  *   0 on success, a negative errno value otherwise and rte_errno is set.
2357  */
2358 static inline int
2359 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2360 	       struct rte_flow_error *error)
2361 {
2362 	const struct mlx5_flow_driver_ops *fops;
2363 	enum mlx5_flow_drv_type type = flow->drv_type;
2364 
2365 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2366 	fops = flow_get_drv_ops(type);
2367 	return fops->apply(dev, flow, error);
2368 }
2369 
2370 /**
2371  * Flow driver remove API. This abstracts calling driver specific functions.
2372  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2373  * on device. All the resources of the flow should be freed by calling
2374  * flow_drv_destroy().
2375  *
2376  * @param[in] dev
2377  *   Pointer to Ethernet device.
2378  * @param[in, out] flow
2379  *   Pointer to flow structure.
2380  */
2381 static inline void
2382 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2383 {
2384 	const struct mlx5_flow_driver_ops *fops;
2385 	enum mlx5_flow_drv_type type = flow->drv_type;
2386 
2387 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2388 	fops = flow_get_drv_ops(type);
2389 	fops->remove(dev, flow);
2390 }
2391 
2392 /**
2393  * Flow driver destroy API. This abstracts calling driver specific functions.
2394  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2395  * on device and releases resources of the flow.
2396  *
2397  * @param[in] dev
2398  *   Pointer to Ethernet device.
2399  * @param[in, out] flow
2400  *   Pointer to flow structure.
2401  */
2402 static inline void
2403 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2404 {
2405 	const struct mlx5_flow_driver_ops *fops;
2406 	enum mlx5_flow_drv_type type = flow->drv_type;
2407 
2408 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2409 	fops = flow_get_drv_ops(type);
2410 	fops->destroy(dev, flow);
2411 }
2412 
2413 /**
2414  * Validate a flow supported by the NIC.
2415  *
2416  * @see rte_flow_validate()
2417  * @see rte_flow_ops
2418  */
2419 int
2420 mlx5_flow_validate(struct rte_eth_dev *dev,
2421 		   const struct rte_flow_attr *attr,
2422 		   const struct rte_flow_item items[],
2423 		   const struct rte_flow_action actions[],
2424 		   struct rte_flow_error *error)
2425 {
2426 	int ret;
2427 
2428 	ret = flow_drv_validate(dev, attr, items, actions, true, error);
2429 	if (ret < 0)
2430 		return ret;
2431 	return 0;
2432 }
2433 
2434 /**
2435  * Get RSS action from the action list.
2436  *
2437  * @param[in] actions
2438  *   Pointer to the list of actions.
2439  *
2440  * @return
2441  *   Pointer to the RSS action if it exists, NULL otherwise.
2442  */
2443 static const struct rte_flow_action_rss *
2444 flow_get_rss_action(const struct rte_flow_action actions[])
2445 {
2446 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2447 		switch (actions->type) {
2448 		case RTE_FLOW_ACTION_TYPE_RSS:
2449 			return (const struct rte_flow_action_rss *)
2450 			       actions->conf;
2451 		default:
2452 			break;
2453 		}
2454 	}
2455 	return NULL;
2456 }
2457 
2458 static unsigned int
2459 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
2460 {
2461 	const struct rte_flow_item *item;
2462 	unsigned int has_vlan = 0;
2463 
2464 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2465 		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2466 			has_vlan = 1;
2467 			break;
2468 		}
2469 	}
2470 	if (has_vlan)
2471 		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2472 				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2473 	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2474 			       MLX5_EXPANSION_ROOT_OUTER;
2475 }
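
/*
 * Examples of the mapping above (illustrative): a pattern containing a
 * VLAN item with RSS level 0 or 1 starts the expansion from
 * MLX5_EXPANSION_ROOT_ETH_VLAN, the same pattern with level 2 (inner
 * RSS) starts from MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, and patterns
 * without a VLAN item use MLX5_EXPANSION_ROOT or
 * MLX5_EXPANSION_ROOT_OUTER respectively.
 */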
2476 
2477 /**
2478  * Create a flow and add it to @p list.
2479  *
2480  * @param dev
2481  *   Pointer to Ethernet device.
2482  * @param list
2483  *   Pointer to a TAILQ flow list.
2484  * @param[in] attr
2485  *   Flow rule attributes.
2486  * @param[in] items
2487  *   Pattern specification (list terminated by the END pattern item).
2488  * @param[in] actions
2489  *   Associated actions (list terminated by the END action).
2490  * @param[in] external
2491  *   This flow rule is created by a request external to the PMD.
2492  * @param[out] error
2493  *   Perform verbose error reporting if not NULL.
2494  *
2495  * @return
2496  *   A flow on success, NULL otherwise and rte_errno is set.
2497  */
2498 static struct rte_flow *
2499 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2500 		 const struct rte_flow_attr *attr,
2501 		 const struct rte_flow_item items[],
2502 		 const struct rte_flow_action actions[],
2503 		 bool external, struct rte_flow_error *error)
2504 {
2505 	struct rte_flow *flow = NULL;
2506 	struct mlx5_flow *dev_flow;
2507 	const struct rte_flow_action_rss *rss;
2508 	union {
2509 		struct rte_flow_expand_rss buf;
2510 		uint8_t buffer[2048];
2511 	} expand_buffer;
2512 	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
2513 	int ret;
2514 	uint32_t i;
2515 	uint32_t flow_size;
2516 
2517 	ret = flow_drv_validate(dev, attr, items, actions, external, error);
2518 	if (ret < 0)
2519 		return NULL;
2520 	flow_size = sizeof(struct rte_flow);
2521 	rss = flow_get_rss_action(actions);
2522 	if (rss)
2523 		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
2524 					    sizeof(void *));
2525 	else
2526 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
2527 	flow = rte_calloc(__func__, 1, flow_size, 0);
2528 	if (!flow) {
2529 		rte_errno = ENOMEM;
2530 		return NULL;
2531 	}
2532 	flow->drv_type = flow_get_drv_type(dev, attr);
2533 	flow->ingress = attr->ingress;
2534 	flow->transfer = attr->transfer;
2535 	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
2536 	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
2537 	flow->queue = (void *)(flow + 1);
2538 	LIST_INIT(&flow->dev_flows);
2539 	if (rss && rss->types) {
2540 		unsigned int graph_root;
2541 
2542 		graph_root = find_graph_root(items, rss->level);
2543 		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
2544 					  items, rss->types,
2545 					  mlx5_support_expansion,
2546 					  graph_root);
2547 		assert(ret > 0 &&
2548 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
2549 	} else {
2550 		buf->entries = 1;
2551 		buf->entry[0].pattern = (void *)(uintptr_t)items;
2552 	}
2553 	for (i = 0; i < buf->entries; ++i) {
2554 		dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
2555 					    actions, error);
2556 		if (!dev_flow)
2557 			goto error;
2558 		dev_flow->flow = flow;
2559 		dev_flow->external = external;
2560 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
2561 		ret = flow_drv_translate(dev, dev_flow, attr,
2562 					 buf->entry[i].pattern,
2563 					 actions, error);
2564 		if (ret < 0)
2565 			goto error;
2566 	}
2567 	if (dev->data->dev_started) {
2568 		ret = flow_drv_apply(dev, flow, error);
2569 		if (ret < 0)
2570 			goto error;
2571 	}
2572 	TAILQ_INSERT_TAIL(list, flow, next);
2573 	flow_rxq_flags_set(dev, flow);
2574 	return flow;
2575 error:
2576 	ret = rte_errno; /* Save rte_errno before cleanup. */
2577 	assert(flow);
2578 	flow_drv_destroy(dev, flow);
2579 	rte_free(flow);
2580 	rte_errno = ret; /* Restore rte_errno. */
2581 	return NULL;
2582 }
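
/*
 * RSS expansion example (illustrative sketch): with the pattern
 * "eth / ipv4 / end" and rss->types including UDP, rte_flow_expand_rss()
 * above would typically produce two entries, "eth / ipv4" and
 * "eth / ipv4 / udp"; one mlx5_flow is then prepared, translated and
 * linked into flow->dev_flows for each entry.
 */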
2583 
2584 /**
2585  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
2586  * incoming packets to table 1.
2587  *
2588  * Other flow rules, requested for group n, will be created in
2589  * e-switch table n+1.
2590  * A jump action targeting e-switch group n will jump to e-switch table n+1.
2591  *
2592  * Used when working in switchdev mode, to utilise advantages of table 1
2593  * and above.
2594  *
2595  * @param dev
2596  *   Pointer to Ethernet device.
2597  *
2598  * @return
2599  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
2600  */
2601 struct rte_flow *
2602 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
2603 {
2604 	const struct rte_flow_attr attr = {
2605 		.group = 0,
2606 		.priority = 0,
2607 		.ingress = 1,
2608 		.egress = 0,
2609 		.transfer = 1,
2610 	};
2611 	const struct rte_flow_item pattern = {
2612 		.type = RTE_FLOW_ITEM_TYPE_END,
2613 	};
2614 	struct rte_flow_action_jump jump = {
2615 		.group = 1,
2616 	};
2617 	const struct rte_flow_action actions[] = {
2618 		{
2619 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
2620 			.conf = &jump,
2621 		},
2622 		{
2623 			.type = RTE_FLOW_ACTION_TYPE_END,
2624 		},
2625 	};
2626 	struct mlx5_priv *priv = dev->data->dev_private;
2627 	struct rte_flow_error error;
2628 
2629 	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
2630 				actions, false, &error);
2631 }
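
/*
 * Resulting layout example (illustrative): with the rule above installed,
 * a user rule requested on e-switch group 0 is created in table 1, a rule
 * on group 1 in table 2, and a jump action targeting group 1 is translated
 * into a jump to table 2.
 */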
2632 
2633 /**
2634  * Create a flow.
2635  *
2636  * @see rte_flow_create()
2637  * @see rte_flow_ops
2638  */
2639 struct rte_flow *
2640 mlx5_flow_create(struct rte_eth_dev *dev,
2641 		 const struct rte_flow_attr *attr,
2642 		 const struct rte_flow_item items[],
2643 		 const struct rte_flow_action actions[],
2644 		 struct rte_flow_error *error)
2645 {
2646 	struct mlx5_priv *priv = dev->data->dev_private;
2647 
2648 	return flow_list_create(dev, &priv->flows,
2649 				attr, items, actions, true, error);
2650 }
2651 
2652 /**
2653  * Destroy a flow in a list.
2654  *
2655  * @param dev
2656  *   Pointer to Ethernet device.
2657  * @param list
2658  *   Pointer to a TAILQ flow list.
2659  * @param[in] flow
2660  *   Flow to destroy.
2661  */
2662 static void
2663 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2664 		  struct rte_flow *flow)
2665 {
2666 	/*
2667 	 * Update RX queue flags only if port is started, otherwise it is
2668 	 * already clean.
2669 	 */
2670 	if (dev->data->dev_started)
2671 		flow_rxq_flags_trim(dev, flow);
2672 	flow_drv_destroy(dev, flow);
2673 	TAILQ_REMOVE(list, flow, next);
2674 	rte_free(flow->fdir);
2675 	rte_free(flow);
2676 }
2677 
2678 /**
2679  * Destroy all flows.
2680  *
2681  * @param dev
2682  *   Pointer to Ethernet device.
2683  * @param list
2684  *   Pointer to a TAILQ flow list.
2685  */
2686 void
2687 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
2688 {
2689 	while (!TAILQ_EMPTY(list)) {
2690 		struct rte_flow *flow;
2691 
2692 		flow = TAILQ_FIRST(list);
2693 		flow_list_destroy(dev, list, flow);
2694 	}
2695 }
2696 
2697 /**
2698  * Remove all flows.
2699  *
2700  * @param dev
2701  *   Pointer to Ethernet device.
2702  * @param list
2703  *   Pointer to a TAILQ flow list.
2704  */
2705 void
2706 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
2707 {
2708 	struct rte_flow *flow;
2709 
2710 	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
2711 		flow_drv_remove(dev, flow);
2712 	flow_rxq_flags_clear(dev);
2713 }
2714 
2715 /**
2716  * Add all flows.
2717  *
2718  * @param dev
2719  *   Pointer to Ethernet device.
2720  * @param list
2721  *   Pointer to a TAILQ flow list.
2722  *
2723  * @return
2724  *   0 on success, a negative errno value otherwise and rte_errno is set.
2725  */
2726 int
2727 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
2728 {
2729 	struct rte_flow *flow;
2730 	struct rte_flow_error error;
2731 	int ret = 0;
2732 
2733 	TAILQ_FOREACH(flow, list, next) {
2734 		ret = flow_drv_apply(dev, flow, &error);
2735 		if (ret < 0)
2736 			goto error;
2737 		flow_rxq_flags_set(dev, flow);
2738 	}
2739 	return 0;
2740 error:
2741 	ret = rte_errno; /* Save rte_errno before cleanup. */
2742 	mlx5_flow_stop(dev, list);
2743 	rte_errno = ret; /* Restore rte_errno. */
2744 	return -rte_errno;
2745 }
2746 
2747 /**
2748  * Verify the flow list is empty
2749  *
2750  * @param dev
2751  *  Pointer to Ethernet device.
2752  *
2753  * @return the number of flows not released.
2754  */
2755 int
2756 mlx5_flow_verify(struct rte_eth_dev *dev)
2757 {
2758 	struct mlx5_priv *priv = dev->data->dev_private;
2759 	struct rte_flow *flow;
2760 	int ret = 0;
2761 
2762 	TAILQ_FOREACH(flow, &priv->flows, next) {
2763 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
2764 			dev->data->port_id, (void *)flow);
2765 		++ret;
2766 	}
2767 	return ret;
2768 }
2769 
2770 /**
2771  * Enable a control flow configured from the control plane.
2772  *
2773  * @param dev
2774  *   Pointer to Ethernet device.
2775  * @param eth_spec
2776  *   An Ethernet flow spec to apply.
2777  * @param eth_mask
2778  *   An Ethernet flow mask to apply.
2779  * @param vlan_spec
2780  *   A VLAN flow spec to apply.
2781  * @param vlan_mask
2782  *   A VLAN flow mask to apply.
2783  *
2784  * @return
2785  *   0 on success, a negative errno value otherwise and rte_errno is set.
2786  */
2787 int
2788 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
2789 		    struct rte_flow_item_eth *eth_spec,
2790 		    struct rte_flow_item_eth *eth_mask,
2791 		    struct rte_flow_item_vlan *vlan_spec,
2792 		    struct rte_flow_item_vlan *vlan_mask)
2793 {
2794 	struct mlx5_priv *priv = dev->data->dev_private;
2795 	const struct rte_flow_attr attr = {
2796 		.ingress = 1,
2797 		.priority = MLX5_FLOW_PRIO_RSVD,
2798 	};
2799 	struct rte_flow_item items[] = {
2800 		{
2801 			.type = RTE_FLOW_ITEM_TYPE_ETH,
2802 			.spec = eth_spec,
2803 			.last = NULL,
2804 			.mask = eth_mask,
2805 		},
2806 		{
2807 			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
2808 					      RTE_FLOW_ITEM_TYPE_END,
2809 			.spec = vlan_spec,
2810 			.last = NULL,
2811 			.mask = vlan_mask,
2812 		},
2813 		{
2814 			.type = RTE_FLOW_ITEM_TYPE_END,
2815 		},
2816 	};
2817 	uint16_t queue[priv->reta_idx_n];
2818 	struct rte_flow_action_rss action_rss = {
2819 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2820 		.level = 0,
2821 		.types = priv->rss_conf.rss_hf,
2822 		.key_len = priv->rss_conf.rss_key_len,
2823 		.queue_num = priv->reta_idx_n,
2824 		.key = priv->rss_conf.rss_key,
2825 		.queue = queue,
2826 	};
2827 	struct rte_flow_action actions[] = {
2828 		{
2829 			.type = RTE_FLOW_ACTION_TYPE_RSS,
2830 			.conf = &action_rss,
2831 		},
2832 		{
2833 			.type = RTE_FLOW_ACTION_TYPE_END,
2834 		},
2835 	};
2836 	struct rte_flow *flow;
2837 	struct rte_flow_error error;
2838 	unsigned int i;
2839 
2840 	if (!priv->reta_idx_n || !priv->rxqs_n)
2841 		return 0;
2843 	for (i = 0; i != priv->reta_idx_n; ++i)
2844 		queue[i] = (*priv->reta_idx)[i];
2845 	flow = flow_list_create(dev, &priv->ctrl_flows,
2846 				&attr, items, actions, false, &error);
2847 	if (!flow)
2848 		return -rte_errno;
2849 	return 0;
2850 }
2851 
2852 /**
2853  * Enable a control flow configured from the control plane.
2854  *
2855  * @param dev
2856  *   Pointer to Ethernet device.
2857  * @param eth_spec
2858  *   An Ethernet flow spec to apply.
2859  * @param eth_mask
2860  *   An Ethernet flow mask to apply.
2861  *
2862  * @return
2863  *   0 on success, a negative errno value otherwise and rte_errno is set.
2864  */
2865 int
2866 mlx5_ctrl_flow(struct rte_eth_dev *dev,
2867 	       struct rte_flow_item_eth *eth_spec,
2868 	       struct rte_flow_item_eth *eth_mask)
2869 {
2870 	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
2871 }
2872 
2873 /**
2874  * Destroy a flow.
2875  *
2876  * @see rte_flow_destroy()
2877  * @see rte_flow_ops
2878  */
2879 int
2880 mlx5_flow_destroy(struct rte_eth_dev *dev,
2881 		  struct rte_flow *flow,
2882 		  struct rte_flow_error *error __rte_unused)
2883 {
2884 	struct mlx5_priv *priv = dev->data->dev_private;
2885 
2886 	flow_list_destroy(dev, &priv->flows, flow);
2887 	return 0;
2888 }
2889 
2890 /**
2891  * Destroy all flows.
2892  *
2893  * @see rte_flow_flush()
2894  * @see rte_flow_ops
2895  */
2896 int
2897 mlx5_flow_flush(struct rte_eth_dev *dev,
2898 		struct rte_flow_error *error __rte_unused)
2899 {
2900 	struct mlx5_priv *priv = dev->data->dev_private;
2901 
2902 	mlx5_flow_list_flush(dev, &priv->flows);
2903 	return 0;
2904 }
2905 
2906 /**
2907  * Isolated mode.
2908  *
2909  * @see rte_flow_isolate()
2910  * @see rte_flow_ops
2911  */
2912 int
2913 mlx5_flow_isolate(struct rte_eth_dev *dev,
2914 		  int enable,
2915 		  struct rte_flow_error *error)
2916 {
2917 	struct mlx5_priv *priv = dev->data->dev_private;
2918 
2919 	if (dev->data->dev_started) {
2920 		rte_flow_error_set(error, EBUSY,
2921 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2922 				   NULL,
2923 				   "port must be stopped first");
2924 		return -rte_errno;
2925 	}
2926 	priv->isolated = !!enable;
2927 	if (enable)
2928 		dev->dev_ops = &mlx5_dev_ops_isolate;
2929 	else
2930 		dev->dev_ops = &mlx5_dev_ops;
2931 	return 0;
2932 }
2933 
2934 /**
2935  * Query a flow.
2936  *
2937  * @see rte_flow_query()
2938  * @see rte_flow_ops
2939  */
2940 static int
2941 flow_drv_query(struct rte_eth_dev *dev,
2942 	       struct rte_flow *flow,
2943 	       const struct rte_flow_action *actions,
2944 	       void *data,
2945 	       struct rte_flow_error *error)
2946 {
2947 	const struct mlx5_flow_driver_ops *fops;
2948 	enum mlx5_flow_drv_type ftype = flow->drv_type;
2949 
2950 	assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
2951 	fops = flow_get_drv_ops(ftype);
2952 
2953 	return fops->query(dev, flow, actions, data, error);
2954 }
2955 
2956 /**
2957  * Query a flow.
2958  *
2959  * @see rte_flow_query()
2960  * @see rte_flow_ops
2961  */
2962 int
2963 mlx5_flow_query(struct rte_eth_dev *dev,
2964 		struct rte_flow *flow,
2965 		const struct rte_flow_action *actions,
2966 		void *data,
2967 		struct rte_flow_error *error)
2968 {
2969 	int ret;
2970 
2971 	ret = flow_drv_query(dev, flow, actions, data, error);
2972 	if (ret < 0)
2973 		return ret;
2974 	return 0;
2975 }
2976 
2977 /**
2978  * Convert a flow director filter to a generic flow.
2979  *
2980  * @param dev
2981  *   Pointer to Ethernet device.
2982  * @param fdir_filter
2983  *   Flow director filter to add.
2984  * @param attributes
2985  *   Generic flow parameters structure.
2986  *
2987  * @return
2988  *   0 on success, a negative errno value otherwise and rte_errno is set.
2989  */
2990 static int
2991 flow_fdir_filter_convert(struct rte_eth_dev *dev,
2992 			 const struct rte_eth_fdir_filter *fdir_filter,
2993 			 struct mlx5_fdir *attributes)
2994 {
2995 	struct mlx5_priv *priv = dev->data->dev_private;
2996 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
2997 	const struct rte_eth_fdir_masks *mask =
2998 		&dev->data->dev_conf.fdir_conf.mask;
2999 
3000 	/* Validate queue number. */
3001 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
3002 		DRV_LOG(ERR, "port %u invalid queue number %d",
3003 			dev->data->port_id, fdir_filter->action.rx_queue);
3004 		rte_errno = EINVAL;
3005 		return -rte_errno;
3006 	}
3007 	attributes->attr.ingress = 1;
3008 	attributes->items[0] = (struct rte_flow_item) {
3009 		.type = RTE_FLOW_ITEM_TYPE_ETH,
3010 		.spec = &attributes->l2,
3011 		.mask = &attributes->l2_mask,
3012 	};
3013 	switch (fdir_filter->action.behavior) {
3014 	case RTE_ETH_FDIR_ACCEPT:
3015 		attributes->actions[0] = (struct rte_flow_action){
3016 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
3017 			.conf = &attributes->queue,
3018 		};
3019 		break;
3020 	case RTE_ETH_FDIR_REJECT:
3021 		attributes->actions[0] = (struct rte_flow_action){
3022 			.type = RTE_FLOW_ACTION_TYPE_DROP,
3023 		};
3024 		break;
3025 	default:
3026 		DRV_LOG(ERR, "port %u invalid behavior %d",
3027 			dev->data->port_id,
3028 			fdir_filter->action.behavior);
3029 		rte_errno = ENOTSUP;
3030 		return -rte_errno;
3031 	}
3032 	attributes->queue.index = fdir_filter->action.rx_queue;
3033 	/* Handle L3. */
3034 	switch (fdir_filter->input.flow_type) {
3035 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3036 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3037 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3038 		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
3039 			.src_addr = input->flow.ip4_flow.src_ip,
3040 			.dst_addr = input->flow.ip4_flow.dst_ip,
3041 			.time_to_live = input->flow.ip4_flow.ttl,
3042 			.type_of_service = input->flow.ip4_flow.tos,
3043 		};
3044 		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
3045 			.src_addr = mask->ipv4_mask.src_ip,
3046 			.dst_addr = mask->ipv4_mask.dst_ip,
3047 			.time_to_live = mask->ipv4_mask.ttl,
3048 			.type_of_service = mask->ipv4_mask.tos,
3049 			.next_proto_id = mask->ipv4_mask.proto,
3050 		};
3051 		attributes->items[1] = (struct rte_flow_item){
3052 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
3053 			.spec = &attributes->l3,
3054 			.mask = &attributes->l3_mask,
3055 		};
3056 		break;
3057 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3058 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3059 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3060 		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
3061 			.hop_limits = input->flow.ipv6_flow.hop_limits,
3062 			.proto = input->flow.ipv6_flow.proto,
3063 		};
3064 
3065 		memcpy(attributes->l3.ipv6.hdr.src_addr,
3066 		       input->flow.ipv6_flow.src_ip,
3067 		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
3068 		memcpy(attributes->l3.ipv6.hdr.dst_addr,
3069 		       input->flow.ipv6_flow.dst_ip,
3070 		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
3071 		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
3072 		       mask->ipv6_mask.src_ip,
3073 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
3074 		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
3075 		       mask->ipv6_mask.dst_ip,
3076 		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
3077 		attributes->items[1] = (struct rte_flow_item){
3078 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
3079 			.spec = &attributes->l3,
3080 			.mask = &attributes->l3_mask,
3081 		};
3082 		break;
3083 	default:
3084 		DRV_LOG(ERR, "port %u invalid flow type%d",
3085 			dev->data->port_id, fdir_filter->input.flow_type);
3086 		rte_errno = ENOTSUP;
3087 		return -rte_errno;
3088 	}
3089 	/* Handle L4. */
3090 	switch (fdir_filter->input.flow_type) {
3091 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3092 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
3093 			.src_port = input->flow.udp4_flow.src_port,
3094 			.dst_port = input->flow.udp4_flow.dst_port,
3095 		};
3096 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
3097 			.src_port = mask->src_port_mask,
3098 			.dst_port = mask->dst_port_mask,
3099 		};
3100 		attributes->items[2] = (struct rte_flow_item){
3101 			.type = RTE_FLOW_ITEM_TYPE_UDP,
3102 			.spec = &attributes->l4,
3103 			.mask = &attributes->l4_mask,
3104 		};
3105 		break;
3106 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3107 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
3108 			.src_port = input->flow.tcp4_flow.src_port,
3109 			.dst_port = input->flow.tcp4_flow.dst_port,
3110 		};
3111 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
3112 			.src_port = mask->src_port_mask,
3113 			.dst_port = mask->dst_port_mask,
3114 		};
3115 		attributes->items[2] = (struct rte_flow_item){
3116 			.type = RTE_FLOW_ITEM_TYPE_TCP,
3117 			.spec = &attributes->l4,
3118 			.mask = &attributes->l4_mask,
3119 		};
3120 		break;
3121 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3122 		attributes->l4.udp.hdr = (struct rte_udp_hdr){
3123 			.src_port = input->flow.udp6_flow.src_port,
3124 			.dst_port = input->flow.udp6_flow.dst_port,
3125 		};
3126 		attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
3127 			.src_port = mask->src_port_mask,
3128 			.dst_port = mask->dst_port_mask,
3129 		};
3130 		attributes->items[2] = (struct rte_flow_item){
3131 			.type = RTE_FLOW_ITEM_TYPE_UDP,
3132 			.spec = &attributes->l4,
3133 			.mask = &attributes->l4_mask,
3134 		};
3135 		break;
3136 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3137 		attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
3138 			.src_port = input->flow.tcp6_flow.src_port,
3139 			.dst_port = input->flow.tcp6_flow.dst_port,
3140 		};
3141 		attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
3142 			.src_port = mask->src_port_mask,
3143 			.dst_port = mask->dst_port_mask,
3144 		};
3145 		attributes->items[2] = (struct rte_flow_item){
3146 			.type = RTE_FLOW_ITEM_TYPE_TCP,
3147 			.spec = &attributes->l4,
3148 			.mask = &attributes->l4_mask,
3149 		};
3150 		break;
3151 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3152 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3153 		break;
3154 	default:
3155 		DRV_LOG(ERR, "port %u invalid flow type%d",
3156 			dev->data->port_id, fdir_filter->input.flow_type);
3157 		rte_errno = ENOTSUP;
3158 		return -rte_errno;
3159 	}
3160 	return 0;
3161 }
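
/*
 * Conversion example (illustrative): a flow director filter for
 * RTE_ETH_FLOW_NONFRAG_IPV4_UDP with behavior RTE_ETH_FDIR_ACCEPT and
 * rx_queue 3 is converted by this function into the generic pattern
 * "eth / ipv4 / udp" stored in attributes->items[] plus a single
 * RTE_FLOW_ACTION_TYPE_QUEUE action whose queue index is 3.
 */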
3162 
3163 #define FLOW_FDIR_CMP(f1, f2, fld) \
3164 	memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
3165 
3166 /**
3167  * Compare two FDIR flows. If items and actions are identical, the two flows are
3168  * regarded as the same.
3169  *
3170  * @param dev
3171  *   Pointer to Ethernet device.
3172  * @param f1
3173  *   FDIR flow to compare.
3174  * @param f2
3175  *   FDIR flow to compare.
3176  *
3177  * @return
3178  *   Zero on match, 1 otherwise.
3179  */
3180 static int
3181 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
3182 {
3183 	if (FLOW_FDIR_CMP(f1, f2, attr) ||
3184 	    FLOW_FDIR_CMP(f1, f2, l2) ||
3185 	    FLOW_FDIR_CMP(f1, f2, l2_mask) ||
3186 	    FLOW_FDIR_CMP(f1, f2, l3) ||
3187 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
3188 	    FLOW_FDIR_CMP(f1, f2, l4) ||
3189 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
3190 	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
3191 		return 1;
3192 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
3193 	    FLOW_FDIR_CMP(f1, f2, queue))
3194 		return 1;
3195 	return 0;
3196 }
3197 
3198 /**
3199  * Search device flow list to find out a matched FDIR flow.
3200  *
3201  * @param dev
3202  *   Pointer to Ethernet device.
3203  * @param fdir_flow
3204  *   FDIR flow to lookup.
3205  *
3206  * @return
3207  *   Pointer to the flow if found, NULL otherwise.
3208  */
3209 static struct rte_flow *
3210 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
3211 {
3212 	struct mlx5_priv *priv = dev->data->dev_private;
3213 	struct rte_flow *flow = NULL;
3214 
3215 	assert(fdir_flow);
3216 	TAILQ_FOREACH(flow, &priv->flows, next) {
3217 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
3218 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
3219 				dev->data->port_id, (void *)flow);
3220 			break;
3221 		}
3222 	}
3223 	return flow;
3224 }
3225 
3226 /**
3227  * Add new flow director filter and store it in list.
3228  *
3229  * @param dev
3230  *   Pointer to Ethernet device.
3231  * @param fdir_filter
3232  *   Flow director filter to add.
3233  *
3234  * @return
3235  *   0 on success, a negative errno value otherwise and rte_errno is set.
3236  */
3237 static int
3238 flow_fdir_filter_add(struct rte_eth_dev *dev,
3239 		     const struct rte_eth_fdir_filter *fdir_filter)
3240 {
3241 	struct mlx5_priv *priv = dev->data->dev_private;
3242 	struct mlx5_fdir *fdir_flow;
3243 	struct rte_flow *flow;
3244 	int ret;
3245 
3246 	fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
3247 	if (!fdir_flow) {
3248 		rte_errno = ENOMEM;
3249 		return -rte_errno;
3250 	}
3251 	ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
3252 	if (ret)
3253 		goto error;
3254 	flow = flow_fdir_filter_lookup(dev, fdir_flow);
3255 	if (flow) {
3256 		rte_errno = EEXIST;
3257 		goto error;
3258 	}
3259 	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
3260 				fdir_flow->items, fdir_flow->actions, true,
3261 				NULL);
3262 	if (!flow)
3263 		goto error;
3264 	assert(!flow->fdir);
3265 	flow->fdir = fdir_flow;
3266 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
3267 		dev->data->port_id, (void *)flow);
3268 	return 0;
3269 error:
3270 	rte_free(fdir_flow);
3271 	return -rte_errno;
3272 }
3273 
3274 /**
3275  * Delete specific filter.
3276  *
3277  * @param dev
3278  *   Pointer to Ethernet device.
3279  * @param fdir_filter
3280  *   Filter to be deleted.
3281  *
3282  * @return
3283  *   0 on success, a negative errno value otherwise and rte_errno is set.
3284  */
3285 static int
3286 flow_fdir_filter_delete(struct rte_eth_dev *dev,
3287 			const struct rte_eth_fdir_filter *fdir_filter)
3288 {
3289 	struct mlx5_priv *priv = dev->data->dev_private;
3290 	struct rte_flow *flow;
3291 	struct mlx5_fdir fdir_flow = {
3292 		.attr.group = 0,
3293 	};
3294 	int ret;
3295 
3296 	ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
3297 	if (ret)
3298 		return -rte_errno;
3299 	flow = flow_fdir_filter_lookup(dev, &fdir_flow);
3300 	if (!flow) {
3301 		rte_errno = ENOENT;
3302 		return -rte_errno;
3303 	}
3304 	flow_list_destroy(dev, &priv->flows, flow);
3305 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
3306 		dev->data->port_id, (void *)flow);
3307 	return 0;
3308 }
3309 
3310 /**
3311  * Update queue for specific filter.
3312  *
3313  * @param dev
3314  *   Pointer to Ethernet device.
3315  * @param fdir_filter
3316  *   Filter to be updated.
3317  *
3318  * @return
3319  *   0 on success, a negative errno value otherwise and rte_errno is set.
3320  */
3321 static int
3322 flow_fdir_filter_update(struct rte_eth_dev *dev,
3323 			const struct rte_eth_fdir_filter *fdir_filter)
3324 {
3325 	int ret;
3326 
3327 	ret = flow_fdir_filter_delete(dev, fdir_filter);
3328 	if (ret)
3329 		return ret;
3330 	return flow_fdir_filter_add(dev, fdir_filter);
3331 }
3332 
3333 /**
3334  * Flush all filters.
3335  *
3336  * @param dev
3337  *   Pointer to Ethernet device.
3338  */
3339 static void
3340 flow_fdir_filter_flush(struct rte_eth_dev *dev)
3341 {
3342 	struct mlx5_priv *priv = dev->data->dev_private;
3343 
3344 	mlx5_flow_list_flush(dev, &priv->flows);
3345 }
3346 
3347 /**
3348  * Get flow director information.
3349  *
3350  * @param dev
3351  *   Pointer to Ethernet device.
3352  * @param[out] fdir_info
3353  *   Resulting flow director information.
3354  */
3355 static void
3356 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
3357 {
3358 	struct rte_eth_fdir_masks *mask =
3359 		&dev->data->dev_conf.fdir_conf.mask;
3360 
3361 	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
3362 	fdir_info->guarant_spc = 0;
3363 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
3364 	fdir_info->max_flexpayload = 0;
3365 	fdir_info->flow_types_mask[0] = 0;
3366 	fdir_info->flex_payload_unit = 0;
3367 	fdir_info->max_flex_payload_segment_num = 0;
3368 	fdir_info->flex_payload_limit = 0;
3369 	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
3370 }
3371 
3372 /**
3373  * Deal with flow director operations.
3374  *
3375  * @param dev
3376  *   Pointer to Ethernet device.
3377  * @param filter_op
3378  *   Operation to perform.
3379  * @param arg
3380  *   Pointer to operation-specific structure.
3381  *
3382  * @return
3383  *   0 on success, a negative errno value otherwise and rte_errno is set.
3384  */
3385 static int
3386 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3387 		    void *arg)
3388 {
3389 	enum rte_fdir_mode fdir_mode =
3390 		dev->data->dev_conf.fdir_conf.mode;
3391 
3392 	if (filter_op == RTE_ETH_FILTER_NOP)
3393 		return 0;
3394 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
3395 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3396 		DRV_LOG(ERR, "port %u flow director mode %d not supported",
3397 			dev->data->port_id, fdir_mode);
3398 		rte_errno = EINVAL;
3399 		return -rte_errno;
3400 	}
3401 	switch (filter_op) {
3402 	case RTE_ETH_FILTER_ADD:
3403 		return flow_fdir_filter_add(dev, arg);
3404 	case RTE_ETH_FILTER_UPDATE:
3405 		return flow_fdir_filter_update(dev, arg);
3406 	case RTE_ETH_FILTER_DELETE:
3407 		return flow_fdir_filter_delete(dev, arg);
3408 	case RTE_ETH_FILTER_FLUSH:
3409 		flow_fdir_filter_flush(dev);
3410 		break;
3411 	case RTE_ETH_FILTER_INFO:
3412 		flow_fdir_info_get(dev, arg);
3413 		break;
3414 	default:
3415 		DRV_LOG(DEBUG, "port %u unknown operation %u",
3416 			dev->data->port_id, filter_op);
3417 		rte_errno = EINVAL;
3418 		return -rte_errno;
3419 	}
3420 	return 0;
3421 }
3422 
3423 /**
3424  * Manage filter operations.
3425  *
3426  * @param dev
3427  *   Pointer to Ethernet device structure.
3428  * @param filter_type
3429  *   Filter type.
3430  * @param filter_op
3431  *   Operation to perform.
3432  * @param arg
3433  *   Pointer to operation-specific structure.
3434  *
3435  * @return
3436  *   0 on success, a negative errno value otherwise and rte_errno is set.
3437  */
3438 int
3439 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
3440 		     enum rte_filter_type filter_type,
3441 		     enum rte_filter_op filter_op,
3442 		     void *arg)
3443 {
3444 	switch (filter_type) {
3445 	case RTE_ETH_FILTER_GENERIC:
3446 		if (filter_op != RTE_ETH_FILTER_GET) {
3447 			rte_errno = EINVAL;
3448 			return -rte_errno;
3449 		}
3450 		*(const void **)arg = &mlx5_flow_ops;
3451 		return 0;
3452 	case RTE_ETH_FILTER_FDIR:
3453 		return flow_fdir_ctrl_func(dev, filter_op, arg);
3454 	default:
3455 		DRV_LOG(ERR, "port %u filter type (%d) not supported",
3456 			dev->data->port_id, filter_type);
3457 		rte_errno = ENOTSUP;
3458 		return -rte_errno;
3459 	}
3460 	return 0;
3461 }
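
/*
 * Usage example from the application side (illustrative only, port_id is
 * hypothetical): the generic filter type is how the rte_flow layer
 * retrieves this driver's flow ops.
 *
 *   const struct rte_flow_ops *ops = NULL;
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops);
 *   After the call, ops points to mlx5_flow_ops.
 */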
3462 
3463 #define MLX5_POOL_QUERY_FREQ_US 1000000
3464 
3465 /**
3466  * Set the periodic procedure for triggering asynchronous batch queries for all
3467  * the counter pools.
3468  *
3469  * @param[in] sh
3470  *   Pointer to mlx5_ibv_shared object.
3471  */
3472 void
3473 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
3474 {
3475 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
3476 	uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
3477 	uint32_t us;
3478 
3479 	cont = MLX5_CNT_CONTAINER(sh, 1, 0);
3480 	pools_n += rte_atomic16_read(&cont->n_valid);
3481 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
3482 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
3483 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
3484 		sh->cmng.query_thread_on = 0;
3485 		DRV_LOG(ERR, "Cannot reinitialize query alarm");
3486 	} else {
3487 		sh->cmng.query_thread_on = 1;
3488 	}
3489 }
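
/*
 * Illustrative arithmetic for the interval computed above: with
 * MLX5_POOL_QUERY_FREQ_US = 1000000 and, say, 4 valid pools across both
 * containers, the alarm fires every 250000 us, so every pool is queried
 * roughly once per second and the per-pool queries are spread evenly over
 * that second.
 */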
3490 
3491 /**
3492  * The periodic procedure for triggering asynchronous batch queries for all the
3493  * counter pools. This function is expected to be called from the host thread.
3494  *
3495  * @param[in] arg
3496  *   The parameter for the alarm process.
3497  */
3498 void
3499 mlx5_flow_query_alarm(void *arg)
3500 {
3501 	struct mlx5_ibv_shared *sh = arg;
3502 	struct mlx5_devx_obj *dcs;
3503 	uint16_t offset;
3504 	int ret;
3505 	uint8_t batch = sh->cmng.batch;
3506 	uint16_t pool_index = sh->cmng.pool_index;
3507 	struct mlx5_pools_container *cont;
3508 	struct mlx5_pools_container *mcont;
3509 	struct mlx5_flow_counter_pool *pool;
3510 
3511 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
3512 		goto set_alarm;
3513 next_container:
3514 	cont = MLX5_CNT_CONTAINER(sh, batch, 1);
3515 	mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
3516 	/* Check if a resize was done and the container needs to be flipped. */
3517 	if (cont != mcont) {
3518 		if (cont->pools) {
3519 			/* Clean the old container. */
3520 			rte_free(cont->pools);
3521 			memset(cont, 0, sizeof(*cont));
3522 		}
3523 		rte_cio_wmb();
3524 		/* Flip the host container. */
3525 		sh->cmng.mhi[batch] ^= (uint8_t)2;
3526 		cont = mcont;
3527 	}
3528 	if (!cont->pools) {
3529 		/* The case of two empty containers is not expected. */
3530 		if (unlikely(batch != sh->cmng.batch))
3531 			goto set_alarm;
3532 		batch ^= 0x1;
3533 		pool_index = 0;
3534 		goto next_container;
3535 	}
3536 	pool = cont->pools[pool_index];
3537 	if (pool->raw_hw)
3538 		/* There is a pool query in progress. */
3539 		goto set_alarm;
3540 	pool->raw_hw =
3541 		LIST_FIRST(&sh->cmng.free_stat_raws);
3542 	if (!pool->raw_hw)
3543 		/* No free counter statistics raw memory. */
3544 		goto set_alarm;
3545 	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
3546 							      (&pool->a64_dcs);
3547 	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
3548 	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
3549 					       offset, NULL, NULL,
3550 					       pool->raw_hw->mem_mng->dm->id,
3551 					       (void *)(uintptr_t)
3552 					       (pool->raw_hw->data + offset),
3553 					       sh->devx_comp,
3554 					       (uint64_t)(uintptr_t)pool);
3555 	if (ret) {
3556 		DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
3557 			" %d", pool->min_dcs->id);
3558 		pool->raw_hw = NULL;
3559 		goto set_alarm;
3560 	}
3561 	pool->raw_hw->min_dcs_id = dcs->id;
3562 	LIST_REMOVE(pool->raw_hw, next);
3563 	sh->cmng.pending_queries++;
3564 	pool_index++;
3565 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
3566 		batch ^= 0x1;
3567 		pool_index = 0;
3568 	}
3569 set_alarm:
3570 	sh->cmng.batch = batch;
3571 	sh->cmng.pool_index = pool_index;
3572 	mlx5_set_query_alarm(sh);
3573 }
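
/*
 * Illustrative walk of the round-robin above: assuming one container holds
 * 2 pools and the other holds 1, successive alarm expirations visit pool 0
 * and pool 1 of the first container, then pool 0 of the other, then wrap
 * around, issuing at most one asynchronous query per expiration and only
 * while free raw buffers and pending-query credits are available.
 */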
3574 
3575 /**
3576  * Handler for the HW response with the ready values from an asynchronous
3577  * batch query. This function is expected to be called from the host thread.
3578  *
3579  * @param[in] sh
3580  *   The pointer to the shared IB device context.
3581  * @param[in] async_id
3582  *   The Devx async ID.
3583  * @param[in] status
3584  *   The status of the completion.
3585  */
3586 void
3587 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
3588 				  uint64_t async_id, int status)
3589 {
3590 	struct mlx5_flow_counter_pool *pool =
3591 		(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
3592 	struct mlx5_counter_stats_raw *raw_to_free;
3593 
3594 	if (unlikely(status)) {
3595 		raw_to_free = pool->raw_hw;
3596 	} else {
3597 		raw_to_free = pool->raw;
3598 		rte_spinlock_lock(&pool->sl);
3599 		pool->raw = pool->raw_hw;
3600 		rte_spinlock_unlock(&pool->sl);
3601 		rte_atomic64_add(&pool->query_gen, 1);
3602 		/* Make sure the new raw counter data is visible in memory. */
3603 		rte_cio_wmb();
3604 	}
3605 	LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
3606 	pool->raw_hw = NULL;
3607 	sh->cmng.pending_queries--;
3608 }
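
/*
 * Illustrative view of the buffer rotation above: on success the freshly
 * filled pool->raw_hw becomes the readable pool->raw, the previous raw
 * buffer returns to the free list and query_gen is incremented so readers
 * can notice newer data; on error the just-used buffer simply goes back to
 * the free list and pool->raw is left untouched.
 */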
3609 
3610 /**
3611  * Translate the rte_flow group index to HW table value.
3612  *
3613  * @param[in] attributes
3614  *   Pointer to flow attributes.
3615  * @param[in] external
3616  *   Whether the flow rule is created by a request external to the PMD.
3617  * @param[in] group
3618  *   rte_flow group index value.
3619  * @param[out] table
3620  *   HW table value.
3621  * @param[out] error
3622  *   Pointer to error structure.
3623  *
3624  * @return
3625  *   0 on success, a negative errno value otherwise and rte_errno is set.
3626  */
3627 int
3628 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
3629 			 uint32_t group, uint32_t *table,
3630 			 struct rte_flow_error *error)
3631 {
3632 	if (attributes->transfer && external) {
3633 		if (group == UINT32_MAX)
3634 			return rte_flow_error_set
3635 						(error, EINVAL,
3636 						 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3637 						 NULL,
3638 						 "group index not supported");
3639 		*table = group + 1;
3640 	} else {
3641 		*table = group;
3642 	}
3643 	return 0;
3644 }
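
/*
 * Illustrative mapping for the translation above, for rules in the transfer
 * domain created by an external request: group 0 -> table 1, group 5 ->
 * table 6, while group UINT32_MAX is rejected because the shifted value
 * would overflow.  For all other rules the group index is used as the table
 * value unchanged.
 */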
3645