xref: /dpdk/drivers/net/mlx5/mlx5_flow_verbs.c (revision d56ec3dcad056c47cef4e837d5191d04c936d87e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #include <netinet/in.h>
6 #include <sys/queue.h>
7 #include <stdalign.h>
8 #include <stdint.h>
9 #include <string.h>
10 
11 /* Verbs header. */
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #ifdef PEDANTIC
14 #pragma GCC diagnostic ignored "-Wpedantic"
15 #endif
16 #include <infiniband/verbs.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
20 
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28 
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_flow.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35 
36 #define VERBS_SPEC_INNER(item_flags) \
37 	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
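
/*
 * Illustrative sketch (not part of the driver): VERBS_SPEC_INNER() maps
 * the tunnel flag to the Verbs "inner" spec bit, so a spec type can be
 * built the same way for inner and outer headers:
 *
 *     struct ibv_flow_spec_eth eth = {
 *             .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
 *             .size = sizeof(eth),
 *     };
 *     // tunnel flow:     .type == IBV_FLOW_SPEC_ETH | IBV_FLOW_SPEC_INNER
 *     // non-tunnel flow: .type == IBV_FLOW_SPEC_ETH
 */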
38 
39 /**
40  * Create Verbs flow counter with Verbs library.
41  *
42  * @param[in] dev
43  *   Pointer to the Ethernet device structure.
44  * @param[in, out] counter
45  *   Pointer to the mlx5 flow counter object; contains the counter ID.
46  *   On success the handle of the created Verbs flow counter is
47  *   returned in the cs field (if counters are supported).
48  *
49  * @return
50  *   0 on success, a negative errno value otherwise
51  *   and rte_errno is set.
52  */
53 static int
54 flow_verbs_counter_create(struct rte_eth_dev *dev,
55 			  struct mlx5_flow_counter *counter)
56 {
57 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
58 	struct mlx5_priv *priv = dev->data->dev_private;
59 	struct ibv_context *ctx = priv->sh->ctx;
60 	struct ibv_counter_set_init_attr init = {
61 			 .counter_set_id = counter->id};
62 
63 	counter->cs = mlx5_glue->create_counter_set(ctx, &init);
64 	if (!counter->cs) {
65 		rte_errno = ENOTSUP;
66 		return -ENOTSUP;
67 	}
68 	return 0;
69 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
70 	struct mlx5_priv *priv = dev->data->dev_private;
71 	struct ibv_context *ctx = priv->sh->ctx;
72 	struct ibv_counters_init_attr init = {0};
73 	struct ibv_counter_attach_attr attach;
74 	int ret;
75 
76 	memset(&attach, 0, sizeof(attach));
77 	counter->cs = mlx5_glue->create_counters(ctx, &init);
78 	if (!counter->cs) {
79 		rte_errno = ENOTSUP;
80 		return -ENOTSUP;
81 	}
82 	attach.counter_desc = IBV_COUNTER_PACKETS;
83 	attach.index = 0;
84 	ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
85 	if (!ret) {
86 		attach.counter_desc = IBV_COUNTER_BYTES;
87 		attach.index = 1;
88 		ret = mlx5_glue->attach_counters
89 					(counter->cs, &attach, NULL);
90 	}
91 	if (ret) {
92 		claim_zero(mlx5_glue->destroy_counters(counter->cs));
93 		counter->cs = NULL;
94 		rte_errno = ret;
95 		return -ret;
96 	}
97 	return 0;
98 #else
99 	(void)dev;
100 	(void)counter;
101 	rte_errno = ENOTSUP;
102 	return -ENOTSUP;
103 #endif
104 }
105 
106 /**
107  * Get a flow counter.
108  *
109  * @param[in] dev
110  *   Pointer to the Ethernet device structure.
111  * @param[in] shared
112  *   Indicate if this counter is shared with other flows.
113  * @param[in] id
114  *   Counter identifier.
115  *
116  * @return
117  *   A pointer to the counter, NULL otherwise and rte_errno is set.
118  */
119 static struct mlx5_flow_counter *
120 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
121 {
122 	struct mlx5_priv *priv = dev->data->dev_private;
123 	struct mlx5_flow_counter *cnt;
124 	int ret;
125 
126 	if (shared) {
127 		TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
128 			if (cnt->shared && cnt->id == id) {
129 				cnt->ref_cnt++;
130 				return cnt;
131 			}
132 		}
133 	}
134 	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
135 	if (!cnt) {
136 		rte_errno = ENOMEM;
137 		return NULL;
138 	}
139 	cnt->id = id;
140 	cnt->shared = shared;
141 	cnt->ref_cnt = 1;
142 	cnt->hits = 0;
143 	cnt->bytes = 0;
144 	/* Create counter with Verbs. */
145 	ret = flow_verbs_counter_create(dev, cnt);
146 	if (!ret) {
147 		TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
148 		return cnt;
149 	}
150 	/* Some error occurred in the Verbs library. */
151 	rte_free(cnt);
152 	rte_errno = -ret;
153 	return NULL;
154 }
155 
156 /**
157  * Release a flow counter.
158  *
159  * @param[in] dev
160  *   Pointer to the Ethernet device structure.
161  * @param[in] counter
162  *   Pointer to the counter handle.
163  */
164 static void
165 flow_verbs_counter_release(struct rte_eth_dev *dev,
166 			   struct mlx5_flow_counter *counter)
167 {
168 	struct mlx5_priv *priv = dev->data->dev_private;
169 
170 	if (--counter->ref_cnt == 0) {
171 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
172 		claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
173 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
174 		claim_zero(mlx5_glue->destroy_counters(counter->cs));
175 #endif
176 		TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
177 		rte_free(counter);
178 	}
179 }
180 
181 /**
182  * Query a flow counter via Verbs library call.
183  *
184  * @see rte_flow_query()
185  * @see rte_flow_ops
186  */
187 static int
188 flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
189 			 struct rte_flow *flow, void *data,
190 			 struct rte_flow_error *error)
191 {
192 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
193 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
194 	if (flow->counter && flow->counter->cs) {
195 		struct rte_flow_query_count *qc = data;
196 		uint64_t counters[2] = {0, 0};
197 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
198 		struct ibv_query_counter_set_attr query_cs_attr = {
199 			.cs = flow->counter->cs,
200 			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
201 		};
202 		struct ibv_counter_set_data query_out = {
203 			.out = counters,
204 			.outlen = 2 * sizeof(uint64_t),
205 		};
206 		int err = mlx5_glue->query_counter_set(&query_cs_attr,
207 						       &query_out);
208 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
209 		int err = mlx5_glue->query_counters
210 			       (flow->counter->cs, counters,
211 				RTE_DIM(counters),
212 				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
213 #endif
214 		if (err)
215 			return rte_flow_error_set
216 				(error, err,
217 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
218 				 NULL,
219 				 "cannot read counter");
220 		qc->hits_set = 1;
221 		qc->bytes_set = 1;
222 		qc->hits = counters[0] - flow->counter->hits;
223 		qc->bytes = counters[1] - flow->counter->bytes;
224 		if (qc->reset) {
225 			flow->counter->hits = counters[0];
226 			flow->counter->bytes = counters[1];
227 		}
228 		return 0;
229 	}
230 	return rte_flow_error_set(error, EINVAL,
231 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
232 				  NULL,
233 				  "flow does not have counter");
234 #else
235 	(void)flow;
236 	(void)data;
237 	return rte_flow_error_set(error, ENOTSUP,
238 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239 				  NULL,
240 				  "counters are not available");
241 #endif
242 }
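
/*
 * Illustrative sketch (not part of the driver): counters are reported
 * as deltas against the baseline stored at the last reset. Assuming
 * the hardware packet counter read 100 at the last reset and 150 now:
 *
 *     counters[0] == 150;          // raw value read through Verbs
 *     flow->counter->hits == 100;  // baseline saved at last reset
 *     qc->hits == 150 - 100;       // 50 packets reported to the user
 *     // if qc->reset is set, the baseline becomes 150 for next time
 */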
243 
244 /**
245  * Add a verbs item specification into @p verbs.
246  *
247  * @param[in, out] verbs
248  *   Pointer to verbs structure.
249  * @param[in] src
250  *   Pointer to the specification to copy.
251  * @param[in] size
252  *   Size in bytes of the specification to copy.
253  */
254 static void
255 flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
256 {
257 	void *dst;
258 
259 	if (!verbs)
260 		return;
261 	assert(verbs->specs);
262 	dst = (void *)(verbs->specs + verbs->size);
263 	memcpy(dst, src, size);
264 	++verbs->attr->num_of_specs;
265 	verbs->size += size;
266 }
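
/*
 * Illustrative sketch (not part of the driver): translated specs are
 * packed back to back after the ibv_flow_attr header, with verbs->size
 * tracking the byte offset of the next free slot:
 *
 *     | ibv_flow_attr | spec #0 | spec #1 | ... |
 *                                               ^ specs + size
 *
 *     struct ibv_flow_spec_action_drop drop = {
 *             .type = IBV_FLOW_SPEC_ACTION_DROP,
 *             .size = sizeof(drop),
 *     };
 *     flow_verbs_spec_add(&dev_flow->verbs, &drop, sizeof(drop));
 *     // attr->num_of_specs incremented, size advanced by sizeof(drop)
 */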
267 
268 /**
269  * Convert the @p item into a Verbs specification. This function assumes that
270  * the input is valid and that there is space to insert the requested item
271  * into the flow.
272  *
273  * @param[in, out] dev_flow
274  *   Pointer to dev_flow structure.
275  * @param[in] item
276  *   Item specification.
277  * @param[in] item_flags
278  *   Parsed item flags.
279  */
280 static void
281 flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
282 			      const struct rte_flow_item *item,
283 			      uint64_t item_flags)
284 {
285 	const struct rte_flow_item_eth *spec = item->spec;
286 	const struct rte_flow_item_eth *mask = item->mask;
287 	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
288 	struct ibv_flow_spec_eth eth = {
289 		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
290 		.size = size,
291 	};
292 
293 	if (!mask)
294 		mask = &rte_flow_item_eth_mask;
295 	if (spec) {
296 		unsigned int i;
297 
298 		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
299 			RTE_ETHER_ADDR_LEN);
300 		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
301 			RTE_ETHER_ADDR_LEN);
302 		eth.val.ether_type = spec->type;
303 		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
304 			RTE_ETHER_ADDR_LEN);
305 		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
306 			RTE_ETHER_ADDR_LEN);
307 		eth.mask.ether_type = mask->type;
308 		/* Remove unwanted bits from values. */
309 		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
310 			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
311 			eth.val.src_mac[i] &= eth.mask.src_mac[i];
312 		}
313 		eth.val.ether_type &= eth.mask.ether_type;
314 	}
315 	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
316 }
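
/*
 * Illustrative sketch (not part of the driver): the "remove unwanted
 * bits" step clears value bits outside the mask, e.g. with a
 * hypothetical OUI-only destination mask:
 *
 *     val.dst_mac  = aa:bb:cc:dd:ee:ff
 *     mask.dst_mac = ff:ff:ff:00:00:00
 *     val.dst_mac &= mask.dst_mac   // -> aa:bb:cc:00:00:00
 */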
317 
318 /**
319  * Update the VLAN tag in the Verbs Ethernet specification.
320  * This function assumes that the input is valid and there is space to add
321  * the requested item.
322  *
323  * @param[in, out] attr
324  *   Pointer to Verbs attributes structure.
325  * @param[in] eth
326  *   Verbs structure containing the VLAN information to copy.
327  */
328 static void
329 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
330 			    struct ibv_flow_spec_eth *eth)
331 {
332 	unsigned int i;
333 	const enum ibv_flow_spec_type search = eth->type;
334 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
335 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
336 
337 	for (i = 0; i != attr->num_of_specs; ++i) {
338 		if (hdr->type == search) {
339 			struct ibv_flow_spec_eth *e =
340 				(struct ibv_flow_spec_eth *)hdr;
341 
342 			e->val.vlan_tag = eth->val.vlan_tag;
343 			e->mask.vlan_tag = eth->mask.vlan_tag;
344 			e->val.ether_type = eth->val.ether_type;
345 			e->mask.ether_type = eth->mask.ether_type;
346 			break;
347 		}
348 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
349 	}
350 }
351 
352 /**
353  * Convert the @p item into a Verbs specification. This function assumes that
354  * the input is valid and that there is space to insert the requested item
355  * into the flow.
356  *
357  * @param[in, out] dev_flow
358  *   Pointer to dev_flow structure.
359  * @param[in] item
360  *   Item specification.
361  * @param[in] item_flags
362  *   Parsed item flags.
363  */
364 static void
365 flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
366 			       const struct rte_flow_item *item,
367 			       uint64_t item_flags)
368 {
369 	const struct rte_flow_item_vlan *spec = item->spec;
370 	const struct rte_flow_item_vlan *mask = item->mask;
371 	unsigned int size = sizeof(struct ibv_flow_spec_eth);
372 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
373 	struct ibv_flow_spec_eth eth = {
374 		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
375 		.size = size,
376 	};
377 	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
378 				      MLX5_FLOW_LAYER_OUTER_L2;
379 
380 	if (!mask)
381 		mask = &rte_flow_item_vlan_mask;
382 	if (spec) {
383 		eth.val.vlan_tag = spec->tci;
384 		eth.mask.vlan_tag = mask->tci;
385 		eth.val.vlan_tag &= eth.mask.vlan_tag;
386 		eth.val.ether_type = spec->inner_type;
387 		eth.mask.ether_type = mask->inner_type;
388 		eth.val.ether_type &= eth.mask.ether_type;
389 	}
390 	if (!(item_flags & l2m))
391 		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
392 	else
393 		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
394 	if (!tunnel && spec)
395 		dev_flow->verbs.vf_vlan.tag =
396 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
397 }
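
/*
 * Illustrative sketch (not part of the driver): vf_vlan.tag keeps only
 * the 12-bit VLAN ID of the TCI; the PCP and DEI bits are dropped:
 *
 *     spec->tci = RTE_BE16(0xe00a);           // PCP 7, DEI 0, VID 10
 *     rte_be_to_cpu_16(spec->tci) & 0x0fff;   // -> 0x00a (VID 10)
 */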
398 
399 /**
400  * Convert the @p item into a Verbs specification. This function assumes that
401  * the input is valid and that there is space to insert the requested item
402  * into the flow.
403  *
404  * @param[in, out] dev_flow
405  *   Pointer to dev_flow structure.
406  * @param[in] item
407  *   Item specification.
408  * @param[in] item_flags
409  *   Parsed item flags.
410  */
411 static void
412 flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
413 			       const struct rte_flow_item *item,
414 			       uint64_t item_flags)
415 {
416 	const struct rte_flow_item_ipv4 *spec = item->spec;
417 	const struct rte_flow_item_ipv4 *mask = item->mask;
418 	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
419 	struct ibv_flow_spec_ipv4_ext ipv4 = {
420 		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
421 		.size = size,
422 	};
423 
424 	if (!mask)
425 		mask = &rte_flow_item_ipv4_mask;
426 	if (spec) {
427 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
428 			.src_ip = spec->hdr.src_addr,
429 			.dst_ip = spec->hdr.dst_addr,
430 			.proto = spec->hdr.next_proto_id,
431 			.tos = spec->hdr.type_of_service,
432 		};
433 		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
434 			.src_ip = mask->hdr.src_addr,
435 			.dst_ip = mask->hdr.dst_addr,
436 			.proto = mask->hdr.next_proto_id,
437 			.tos = mask->hdr.type_of_service,
438 		};
439 		/* Remove unwanted bits from values. */
440 		ipv4.val.src_ip &= ipv4.mask.src_ip;
441 		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
442 		ipv4.val.proto &= ipv4.mask.proto;
443 		ipv4.val.tos &= ipv4.mask.tos;
444 	}
445 	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
446 }
447 
448 /**
449  * Convert the @p item into a Verbs specification. This function assumes that
450  * the input is valid and that there is space to insert the requested item
451  * into the flow.
452  *
453  * @param[in, out] dev_flow
454  *   Pointer to dev_flow structure.
455  * @param[in] item
456  *   Item specification.
457  * @param[in] item_flags
458  *   Parsed item flags.
459  */
460 static void
461 flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
462 			       const struct rte_flow_item *item,
463 			       uint64_t item_flags)
464 {
465 	const struct rte_flow_item_ipv6 *spec = item->spec;
466 	const struct rte_flow_item_ipv6 *mask = item->mask;
467 	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
468 	struct ibv_flow_spec_ipv6 ipv6 = {
469 		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
470 		.size = size,
471 	};
472 
473 	if (!mask)
474 		mask = &rte_flow_item_ipv6_mask;
475 	if (spec) {
476 		unsigned int i;
477 		uint32_t vtc_flow_val;
478 		uint32_t vtc_flow_mask;
479 
480 		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
481 		       RTE_DIM(ipv6.val.src_ip));
482 		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
483 		       RTE_DIM(ipv6.val.dst_ip));
484 		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
485 		       RTE_DIM(ipv6.mask.src_ip));
486 		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
487 		       RTE_DIM(ipv6.mask.dst_ip));
488 		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
489 		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
490 		ipv6.val.flow_label =
491 			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
492 					 RTE_IPV6_HDR_FL_SHIFT);
493 		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
494 					 RTE_IPV6_HDR_TC_SHIFT;
495 		ipv6.val.next_hdr = spec->hdr.proto;
496 		ipv6.val.hop_limit = spec->hdr.hop_limits;
497 		ipv6.mask.flow_label =
498 			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
499 					 RTE_IPV6_HDR_FL_SHIFT);
500 		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
501 					  RTE_IPV6_HDR_TC_SHIFT;
502 		ipv6.mask.next_hdr = mask->hdr.proto;
503 		ipv6.mask.hop_limit = mask->hdr.hop_limits;
504 		/* Remove unwanted bits from values. */
505 		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
506 			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
507 			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
508 		}
509 		ipv6.val.flow_label &= ipv6.mask.flow_label;
510 		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
511 		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
512 		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
513 	}
514 	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
515 }
516 
517 /**
518  * Convert the @p item into a Verbs specification. This function assumes that
519  * the input is valid and that there is space to insert the requested item
520  * into the flow.
521  *
522  * @param[in, out] dev_flow
523  *   Pointer to dev_flow structure.
524  * @param[in] item
525  *   Item specification.
526  * @param[in] item_flags
527  *   Parsed item flags.
528  */
529 static void
530 flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
531 			      const struct rte_flow_item *item,
532 			      uint64_t item_flags)
533 {
534 	const struct rte_flow_item_tcp *spec = item->spec;
535 	const struct rte_flow_item_tcp *mask = item->mask;
536 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
537 	struct ibv_flow_spec_tcp_udp tcp = {
538 		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
539 		.size = size,
540 	};
541 
542 	if (!mask)
543 		mask = &rte_flow_item_tcp_mask;
544 	if (spec) {
545 		tcp.val.dst_port = spec->hdr.dst_port;
546 		tcp.val.src_port = spec->hdr.src_port;
547 		tcp.mask.dst_port = mask->hdr.dst_port;
548 		tcp.mask.src_port = mask->hdr.src_port;
549 		/* Remove unwanted bits from values. */
550 		tcp.val.src_port &= tcp.mask.src_port;
551 		tcp.val.dst_port &= tcp.mask.dst_port;
552 	}
553 	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
554 }
555 
556 /**
557  * Convert the @p item into a Verbs specification. This function assumes that
558  * the input is valid and that there is space to insert the requested item
559  * into the flow.
560  *
561  * @param[in, out] dev_flow
562  *   Pointer to dev_flow structure.
563  * @param[in] item
564  *   Item specification.
565  * @param[in] item_flags
566  *   Parsed item flags.
567  */
568 static void
569 flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
570 			      const struct rte_flow_item *item,
571 			      uint64_t item_flags)
572 {
573 	const struct rte_flow_item_udp *spec = item->spec;
574 	const struct rte_flow_item_udp *mask = item->mask;
575 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
576 	struct ibv_flow_spec_tcp_udp udp = {
577 		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
578 		.size = size,
579 	};
580 
581 	if (!mask)
582 		mask = &rte_flow_item_udp_mask;
583 	if (spec) {
584 		udp.val.dst_port = spec->hdr.dst_port;
585 		udp.val.src_port = spec->hdr.src_port;
586 		udp.mask.dst_port = mask->hdr.dst_port;
587 		udp.mask.src_port = mask->hdr.src_port;
588 		/* Remove unwanted bits from values. */
589 		udp.val.src_port &= udp.mask.src_port;
590 		udp.val.dst_port &= udp.mask.dst_port;
591 	}
592 	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
593 }
594 
595 /**
596  * Convert the @p item into a Verbs specification. This function assumes that
597  * the input is valid and that there is space to insert the requested item
598  * into the flow.
599  *
600  * @param[in, out] dev_flow
601  *   Pointer to dev_flow structure.
602  * @param[in] item
603  *   Item specification.
604  * @param[in] item_flags
605  *   Parsed item flags.
606  */
607 static void
608 flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
609 				const struct rte_flow_item *item,
610 				uint64_t item_flags __rte_unused)
611 {
612 	const struct rte_flow_item_vxlan *spec = item->spec;
613 	const struct rte_flow_item_vxlan *mask = item->mask;
614 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
615 	struct ibv_flow_spec_tunnel vxlan = {
616 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
617 		.size = size,
618 	};
619 	union vni {
620 		uint32_t vlan_id;
621 		uint8_t vni[4];
622 	} id = { .vlan_id = 0, };
623 
624 	if (!mask)
625 		mask = &rte_flow_item_vxlan_mask;
626 	if (spec) {
627 		memcpy(&id.vni[1], spec->vni, 3);
628 		vxlan.val.tunnel_id = id.vlan_id;
629 		memcpy(&id.vni[1], mask->vni, 3);
630 		vxlan.mask.tunnel_id = id.vlan_id;
631 		/* Remove unwanted bits from values. */
632 		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
633 	}
634 	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
635 }
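
/*
 * Illustrative sketch (not part of the driver): the 24-bit VNI lands in
 * the three low-order bytes of the 32-bit tunnel id, leaving the first
 * byte zero:
 *
 *     uint8_t vni[3] = { 0x12, 0x34, 0x56 };   // hypothetical VNI
 *     memcpy(&id.vni[1], vni, 3);
 *     // id.vni == { 0x00, 0x12, 0x34, 0x56 }, reused as id.vlan_id
 */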
636 
637 /**
638  * Convert the @p item into a Verbs specification. This function assumes that
639  * the input is valid and that there is space to insert the requested item
640  * into the flow.
641  *
642  * @param[in, out] dev_flow
643  *   Pointer to dev_flow structure.
644  * @param[in] item
645  *   Item specification.
646  * @param[in] item_flags
647  *   Parsed item flags.
648  */
649 static void
650 flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
651 				    const struct rte_flow_item *item,
652 				    uint64_t item_flags __rte_unused)
653 {
654 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
655 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
656 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
657 	struct ibv_flow_spec_tunnel vxlan_gpe = {
658 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
659 		.size = size,
660 	};
661 	union vni {
662 		uint32_t vlan_id;
663 		uint8_t vni[4];
664 	} id = { .vlan_id = 0, };
665 
666 	if (!mask)
667 		mask = &rte_flow_item_vxlan_gpe_mask;
668 	if (spec) {
669 		memcpy(&id.vni[1], spec->vni, 3);
670 		vxlan_gpe.val.tunnel_id = id.vlan_id;
671 		memcpy(&id.vni[1], mask->vni, 3);
672 		vxlan_gpe.mask.tunnel_id = id.vlan_id;
673 		/* Remove unwanted bits from values. */
674 		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
675 	}
676 	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
677 }
678 
679 /**
680  * Update the protocol in Verbs IPv4/IPv6 spec.
681  *
682  * @param[in, out] attr
683  *   Pointer to Verbs attributes structure.
684  * @param[in] search
685  *   Specification type to search in order to update the IP protocol.
686  * @param[in] protocol
687  *   Protocol value to set if none is present in the specification.
688  */
689 static void
690 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
691 				       enum ibv_flow_spec_type search,
692 				       uint8_t protocol)
693 {
694 	unsigned int i;
695 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
696 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
697 
698 	if (!attr)
699 		return;
700 	for (i = 0; i != attr->num_of_specs; ++i) {
701 		if (hdr->type == search) {
702 			union {
703 				struct ibv_flow_spec_ipv4_ext *ipv4;
704 				struct ibv_flow_spec_ipv6 *ipv6;
705 			} ip;
706 
707 			switch (search) {
708 			case IBV_FLOW_SPEC_IPV4_EXT:
709 				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
710 				if (!ip.ipv4->val.proto) {
711 					ip.ipv4->val.proto = protocol;
712 					ip.ipv4->mask.proto = 0xff;
713 				}
714 				break;
715 			case IBV_FLOW_SPEC_IPV6:
716 				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
717 				if (!ip.ipv6->val.next_hdr) {
718 					ip.ipv6->val.next_hdr = protocol;
719 					ip.ipv6->mask.next_hdr = 0xff;
720 				}
721 				break;
722 			default:
723 				break;
724 			}
725 			break;
726 		}
727 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
728 	}
729 }
730 
731 /**
732  * Convert the @p item into a Verbs specification. This function assumes that
733  * the input is valid and that there is space to insert the requested item
734  * into the flow.
735  *
736  * @param[in, out] dev_flow
737  *   Pointer to dev_flow structure.
738  * @param[in] item
739  *   Item specification.
740  * @param[in] item_flags
741  *   Parsed item flags.
742  */
743 static void
744 flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
745 			      const struct rte_flow_item *item __rte_unused,
746 			      uint64_t item_flags)
747 {
748 	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
749 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
750 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
751 	struct ibv_flow_spec_tunnel tunnel = {
752 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
753 		.size = size,
754 	};
755 #else
756 	const struct rte_flow_item_gre *spec = item->spec;
757 	const struct rte_flow_item_gre *mask = item->mask;
758 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
759 	struct ibv_flow_spec_gre tunnel = {
760 		.type = IBV_FLOW_SPEC_GRE,
761 		.size = size,
762 	};
763 
764 	if (!mask)
765 		mask = &rte_flow_item_gre_mask;
766 	if (spec) {
767 		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
768 		tunnel.val.protocol = spec->protocol;
769 		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
770 		tunnel.mask.protocol = mask->protocol;
771 		/* Remove unwanted bits from values. */
772 		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
773 		tunnel.val.protocol &= tunnel.mask.protocol;
774 		tunnel.val.key &= tunnel.mask.key;
775 	}
776 #endif
777 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
778 		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
779 						       IBV_FLOW_SPEC_IPV4_EXT,
780 						       IPPROTO_GRE);
781 	else
782 		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
783 						       IBV_FLOW_SPEC_IPV6,
784 						       IPPROTO_GRE);
785 	flow_verbs_spec_add(verbs, &tunnel, size);
786 }
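
/*
 * Illustrative sketch (not part of the driver): when the outer IPv4
 * spec left the protocol unspecified, the update above pins it to GRE
 * so the rule cannot match non-GRE traffic:
 *
 *     // before: ipv4->val.proto == 0x00,        ipv4->mask.proto == 0x00
 *     // after:  ipv4->val.proto == IPPROTO_GRE, ipv4->mask.proto == 0xff
 */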
787 
788 /**
789  * Convert the @p item into a Verbs specification. This function assumes that
790  * the input is valid and that there is space to insert the requested item
791  * into the flow.
792  *
793  * @param[in, out] dev_flow
794  *   Pointer to dev_flow structure.
795  * @param[in] item
796  *   Item specification.
797  * @param[in] item_flags
798  *   Parsed item flags.
799  */
800 static void
801 flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
802 			       const struct rte_flow_item *item __rte_unused,
803 			       uint64_t item_flags __rte_unused)
804 {
805 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
806 	const struct rte_flow_item_mpls *spec = item->spec;
807 	const struct rte_flow_item_mpls *mask = item->mask;
808 	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
809 	struct ibv_flow_spec_mpls mpls = {
810 		.type = IBV_FLOW_SPEC_MPLS,
811 		.size = size,
812 	};
813 
814 	if (!mask)
815 		mask = &rte_flow_item_mpls_mask;
816 	if (spec) {
817 		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
818 		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
819 		/* Remove unwanted bits from values.  */
820 		mpls.val.label &= mpls.mask.label;
821 	}
822 	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
823 #endif
824 }
825 
826 /**
827  * Convert the @p action into a Verbs specification. This function assumes that
828  * the input is valid and that there is space to insert the requested action
829  * into the flow.
830  *
831  * @param[in] dev_flow
832  *   Pointer to mlx5_flow.
833  * @param[in] action
834  *   Action configuration.
835  */
836 static void
837 flow_verbs_translate_action_drop
838 	(struct mlx5_flow *dev_flow,
839 	 const struct rte_flow_action *action __rte_unused)
840 {
841 	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
842 	struct ibv_flow_spec_action_drop drop = {
843 			.type = IBV_FLOW_SPEC_ACTION_DROP,
844 			.size = size,
845 	};
846 
847 	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
848 }
849 
850 /**
851  * Convert the @p action into a Verbs specification. This function assumes that
852  * the input is valid and that there is space to insert the requested action
853  * into the flow.
854  *
855  * @param[in] dev_flow
856  *   Pointer to mlx5_flow.
857  * @param[in] action
858  *   Action configuration.
859  */
860 static void
861 flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
862 				  const struct rte_flow_action *action)
863 {
864 	const struct rte_flow_action_queue *queue = action->conf;
865 	struct rte_flow *flow = dev_flow->flow;
866 
867 	if (flow->rss.queue)
868 		(*flow->rss.queue)[0] = queue->index;
869 	flow->rss.queue_num = 1;
870 }
871 
872 /**
873  * Convert the @p action into a Verbs specification. This function assumes that
874  * the input is valid and that there is space to insert the requested action
875  * into the flow.
876  *
877  * @param[in] dev_flow
878  *   Pointer to mlx5_flow; the owning rte_flow receives the RSS
879  *   queues and hash key.
880  * @param[in] action
881  *   Action configuration, carrying a struct rte_flow_action_rss
882  *   in conf.
883  */
884 static void
885 flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
886 				const struct rte_flow_action *action)
887 {
888 	const struct rte_flow_action_rss *rss = action->conf;
889 	const uint8_t *rss_key;
890 	struct rte_flow *flow = dev_flow->flow;
891 
892 	if (flow->rss.queue)
893 		memcpy((*flow->rss.queue), rss->queue,
894 		       rss->queue_num * sizeof(uint16_t));
895 	flow->rss.queue_num = rss->queue_num;
896 	/* NULL RSS key indicates default RSS key. */
897 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
898 	memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
899 	/*
900 	 * rss->level and rss->types should be set in advance when expanding
901 	 * items for RSS.
902 	 */
903 }
904 
905 /**
906  * Convert the @p action into a Verbs specification. This function assumes that
907  * the input is valid and that there is space to insert the requested action
908  * into the flow.
909  *
910  * @param[in] dev_flow
911  *   Pointer to mlx5_flow.
912  * @param[in] action
913  *   Action configuration.
914  */
915 static void
916 flow_verbs_translate_action_flag
917 	(struct mlx5_flow *dev_flow,
918 	 const struct rte_flow_action *action __rte_unused)
919 {
920 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
921 	struct ibv_flow_spec_action_tag tag = {
922 		.type = IBV_FLOW_SPEC_ACTION_TAG,
923 		.size = size,
924 		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
925 	};
926 
927 	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
928 }
929 
930 /**
931  * Convert the @p action into a Verbs specification. This function assumes that
932  * the input is valid and that there is space to insert the requested action
933  * into the flow.
934  *
935  * @param[in] dev_flow
936  *   Pointer to mlx5_flow.
937  * @param[in] action
938  *   Action configuration.
939  */
940 static void
941 flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
942 				 const struct rte_flow_action *action)
943 {
944 	const struct rte_flow_action_mark *mark = action->conf;
945 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
946 	struct ibv_flow_spec_action_tag tag = {
947 		.type = IBV_FLOW_SPEC_ACTION_TAG,
948 		.size = size,
949 		.tag_id = mlx5_flow_mark_set(mark->id),
950 	};
951 
952 	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
953 }
954 
955 /**
956  * Convert the @p action into a Verbs specification. This function assumes that
957  * the input is valid and that there is space to insert the requested action
958  * into the flow.
959  *
960  * @param[in] dev_flow
961  *   Pointer to mlx5_flow.
962  * @param[in] action
963  *   Action configuration.
964  * @param[in] dev
965  *   Pointer to the Ethernet device structure.
966  * @param[out] error
967  *   Pointer to error structure.
968  *
969  * @return
970  *   0 on success, a negative errno value otherwise and rte_errno is set.
971  */
972 static int
973 flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
974 				  const struct rte_flow_action *action,
975 				  struct rte_eth_dev *dev,
976 				  struct rte_flow_error *error)
977 {
978 	const struct rte_flow_action_count *count = action->conf;
979 	struct rte_flow *flow = dev_flow->flow;
980 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
981 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
982 	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
983 	struct ibv_flow_spec_counter_action counter = {
984 		.type = IBV_FLOW_SPEC_ACTION_COUNT,
985 		.size = size,
986 	};
987 #endif
988 
989 	if (!flow->counter) {
990 		flow->counter = flow_verbs_counter_new(dev, count->shared,
991 						       count->id);
992 		if (!flow->counter)
993 			return rte_flow_error_set(error, rte_errno,
994 						  RTE_FLOW_ERROR_TYPE_ACTION,
995 						  action,
996 						  "cannot get counter"
997 						  " context.");
998 	}
999 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
1000 	counter.counter_set_handle = flow->counter->cs->handle;
1001 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
1002 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1003 	counter.counters = flow->counter->cs;
1004 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
1005 #endif
1006 	return 0;
1007 }
1008 
1009 /**
1010  * Internal validation function. Validates both actions and items.
1011  *
1012  * @param[in] dev
1013  *   Pointer to the Ethernet device structure.
1014  * @param[in] attr
1015  *   Pointer to the flow attributes.
1016  * @param[in] items
1017  *   Pointer to the list of items.
1018  * @param[in] actions
1019  *   Pointer to the list of actions.
1020  * @param[in] external
1021  *   This flow rule is created by a request external to the PMD.
1022  * @param[out] error
1023  *   Pointer to the error structure.
1024  *
1025  * @return
1026  *   0 on success, a negative errno value otherwise and rte_errno is set.
1027  */
1028 static int
1029 flow_verbs_validate(struct rte_eth_dev *dev,
1030 		    const struct rte_flow_attr *attr,
1031 		    const struct rte_flow_item items[],
1032 		    const struct rte_flow_action actions[],
1033 		    bool external __rte_unused,
1034 		    struct rte_flow_error *error)
1035 {
1036 	int ret;
1037 	uint64_t action_flags = 0;
1038 	uint64_t item_flags = 0;
1039 	uint64_t last_item = 0;
1040 	uint8_t next_protocol = 0xff;
1041 	uint16_t ether_type = 0;
1042 
1043 	if (items == NULL)
1044 		return -1;
1045 	ret = mlx5_flow_validate_attributes(dev, attr, error);
1046 	if (ret < 0)
1047 		return ret;
1048 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1049 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1050 		int ret = 0;
1051 
1052 		switch (items->type) {
1053 		case RTE_FLOW_ITEM_TYPE_VOID:
1054 			break;
1055 		case RTE_FLOW_ITEM_TYPE_ETH:
1056 			ret = mlx5_flow_validate_item_eth(items, item_flags,
1057 							  error);
1058 			if (ret < 0)
1059 				return ret;
1060 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1061 					     MLX5_FLOW_LAYER_OUTER_L2;
1062 			if (items->mask != NULL && items->spec != NULL) {
1063 				ether_type =
1064 					((const struct rte_flow_item_eth *)
1065 					 items->spec)->type;
1066 				ether_type &=
1067 					((const struct rte_flow_item_eth *)
1068 					 items->mask)->type;
1069 				ether_type = rte_be_to_cpu_16(ether_type);
1070 			} else {
1071 				ether_type = 0;
1072 			}
1073 			break;
1074 		case RTE_FLOW_ITEM_TYPE_VLAN:
1075 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
1076 							   dev, error);
1077 			if (ret < 0)
1078 				return ret;
1079 			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1080 					      MLX5_FLOW_LAYER_INNER_VLAN) :
1081 					     (MLX5_FLOW_LAYER_OUTER_L2 |
1082 					      MLX5_FLOW_LAYER_OUTER_VLAN);
1083 			if (items->mask != NULL && items->spec != NULL) {
1084 				ether_type =
1085 					((const struct rte_flow_item_vlan *)
1086 					 items->spec)->inner_type;
1087 				ether_type &=
1088 					((const struct rte_flow_item_vlan *)
1089 					 items->mask)->inner_type;
1090 				ether_type = rte_be_to_cpu_16(ether_type);
1091 			} else {
1092 				ether_type = 0;
1093 			}
1094 			break;
1095 		case RTE_FLOW_ITEM_TYPE_IPV4:
1096 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1097 							   last_item,
1098 							   ether_type, NULL,
1099 							   error);
1100 			if (ret < 0)
1101 				return ret;
1102 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1103 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1104 			if (items->mask != NULL &&
1105 			    ((const struct rte_flow_item_ipv4 *)
1106 			     items->mask)->hdr.next_proto_id) {
1107 				next_protocol =
1108 					((const struct rte_flow_item_ipv4 *)
1109 					 (items->spec))->hdr.next_proto_id;
1110 				next_protocol &=
1111 					((const struct rte_flow_item_ipv4 *)
1112 					 (items->mask))->hdr.next_proto_id;
1113 			} else {
1114 				/* Reset for inner layer. */
1115 				next_protocol = 0xff;
1116 			}
1117 			break;
1118 		case RTE_FLOW_ITEM_TYPE_IPV6:
1119 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1120 							   last_item,
1121 							   ether_type, NULL,
1122 							   error);
1123 			if (ret < 0)
1124 				return ret;
1125 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1126 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1127 			if (items->mask != NULL &&
1128 			    ((const struct rte_flow_item_ipv6 *)
1129 			     items->mask)->hdr.proto) {
1130 				next_protocol =
1131 					((const struct rte_flow_item_ipv6 *)
1132 					 items->spec)->hdr.proto;
1133 				next_protocol &=
1134 					((const struct rte_flow_item_ipv6 *)
1135 					 items->mask)->hdr.proto;
1136 			} else {
1137 				/* Reset for inner layer. */
1138 				next_protocol = 0xff;
1139 			}
1140 			break;
1141 		case RTE_FLOW_ITEM_TYPE_UDP:
1142 			ret = mlx5_flow_validate_item_udp(items, item_flags,
1143 							  next_protocol,
1144 							  error);
1145 			if (ret < 0)
1146 				return ret;
1147 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1148 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
1149 			break;
1150 		case RTE_FLOW_ITEM_TYPE_TCP:
1151 			ret = mlx5_flow_validate_item_tcp
1152 						(items, item_flags,
1153 						 next_protocol,
1154 						 &rte_flow_item_tcp_mask,
1155 						 error);
1156 			if (ret < 0)
1157 				return ret;
1158 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1159 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
1160 			break;
1161 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1162 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1163 							    error);
1164 			if (ret < 0)
1165 				return ret;
1166 			last_item = MLX5_FLOW_LAYER_VXLAN;
1167 			break;
1168 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1169 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
1170 								item_flags,
1171 								dev, error);
1172 			if (ret < 0)
1173 				return ret;
1174 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1175 			break;
1176 		case RTE_FLOW_ITEM_TYPE_GRE:
1177 			ret = mlx5_flow_validate_item_gre(items, item_flags,
1178 							  next_protocol, error);
1179 			if (ret < 0)
1180 				return ret;
1181 			last_item = MLX5_FLOW_LAYER_GRE;
1182 			break;
1183 		case RTE_FLOW_ITEM_TYPE_MPLS:
1184 			ret = mlx5_flow_validate_item_mpls(dev, items,
1185 							   item_flags,
1186 							   last_item, error);
1187 			if (ret < 0)
1188 				return ret;
1189 			last_item = MLX5_FLOW_LAYER_MPLS;
1190 			break;
1191 		default:
1192 			return rte_flow_error_set(error, ENOTSUP,
1193 						  RTE_FLOW_ERROR_TYPE_ITEM,
1194 						  NULL, "item not supported");
1195 		}
1196 		item_flags |= last_item;
1197 	}
1198 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1199 		switch (actions->type) {
1200 		case RTE_FLOW_ACTION_TYPE_VOID:
1201 			break;
1202 		case RTE_FLOW_ACTION_TYPE_FLAG:
1203 			ret = mlx5_flow_validate_action_flag(action_flags,
1204 							     attr,
1205 							     error);
1206 			if (ret < 0)
1207 				return ret;
1208 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1209 			break;
1210 		case RTE_FLOW_ACTION_TYPE_MARK:
1211 			ret = mlx5_flow_validate_action_mark(actions,
1212 							     action_flags,
1213 							     attr,
1214 							     error);
1215 			if (ret < 0)
1216 				return ret;
1217 			action_flags |= MLX5_FLOW_ACTION_MARK;
1218 			break;
1219 		case RTE_FLOW_ACTION_TYPE_DROP:
1220 			ret = mlx5_flow_validate_action_drop(action_flags,
1221 							     attr,
1222 							     error);
1223 			if (ret < 0)
1224 				return ret;
1225 			action_flags |= MLX5_FLOW_ACTION_DROP;
1226 			break;
1227 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1228 			ret = mlx5_flow_validate_action_queue(actions,
1229 							      action_flags, dev,
1230 							      attr,
1231 							      error);
1232 			if (ret < 0)
1233 				return ret;
1234 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1235 			break;
1236 		case RTE_FLOW_ACTION_TYPE_RSS:
1237 			ret = mlx5_flow_validate_action_rss(actions,
1238 							    action_flags, dev,
1239 							    attr, item_flags,
1240 							    error);
1241 			if (ret < 0)
1242 				return ret;
1243 			action_flags |= MLX5_FLOW_ACTION_RSS;
1244 			break;
1245 		case RTE_FLOW_ACTION_TYPE_COUNT:
1246 			ret = mlx5_flow_validate_action_count(dev, attr, error);
1247 			if (ret < 0)
1248 				return ret;
1249 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1250 			break;
1251 		default:
1252 			return rte_flow_error_set(error, ENOTSUP,
1253 						  RTE_FLOW_ERROR_TYPE_ACTION,
1254 						  actions,
1255 						  "action not supported");
1256 		}
1257 	}
1258 	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1259 		return rte_flow_error_set(error, EINVAL,
1260 					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
1261 					  "no fate action is found");
1262 	return 0;
1263 }
1264 
1265 /**
1266  * Calculate the number of bytes needed for the action part of the Verbs
1267  * flow.
1268  *
1269  * @param[in] actions
1270  *   Pointer to the list of actions.
1271  *
1272  * @return
1273  *   The size of the memory needed for all actions.
1274  */
1275 static int
1276 flow_verbs_get_actions_size(const struct rte_flow_action actions[])
1277 {
1278 	int size = 0;
1279 
1280 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1281 		switch (actions->type) {
1282 		case RTE_FLOW_ACTION_TYPE_VOID:
1283 			break;
1284 		case RTE_FLOW_ACTION_TYPE_FLAG:
1285 			size += sizeof(struct ibv_flow_spec_action_tag);
1286 			break;
1287 		case RTE_FLOW_ACTION_TYPE_MARK:
1288 			size += sizeof(struct ibv_flow_spec_action_tag);
1289 			break;
1290 		case RTE_FLOW_ACTION_TYPE_DROP:
1291 			size += sizeof(struct ibv_flow_spec_action_drop);
1292 			break;
1293 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1294 			break;
1295 		case RTE_FLOW_ACTION_TYPE_RSS:
1296 			break;
1297 		case RTE_FLOW_ACTION_TYPE_COUNT:
1298 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1299 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1300 			size += sizeof(struct ibv_flow_spec_counter_action);
1301 #endif
1302 			break;
1303 		default:
1304 			break;
1305 		}
1306 	}
1307 	return size;
1308 }
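
/*
 * Illustrative sketch (not part of the driver): a MARK + COUNT + END
 * action list (with counters compiled in) reserves
 *
 *     sizeof(struct ibv_flow_spec_action_tag) +
 *     sizeof(struct ibv_flow_spec_counter_action)
 *
 * bytes; QUEUE and RSS translate to no Verbs spec and add nothing.
 */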
1309 
1310 /**
1311  * Calculate the number of bytes needed for the item part of the Verbs
1312  * flow.
1313  *
1314  * @param[in] items
1315  *   Pointer to the list of items.
1316  *
1317  * @return
1318  *   The size of the memory needed for all items.
1319  */
1320 static int
1321 flow_verbs_get_items_size(const struct rte_flow_item items[])
1322 {
1323 	int size = 0;
1324 
1325 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1326 		switch (items->type) {
1327 		case RTE_FLOW_ITEM_TYPE_VOID:
1328 			break;
1329 		case RTE_FLOW_ITEM_TYPE_ETH:
1330 			size += sizeof(struct ibv_flow_spec_eth);
1331 			break;
1332 		case RTE_FLOW_ITEM_TYPE_VLAN:
1333 			size += sizeof(struct ibv_flow_spec_eth);
1334 			break;
1335 		case RTE_FLOW_ITEM_TYPE_IPV4:
1336 			size += sizeof(struct ibv_flow_spec_ipv4_ext);
1337 			break;
1338 		case RTE_FLOW_ITEM_TYPE_IPV6:
1339 			size += sizeof(struct ibv_flow_spec_ipv6);
1340 			break;
1341 		case RTE_FLOW_ITEM_TYPE_UDP:
1342 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1343 			break;
1344 		case RTE_FLOW_ITEM_TYPE_TCP:
1345 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1346 			break;
1347 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1348 			size += sizeof(struct ibv_flow_spec_tunnel);
1349 			break;
1350 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1351 			size += sizeof(struct ibv_flow_spec_tunnel);
1352 			break;
1353 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1354 		case RTE_FLOW_ITEM_TYPE_GRE:
1355 			size += sizeof(struct ibv_flow_spec_gre);
1356 			break;
1357 		case RTE_FLOW_ITEM_TYPE_MPLS:
1358 			size += sizeof(struct ibv_flow_spec_mpls);
1359 			break;
1360 #else
1361 		case RTE_FLOW_ITEM_TYPE_GRE:
1362 			size += sizeof(struct ibv_flow_spec_tunnel);
1363 			break;
1364 #endif
1365 		default:
1366 			break;
1367 		}
1368 	}
1369 	return size;
1370 }
1371 
1372 /**
1373  * Internal preparation function. Allocate mlx5_flow with the required size.
1374  * The required size is calculated based on the actions and items;
1375  * the single allocation covers the flow, its attributes and the specs.
1376  *
1377  * @param[in] attr
1378  *   Pointer to the flow attributes.
1379  * @param[in] items
1380  *   Pointer to the list of items.
1381  * @param[in] actions
1382  *   Pointer to the list of actions.
1383  * @param[out] error
1384  *   Pointer to the error structure.
1385  *
1386  * @return
1387  *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1388  *   is set.
1389  */
1390 static struct mlx5_flow *
1391 flow_verbs_prepare(const struct rte_flow_attr *attr,
1392 		   const struct rte_flow_item items[],
1393 		   const struct rte_flow_action actions[],
1394 		   struct rte_flow_error *error)
1395 {
1396 	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1397 	struct mlx5_flow *dev_flow;
1398 
1399 	size += flow_verbs_get_actions_size(actions);
1400 	size += flow_verbs_get_items_size(items);
1401 	dev_flow = rte_calloc(__func__, 1, size, 0);
1402 	if (!dev_flow) {
1403 		rte_flow_error_set(error, ENOMEM,
1404 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1405 				   "not enough memory to create flow");
1406 		return NULL;
1407 	}
1408 	dev_flow->verbs.attr = (void *)(dev_flow + 1);
1409 	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
1410 	dev_flow->ingress = attr->ingress;
1411 	dev_flow->transfer = attr->transfer;
1412 	return dev_flow;
1413 }
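
/*
 * Illustrative sketch (not part of the driver): the single allocation
 * is carved into three consecutive regions:
 *
 *     | struct mlx5_flow | struct ibv_flow_attr | item/action specs |
 *       dev_flow           verbs.attr             verbs.specs
 */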
1414 
1415 /**
1416  * Fill the flow with Verbs specifications.
1417  *
1418  * @param[in] dev
1419  *   Pointer to Ethernet device.
1420  * @param[in, out] dev_flow
1421  *   Pointer to the mlx5 flow.
1422  * @param[in] attr
1423  *   Pointer to the flow attributes.
1424  * @param[in] items
1425  *   Pointer to the list of items.
1426  * @param[in] actions
1427  *   Pointer to the list of actions.
1428  * @param[out] error
1429  *   Pointer to the error structure.
1430  *
1431  * @return
1432  *   0 on success, a negative errno value otherwise and rte_errno is set.
1433  */
1434 static int
1435 flow_verbs_translate(struct rte_eth_dev *dev,
1436 		     struct mlx5_flow *dev_flow,
1437 		     const struct rte_flow_attr *attr,
1438 		     const struct rte_flow_item items[],
1439 		     const struct rte_flow_action actions[],
1440 		     struct rte_flow_error *error)
1441 {
1442 	uint64_t item_flags = 0;
1443 	uint64_t action_flags = 0;
1444 	uint64_t priority = attr->priority;
1445 	uint32_t subpriority = 0;
1446 	struct mlx5_priv *priv = dev->data->dev_private;
1447 
1448 	if (priority == MLX5_FLOW_PRIO_RSVD)
1449 		priority = priv->config.flow_prio - 1;
1450 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1451 		int ret;
1452 
1453 		switch (actions->type) {
1454 		case RTE_FLOW_ACTION_TYPE_VOID:
1455 			break;
1456 		case RTE_FLOW_ACTION_TYPE_FLAG:
1457 			flow_verbs_translate_action_flag(dev_flow, actions);
1458 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1459 			break;
1460 		case RTE_FLOW_ACTION_TYPE_MARK:
1461 			flow_verbs_translate_action_mark(dev_flow, actions);
1462 			action_flags |= MLX5_FLOW_ACTION_MARK;
1463 			break;
1464 		case RTE_FLOW_ACTION_TYPE_DROP:
1465 			flow_verbs_translate_action_drop(dev_flow, actions);
1466 			action_flags |= MLX5_FLOW_ACTION_DROP;
1467 			break;
1468 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1469 			flow_verbs_translate_action_queue(dev_flow, actions);
1470 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1471 			break;
1472 		case RTE_FLOW_ACTION_TYPE_RSS:
1473 			flow_verbs_translate_action_rss(dev_flow, actions);
1474 			action_flags |= MLX5_FLOW_ACTION_RSS;
1475 			break;
1476 		case RTE_FLOW_ACTION_TYPE_COUNT:
1477 			ret = flow_verbs_translate_action_count(dev_flow,
1478 								actions,
1479 								dev, error);
1480 			if (ret < 0)
1481 				return ret;
1482 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1483 			break;
1484 		default:
1485 			return rte_flow_error_set(error, ENOTSUP,
1486 						  RTE_FLOW_ERROR_TYPE_ACTION,
1487 						  actions,
1488 						  "action not supported");
1489 		}
1490 	}
1491 	dev_flow->actions = action_flags;
1492 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1493 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1494 
1495 		switch (items->type) {
1496 		case RTE_FLOW_ITEM_TYPE_VOID:
1497 			break;
1498 		case RTE_FLOW_ITEM_TYPE_ETH:
1499 			flow_verbs_translate_item_eth(dev_flow, items,
1500 						      item_flags);
1501 			subpriority = MLX5_PRIORITY_MAP_L2;
1502 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1503 					       MLX5_FLOW_LAYER_OUTER_L2;
1504 			break;
1505 		case RTE_FLOW_ITEM_TYPE_VLAN:
1506 			flow_verbs_translate_item_vlan(dev_flow, items,
1507 						       item_flags);
1508 			subpriority = MLX5_PRIORITY_MAP_L2;
1509 			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1510 						MLX5_FLOW_LAYER_INNER_VLAN) :
1511 					       (MLX5_FLOW_LAYER_OUTER_L2 |
1512 						MLX5_FLOW_LAYER_OUTER_VLAN);
1513 			break;
1514 		case RTE_FLOW_ITEM_TYPE_IPV4:
1515 			flow_verbs_translate_item_ipv4(dev_flow, items,
1516 						       item_flags);
1517 			subpriority = MLX5_PRIORITY_MAP_L3;
1518 			dev_flow->hash_fields |=
1519 				mlx5_flow_hashfields_adjust
1520 					(dev_flow, tunnel,
1521 					 MLX5_IPV4_LAYER_TYPES,
1522 					 MLX5_IPV4_IBV_RX_HASH);
1523 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1524 					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1525 			break;
1526 		case RTE_FLOW_ITEM_TYPE_IPV6:
1527 			flow_verbs_translate_item_ipv6(dev_flow, items,
1528 						       item_flags);
1529 			subpriority = MLX5_PRIORITY_MAP_L3;
1530 			dev_flow->hash_fields |=
1531 				mlx5_flow_hashfields_adjust
1532 					(dev_flow, tunnel,
1533 					 MLX5_IPV6_LAYER_TYPES,
1534 					 MLX5_IPV6_IBV_RX_HASH);
1535 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1536 					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1537 			break;
1538 		case RTE_FLOW_ITEM_TYPE_TCP:
1539 			flow_verbs_translate_item_tcp(dev_flow, items,
1540 						      item_flags);
1541 			subpriority = MLX5_PRIORITY_MAP_L4;
1542 			dev_flow->hash_fields |=
1543 				mlx5_flow_hashfields_adjust
1544 					(dev_flow, tunnel, ETH_RSS_TCP,
1545 					 (IBV_RX_HASH_SRC_PORT_TCP |
1546 					  IBV_RX_HASH_DST_PORT_TCP));
1547 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1548 					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
1549 			break;
1550 		case RTE_FLOW_ITEM_TYPE_UDP:
1551 			flow_verbs_translate_item_udp(dev_flow, items,
1552 						      item_flags);
1553 			subpriority = MLX5_PRIORITY_MAP_L4;
1554 			dev_flow->hash_fields |=
1555 				mlx5_flow_hashfields_adjust
1556 					(dev_flow, tunnel, ETH_RSS_UDP,
1557 					 (IBV_RX_HASH_SRC_PORT_UDP |
1558 					  IBV_RX_HASH_DST_PORT_UDP));
1559 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1560 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
1561 			break;
1562 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1563 			flow_verbs_translate_item_vxlan(dev_flow, items,
1564 							item_flags);
1565 			subpriority = MLX5_PRIORITY_MAP_L2;
1566 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
1567 			break;
1568 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1569 			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1570 							    item_flags);
1571 			subpriority = MLX5_PRIORITY_MAP_L2;
1572 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1573 			break;
1574 		case RTE_FLOW_ITEM_TYPE_GRE:
1575 			flow_verbs_translate_item_gre(dev_flow, items,
1576 						      item_flags);
1577 			subpriority = MLX5_PRIORITY_MAP_L2;
1578 			item_flags |= MLX5_FLOW_LAYER_GRE;
1579 			break;
1580 		case RTE_FLOW_ITEM_TYPE_MPLS:
1581 			flow_verbs_translate_item_mpls(dev_flow, items,
1582 						       item_flags);
1583 			subpriority = MLX5_PRIORITY_MAP_L2;
1584 			item_flags |= MLX5_FLOW_LAYER_MPLS;
1585 			break;
1586 		default:
1587 			return rte_flow_error_set(error, ENOTSUP,
1588 						  RTE_FLOW_ERROR_TYPE_ITEM,
1589 						  NULL,
1590 						  "item not supported");
1591 		}
1592 	}
1593 	dev_flow->layers = item_flags;
1594 	dev_flow->verbs.attr->priority =
1595 		mlx5_flow_adjust_priority(dev, priority, subpriority);
1596 	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
1597 	return 0;
1598 }
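
/*
 * Illustrative sketch (not part of the driver): subpriority tracks the
 * deepest translated layer so more specific rules win, e.g. an
 * ETH / IPV4 / TCP pattern ends with MLX5_PRIORITY_MAP_L4 while a bare
 * ETH pattern keeps MLX5_PRIORITY_MAP_L2; mlx5_flow_adjust_priority()
 * then folds it into the final attr->priority.
 */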
1599 
1600 /**
1601  * Remove the flow from the NIC but keep it in memory.
1602  *
1603  * @param[in] dev
1604  *   Pointer to the Ethernet device structure.
1605  * @param[in, out] flow
1606  *   Pointer to flow structure.
1607  */
1608 static void
1609 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1610 {
1611 	struct mlx5_flow_verbs *verbs;
1612 	struct mlx5_flow *dev_flow;
1613 
1614 	if (!flow)
1615 		return;
1616 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1617 		verbs = &dev_flow->verbs;
1618 		if (verbs->flow) {
1619 			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
1620 			verbs->flow = NULL;
1621 		}
1622 		if (verbs->hrxq) {
1623 			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
1624 				mlx5_hrxq_drop_release(dev);
1625 			else
1626 				mlx5_hrxq_release(dev, verbs->hrxq);
1627 			verbs->hrxq = NULL;
1628 		}
1629 		if (dev_flow->verbs.vf_vlan.tag &&
1630 		    dev_flow->verbs.vf_vlan.created) {
1631 			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
1632 		}
1633 	}
1634 }
1635 
1636 /**
1637  * Remove the flow from the NIC and the memory.
1638  *
1639  * @param[in] dev
1640  *   Pointer to the Ethernet device structure.
1641  * @param[in, out] flow
1642  *   Pointer to flow structure.
1643  */
1644 static void
1645 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1646 {
1647 	struct mlx5_flow *dev_flow;
1648 
1649 	if (!flow)
1650 		return;
1651 	flow_verbs_remove(dev, flow);
1652 	while (!LIST_EMPTY(&flow->dev_flows)) {
1653 		dev_flow = LIST_FIRST(&flow->dev_flows);
1654 		LIST_REMOVE(dev_flow, next);
1655 		rte_free(dev_flow);
1656 	}
1657 	if (flow->counter) {
1658 		flow_verbs_counter_release(dev, flow->counter);
1659 		flow->counter = NULL;
1660 	}
1661 }
1662 
1663 /**
1664  * Apply the flow to the NIC.
1665  *
1666  * @param[in] dev
1667  *   Pointer to the Ethernet device structure.
1668  * @param[in, out] flow
1669  *   Pointer to flow structure.
1670  * @param[out] error
1671  *   Pointer to error structure.
1672  *
1673  * @return
1674  *   0 on success, a negative errno value otherwise and rte_errno is set.
1675  */
1676 static int
1677 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1678 		 struct rte_flow_error *error)
1679 {
1680 	struct mlx5_priv *priv = dev->data->dev_private;
1681 	struct mlx5_flow_verbs *verbs;
1682 	struct mlx5_flow *dev_flow;
1683 	int err;
1684 
1685 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1686 		verbs = &dev_flow->verbs;
1687 		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
1688 			verbs->hrxq = mlx5_hrxq_drop_new(dev);
1689 			if (!verbs->hrxq) {
1690 				rte_flow_error_set
1691 					(error, errno,
1692 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1693 					 "cannot get drop hash queue");
1694 				goto error;
1695 			}
1696 		} else {
1697 			struct mlx5_hrxq *hrxq;
1698 
1699 			assert(flow->rss.queue);
1700 			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
1701 					     MLX5_RSS_HASH_KEY_LEN,
1702 					     dev_flow->hash_fields,
1703 					     (*flow->rss.queue),
1704 					     flow->rss.queue_num);
1705 			if (!hrxq)
1706 				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
1707 						     MLX5_RSS_HASH_KEY_LEN,
1708 						     dev_flow->hash_fields,
1709 						     (*flow->rss.queue),
1710 						     flow->rss.queue_num,
1711 						     !!(dev_flow->layers &
1712 						       MLX5_FLOW_LAYER_TUNNEL));
1713 			if (!hrxq) {
1714 				rte_flow_error_set
1715 					(error, rte_errno,
1716 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1717 					 "cannot get hash queue");
1718 				goto error;
1719 			}
1720 			verbs->hrxq = hrxq;
1721 		}
1722 		verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1723 						     verbs->attr);
1724 		if (!verbs->flow) {
1725 			rte_flow_error_set(error, errno,
1726 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1727 					   NULL,
1728 					   "hardware refuses to create flow");
1729 			goto error;
1730 		}
1731 		if (priv->vmwa_context &&
1732 		    dev_flow->verbs.vf_vlan.tag &&
1733 		    !dev_flow->verbs.vf_vlan.created) {
1734 			/*
1735 			 * The rule contains the VLAN pattern.
1736 			 * For VF we are going to create VLAN
1737 			 * interface to make hypervisor set correct
1738 			 * e-Switch vport context.
1739 			 */
1740 			mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
1741 		}
1742 	}
1743 	return 0;
1744 error:
1745 	err = rte_errno; /* Save rte_errno before cleanup. */
1746 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1747 		verbs = &dev_flow->verbs;
1748 		if (verbs->hrxq) {
1749 			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
1750 				mlx5_hrxq_drop_release(dev);
1751 			else
1752 				mlx5_hrxq_release(dev, verbs->hrxq);
1753 			verbs->hrxq = NULL;
1754 		}
1755 		if (dev_flow->verbs.vf_vlan.tag &&
1756 		    dev_flow->verbs.vf_vlan.created) {
1757 			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
1758 		}
1759 	}
1760 	rte_errno = err; /* Restore rte_errno. */
1761 	return -rte_errno;
1762 }
1763 
1764 /**
1765  * Query a flow.
1766  *
1767  * @see rte_flow_query()
1768  * @see rte_flow_ops
1769  */
1770 static int
1771 flow_verbs_query(struct rte_eth_dev *dev,
1772 		 struct rte_flow *flow,
1773 		 const struct rte_flow_action *actions,
1774 		 void *data,
1775 		 struct rte_flow_error *error)
1776 {
1777 	int ret = -EINVAL;
1778 
1779 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1780 		switch (actions->type) {
1781 		case RTE_FLOW_ACTION_TYPE_VOID:
1782 			break;
1783 		case RTE_FLOW_ACTION_TYPE_COUNT:
1784 			ret = flow_verbs_counter_query(dev, flow, data, error);
1785 			break;
1786 		default:
1787 			return rte_flow_error_set(error, ENOTSUP,
1788 						  RTE_FLOW_ERROR_TYPE_ACTION,
1789 						  actions,
1790 						  "action not supported");
1791 		}
1792 	}
1793 	return ret;
1794 }
1795 
1796 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
1797 	.validate = flow_verbs_validate,
1798 	.prepare = flow_verbs_prepare,
1799 	.translate = flow_verbs_translate,
1800 	.apply = flow_verbs_apply,
1801 	.remove = flow_verbs_remove,
1802 	.destroy = flow_verbs_destroy,
1803 	.query = flow_verbs_query,
1804 };
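
/*
 * Illustrative sketch (not part of the driver, caller behavior assumed):
 * the generic mlx5 flow layer drives these callbacks roughly as follows
 * for each rte_flow request:
 *
 *     const struct mlx5_flow_driver_ops *ops = &mlx5_flow_verbs_drv_ops;
 *
 *     if (!ops->validate(dev, attr, items, actions, true, error)) {
 *             struct mlx5_flow *dev_flow =
 *                     ops->prepare(attr, items, actions, error);
 *             // ... link dev_flow into flow->dev_flows, then:
 *             ops->translate(dev, dev_flow, attr, items, actions, error);
 *             ops->apply(dev, flow, error);
 *     }
 */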
1805