/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

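/* Select the IBV_FLOW_SPEC_INNER flag when the item follows a tunnel layer. */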
#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/**
 * Create a flow counter with the Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   Pointer to the mlx5 flow counter object; it provides the counter ID,
 *   and the handle of the created Verbs flow counter is returned in its
 *   cs field (if counters are supported).
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->id};

	counter->cs = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->cs) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->cs = mlx5_glue->create_counters(ctx, &init);
	if (!counter->cs) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
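	/* Attach the packet counter at index 0 and the byte counter at
	 * index 1; flow_verbs_counter_query() reads them back in the
	 * same order.
	 */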
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->cs, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->cs));
		counter->cs = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   A pointer to the counter, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;
	int ret;

	if (shared) {
		TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
			if (cnt->shared && cnt->id == id) {
				cnt->ref_cnt++;
				return cnt;
			}
		}
	}
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	if (!cnt) {
		rte_errno = ENOMEM;
		return NULL;
	}
	cnt->id = id;
	cnt->shared = shared;
	cnt->ref_cnt = 1;
	cnt->hits = 0;
	cnt->bytes = 0;
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
		return cnt;
	}
	/* Some error occurred in Verbs library. */
	rte_free(cnt);
	rte_errno = -ret;
	return NULL;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Pointer to the counter object.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev,
			   struct mlx5_flow_counter *counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (--counter->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		claim_zero(mlx5_glue->destroy_counters(counter->cs));
#endif
		TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
		rte_free(counter);
	}
}

/**
 * Query a flow counter via a Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter && flow->counter->cs) {
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = flow->counter->cs,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			       (flow->counter->cs, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
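		/* Verbs reports absolute values; return deltas relative to
		 * the values saved at the last reset.
		 */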
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - flow->counter->hits;
		qc->bytes = counters[1] - flow->counter->bytes;
		if (qc->reset) {
			flow->counter->hits = counters[0];
			flow->counter->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a Verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to Verbs structure.
 * @param[in] src
 *   Pointer to the specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
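	/* Append the spec at the current tail of the attribute buffer. */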
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr->num_of_specs;
	verbs->size += size;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and that a matching Ethernet
 * specification has already been added to the flow attributes.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

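	/* Walk the specs added so far and merge the VLAN fields into the
	 * Ethernet spec of the same (inner/outer) scope.
	 */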
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
	/* The spec may be absent for a wildcard VLAN match. */
	if (!tunnel && spec)
		dev_flow->verbs.vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.val.hop_limit = spec->hdr.hop_limits;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		ipv6.mask.hop_limit = mask->hdr.hop_limits;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
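		/* The VNI is 24 bits; place it in bytes 1..3 so it lands in
		 * the low-order bits of the big-endian tunnel_id.
		 */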
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
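	/* Verbs has no dedicated VXLAN-GPE spec; reuse the VXLAN tunnel
	 * spec type.
	 */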
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in the Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr;

	if (!attr)
		return;
	hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
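	/* Make the preceding L3 spec match IPPROTO_GRE when the pattern did
	 * not request an explicit IP protocol.
	 */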
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values.  */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;
	struct rte_flow *flow = dev_flow->flow;

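	/* No Verbs spec for QUEUE; the target queue is recorded here and
	 * resolved into a hash Rx queue at apply time.
	 */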
	if (flow->rss.queue)
		(*flow->rss.queue)[0] = queue->index;
	flow->rss.queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;
	struct rte_flow *flow = dev_flow->flow;

	if (flow->rss.queue)
		memcpy((*flow->rss.queue), rss->queue,
		       rss->queue_num * sizeof(uint16_t));
	flow->rss.queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss.types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	counter.counter_set_handle = flow->counter->cs->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	counter.counters = flow->counter->cs;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   Whether this flow rule was created by a request external to the PMD.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the amount of memory, in bytes, needed for the action part of the
 * Verbs flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the amount of memory, in bytes, needed for the item part of the
 * Verbs flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

/**
 * Internal preparation function. Allocate the mlx5_flow with the required
 * size. The required size is calculated from the given actions and items.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
	struct mlx5_flow *dev_flow;

	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	dev_flow = rte_calloc(__func__, 1, size, 0);
	if (!dev_flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
		return NULL;
	}
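	/* The allocation is laid out as: mlx5_flow, then ibv_flow_attr,
	 * then the buffer receiving the translated specs.
	 */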
	dev_flow->verbs.attr = (void *)(dev_flow + 1);
	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
	dev_flow->ingress = attr->ingress;
	dev_flow->transfer = attr->transfer;
	return dev_flow;
}

/**
 * Fill the flow with Verbs specifications.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

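	/* A reserved priority means none was requested; use the last
	 * configured flow priority.
	 */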
	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->actions = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL,
						  "item not supported");
		}
	}
	dev_flow->layers = item_flags;
	dev_flow->verbs.attr->priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
	return 0;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_verbs *verbs;
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		verbs = &dev_flow->verbs;
		if (verbs->flow) {
			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
			verbs->flow = NULL;
		}
		if (verbs->hrxq) {
			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, verbs->hrxq);
			verbs->hrxq = NULL;
		}
		if (dev_flow->verbs.vf_vlan.tag &&
		    dev_flow->verbs.vf_vlan.created) {
			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
		}
	}
}

/**
 * Remove the flow from the NIC and from memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		rte_free(dev_flow);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = NULL;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_verbs *verbs;
	struct mlx5_flow *dev_flow;
	int err;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		verbs = &dev_flow->verbs;
		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
			verbs->hrxq = mlx5_hrxq_drop_new(dev);
			if (!verbs->hrxq) {
				rte_flow_error_set
					(error, errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get drop hash queue");
				goto error;
			}
		} else {
			struct mlx5_hrxq *hrxq;

			MLX5_ASSERT(flow->rss.queue);
			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dev_flow->hash_fields,
					     (*flow->rss.queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
						     MLX5_RSS_HASH_KEY_LEN,
						     dev_flow->hash_fields,
						     (*flow->rss.queue),
						     flow->rss.queue_num,
						     !!(dev_flow->layers &
						       MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			verbs->hrxq = hrxq;
		}
		verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
						     verbs->attr);
		if (!verbs->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dev_flow->verbs.vf_vlan.tag &&
		    !dev_flow->verbs.vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For a VF we create a VLAN interface so that the
			 * hypervisor sets the correct e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		verbs = &dev_flow->verbs;
		if (verbs->hrxq) {
			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, verbs->hrxq);
			verbs->hrxq = NULL;
		}
		if (dev_flow->verbs.vf_vlan.tag &&
		    dev_flow->verbs.vf_vlan.created) {
			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
};