/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}

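/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * caller probing the priority range once at device start and caching the
 * result in priv->config.flow_prio, the field consumed by
 * mlx5_flow_adjust_priority() below.
 */
static int __rte_unused
flow_verbs_example_setup_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int prio = mlx5_flow_discover_priorities(dev);

	if (prio < 0)
		return prio; /* rte_errno was set by the probe. */
	/* prio is RTE_DIM(priority_map_3) or RTE_DIM(priority_map_5). */
	priv->config.flow_prio = prio;
	return 0;
}
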
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

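/*
 * Illustrative sketch (not part of the upstream driver): with 16 Verbs
 * priorities (priority_map_5) a rule at base priority 1 whose highest
 * matched layer is L3 (subpriority 1) lands on Verbs priority
 * priority_map_5[1][1] == 4. The hypothetical helper below spells out
 * that table lookup with explicit bounds checks.
 */
static inline uint32_t __rte_unused
flow_verbs_example_priority_lookup(uint32_t priority, uint32_t subpriority)
{
	/* Clamp out-of-range inputs to the table bounds. */
	if (priority >= RTE_DIM(priority_map_5))
		priority = RTE_DIM(priority_map_5) - 1;
	if (subpriority >= MLX5_PRIORITY_MAP_MAX)
		subpriority = MLX5_PRIORITY_MAP_MAX - 1;
	return priority_map_5[priority][subpriority];
}
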
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

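/*
 * Illustrative sketch (not part of the upstream driver): counter indices
 * are 1-based and, after masking off the shared-counter flag above
 * MLX5_CNT_SHARED_OFFSET, encode (pool, offset) as
 * idx - 1 == pool * MLX5_COUNTERS_PER_POOL + offset. The hypothetical
 * helper below splits an index the same way the lookup above does.
 */
static inline void __rte_unused
flow_verbs_example_split_cnt_idx(uint32_t idx, uint32_t *pool_idx,
				 uint32_t *offset)
{
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	*pool_idx = idx / MLX5_COUNTERS_PER_POOL;
	*offset = idx % MLX5_COUNTERS_PER_POOL;
}
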
/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of the created Verbs flow counter is returned
 *   in the dcs_when_free field (if counters are supported).
 *
 * @return
 *   0 on success, else a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

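/*
 * Illustrative sketch (not part of the upstream driver): the container
 * resize above follows the usual grow-by-fixed-step pattern. A
 * hypothetical generic version of that step, using the same mlx5_malloc
 * calls:
 */
static int __rte_unused
flow_verbs_example_grow_array(void ***arr, uint32_t *cap, uint32_t step)
{
	size_t size = sizeof(void *) * (*cap + step);
	void **tmp = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);

	if (!tmp)
		return -ENOMEM;
	if (*cap) {
		/* Preserve existing entries, then release the old array. */
		memcpy(tmp, *arr, sizeof(void *) * *cap);
		mlx5_free(*arr);
	}
	*arr = tmp;
	*cap += step;
	return 0;
}
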
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index of the counter handle.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
				((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

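/*
 * Illustrative sketch (not part of the upstream driver): how an
 * application reaches the query path above through the public rte_flow
 * API, assuming "flow" was created with a COUNT action on this port.
 */
static int __rte_unused
flow_verbs_example_read_counter(struct rte_eth_dev *dev,
				struct rte_flow *flow,
				struct rte_flow_query_count *qc,
				struct rte_flow_error *err)
{
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};

	qc->reset = 1; /* Restart the hits/bytes deltas after reading. */
	return rte_flow_query(dev->data->port_id, flow, &action, qc, err);
}
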
/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

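/*
 * Illustrative sketch (not part of the upstream driver): specs are laid
 * out back-to-back behind struct ibv_flow_attr, each starting with a
 * struct ibv_spec_header. The hypothetical helper below walks that chain
 * exactly like flow_verbs_item_vlan_update() and counts specs of a given
 * type.
 */
static unsigned int __rte_unused
flow_verbs_example_count_specs(const struct ibv_flow_attr *attr,
			       enum ibv_flow_spec_type type)
{
	const struct ibv_spec_header *hdr = (const struct ibv_spec_header *)
		((const uint8_t *)attr + sizeof(struct ibv_flow_attr));
	unsigned int i;
	unsigned int n = 0;

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == type)
			++n;
		hdr = (const struct ibv_spec_header *)
			((const uint8_t *)hdr + hdr->size);
	}
	return n;
}
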
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	/* The VF VLAN tag is only known when the item carries a spec. */
	if (!tunnel && spec)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

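/*
 * Illustrative sketch (not part of the upstream driver): the union above
 * packs the 24-bit VNI into bytes 1..3 of the 32-bit tunnel_id, leaving
 * byte 0 clear, so VNI 0x123456 is stored as the byte sequence
 * 00 12 34 56. The hypothetical helper below performs just that packing.
 */
static inline uint32_t __rte_unused
flow_verbs_example_pack_vni(const uint8_t vni[3])
{
	union {
		uint32_t tunnel_id;
		uint8_t vni[4];
	} id = { .tunnel_id = 0, };

	memcpy(&id.vni[1], vni, 3);
	return id.tunnel_id;
}
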
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

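/*
 * Illustrative sketch (not part of the upstream driver): the RSS action
 * conf an application might hand down to the translation above; a NULL
 * key selects rss_hash_default_key.
 */
static void __rte_unused
flow_verbs_example_fill_rss(struct mlx5_flow_rss_desc *rss_desc)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss conf = {
		.types = ETH_RSS_IP,
		.key = NULL, /* Fall back to the default RSS key. */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &conf,
	};

	flow_verbs_translate_action_rss(rss_desc, &action);
}
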
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, else a negative errno value is returned and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;
			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);

			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
						 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
		    "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

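/*
 * Illustrative sketch (not part of the upstream driver): a minimal rule
 * that passes the checks above — the drop action is combined with no
 * other action and the flow carries a fate action.
 */
static int __rte_unused
flow_verbs_example_validate_drop(struct rte_eth_dev *dev,
				 struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return flow_verbs_validate(dev, &attr, items, actions, true, 0, err);
}
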
/**
 * Calculate the number of bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the number of bytes needed for the item part of the Verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

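/*
 * Illustrative sketch (not part of the upstream driver): for a plain
 * eth / ipv4 / udp pattern the helper above adds up
 * sizeof(struct ibv_flow_spec_eth) + sizeof(struct ibv_flow_spec_ipv4_ext)
 * + sizeof(struct ibv_flow_spec_tcp_udp).
 */
static int __rte_unused
flow_verbs_example_items_size(void)
{
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return flow_verbs_get_items_size(items);
}
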
1608 
1609 /**
1610  * Internal preparation function. Allocate mlx5_flow with the required size.
1611  * The required size is calculate based on the actions and items. This function
1612  * also returns the detected actions and items for later use.
1613  *
1614  * @param[in] dev
1615  *   Pointer to Ethernet device.
1616  * @param[in] attr
1617  *   Pointer to the flow attributes.
1618  * @param[in] items
1619  *   Pointer to the list of items.
1620  * @param[in] actions
1621  *   Pointer to the list of actions.
1622  * @param[out] error
1623  *   Pointer to the error structure.
1624  *
1625  * @return
1626  *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1627  *   is set.
1628  */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overrunning the device flow array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				   &handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1699 static int
1700 flow_verbs_translate(struct rte_eth_dev *dev,
1701 		     struct mlx5_flow *dev_flow,
1702 		     const struct rte_flow_attr *attr,
1703 		     const struct rte_flow_item items[],
1704 		     const struct rte_flow_action actions[],
1705 		     struct rte_flow_error *error)
1706 {
1707 	uint64_t item_flags = 0;
1708 	uint64_t action_flags = 0;
1709 	uint64_t priority = attr->priority;
1710 	uint32_t subpriority = 0;
1711 	struct mlx5_priv *priv = dev->data->dev_private;
1712 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1713 	struct mlx5_flow_rss_desc *rss_desc;
1714 
1715 	MLX5_ASSERT(wks);
1716 	rss_desc = &wks->rss_desc;
1717 	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1718 		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
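	/* Second pass: translate pattern items and accumulate layer flags. */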
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}
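
/*
 * Worked example (values assumed for illustration): an outer
 * ETH / IPV4 / TCP pattern leaves the loop above with
 *	item_flags = MLX5_FLOW_LAYER_OUTER_L2 |
 *		     MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *		     MLX5_FLOW_LAYER_OUTER_L4_TCP;
 *	subpriority = MLX5_PRIORITY_MAP_L4;
 * i.e. the deepest matched layer selects the subpriority that
 * mlx5_flow_adjust_priority() combines with the rule priority.
 */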

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is in a union; touch it only when fate is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and from memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				   tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}
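
/*
 * Note: flow_verbs_remove() above only detaches rules from the NIC, so
 * the generic layer can re-apply the same flows later (e.g. around a
 * device stop/start cycle), while flow_verbs_destroy() additionally
 * returns the handles to the indexed pool and releases the counter.
 */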

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
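	/* Walk the device flows prepared in this workspace, newest first. */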
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VFs, create a VLAN interface so that
			 * the hypervisor sets the correct e-Switch
			 * vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is in a union; touch it only when fate is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
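
/*
 * Illustration only (caller-side sketch, not driver code): assuming
 * port_id, flow and error are already set up by the caller, a counter
 * attached with the COUNT action could be read through this callback:
 *
 *	struct rte_flow_query_count count = { .reset = 0 };
 *	struct rte_flow_action count_action[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (rte_flow_query(port_id, flow, count_action, &count, &error) == 0
 *	    && count.hits_set)
 *		printf("hits: %" PRIu64 "\n", count.hits);
 */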

static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);

	return 0;
}
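
/*
 * The stub above appears deliberate: Verbs rules are installed
 * synchronously by create_flow(), so there is nothing for this engine
 * to flush when a domain sync is requested.
 */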

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
};
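
/*
 * Entry points of the Verbs flow engine. The generic mlx5 flow layer is
 * expected to dispatch to this table when the DV/DR engines are not
 * enabled (a simplified view of the selection logic in mlx5_flow.c).
 */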