/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};
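
/*
 * Illustrative sketch (not part of the driver): Verbs flow specifications
 * are laid out back to back immediately after the ibv_flow_attr header,
 * so the spec list can be walked with nothing but each header's size:
 *
 *	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
 *		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
 *	unsigned int i;
 *
 *	for (i = 0; i != attr->num_of_specs; ++i)
 *		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
 *
 * The in-place update helpers below (e.g. flow_verbs_item_vlan_update())
 * rely on exactly this walk.
 */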

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] vprio
 *   Expected result variants.
 * @param[in] vprio_n
 *   Number of entries in @p vprio array.
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
static int
flow_verbs_discover_priorities(struct rte_eth_dev *dev,
			       const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	int i;
	int priority = 0;

#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
	/* If DevX is supported, the driver must support 16 Verbs flow priorities. */
	priority = 16;
	goto out;
#endif
	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != vprio_n; i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
out:
#endif
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}
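
/*
 * Usage sketch (illustrative only; the candidate list below is an
 * assumption, not necessarily what the caller passes): probe with a
 * list of priority counts and keep the largest one the kernel accepts:
 *
 *	static const uint16_t vprio[] = { 8, 16 };
 *	int ret = flow_verbs_discover_priorities(dev, vprio,
 *						 RTE_DIM(vprio));
 *
 *	if (ret < 0)
 *		return ret;	// rte_errno already set by the callee
 */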

/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
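
/*
 * Worked example (illustrative): counter indices are 1-based and may
 * carry the shared-counter flag in their high bits, hence the decoding
 * above. Assuming, for illustration only, MLX5_COUNTERS_PER_POOL == 512,
 * an index of 515 resolves as:
 *
 *	idx = (515 - 1) & (MLX5_CNT_SHARED_OFFSET - 1);	// 514
 *	pool = cmng->pools[514 / 512];			// pool #1
 *	cnt = MLX5_POOL_GET_CNT(pool, 514 % 512);	// counter #2
 */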

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		uint32_t size;

		if (n_valid == MLX5_COUNTER_POOLS_MAX_NUM) {
			DRV_LOG(ERR, "All counters are in use, try again later.");
			rte_errno = EAGAIN;
			return 0;
		}
		/* Allocate memory for a new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
				((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}
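
/*
 * Usage sketch from the application side (illustrative, hedged): this
 * handler is reached through the generic rte_flow query API with a
 * COUNT action as the query target:
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 *
 * With .reset set, the snapshot stored in cnt->hits/cnt->bytes above is
 * refreshed so the next query reports deltas from this point.
 */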

/**
 * Add a Verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to Verbs workspace structure.
 * @param[in] src
 *   Specification to copy into the Verbs attribute buffer.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->hdr.dst_addr.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->hdr.src_addr.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->hdr.ether_type;
		memcpy(&eth.mask.dst_mac, mask->hdr.dst_addr.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->hdr.src_addr.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->hdr.ether_type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->hdr.vlan_tci;
		eth.mask.vlan_tag = mask->hdr.vlan_tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->hdr.eth_proto;
		eth.mask.ether_type = mask->hdr.eth_proto;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->hdr.vlan_tci) & 0x0fff;
}
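
/*
 * Worked example (illustrative): the VLAN TCI packs PCP(3) | DEI(1) |
 * VID(12), so for a TCI of 0xa00a (PCP 5, DEI 0, VID 10) the statement
 * above stores only the 12-bit VID:
 *
 *	rte_be_to_cpu_16(spec->hdr.vlan_tci) & 0x0fff;	// 0x00a == 10
 */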

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}
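
/*
 * Worked example (illustrative): the IPv6 vtc_flow word packs
 * version(4) | traffic class(8) | flow label(20). For a host-order
 * value of 0x61200345 (version 6, TC 0x12, flow label 0x345) the
 * extraction above yields:
 *
 *	tc = (0x61200345 & RTE_IPV6_HDR_TC_MASK)
 *			>> RTE_IPV6_HDR_TC_SHIFT;	// 0x12
 *	fl = (0x61200345 & RTE_IPV6_HDR_FL_MASK)
 *			>> RTE_IPV6_HDR_FL_SHIFT;	// 0x345
 */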

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}
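
/*
 * Illustrative note: for a pattern such as eth / ipv4 / udp / vxlan
 * where the UDP destination port is left unspecified, the lookahead
 * above pins it to the default port of the following tunnel item,
 * e.g. MLX5_UDP_PORT_VXLAN (4789, the IANA VXLAN port), so the tunnel
 * match cannot be hit on arbitrary UDP ports.
 */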

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->hdr.vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->hdr.vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
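
/*
 * Worked example (illustrative): for VNI bytes {0x12, 0x34, 0x56} the
 * memcpy above fills vni[1..3] and leaves byte 0 clear, so the union
 * reads back as the 24-bit VNI in network byte order:
 *
 *	id.vlan_id == rte_cpu_to_be_32(0x123456);
 */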

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->hdr.vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->hdr.vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Reserve space for GRE spec in spec buffer.
 *
 * @param[in,out] dev_flow
 *   Pointer to dev_flow structure.
 *
 * @return
 *   Pointer to reserved space in spec buffer.
 */
static uint8_t *
flow_verbs_reserve_gre(struct mlx5_flow *dev_flow)
{
	uint8_t *buffer;
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};
#endif

	buffer = verbs->specs + verbs->size;
	flow_verbs_spec_add(verbs, &tunnel, size);
	return buffer;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that the Verbs specification will be placed in
 * the pre-reserved space.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in, out] gre_spec
 *   Pointer to space reserved for GRE spec.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      uint8_t *gre_spec,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!spec) {
		spec = &empty_gre;
		mask = &empty_gre;
	} else {
		if (!mask)
			mask = &rte_flow_item_gre_mask;
	}
	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
	tunnel.val.protocol = spec->protocol;
	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
	tunnel.mask.protocol = mask->protocol;
	/* Remove unwanted bits from values. */
	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
	tunnel.val.key &= tunnel.mask.key;
	if (tunnel.mask.protocol) {
		tunnel.val.protocol &= tunnel.mask.protocol;
	} else {
		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
		if (tunnel.val.protocol) {
			tunnel.mask.protocol = 0xFFFF;
			tunnel.val.protocol =
				rte_cpu_to_be_16(tunnel.val.protocol);
		}
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	MLX5_ASSERT(gre_spec);
	memcpy(gre_spec, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and
 *   rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Validates @p attributes of the flow rule.
 *
 * This function is used if and only if the legacy Verbs flow engine is used.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate_attributes(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attributes,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->sh->flow_max_priority - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups are not supported");
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}
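
/*
 * Illustrative examples of the rules above (hedged, not exhaustive):
 * attr = { .ingress = 1 } is accepted; attr = { .ingress = 1, .group = 1 }
 * fails with ENOTSUP ("groups are not supported"); attr = { .egress = 1 }
 * fails because the legacy Verbs engine only programs ingress rules.
 */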

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;
	bool is_root;

	if (items == NULL)
		return -1;
	ret = flow_verbs_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	is_root = ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->hdr.ether_type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->hdr.ether_type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->hdr.eth_proto;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->hdr.eth_proto;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;
			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);

			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    is_root, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
		    "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the number of bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the number of bytes needed for the item part of the Verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}
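
/*
 * Worked example (illustrative): for the pattern eth / ipv4 / tcp this
 * returns
 *
 *	sizeof(struct ibv_flow_spec_eth) +
 *	sizeof(struct ibv_flow_spec_ipv4_ext) +
 *	sizeof(struct ibv_flow_spec_tcp_udp)
 *
 * which flow_verbs_prepare() below adds to the action size and checks
 * against MLX5_VERBS_MAX_SPEC_ACT_SIZE before allocating the device flow.
 */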

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overflowing the temporary device flow array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				   &handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}
1721 
1722 /**
1723  * Fill the flow with Verbs specifications.
1724  *
1725  * @param[in] dev
1726  *   Pointer to Ethernet device.
1727  * @param[in, out] dev_flow
1728  *   Pointer to the mlx5 flow.
1729  * @param[in] attr
1730  *   Pointer to the flow attributes.
1731  * @param[in] items
1732  *   Pointer to the list of items.
1733  * @param[in] actions
1734  *   Pointer to the list of actions.
1735  * @param[out] error
1736  *   Pointer to the error structure.
1737  *
1738  * @return
1739  *   0 on success, a negative errno value otherwise and rte_errno is set.
1740  */
1741 static int
1742 flow_verbs_translate(struct rte_eth_dev *dev,
1743 		     struct mlx5_flow *dev_flow,
1744 		     const struct rte_flow_attr *attr,
1745 		     const struct rte_flow_item items[],
1746 		     const struct rte_flow_action actions[],
1747 		     struct rte_flow_error *error)
1748 {
1749 	uint64_t item_flags = 0;
1750 	uint64_t action_flags = 0;
1751 	uint64_t priority = attr->priority;
1752 	uint32_t subpriority = 0;
1753 	struct mlx5_priv *priv = dev->data->dev_private;
1754 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1755 	struct mlx5_flow_rss_desc *rss_desc;
1756 	const struct rte_flow_item *tunnel_item = NULL;
1757 	uint8_t *gre_spec = NULL;
1758 
1759 	MLX5_ASSERT(wks);
1760 	rss_desc = &wks->rss_desc;
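	/*
	 * The lowest-priority indicator is remapped to the last (lowest)
	 * priority reported by flow_verbs_discover_priorities().
	 */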
1761 	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1762 		priority = priv->sh->flow_max_priority - 1;
1763 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1764 		int ret;
1765 
1766 		switch (actions->type) {
1767 		case RTE_FLOW_ACTION_TYPE_VOID:
1768 			break;
1769 		case RTE_FLOW_ACTION_TYPE_FLAG:
1770 			flow_verbs_translate_action_flag(dev_flow, actions);
1771 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1772 			wks->mark = 1;
1773 			break;
1774 		case RTE_FLOW_ACTION_TYPE_MARK:
1775 			flow_verbs_translate_action_mark(dev_flow, actions);
1776 			action_flags |= MLX5_FLOW_ACTION_MARK;
1777 			wks->mark = 1;
1778 			break;
1779 		case RTE_FLOW_ACTION_TYPE_DROP:
1780 			flow_verbs_translate_action_drop(dev_flow, actions);
1781 			action_flags |= MLX5_FLOW_ACTION_DROP;
1782 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
1783 			break;
1784 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1785 			flow_verbs_translate_action_queue(rss_desc, actions);
1786 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1787 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
1788 			break;
1789 		case RTE_FLOW_ACTION_TYPE_RSS:
1790 			flow_verbs_translate_action_rss(rss_desc, actions);
1791 			action_flags |= MLX5_FLOW_ACTION_RSS;
1792 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
1793 			break;
1794 		case RTE_FLOW_ACTION_TYPE_COUNT:
1795 			ret = flow_verbs_translate_action_count(dev_flow,
1796 								actions,
1797 								dev, error);
1798 			if (ret < 0)
1799 				return ret;
1800 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1801 			break;
1802 		default:
1803 			return rte_flow_error_set(error, ENOTSUP,
1804 						  RTE_FLOW_ERROR_TYPE_ACTION,
1805 						  actions,
1806 						  "action not supported");
1807 		}
1808 	}
1809 	dev_flow->act_flags = action_flags;
1810 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1811 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1812 
1813 		switch (items->type) {
1814 		case RTE_FLOW_ITEM_TYPE_VOID:
1815 			break;
1816 		case RTE_FLOW_ITEM_TYPE_ETH:
1817 			flow_verbs_translate_item_eth(dev_flow, items,
1818 						      item_flags);
1819 			subpriority = MLX5_PRIORITY_MAP_L2;
1820 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1821 					       MLX5_FLOW_LAYER_OUTER_L2;
1822 			break;
1823 		case RTE_FLOW_ITEM_TYPE_VLAN:
1824 			flow_verbs_translate_item_vlan(dev_flow, items,
1825 						       item_flags);
1826 			subpriority = MLX5_PRIORITY_MAP_L2;
1827 			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1828 						MLX5_FLOW_LAYER_INNER_VLAN) :
1829 					       (MLX5_FLOW_LAYER_OUTER_L2 |
1830 						MLX5_FLOW_LAYER_OUTER_VLAN);
1831 			break;
1832 		case RTE_FLOW_ITEM_TYPE_IPV4:
1833 			flow_verbs_translate_item_ipv4(dev_flow, items,
1834 						       item_flags);
1835 			subpriority = MLX5_PRIORITY_MAP_L3;
1836 			dev_flow->hash_fields |=
1837 				mlx5_flow_hashfields_adjust
1838 					(rss_desc, tunnel,
1839 					 MLX5_IPV4_LAYER_TYPES,
1840 					 MLX5_IPV4_IBV_RX_HASH);
1841 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1842 					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1843 			break;
1844 		case RTE_FLOW_ITEM_TYPE_IPV6:
1845 			flow_verbs_translate_item_ipv6(dev_flow, items,
1846 						       item_flags);
1847 			subpriority = MLX5_PRIORITY_MAP_L3;
1848 			dev_flow->hash_fields |=
1849 				mlx5_flow_hashfields_adjust
1850 					(rss_desc, tunnel,
1851 					 MLX5_IPV6_LAYER_TYPES,
1852 					 MLX5_IPV6_IBV_RX_HASH);
1853 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1854 					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1855 			break;
1856 		case RTE_FLOW_ITEM_TYPE_TCP:
1857 			flow_verbs_translate_item_tcp(dev_flow, items,
1858 						      item_flags);
1859 			subpriority = MLX5_PRIORITY_MAP_L4;
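			/*
			 * Extend the RSS hash fields only if an L3 item
			 * has already populated them.
			 */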
1860 			if (dev_flow->hash_fields != 0)
1861 				dev_flow->hash_fields |=
1862 					mlx5_flow_hashfields_adjust
1863 					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
1864 					 (IBV_RX_HASH_SRC_PORT_TCP |
1865 					  IBV_RX_HASH_DST_PORT_TCP));
1866 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1867 					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
1868 			break;
1869 		case RTE_FLOW_ITEM_TYPE_UDP:
1870 			flow_verbs_translate_item_udp(dev_flow, items,
1871 						      item_flags);
1872 			subpriority = MLX5_PRIORITY_MAP_L4;
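			/* Same L3 prerequisite as in the TCP case above. */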
1873 			if (dev_flow->hash_fields != 0)
1874 				dev_flow->hash_fields |=
1875 					mlx5_flow_hashfields_adjust
1876 					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
1877 					 (IBV_RX_HASH_SRC_PORT_UDP |
1878 					  IBV_RX_HASH_DST_PORT_UDP));
1879 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1880 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
1881 			break;
1882 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1883 			flow_verbs_translate_item_vxlan(dev_flow, items,
1884 							item_flags);
1885 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1886 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
1887 			break;
1888 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1889 			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1890 							    item_flags);
1891 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1892 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1893 			break;
1894 		case RTE_FLOW_ITEM_TYPE_GRE:
1895 			gre_spec = flow_verbs_reserve_gre(dev_flow);
1896 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1897 			item_flags |= MLX5_FLOW_LAYER_GRE;
1898 			tunnel_item = items;
1899 			break;
1900 		case RTE_FLOW_ITEM_TYPE_MPLS:
1901 			flow_verbs_translate_item_mpls(dev_flow, items,
1902 						       item_flags);
1903 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1904 			item_flags |= MLX5_FLOW_LAYER_MPLS;
1905 			break;
1906 		default:
1907 			return rte_flow_error_set(error, ENOTSUP,
1908 						  RTE_FLOW_ERROR_TYPE_ITEM,
1909 						  items, "item not supported");
1910 		}
1911 	}
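	/*
	 * Only space for the GRE spec was reserved inside the loop; the
	 * translation is completed here because the final spec depends on
	 * the full set of layers (item_flags) known only after the loop.
	 */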
1912 	if (item_flags & MLX5_FLOW_LAYER_GRE)
1913 		flow_verbs_translate_item_gre(dev_flow, gre_spec,
1914 					      tunnel_item, item_flags);
1915 	dev_flow->handle->layers = item_flags;
1916 	/* Other members of attr will be ignored. */
1917 	dev_flow->verbs.attr.priority =
1918 		mlx5_flow_adjust_priority(dev, priority, subpriority);
1919 	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
1920 	return 0;
1921 }
1922 
1923 /**
1924  * Remove the flow from the NIC but keep it in memory.
1925  *
1926  * @param[in] dev
1927  *   Pointer to the Ethernet device structure.
1928  * @param[in, out] flow
1929  *   Pointer to flow structure.
1930  */
1931 static void
1932 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1933 {
1934 	struct mlx5_priv *priv = dev->data->dev_private;
1935 	struct mlx5_flow_handle *handle;
1936 	uint32_t handle_idx;
1937 
1938 	if (!flow)
1939 		return;
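	/* Destroy HW objects only; the handles stay allocated. */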
1940 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1941 		       handle_idx, handle, next) {
1942 		if (handle->drv_flow) {
1943 			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
1944 			handle->drv_flow = NULL;
1945 		}
1946 		/* hrxq is a union member; release it only for QUEUE fate. */
1947 		if (handle->rix_hrxq &&
1948 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1949 			mlx5_hrxq_release(dev, handle->rix_hrxq);
1950 			handle->rix_hrxq = 0;
1951 		}
1952 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
1953 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
1954 	}
1955 }
1956 
1957 /**
1958  * Remove the flow from the NIC and the memory.
1959  *
1960  * @param[in] dev
1961  *   Pointer to the Ethernet device structure.
1962  * @param[in, out] flow
1963  *   Pointer to flow structure.
1964  */
1965 static void
1966 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1967 {
1968 	struct mlx5_priv *priv = dev->data->dev_private;
1969 	struct mlx5_flow_handle *handle;
1970 
1971 	if (!flow)
1972 		return;
1973 	flow_verbs_remove(dev, flow);
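	/* Pop each device handle off the index-based list and free it. */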
1974 	while (flow->dev_handles) {
1975 		uint32_t tmp_idx = flow->dev_handles;
1976 
1977 		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1978 				   tmp_idx);
1979 		if (!handle)
1980 			return;
1981 		flow->dev_handles = handle->next.next;
1982 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1983 			   tmp_idx);
1984 	}
1985 	if (flow->counter) {
1986 		flow_verbs_counter_release(dev, flow->counter);
1987 		flow->counter = 0;
1988 	}
1989 }
1990 
1991 /**
1992  * Apply the flow to the NIC.
1993  *
1994  * @param[in] dev
1995  *   Pointer to the Ethernet device structure.
1996  * @param[in, out] flow
1997  *   Pointer to flow structure.
1998  * @param[out] error
1999  *   Pointer to error structure.
2000  *
2001  * @return
2002  *   0 on success, a negative errno value otherwise and rte_errno is set.
2003  */
2004 static int
2005 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2006 		 struct rte_flow_error *error)
2007 {
2008 	struct mlx5_priv *priv = dev->data->dev_private;
2009 	struct mlx5_flow_handle *handle;
2010 	struct mlx5_flow *dev_flow;
2011 	struct mlx5_hrxq *hrxq;
2012 	uint32_t dev_handles;
2013 	int err;
2014 	int idx;
2015 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
2016 
2017 	MLX5_ASSERT(wks);
2018 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
2019 		dev_flow = &wks->flows[idx];
2020 		handle = dev_flow->handle;
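		/*
		 * Drop flows attach to the dedicated drop queue; queue and
		 * RSS flows acquire (or create) a hash Rx queue matching
		 * the RSS description.
		 */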
2021 		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
2022 			MLX5_ASSERT(priv->drop_queue.hrxq);
2023 			hrxq = priv->drop_queue.hrxq;
2024 		} else {
2025 			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
2026 
2027 			MLX5_ASSERT(rss_desc->queue_num);
2028 			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
2029 			rss_desc->hash_fields = dev_flow->hash_fields;
2030 			rss_desc->tunnel = !!(handle->layers &
2031 					      MLX5_FLOW_LAYER_TUNNEL);
2032 			rss_desc->shared_rss = 0;
2033 			hrxq = mlx5_hrxq_get(dev, rss_desc);
2034 			if (!hrxq) {
2035 				rte_flow_error_set
2036 					(error, rte_errno,
2037 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2038 					 "cannot get hash queue");
2039 				goto error;
2040 			}
2041 			handle->rix_hrxq = hrxq->idx;
2042 		}
2043 		MLX5_ASSERT(hrxq);
2044 		handle->drv_flow = mlx5_glue->create_flow
2045 					(hrxq->qp, &dev_flow->verbs.attr);
2046 		if (!handle->drv_flow) {
2047 			rte_flow_error_set(error, errno,
2048 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2049 					   NULL,
2050 					   "hardware refuses to create flow");
2051 			goto error;
2052 		}
2053 		if (priv->vmwa_context &&
2054 		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
2055 			/*
2056 			 * The rule contains the VLAN pattern.
2057 			 * For a VF we create a VLAN interface so
2058 			 * that the hypervisor sets the correct
2059 			 * e-Switch vport context.
2060 			 */
2061 			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
2062 		}
2063 	}
2064 	return 0;
2065 error:
2066 	err = rte_errno; /* Save rte_errno before cleanup. */
2067 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2068 		       dev_handles, handle, next) {
2069 		/* hrxq is a union member; release it only for QUEUE fate. */
2070 		if (handle->rix_hrxq &&
2071 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
2072 			mlx5_hrxq_release(dev, handle->rix_hrxq);
2073 			handle->rix_hrxq = 0;
2074 		}
2075 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
2076 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
2077 	}
2078 	rte_errno = err; /* Restore rte_errno. */
2079 	return -rte_errno;
2080 }
2081 
2082 /**
2083  * Query a flow.
2084  *
2085  * @see rte_flow_query()
2086  * @see rte_flow_ops
2087  */
2088 static int
2089 flow_verbs_query(struct rte_eth_dev *dev,
2090 		 struct rte_flow *flow,
2091 		 const struct rte_flow_action *actions,
2092 		 void *data,
2093 		 struct rte_flow_error *error)
2094 {
2095 	int ret = -EINVAL;
2096 
2097 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2098 		switch (actions->type) {
2099 		case RTE_FLOW_ACTION_TYPE_VOID:
2100 			break;
2101 		case RTE_FLOW_ACTION_TYPE_COUNT:
2102 			ret = flow_verbs_counter_query(dev, flow, data, error);
2103 			break;
2104 		default:
2105 			return rte_flow_error_set(error, ENOTSUP,
2106 						  RTE_FLOW_ERROR_TYPE_ACTION,
2107 						  actions,
2108 						  "action not supported");
2109 		}
2110 	}
2111 	return ret;
2112 }
2113 
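/**
 * Sync the flow domains (no-op for Verbs).
 *
 * Verbs flows are programmed synchronously, so there is nothing to
 * flush; the callback only completes the driver ops interface.
 */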
2114 static int
2115 flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
2116 		       uint32_t flags)
2117 {
2118 	RTE_SET_USED(dev);
2119 	RTE_SET_USED(domains);
2120 	RTE_SET_USED(flags);
2121 
2122 	return 0;
2123 }
2124 
2125 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
2126 	.validate = flow_verbs_validate,
2127 	.prepare = flow_verbs_prepare,
2128 	.translate = flow_verbs_translate,
2129 	.apply = flow_verbs_apply,
2130 	.remove = flow_verbs_remove,
2131 	.destroy = flow_verbs_destroy,
2132 	.query = flow_verbs_query,
2133 	.sync_domain = flow_verbs_sync_domain,
2134 	.discover_priorities = flow_verbs_discover_priorities,
2135 };
2136
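
/*
 * Illustrative call order (a sketch, not the exact dispatch; the generic
 * mlx5 flow layer drives these callbacks):
 *
 *   flow_verbs_validate()  - check attr/items/actions support
 *   flow_verbs_prepare()   - allocate the device flow and handle
 *   flow_verbs_translate() - build the Verbs specs and attributes
 *   flow_verbs_apply()     - create the ibv_flow objects in HW
 *   flow_verbs_remove()    - destroy HW objects, keep the handles
 *   flow_verbs_destroy()   - remove, then free handles and counters
 */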