/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] vprio
 *   Expected result variants.
 * @param[in] vprio_n
 *   Number of entries in @p vprio array.
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
static int
flow_verbs_discover_priorities(struct rte_eth_dev *dev,
			       const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	int i;
	int priority = 0;

#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
	/* If DevX is supported, the driver must support 16 Verbs flow priorities. */
	priority = 16;
	goto out;
#endif
	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != vprio_n; i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
out:
#endif
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}

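/*
 * Illustrative sketch (not part of the driver): how a caller might use the
 * probe above at start-up. The candidate priority values are assumptions
 * for illustration only; the real caller supplies its own list.
 *
 *	static const uint16_t vprio[] = { 8, 16 };
 *	int prio = flow_verbs_discover_priorities(dev, vprio,
 *						   RTE_DIM(vprio));
 *
 *	if (prio < 0)
 *		return prio; (rte_errno is already set)
 *	The largest value in vprio for which a probe flow could be created
 *	is returned as the supported priority count.
 */
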
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

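/*
 * Index decomposition sketch: counter indices are 1-based, so after masking
 * off the shared-offset bits the lookup amounts to
 *
 *	pool   = cmng->pools[(idx - 1) / MLX5_COUNTERS_PER_POOL];
 *	offset = (idx - 1) % MLX5_COUNTERS_PER_POOL;
 *
 * For example, if MLX5_COUNTERS_PER_POOL were 512 (value is illustrative),
 * idx 513 would select pool 1, offset 0.
 */
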
/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object; contains the counter id. The handle of the
 *   created Verbs flow counter is returned in the dcs_when_free field
 *   (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index of the counter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for a new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index of the counter handle.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
				((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

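/*
 * Usage sketch from the application side (assumes a flow created with a
 * COUNT action; names are illustrative):
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &qc, &error) == 0 &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 *
 * With .reset set, the per-counter baseline is updated so the next query
 * reports only the delta.
 */
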
/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Pointer to the specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}

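/*
 * Spec buffer layout sketch: specifications are packed back to back after
 * the ibv_flow_attr, each one starting with an ibv_spec_header (type and
 * size). Two consecutive flow_verbs_spec_add() calls therefore yield:
 *
 *	| ibv_flow_attr | eth spec | ipv4 spec |
 *	                ^ verbs->specs        ^ verbs->specs + verbs->size
 *
 * num_of_specs is incremented once per appended spec, which is what the
 * update helpers below rely on when walking the buffer.
 */
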
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

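/*
 * Illustrative rte_flow item feeding the translation above (sketch only;
 * addresses and EtherType are examples):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *		.type = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 *
 * Unmasked bits (here the whole source MAC) are cleared from the value
 * before the spec is appended.
 */
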
/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

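/*
 * Illustrative item for the IPv4 translation above (sketch; the address
 * and protocol values are examples):
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.dst_addr = RTE_BE32(0xffffffff),
 *			.next_proto_id = 0xff,
 *		},
 *	};
 */
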
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

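/*
 * Note on the defaulting above, as a sketch: for a pattern such as
 * ETH / IPV4 / UDP / VXLAN where the UDP item leaves dst_port unmasked,
 * the spec is tightened as if the application had written
 *
 *	udp_spec.hdr.dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
 *	udp_mask.hdr.dst_port = RTE_BE16(0xffff);
 *
 * so the rule only matches traffic on the well-known tunnel port.
 */
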
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

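/*
 * VNI packing sketch: the 24-bit VNI bytes are copied into id.vni[1..3]
 * with id.vni[0] left zero, so for VNI 0x123456 the four bytes read back
 * through the union as
 *
 *	id.vni[0..3] = { 0x00, 0x12, 0x34, 0x56 }
 *
 * and that 32-bit pattern becomes the Verbs tunnel_id value and mask.
 */
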
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Reserve space for GRE spec in spec buffer.
 *
 * @param[in,out] dev_flow
 *   Pointer to dev_flow structure.
 *
 * @return
 *   Pointer to reserved space in spec buffer.
 */
static uint8_t *
flow_verbs_reserve_gre(struct mlx5_flow *dev_flow)
{
	uint8_t *buffer;
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};
#endif

	buffer = verbs->specs + verbs->size;
	flow_verbs_spec_add(verbs, &tunnel, size);
	return buffer;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that the Verbs specification will be placed in
 * the pre-reserved space.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in, out] gre_spec
 *   Pointer to space reserved for GRE spec.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      uint8_t *gre_spec,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!spec) {
		spec = &empty_gre;
		mask = &empty_gre;
	} else {
		if (!mask)
			mask = &rte_flow_item_gre_mask;
	}
	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
	tunnel.val.protocol = spec->protocol;
	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
	tunnel.mask.protocol = mask->protocol;
	/* Remove unwanted bits from values. */
	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
	tunnel.val.key &= tunnel.mask.key;
	if (tunnel.mask.protocol) {
		tunnel.val.protocol &= tunnel.mask.protocol;
	} else {
		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
		if (tunnel.val.protocol) {
			tunnel.mask.protocol = 0xFFFF;
			tunnel.val.protocol =
				rte_cpu_to_be_16(tunnel.val.protocol);
		}
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	MLX5_ASSERT(gre_spec);
	memcpy(gre_spec, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
			.type = IBV_FLOW_SPEC_ACTION_DROP,
			.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

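/*
 * Illustrative RSS action feeding the translation above (sketch; queue ids
 * and hash types are examples, and a NULL key selects the default key):
 *
 *	uint16_t queues[2] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.types = RTE_ETH_RSS_IP,
 *		.key_len = MLX5_RSS_HASH_KEY_LEN,
 *		.key = NULL,
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 */
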
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

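/*
 * Sketch of a MARK action and how an application might read the mark back
 * on receive (illustrative; the mbuf flag name follows recent DPDK
 * releases and the id value is an example):
 *
 *	struct rte_flow_action_mark mark = { .id = 0xbef };
 *
 *	if (mbuf->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
 *		handle_mark(mbuf->hash.fdir.hi);
 */
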
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and
 *   rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. Validates both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;
	bool is_root;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	is_root = ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;
			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);

			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    is_root, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
						 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
		    "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

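/*
 * End-to-end sketch of a pattern/action list this validation accepts
 * (illustrative only; error handling omitted):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	ret = flow_verbs_validate(dev, &attr, pattern, actions,
 *				  true, 0, &error);
 */
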
/**
 * Calculate the size in bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the size in bytes needed for the item part of the Verbs flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Check the flow index to avoid corrupting the flows array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				   &handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with Verbs spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	const struct rte_flow_item *tunnel_item = NULL;
	uint8_t *gre_spec = NULL;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->sh->flow_max_priority - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
1823 						      item_flags);
1824 			subpriority = MLX5_PRIORITY_MAP_L4;
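			/*
			 * Extend the RSS hash with the L4 ports only when an
			 * L3 hash is already present; same for UDP below.
			 */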
1825 			if (dev_flow->hash_fields != 0)
1826 				dev_flow->hash_fields |=
1827 					mlx5_flow_hashfields_adjust
1828 					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
1829 					 (IBV_RX_HASH_SRC_PORT_TCP |
1830 					  IBV_RX_HASH_DST_PORT_TCP));
1831 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1832 					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
1833 			break;
1834 		case RTE_FLOW_ITEM_TYPE_UDP:
1835 			flow_verbs_translate_item_udp(dev_flow, items,
1836 						      item_flags);
1837 			subpriority = MLX5_PRIORITY_MAP_L4;
1838 			if (dev_flow->hash_fields != 0)
1839 				dev_flow->hash_fields |=
1840 					mlx5_flow_hashfields_adjust
1841 					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
1842 					 (IBV_RX_HASH_SRC_PORT_UDP |
1843 					  IBV_RX_HASH_DST_PORT_UDP));
1844 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1845 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
1846 			break;
1847 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1848 			flow_verbs_translate_item_vxlan(dev_flow, items,
1849 							item_flags);
1850 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1851 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
1852 			break;
1853 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1854 			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1855 							    item_flags);
1856 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1857 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1858 			break;
1859 		case RTE_FLOW_ITEM_TYPE_GRE:
1860 			gre_spec = flow_verbs_reserve_gre(dev_flow);
1861 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1862 			item_flags |= MLX5_FLOW_LAYER_GRE;
1863 			tunnel_item = items;
1864 			break;
1865 		case RTE_FLOW_ITEM_TYPE_MPLS:
1866 			flow_verbs_translate_item_mpls(dev_flow, items,
1867 						       item_flags);
1868 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1869 			item_flags |= MLX5_FLOW_LAYER_MPLS;
1870 			break;
1871 		default:
1872 			return rte_flow_error_set(error, ENOTSUP,
1873 						  RTE_FLOW_ERROR_TYPE_ITEM,
1874 						  NULL, "item not supported");
1875 		}
1876 	}
1877 	if (item_flags & MLX5_FLOW_LAYER_GRE)
1878 		flow_verbs_translate_item_gre(dev_flow, gre_spec,
1879 					      tunnel_item, item_flags);
1880 	dev_flow->handle->layers = item_flags;
1881 	/* Other members of attr will be ignored. */
1882 	dev_flow->verbs.attr.priority =
1883 		mlx5_flow_adjust_priority(dev, priority, subpriority);
1884 	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
1885 	return 0;
1886 }
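
/*
 * Illustrative sketch (hypothetical, compiled out; not part of the driver):
 * for an outer ETH / IPV4 / UDP pattern the item loop above accumulates the
 * layer bits below and leaves subpriority at MLX5_PRIORITY_MAP_L4, i.e. the
 * most specific matched layer drives mlx5_flow_adjust_priority().
 */
#ifdef MLX5_FLOW_VERBS_DOC_EXAMPLES
static uint64_t
example_eth_ipv4_udp_layers(void)
{
	return MLX5_FLOW_LAYER_OUTER_L2 |
	       MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
	       MLX5_FLOW_LAYER_OUTER_L4_UDP;
}
#endif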
1887 
1888 /**
1889  * Remove the flow from the NIC but keep it in memory.
1890  *
1891  * @param[in] dev
1892  *   Pointer to the Ethernet device structure.
1893  * @param[in, out] flow
1894  *   Pointer to flow structure.
1895  */
1896 static void
1897 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1898 {
1899 	struct mlx5_priv *priv = dev->data->dev_private;
1900 	struct mlx5_flow_handle *handle;
1901 	uint32_t handle_idx;
1902 
1903 	if (!flow)
1904 		return;
1905 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1906 		       handle_idx, handle, next) {
1907 		if (handle->drv_flow) {
1908 			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
1909 			handle->drv_flow = NULL;
1910 		}
1911 		/* hrxq is in a union; release it only when the fate action is QUEUE. */
1912 		if (handle->rix_hrxq &&
1913 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1914 			mlx5_hrxq_release(dev, handle->rix_hrxq);
1915 			handle->rix_hrxq = 0;
1916 		}
1917 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
1918 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
1919 	}
1920 }
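
/*
 * Illustrative sketch (hypothetical, compiled out; not part of the driver):
 * the handle list walked above survives flow_verbs_remove(), so it can
 * still be iterated afterwards, e.g. to count the handles kept in memory.
 */
#ifdef MLX5_FLOW_VERBS_DOC_EXAMPLES
static unsigned int
example_count_handles(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;
	unsigned int n = 0;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
		       flow->dev_handles, handle_idx, handle, next)
		n++;
	return n;
}
#endif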
1921 
1922 /**
1923  * Remove the flow from the NIC and the memory.
1924  *
1925  * @param[in] dev
1926  *   Pointer to the Ethernet device structure.
1927  * @param[in, out] flow
1928  *   Pointer to flow structure.
1929  */
1930 static void
1931 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1932 {
1933 	struct mlx5_priv *priv = dev->data->dev_private;
1934 	struct mlx5_flow_handle *handle;
1935 
1936 	if (!flow)
1937 		return;
1938 	flow_verbs_remove(dev, flow);
1939 	while (flow->dev_handles) {
1940 		uint32_t tmp_idx = flow->dev_handles;
1941 
1942 		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1943 				   tmp_idx);
1944 		if (!handle)
1945 			return;
1946 		flow->dev_handles = handle->next.next;
1947 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1948 			   tmp_idx);
1949 	}
1950 	if (flow->counter) {
1951 		flow_verbs_counter_release(dev, flow->counter);
1952 		flow->counter = 0;
1953 	}
1954 }
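
/*
 * Illustrative sketch (hypothetical, compiled out; not part of the driver):
 * applications reach flow_verbs_destroy() through the public rte_flow API;
 * one call tears down the hardware rules, the handle list and the counter.
 */
#ifdef MLX5_FLOW_VERBS_DOC_EXAMPLES
static int
example_destroy(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;

	return rte_flow_destroy(port_id, flow, &err);
}
#endif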
1955 
1956 /**
1957  * Apply the flow to the NIC.
1958  *
1959  * @param[in] dev
1960  *   Pointer to the Ethernet device structure.
1961  * @param[in, out] flow
1962  *   Pointer to flow structure.
1963  * @param[out] error
1964  *   Pointer to error structure.
1965  *
1966  * @return
1967  *   0 on success, a negative errno value otherwise and rte_errno is set.
1968  */
1969 static int
1970 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1971 		 struct rte_flow_error *error)
1972 {
1973 	struct mlx5_priv *priv = dev->data->dev_private;
1974 	struct mlx5_flow_handle *handle;
1975 	struct mlx5_flow *dev_flow;
1976 	struct mlx5_hrxq *hrxq;
1977 	uint32_t dev_handles;
1978 	int err;
1979 	int idx;
1980 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1981 
1982 	MLX5_ASSERT(wks);
1983 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
1984 		dev_flow = &wks->flows[idx];
1985 		handle = dev_flow->handle;
1986 		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
1987 			MLX5_ASSERT(priv->drop_queue.hrxq);
1988 			hrxq = priv->drop_queue.hrxq;
1989 		} else {
1990 			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
1991 
1992 			MLX5_ASSERT(rss_desc->queue_num);
1993 			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
1994 			rss_desc->hash_fields = dev_flow->hash_fields;
1995 			rss_desc->tunnel = !!(handle->layers &
1996 					      MLX5_FLOW_LAYER_TUNNEL);
1997 			rss_desc->shared_rss = 0;
1998 			hrxq = mlx5_hrxq_get(dev, rss_desc);
1999 			if (!hrxq) {
2000 				rte_flow_error_set
2001 					(error, rte_errno,
2002 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2003 					 "cannot get hash queue");
2004 				goto error;
2005 			}
2006 			handle->rix_hrxq = hrxq->idx;
2007 		}
2008 		MLX5_ASSERT(hrxq);
2009 		handle->drv_flow = mlx5_glue->create_flow
2010 					(hrxq->qp, &dev_flow->verbs.attr);
2011 		if (!handle->drv_flow) {
2012 			rte_flow_error_set(error, errno,
2013 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2014 					   NULL,
2015 					   "hardware refuses to create flow");
2016 			goto error;
2017 		}
2018 		if (priv->vmwa_context &&
2019 		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
2020 			/*
2021 			 * The rule contains the VLAN pattern.
2022 			 * For a VF, create a VLAN interface so that
2023 			 * the hypervisor sets the correct e-Switch
2024 			 * vport context.
2025 			 */
2026 			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
2027 		}
2028 	}
2029 	return 0;
2030 error:
2031 	err = rte_errno; /* Save rte_errno before cleanup. */
2032 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2033 		       dev_handles, handle, next) {
2034 		/* hrxq is in a union; release it only when the fate action is QUEUE. */
2035 		if (handle->rix_hrxq &&
2036 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
2037 			mlx5_hrxq_release(dev, handle->rix_hrxq);
2038 			handle->rix_hrxq = 0;
2039 		}
2040 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
2041 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
2042 	}
2043 	rte_errno = err; /* Restore rte_errno. */
2044 	return -rte_errno;
2045 }
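
/*
 * Illustrative sketch (hypothetical, compiled out; not part of the driver):
 * the error path above follows the standard DPDK unwind idiom -- save
 * rte_errno before cleanup calls that may overwrite it, restore it after:
 */
#ifdef MLX5_FLOW_VERBS_DOC_EXAMPLES
static int
example_unwind_idiom(void)
{
	int err = rte_errno;	/* Save rte_errno before cleanup. */

	/* ... release partially created resources here ... */
	rte_errno = err;	/* Restore rte_errno. */
	return -rte_errno;
}
#endif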
2046 
2047 /**
2048  * Query a flow.
2049  *
2050  * @see rte_flow_query()
2051  * @see rte_flow_ops
2052  */
2053 static int
2054 flow_verbs_query(struct rte_eth_dev *dev,
2055 		 struct rte_flow *flow,
2056 		 const struct rte_flow_action *actions,
2057 		 void *data,
2058 		 struct rte_flow_error *error)
2059 {
2060 	int ret = -EINVAL;
2061 
2062 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2063 		switch (actions->type) {
2064 		case RTE_FLOW_ACTION_TYPE_VOID:
2065 			break;
2066 		case RTE_FLOW_ACTION_TYPE_COUNT:
2067 			ret = flow_verbs_counter_query(dev, flow, data, error);
2068 			break;
2069 		default:
2070 			return rte_flow_error_set(error, ENOTSUP,
2071 						  RTE_FLOW_ERROR_TYPE_ACTION,
2072 						  actions,
2073 						  "action not supported");
2074 		}
2075 	}
2076 	return ret;
2077 }
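
/*
 * Illustrative sketch (hypothetical, compiled out; not part of the driver):
 * querying the COUNT action of a flow created with a counter, through the
 * public API that dispatches to flow_verbs_query() on Verbs-only devices.
 */
#ifdef MLX5_FLOW_VERBS_DOC_EXAMPLES
static int
example_query_count(uint16_t port_id, struct rte_flow *flow,
		    uint64_t *hits, uint64_t *bytes)
{
	struct rte_flow_query_count qc = { .reset = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_query(port_id, flow, actions, &qc, &err);
	if (ret == 0) {
		*hits = qc.hits;
		*bytes = qc.bytes;
	}
	return ret;
}
#endif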
2078 
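/**
 * Sync the flow domains.
 *
 * A no-op for the Verbs engine: rules take effect as soon as they are
 * created, so there is nothing to synchronize.
 */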
2079 static int
2080 flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
2081 		       uint32_t flags)
2082 {
2083 	RTE_SET_USED(dev);
2084 	RTE_SET_USED(domains);
2085 	RTE_SET_USED(flags);
2086 
2087 	return 0;
2088 }
2089 
2090 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
2091 	.validate = flow_verbs_validate,
2092 	.prepare = flow_verbs_prepare,
2093 	.translate = flow_verbs_translate,
2094 	.apply = flow_verbs_apply,
2095 	.remove = flow_verbs_remove,
2096 	.destroy = flow_verbs_destroy,
2097 	.query = flow_verbs_query,
2098 	.sync_domain = flow_verbs_sync_domain,
2099 	.discover_priorities = flow_verbs_discover_priorities,
2100 };
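
/*
 * Illustrative sketch (hypothetical, compiled out; not part of the driver):
 * for a single flow creation the generic mlx5 flow layer drives this table
 * roughly as prepare -> translate -> apply (validation and the linking of
 * the prepared handle into flow->dev_handles are omitted here).
 */
#ifdef MLX5_FLOW_VERBS_DOC_EXAMPLES
static int
example_create_sequence(struct rte_eth_dev *dev, struct rte_flow *flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow;

	dev_flow = flow_verbs_prepare(dev, attr, items, actions, error);
	if (!dev_flow)
		return -rte_errno;
	if (flow_verbs_translate(dev, dev_flow, attr, items, actions, error))
		return -rte_errno;
	return flow_verbs_apply(dev, flow, error);
}
#endif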
2101