/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

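/*
 * Illustrative note (not from the original sources): each row of a map is
 * indexed by the rte_flow rule's base priority, and each column by the
 * MLX5_PRIORITY_MAP_* sub-priority derived from the deepest matched layer,
 * so rules matching more specific layers land on distinct Verbs priorities
 * within the same user priority level.
 */
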
/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}

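/*
 * Illustrative note: the probe above creates a throwaway drop rule at
 * Verbs priority 7 and then 15 on the drop queue QP; the highest probe
 * that succeeds selects priority_map_3 (8 levels) or priority_map_5
 * (16 levels) as the active mapping.
 */
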
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

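/*
 * Illustrative example (assuming MLX5_PRIORITY_MAP_L3 == 1): on a port
 * with 16 Verbs priorities (flow_prio == RTE_DIM(priority_map_5)),
 * mlx5_flow_adjust_priority(dev, 1, MLX5_PRIORITY_MAP_L3) returns
 * priority_map_5[1][1] == 4.
 */
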
/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

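/*
 * Illustrative note: counter indexes are 1-based and shared counters carry
 * the MLX5_CNT_SHARED_OFFSET flag bit; once that is masked off, the pool
 * is idx / MLX5_COUNTERS_PER_POOL and the slot within the pool is
 * idx % MLX5_COUNTERS_PER_POOL.
 */
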
/**
 * Create a Verbs flow counter with the Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
		.counter_set_id = counter->shared_info.id,
	};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}

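/*
 * Illustrative note: with the v4.5 counters API the driver attaches two
 * counter slots, index 0 for IBV_COUNTER_PACKETS and index 1 for
 * IBV_COUNTER_BYTES, so a later query reads back { hits, bytes } in that
 * order.
 */
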
/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	union mlx5_l3t_data data;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) &&
	    data.dword)
		return data.dword;
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for the new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	if (shared) {
		data.dword = cnt_idx;
		if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
			return 0;
		cnt->shared_info.id = id;
		cnt_idx |= MLX5_CNT_SHARED_OFFSET;
	}
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

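/*
 * Illustrative note: shared counters are deduplicated through the
 * three-level table priv->sh->cnt_id_tbl, keyed by the user counter id;
 * the returned index has MLX5_CNT_SHARED_OFFSET set so that the release
 * path can tell shared and dedicated counters apart.
 */
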
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handle.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
				((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = (struct ibv_counter_set *)cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
				RTE_DIM(counters),
				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

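/*
 * Illustrative note: the hardware counters are monotonic, so the query
 * reports deltas against the values cached at counter allocation or at
 * the last query issued with qc->reset set.
 */
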
/**
 * Add a Verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to Verbs structure.
 * @param[in] src
 *   Pointer to the specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}

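/*
 * Illustrative example: translating "eth / ipv4 / udp" appends three specs
 * here, so verbs->attr.num_of_specs ends up at 3 and verbs->size at
 * sizeof(struct ibv_flow_spec_eth) +
 * sizeof(struct ibv_flow_spec_ipv4_ext) +
 * sizeof(struct ibv_flow_spec_tcp_udp).
 */
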
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
			RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel && spec)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

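/*
 * Illustrative note: Verbs has no standalone VLAN spec, so the TCI is
 * folded into the Ethernet spec; when an L2 spec is already present, the
 * update path above patches it in place instead of appending a new one.
 */
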
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

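/*
 * Illustrative example: with a pattern such as
 *   eth / ipv4 / udp / vxlan vni is 42 / end
 * where the UDP destination port is left unspecified, the translation
 * above fills in the IANA default (MLX5_UDP_PORT_VXLAN) with a full mask,
 * so the rule still narrows to the tunnel's well-known port.
 */
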
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr;

	if (!attr)
		return;
	hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

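/*
 * Illustrative example: for "eth / ipv4 / gre / end" with no explicit IP
 * protocol, the helper above patches the already-translated IPv4 spec so
 * that it matches proto == IPPROTO_GRE with a full 0xff mask.
 */
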
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

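/*
 * Illustrative note: only the queue list and the MLX5_RSS_HASH_KEY_LEN-byte
 * hash key are captured here; the hash fields themselves are derived later,
 * per L3/L4 item, via mlx5_flow_hashfields_adjust() during translation.
 */
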
/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned and
 *   rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

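/*
 * Illustrative note: exactly one fate action (DROP, QUEUE or RSS) must be
 * present; DROP additionally tolerates COUNT, which is enforced by the
 * mutual-exclusion test above.
 */
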
/**
 * Calculate the number of bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

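/*
 * Illustrative example: the action list "mark id is 7 / queue index 0 /
 * end" needs sizeof(struct ibv_flow_spec_action_tag) bytes here; QUEUE
 * itself emits no Verbs spec and contributes zero.
 */
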
/**
 * Calculate the number of bytes needed for the item part of the Verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overflowing the device flow array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with Verbs spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
1827 					 (IBV_RX_HASH_SRC_PORT_UDP |
1828 					  IBV_RX_HASH_DST_PORT_UDP));
1829 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1830 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
1831 			break;
1832 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1833 			flow_verbs_translate_item_vxlan(dev_flow, items,
1834 							item_flags);
1835 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1836 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
1837 			break;
1838 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1839 			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1840 							    item_flags);
1841 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1842 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1843 			break;
1844 		case RTE_FLOW_ITEM_TYPE_GRE:
1845 			flow_verbs_translate_item_gre(dev_flow, items,
1846 						      item_flags);
1847 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1848 			item_flags |= MLX5_FLOW_LAYER_GRE;
1849 			break;
1850 		case RTE_FLOW_ITEM_TYPE_MPLS:
1851 			flow_verbs_translate_item_mpls(dev_flow, items,
1852 						       item_flags);
1853 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1854 			item_flags |= MLX5_FLOW_LAYER_MPLS;
1855 			break;
1856 		default:
1857 			return rte_flow_error_set(error, ENOTSUP,
1858 						  RTE_FLOW_ERROR_TYPE_ITEM,
1859 						  NULL, "item not supported");
1860 		}
1861 	}
1862 	dev_flow->handle->layers = item_flags;
1863 	/* Other members of attr will be ignored. */
1864 	dev_flow->verbs.attr.priority =
1865 		mlx5_flow_adjust_priority(dev, priority, subpriority);
1866 	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
1867 	return 0;
1868 }
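
/*
 * Illustrative sketch (application-side rule using the public rte_flow
 * API, not part of the driver): a pattern/action list this translator
 * accepts and turns into Verbs specs; anything outside the switches
 * above is rejected with ENOTSUP:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *		  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */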
1869 
1870 /**
1871  * Remove the flow from the NIC but keep it in memory.
1872  *
1873  * @param[in] dev
1874  *   Pointer to the Ethernet device structure.
1875  * @param[in, out] flow
1876  *   Pointer to flow structure.
1877  */
1878 static void
1879 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1880 {
1881 	struct mlx5_priv *priv = dev->data->dev_private;
1882 	struct mlx5_flow_handle *handle;
1883 	uint32_t handle_idx;
1884 
1885 	if (!flow)
1886 		return;
1887 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1888 		       handle_idx, handle, next) {
1889 		if (handle->drv_flow) {
1890 			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
1891 			handle->drv_flow = NULL;
1892 		}
1893 		/* hrxq is in a union; touch it only when the fate action is QUEUE. */
1894 		if (handle->rix_hrxq &&
1895 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1896 			mlx5_hrxq_release(dev, handle->rix_hrxq);
1897 			handle->rix_hrxq = 0;
1898 		}
1899 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
1900 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
1901 	}
1902 }
1903 
1904 /**
1905  * Remove the flow from both the NIC and memory.
1906  *
1907  * @param[in] dev
1908  *   Pointer to the Ethernet device structure.
1909  * @param[in, out] flow
1910  *   Pointer to flow structure.
1911  */
1912 static void
1913 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1914 {
1915 	struct mlx5_priv *priv = dev->data->dev_private;
1916 	struct mlx5_flow_handle *handle;
1917 
1918 	if (!flow)
1919 		return;
1920 	flow_verbs_remove(dev, flow);
1921 	while (flow->dev_handles) {
1922 		uint32_t tmp_idx = flow->dev_handles;
1923 
1924 		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1925 				   tmp_idx);
1926 		if (!handle)
1927 			return;
1928 		flow->dev_handles = handle->next.next;
1929 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1930 			   tmp_idx);
1931 	}
1932 	if (flow->counter) {
1933 		flow_verbs_counter_release(dev, flow->counter);
1934 		flow->counter = 0;
1935 	}
1936 }
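
/*
 * Illustrative sketch (not part of the driver): flow->dev_handles holds
 * the head *index* of a singly linked list threaded through the handle
 * ipool, so destruction is an unlink-and-free walk over indices rather
 * than pointers:
 *
 *	head_idx -> handle[head_idx].next.next -> ... -> 0 (list end)
 */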
1937 
1938 /**
1939  * Apply the flow to the NIC.
1940  *
1941  * @param[in] dev
1942  *   Pointer to the Ethernet device structure.
1943  * @param[in, out] flow
1944  *   Pointer to flow structure.
1945  * @param[out] error
1946  *   Pointer to error structure.
1947  *
1948  * @return
1949  *   0 on success, a negative errno value otherwise and rte_errno is set.
1950  */
1951 static int
1952 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1953 		 struct rte_flow_error *error)
1954 {
1955 	struct mlx5_priv *priv = dev->data->dev_private;
1956 	struct mlx5_flow_handle *handle;
1957 	struct mlx5_flow *dev_flow;
1958 	struct mlx5_hrxq *hrxq;
1959 	uint32_t dev_handles;
1960 	int err;
1961 	int idx;
1962 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1963 
1964 	MLX5_ASSERT(wks);
1965 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
1966 		dev_flow = &wks->flows[idx];
1967 		handle = dev_flow->handle;
1968 		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
1969 			MLX5_ASSERT(priv->drop_queue.hrxq);
1970 			hrxq = priv->drop_queue.hrxq;
1971 		} else {
1972 			uint32_t hrxq_idx;
1973 			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
1974 
1975 			MLX5_ASSERT(rss_desc->queue_num);
1976 			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
1977 			rss_desc->hash_fields = dev_flow->hash_fields;
1978 			rss_desc->tunnel = !!(handle->layers &
1979 					      MLX5_FLOW_LAYER_TUNNEL);
1980 			rss_desc->shared_rss = 0;
1981 			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
1982 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1983 					      hrxq_idx);
1984 			if (!hrxq) {
1985 				rte_flow_error_set
1986 					(error, rte_errno,
1987 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1988 					 "cannot get hash queue");
1989 				goto error;
1990 			}
1991 			handle->rix_hrxq = hrxq_idx;
1992 		}
1993 		MLX5_ASSERT(hrxq);
1994 		handle->drv_flow = mlx5_glue->create_flow
1995 					(hrxq->qp, &dev_flow->verbs.attr);
1996 		if (!handle->drv_flow) {
1997 			rte_flow_error_set(error, errno,
1998 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1999 					   NULL,
2000 					   "hardware refuses to create flow");
2001 			goto error;
2002 		}
2003 		if (priv->vmwa_context &&
2004 		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
2005 			/*
2006 			 * The rule contains a VLAN pattern.
2007 			 * For a VF, create a VLAN interface so
2008 			 * that the hypervisor sets the correct
2009 			 * e-Switch vport context.
2010 			 */
2011 			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
2012 		}
2013 	}
2014 	return 0;
2015 error:
2016 	err = rte_errno; /* Save rte_errno before cleanup. */
2017 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2018 		       dev_handles, handle, next) {
2019 		/* hrxq is in a union; touch it only when the fate action is QUEUE. */
2020 		if (handle->rix_hrxq &&
2021 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
2022 			mlx5_hrxq_release(dev, handle->rix_hrxq);
2023 			handle->rix_hrxq = 0;
2024 		}
2025 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
2026 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
2027 	}
2028 	rte_errno = err; /* Restore rte_errno. */
2029 	return -rte_errno;
2030 }
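
/*
 * Illustrative sketch (not part of the driver): for the QUEUE/RSS fate
 * the hash Rx queue object is reference counted, so two flows built
 * with the same queues and hash fields resolve to the same hrxq index:
 *
 *	hrxq_idx = mlx5_hrxq_get(dev, rss_desc);	// get or create
 *	...
 *	mlx5_hrxq_release(dev, hrxq_idx);		// drop the reference
 */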
2031 
2032 /**
2033  * Query a flow.
2034  *
2035  * @see rte_flow_query()
2036  * @see rte_flow_ops
2037  */
2038 static int
2039 flow_verbs_query(struct rte_eth_dev *dev,
2040 		 struct rte_flow *flow,
2041 		 const struct rte_flow_action *actions,
2042 		 void *data,
2043 		 struct rte_flow_error *error)
2044 {
2045 	int ret = -EINVAL;
2046 
2047 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2048 		switch (actions->type) {
2049 		case RTE_FLOW_ACTION_TYPE_VOID:
2050 			break;
2051 		case RTE_FLOW_ACTION_TYPE_COUNT:
2052 			ret = flow_verbs_counter_query(dev, flow, data, error);
2053 			break;
2054 		default:
2055 			return rte_flow_error_set(error, ENOTSUP,
2056 						  RTE_FLOW_ERROR_TYPE_ACTION,
2057 						  actions,
2058 						  "action not supported");
2059 		}
2060 	}
2061 	return ret;
2062 }
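
/*
 * Illustrative sketch (application side, public rte_flow API, not part
 * of the driver): a COUNT query that lands in flow_verbs_query():
 *
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_query(port_id, flow, actions, &count, &error) == 0)
 *		printf("hits: %" PRIu64 "\n", count.hits);
 */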
2063 
2064 static int
2065 flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
2066 		       uint32_t flags)
2067 {
2068 	RTE_SET_USED(dev);
2069 	RTE_SET_USED(domains);
2070 	RTE_SET_USED(flags);
2071 
2072 	return 0;
2073 }
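
/*
 * sync_domain is a no-op for the Verbs driver: rules installed through
 * ibv_create_flow() take effect synchronously, so there is no queued
 * hardware state to flush here.
 */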
2074 
2075 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
2076 	.validate = flow_verbs_validate,
2077 	.prepare = flow_verbs_prepare,
2078 	.translate = flow_verbs_translate,
2079 	.apply = flow_verbs_apply,
2080 	.remove = flow_verbs_remove,
2081 	.destroy = flow_verbs_destroy,
2082 	.query = flow_verbs_query,
2083 	.sync_domain = flow_verbs_sync_domain,
2084 };
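
/*
 * Illustrative sketch (simplified; assumption: the generic flow layer in
 * mlx5_flow.c dispatches through this table when the Verbs driver type
 * is selected). Flow creation drives the callbacks in order, and
 * destruction is remove() plus handle release:
 *
 *	const struct mlx5_flow_driver_ops *fops = &mlx5_flow_verbs_drv_ops;
 *
 *	fops->validate(dev, attr, items, actions, ...);
 *	dev_flow = fops->prepare(dev, attr, items, actions, &error);
 *	fops->translate(dev, dev_flow, attr, items, actions, &error);
 *	fops->apply(dev, flow, &error);
 *	...
 *	fops->destroy(dev, flow);
 */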
2085