xref: /dpdk/drivers/net/mlx5/mlx5_flow_verbs.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #include <netinet/in.h>
6 #include <sys/queue.h>
7 #include <stdalign.h>
8 #include <stdint.h>
9 #include <string.h>
10 
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_ip.h>
18 
19 #include <mlx5_glue.h>
20 #include <mlx5_prm.h>
21 #include <mlx5_malloc.h>
22 
23 #include "mlx5_defs.h"
24 #include "mlx5.h"
25 #include "mlx5_flow.h"
26 #include "mlx5_rxtx.h"
27 
28 #define VERBS_SPEC_INNER(item_flags) \
29 	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
30 
31 /* Map of Verbs to Flow priority with 8 Verbs priorities. */
32 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
33 	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
34 };
35 
36 /* Map of Verbs to Flow priority with 16 Verbs priorities. */
37 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
38 	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
39 	{ 9, 10, 11 }, { 12, 13, 14 },
40 };
41 
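/*
 * Each flow priority expands into MLX5_PRIORITY_MAP_MAX (3 here)
 * sub-priorities, one per matched layer depth. Worked example (pure array
 * lookup, illustrative only):
 *
 *	priority_map_5[1][0] == 3, priority_map_5[1][1] == 4,
 *	priority_map_5[1][2] == 5
 *
 * i.e. with 16 Verbs priorities, a rule created at flow priority 1 is
 * programmed at Verbs priority 3, 4 or 5 depending on its sub-priority.
 */
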
42 /* Verbs specification header. */
43 struct ibv_spec_header {
44 	enum ibv_flow_spec_type type;
45 	uint16_t size;
46 };
47 
48 /**
49  * Discover the maximum number of priorities available.
50  *
51  * @param[in] dev
52  *   Pointer to the Ethernet device structure.
53  *
54  * @return
55  *   Number of supported flow priorities on success, a negative errno
56  *   value otherwise and rte_errno is set.
57  */
58 int
59 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
60 {
61 	struct mlx5_priv *priv = dev->data->dev_private;
62 	struct {
63 		struct ibv_flow_attr attr;
64 		struct ibv_flow_spec_eth eth;
65 		struct ibv_flow_spec_action_drop drop;
66 	} flow_attr = {
67 		.attr = {
68 			.num_of_specs = 2,
69 			.port = (uint8_t)priv->dev_port,
70 		},
71 		.eth = {
72 			.type = IBV_FLOW_SPEC_ETH,
73 			.size = sizeof(struct ibv_flow_spec_eth),
74 		},
75 		.drop = {
76 			.size = sizeof(struct ibv_flow_spec_action_drop),
77 			.type = IBV_FLOW_SPEC_ACTION_DROP,
78 		},
79 	};
80 	struct ibv_flow *flow;
81 	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
82 	uint16_t vprio[] = { 8, 16 };
83 	int i;
84 	int priority = 0;
85 
86 	if (!drop->qp) {
87 		rte_errno = ENOTSUP;
88 		return -rte_errno;
89 	}
90 	for (i = 0; i != RTE_DIM(vprio); i++) {
91 		flow_attr.attr.priority = vprio[i] - 1;
92 		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
93 		if (!flow)
94 			break;
95 		claim_zero(mlx5_glue->destroy_flow(flow));
96 		priority = vprio[i];
97 	}
98 	switch (priority) {
99 	case 8:
100 		priority = RTE_DIM(priority_map_3);
101 		break;
102 	case 16:
103 		priority = RTE_DIM(priority_map_5);
104 		break;
105 	default:
106 		rte_errno = ENOTSUP;
107 		DRV_LOG(ERR,
108 			"port %u verbs maximum priority: %d expected 8/16",
109 			dev->data->port_id, priority);
110 		return -rte_errno;
111 	}
112 	DRV_LOG(INFO, "port %u supported flow priorities:"
113 		" 0-%d for ingress or egress root table,"
114 		" 0-%d for non-root table or transfer root table.",
115 		dev->data->port_id, priority - 2,
116 		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
117 	return priority;
118 }
119 
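/*
 * Usage sketch (illustrative; assumes the caller caches the result in
 * priv->config.flow_prio, which is what mlx5_flow_adjust_priority() below
 * reads):
 *
 *	int prio = mlx5_flow_discover_priorities(dev);
 *
 *	if (prio < 0)
 *		return prio;
 *	priv->config.flow_prio = prio;
 */
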
120 /**
121  * Adjust flow priority based on the highest layer and the request priority.
122  *
123  * @param[in] dev
124  *   Pointer to the Ethernet device structure.
125  * @param[in] priority
126  *   The rule base priority.
127  * @param[in] subpriority
128  *   The priority based on the items.
129  *
130  * @return
131  *   The new priority.
132  */
133 uint32_t
134 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
135 				   uint32_t subpriority)
136 {
137 	uint32_t res = 0;
138 	struct mlx5_priv *priv = dev->data->dev_private;
139 
140 	switch (priv->config.flow_prio) {
141 	case RTE_DIM(priority_map_3):
142 		res = priority_map_3[priority][subpriority];
143 		break;
144 	case RTE_DIM(priority_map_5):
145 		res = priority_map_5[priority][subpriority];
146 		break;
147 	}
148 	return res;
149 }
150 
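/*
 * Illustrative call, assuming priv->config.flow_prio == RTE_DIM(priority_map_5):
 *
 *	mlx5_flow_adjust_priority(dev, 1, MLX5_PRIORITY_MAP_L2)
 *
 * returns priority_map_5[1][MLX5_PRIORITY_MAP_L2]. The sub-priority argument
 * is the per-layer value chosen during translation (see
 * flow_verbs_translate() below).
 */
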
151 /**
152  * Get Verbs flow counter by index.
153  *
154  * @param[in] dev
155  *   Pointer to the Ethernet device structure.
156  * @param[in] idx
157  *   mlx5 flow counter index in the container.
158  * @param[out] ppool
159  *   mlx5 flow counter pool in the container.
160  *
161  * @return
162  *   A pointer to the counter, NULL otherwise.
163  */
164 static struct mlx5_flow_counter *
165 flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
166 			      uint32_t idx,
167 			      struct mlx5_flow_counter_pool **ppool)
168 {
169 	struct mlx5_priv *priv = dev->data->dev_private;
170 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
171 	struct mlx5_flow_counter_pool *pool;
172 
173 	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
174 	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
175 	MLX5_ASSERT(pool);
176 	if (ppool)
177 		*ppool = pool;
178 	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
179 }
180 
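/*
 * Counter indexes handed out by this driver are 1-based; the
 * MLX5_CNT_SHARED_OFFSET bit marks shared counters and is masked off before
 * the pool/offset decode above. Worked example, assuming
 * MLX5_COUNTERS_PER_POOL == 512 (an assumption for the arithmetic only):
 * index 515 decodes to pool 1, counter 2 of that pool.
 */
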
181 /**
182  * Create Verbs flow counter with Verbs library.
183  *
184  * @param[in] dev
185  *   Pointer to the Ethernet device structure.
186  * @param[in, out] counter
187  *   mlx5 flow counter object, contains the counter id;
188  *   the handle of the created Verbs flow counter is returned
189  *   in the dcs_when_free field (if counters are supported).
190  *
191  * @return
192  *   0 on success, otherwise a negative errno value is returned
193  *   and rte_errno is set.
194  */
195 static int
196 flow_verbs_counter_create(struct rte_eth_dev *dev,
197 			  struct mlx5_flow_counter *counter)
198 {
199 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
200 	struct mlx5_priv *priv = dev->data->dev_private;
201 	struct ibv_context *ctx = priv->sh->ctx;
202 	struct ibv_counter_set_init_attr init = {
203 			 .counter_set_id = counter->shared_info.id};
204 
205 	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
206 	if (!counter->dcs_when_free) {
207 		rte_errno = ENOTSUP;
208 		return -ENOTSUP;
209 	}
210 	return 0;
211 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
212 	struct mlx5_priv *priv = dev->data->dev_private;
213 	struct ibv_context *ctx = priv->sh->ctx;
214 	struct ibv_counters_init_attr init = {0};
215 	struct ibv_counter_attach_attr attach;
216 	int ret;
217 
218 	memset(&attach, 0, sizeof(attach));
219 	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
220 	if (!counter->dcs_when_free) {
221 		rte_errno = ENOTSUP;
222 		return -ENOTSUP;
223 	}
224 	attach.counter_desc = IBV_COUNTER_PACKETS;
225 	attach.index = 0;
226 	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
227 	if (!ret) {
228 		attach.counter_desc = IBV_COUNTER_BYTES;
229 		attach.index = 1;
230 		ret = mlx5_glue->attach_counters
231 					(counter->dcs_when_free, &attach, NULL);
232 	}
233 	if (ret) {
234 		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
235 		counter->dcs_when_free = NULL;
236 		rte_errno = ret;
237 		return -ret;
238 	}
239 	return 0;
240 #else
241 	(void)dev;
242 	(void)counter;
243 	rte_errno = ENOTSUP;
244 	return -ENOTSUP;
245 #endif
246 }
247 
248 /**
249  * Get a flow counter.
250  *
251  * @param[in] dev
252  *   Pointer to the Ethernet device structure.
253  * @param[in] shared
254  *   Indicate if this counter is shared with other flows.
255  * @param[in] id
256  *   Counter identifier.
257  *
258  * @return
259  *   Index to the counter, 0 otherwise and rte_errno is set.
260  */
261 static uint32_t
262 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
263 {
264 	struct mlx5_priv *priv = dev->data->dev_private;
265 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
266 	struct mlx5_flow_counter_pool *pool = NULL;
267 	struct mlx5_flow_counter *cnt = NULL;
268 	union mlx5_l3t_data data;
269 	uint32_t n_valid = cmng->n_valid;
270 	uint32_t pool_idx, cnt_idx;
271 	uint32_t i;
272 	int ret;
273 
274 	if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) &&
275 	    data.dword)
276 		return data.dword;
277 	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
278 		pool = cmng->pools[pool_idx];
279 		if (!pool)
280 			continue;
281 		cnt = TAILQ_FIRST(&pool->counters[0]);
282 		if (cnt)
283 			break;
284 	}
285 	if (!cnt) {
286 		struct mlx5_flow_counter_pool **pools;
287 		uint32_t size;
288 
289 		if (n_valid == cmng->n) {
290 			/* Resize the container pool array. */
291 			size = sizeof(struct mlx5_flow_counter_pool *) *
292 				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
293 			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
294 					    SOCKET_ID_ANY);
295 			if (!pools)
296 				return 0;
297 			if (n_valid) {
298 				memcpy(pools, cmng->pools,
299 				       sizeof(struct mlx5_flow_counter_pool *) *
300 				       n_valid);
301 				mlx5_free(cmng->pools);
302 			}
303 			cmng->pools = pools;
304 			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
305 		}
306 		/* Allocate memory for a new pool. */
307 		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
308 		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
309 		if (!pool)
310 			return 0;
311 		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
312 			cnt = MLX5_POOL_GET_CNT(pool, i);
313 			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
314 		}
315 		cnt = MLX5_POOL_GET_CNT(pool, 0);
316 		cmng->pools[n_valid] = pool;
317 		pool_idx = n_valid;
318 		cmng->n_valid++;
319 	}
320 	TAILQ_REMOVE(&pool->counters[0], cnt, next);
321 	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
322 	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
323 	if (shared) {
324 		data.dword = cnt_idx;
325 		if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
326 			return 0;
327 		cnt->shared_info.id = id;
328 		cnt_idx |= MLX5_CNT_SHARED_OFFSET;
329 	}
330 	/* Create counter with Verbs. */
331 	ret = flow_verbs_counter_create(dev, cnt);
332 	if (!ret) {
333 		cnt->dcs_when_active = cnt->dcs_when_free;
334 		cnt->hits = 0;
335 		cnt->bytes = 0;
336 		return cnt_idx;
337 	}
338 	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
339 	/* Some error occurred in Verbs library. */
340 	rte_errno = -ret;
341 	return 0;
342 }
343 
344 /**
345  * Release a flow counter.
346  *
347  * @param[in] dev
348  *   Pointer to the Ethernet device structure.
349  * @param[in] counter
350  *   Index to the counter handle.
351  */
352 static void
353 flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
354 {
355 	struct mlx5_priv *priv = dev->data->dev_private;
356 	struct mlx5_flow_counter_pool *pool;
357 	struct mlx5_flow_counter *cnt;
358 
359 	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
360 	if (IS_SHARED_CNT(counter) &&
361 	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
362 		return;
363 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
364 	claim_zero(mlx5_glue->destroy_counter_set
365 			((struct ibv_counter_set *)cnt->dcs_when_active));
366 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
367 	claim_zero(mlx5_glue->destroy_counters
368 				((struct ibv_counters *)cnt->dcs_when_active));
369 #endif
370 	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
371 }
372 
373 /**
374  * Query a flow counter via Verbs library call.
375  *
376  * @see rte_flow_query()
377  * @see rte_flow_ops
378  */
379 static int
380 flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
381 			 struct rte_flow *flow, void *data,
382 			 struct rte_flow_error *error)
383 {
384 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
385 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
386 	if (flow->counter) {
387 		struct mlx5_flow_counter_pool *pool;
388 		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
389 						(dev, flow->counter, &pool);
390 		struct rte_flow_query_count *qc = data;
391 		uint64_t counters[2] = {0, 0};
392 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
393 		struct ibv_query_counter_set_attr query_cs_attr = {
394 			.cs = (struct ibv_counter_set *)
395 						cnt->dcs_when_active,
396 			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
397 		};
398 		struct ibv_counter_set_data query_out = {
399 			.out = counters,
400 			.outlen = 2 * sizeof(uint64_t),
401 		};
402 		int err = mlx5_glue->query_counter_set(&query_cs_attr,
403 						       &query_out);
404 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
405 		int err = mlx5_glue->query_counters
406 			((struct ibv_counters *)cnt->dcs_when_active, counters,
407 				RTE_DIM(counters),
408 				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
409 #endif
410 		if (err)
411 			return rte_flow_error_set
412 				(error, err,
413 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
414 				 NULL,
415 				 "cannot read counter");
416 		qc->hits_set = 1;
417 		qc->bytes_set = 1;
418 		qc->hits = counters[0] - cnt->hits;
419 		qc->bytes = counters[1] - cnt->bytes;
420 		if (qc->reset) {
421 			cnt->hits = counters[0];
422 			cnt->bytes = counters[1];
423 		}
424 		return 0;
425 	}
426 	return rte_flow_error_set(error, EINVAL,
427 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
428 				  NULL,
429 				  "flow does not have counter");
430 #else
431 	(void)flow;
432 	(void)data;
433 	return rte_flow_error_set(error, ENOTSUP,
434 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
435 				  NULL,
436 				  "counters are not available");
437 #endif
438 }
439 
440 /**
441  * Add a Verbs item specification into @p verbs.
442  *
443  * @param[out] verbs
444  *   Pointer to verbs structure.
445  * @param[in] src
446  *   Pointer to the specification to copy.
447  * @param[in] size
448  *   Size in bytes of the specification to copy.
449  */
450 static void
451 flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
452 		    void *src, unsigned int size)
453 {
454 	void *dst;
455 
456 	if (!verbs)
457 		return;
458 	MLX5_ASSERT(verbs->specs);
459 	dst = (void *)(verbs->specs + verbs->size);
460 	memcpy(dst, src, size);
461 	++verbs->attr.num_of_specs;
462 	verbs->size += size;
463 }
464 
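/*
 * The buffer built here is laid out exactly as ibv_create_flow() expects:
 * an ibv_flow_attr header followed by num_of_specs back-to-back
 * variable-size specs, each starting with an ibv_spec_header:
 *
 *	[ibv_flow_attr][spec 0][spec 1]...[spec N-1]
 *
 * flow_verbs_item_vlan_update() below walks this layout using hdr->size.
 */
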
465 /**
466  * Convert the @p item into a Verbs specification. This function assumes that
467  * the input is valid and that there is space to insert the requested item
468  * into the flow.
469  *
470  * @param[in, out] dev_flow
471  *   Pointer to dev_flow structure.
472  * @param[in] item
473  *   Item specification.
474  * @param[in] item_flags
475  *   Parsed item flags.
476  */
477 static void
478 flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
479 			      const struct rte_flow_item *item,
480 			      uint64_t item_flags)
481 {
482 	const struct rte_flow_item_eth *spec = item->spec;
483 	const struct rte_flow_item_eth *mask = item->mask;
484 	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
485 	struct ibv_flow_spec_eth eth = {
486 		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
487 		.size = size,
488 	};
489 
490 	if (!mask)
491 		mask = &rte_flow_item_eth_mask;
492 	if (spec) {
493 		unsigned int i;
494 
495 		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
496 			RTE_ETHER_ADDR_LEN);
497 		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
498 			RTE_ETHER_ADDR_LEN);
499 		eth.val.ether_type = spec->type;
500 		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
501 			RTE_ETHER_ADDR_LEN);
502 		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
503 			RTE_ETHER_ADDR_LEN);
504 		eth.mask.ether_type = mask->type;
505 		/* Remove unwanted bits from values. */
506 		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
507 			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
508 			eth.val.src_mac[i] &= eth.mask.src_mac[i];
509 		}
510 		eth.val.ether_type &= eth.mask.ether_type;
511 	}
512 	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
513 }
514 
515 /**
516  * Update the VLAN tag in the Verbs Ethernet specification.
517  * This function assumes that the input is valid and there is space to add
518  * the requested item.
519  *
520  * @param[in, out] attr
521  *   Pointer to Verbs attributes structure.
522  * @param[in] eth
523  *   Verbs structure containing the VLAN information to copy.
524  */
525 static void
526 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
527 			    struct ibv_flow_spec_eth *eth)
528 {
529 	unsigned int i;
530 	const enum ibv_flow_spec_type search = eth->type;
531 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
532 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
533 
534 	for (i = 0; i != attr->num_of_specs; ++i) {
535 		if (hdr->type == search) {
536 			struct ibv_flow_spec_eth *e =
537 				(struct ibv_flow_spec_eth *)hdr;
538 
539 			e->val.vlan_tag = eth->val.vlan_tag;
540 			e->mask.vlan_tag = eth->mask.vlan_tag;
541 			e->val.ether_type = eth->val.ether_type;
542 			e->mask.ether_type = eth->mask.ether_type;
543 			break;
544 		}
545 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
546 	}
547 }
548 
549 /**
550  * Convert the @p item into a Verbs specification. This function assumes that
551  * the input is valid and that there is space to insert the requested item
552  * into the flow.
553  *
554  * @param[in, out] dev_flow
555  *   Pointer to dev_flow structure.
556  * @param[in] item
557  *   Item specification.
558  * @param[in] item_flags
559  *   Parsed item flags.
560  */
561 static void
562 flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
563 			       const struct rte_flow_item *item,
564 			       uint64_t item_flags)
565 {
566 	const struct rte_flow_item_vlan *spec = item->spec;
567 	const struct rte_flow_item_vlan *mask = item->mask;
568 	unsigned int size = sizeof(struct ibv_flow_spec_eth);
569 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
570 	struct ibv_flow_spec_eth eth = {
571 		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
572 		.size = size,
573 	};
574 	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
575 				      MLX5_FLOW_LAYER_OUTER_L2;
576 
577 	if (!mask)
578 		mask = &rte_flow_item_vlan_mask;
579 	if (spec) {
580 		eth.val.vlan_tag = spec->tci;
581 		eth.mask.vlan_tag = mask->tci;
582 		eth.val.vlan_tag &= eth.mask.vlan_tag;
583 		eth.val.ether_type = spec->inner_type;
584 		eth.mask.ether_type = mask->inner_type;
585 		eth.val.ether_type &= eth.mask.ether_type;
586 	}
587 	if (!(item_flags & l2m))
588 		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
589 	else
590 		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
591 	if (!tunnel && spec)
592 		dev_flow->handle->vf_vlan.tag =
593 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
594 }
595 
596 /**
597  * Convert the @p item into a Verbs specification. This function assumes that
598  * the input is valid and that there is space to insert the requested item
599  * into the flow.
600  *
601  * @param[in, out] dev_flow
602  *   Pointer to dev_flow structure.
603  * @param[in] item
604  *   Item specification.
605  * @param[in] item_flags
606  *   Parsed item flags.
607  */
608 static void
609 flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
610 			       const struct rte_flow_item *item,
611 			       uint64_t item_flags)
612 {
613 	const struct rte_flow_item_ipv4 *spec = item->spec;
614 	const struct rte_flow_item_ipv4 *mask = item->mask;
615 	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
616 	struct ibv_flow_spec_ipv4_ext ipv4 = {
617 		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
618 		.size = size,
619 	};
620 
621 	if (!mask)
622 		mask = &rte_flow_item_ipv4_mask;
623 	if (spec) {
624 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
625 			.src_ip = spec->hdr.src_addr,
626 			.dst_ip = spec->hdr.dst_addr,
627 			.proto = spec->hdr.next_proto_id,
628 			.tos = spec->hdr.type_of_service,
629 		};
630 		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
631 			.src_ip = mask->hdr.src_addr,
632 			.dst_ip = mask->hdr.dst_addr,
633 			.proto = mask->hdr.next_proto_id,
634 			.tos = mask->hdr.type_of_service,
635 		};
636 		/* Remove unwanted bits from values. */
637 		ipv4.val.src_ip &= ipv4.mask.src_ip;
638 		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
639 		ipv4.val.proto &= ipv4.mask.proto;
640 		ipv4.val.tos &= ipv4.mask.tos;
641 	}
642 	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
643 }
644 
645 /**
646  * Convert the @p item into a Verbs specification. This function assumes that
647  * the input is valid and that there is space to insert the requested item
648  * into the flow.
649  *
650  * @param[in, out] dev_flow
651  *   Pointer to dev_flow structure.
652  * @param[in] item
653  *   Item specification.
654  * @param[in] item_flags
655  *   Parsed item flags.
656  */
657 static void
658 flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
659 			       const struct rte_flow_item *item,
660 			       uint64_t item_flags)
661 {
662 	const struct rte_flow_item_ipv6 *spec = item->spec;
663 	const struct rte_flow_item_ipv6 *mask = item->mask;
664 	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
665 	struct ibv_flow_spec_ipv6 ipv6 = {
666 		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
667 		.size = size,
668 	};
669 
670 	if (!mask)
671 		mask = &rte_flow_item_ipv6_mask;
672 	if (spec) {
673 		unsigned int i;
674 		uint32_t vtc_flow_val;
675 		uint32_t vtc_flow_mask;
676 
677 		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
678 		       RTE_DIM(ipv6.val.src_ip));
679 		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
680 		       RTE_DIM(ipv6.val.dst_ip));
681 		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
682 		       RTE_DIM(ipv6.mask.src_ip));
683 		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
684 		       RTE_DIM(ipv6.mask.dst_ip));
685 		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
686 		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
687 		ipv6.val.flow_label =
688 			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
689 					 RTE_IPV6_HDR_FL_SHIFT);
690 		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
691 					 RTE_IPV6_HDR_TC_SHIFT;
692 		ipv6.val.next_hdr = spec->hdr.proto;
693 		ipv6.mask.flow_label =
694 			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
695 					 RTE_IPV6_HDR_FL_SHIFT);
696 		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
697 					  RTE_IPV6_HDR_TC_SHIFT;
698 		ipv6.mask.next_hdr = mask->hdr.proto;
699 		/* Remove unwanted bits from values. */
700 		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
701 			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
702 			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
703 		}
704 		ipv6.val.flow_label &= ipv6.mask.flow_label;
705 		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
706 		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
707 	}
708 	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
709 }
710 
711 /**
712  * Convert the @p item into a Verbs specification. This function assumes that
713  * the input is valid and that there is space to insert the requested item
714  * into the flow.
715  *
716  * @param[in, out] dev_flow
717  *   Pointer to dev_flow structure.
718  * @param[in] item
719  *   Item specification.
720  * @param[in] item_flags
721  *   Parsed item flags.
722  */
723 static void
724 flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
725 			      const struct rte_flow_item *item,
726 			      uint64_t item_flags __rte_unused)
727 {
728 	const struct rte_flow_item_tcp *spec = item->spec;
729 	const struct rte_flow_item_tcp *mask = item->mask;
730 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
731 	struct ibv_flow_spec_tcp_udp tcp = {
732 		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
733 		.size = size,
734 	};
735 
736 	if (!mask)
737 		mask = &rte_flow_item_tcp_mask;
738 	if (spec) {
739 		tcp.val.dst_port = spec->hdr.dst_port;
740 		tcp.val.src_port = spec->hdr.src_port;
741 		tcp.mask.dst_port = mask->hdr.dst_port;
742 		tcp.mask.src_port = mask->hdr.src_port;
743 		/* Remove unwanted bits from values. */
744 		tcp.val.src_port &= tcp.mask.src_port;
745 		tcp.val.dst_port &= tcp.mask.dst_port;
746 	}
747 	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
748 }
749 
750 /**
751  * Convert the @p item into a Verbs specification. This function assumes that
752  * the input is valid and that there is space to insert the requested item
753  * into the flow.
754  *
755  * @param[in, out] dev_flow
756  *   Pointer to dev_flow structure.
757  * @param[in] item
758  *   Item specification.
759  * @param[in] item_flags
760  *   Parsed item flags.
761  */
762 static void
763 flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
764 			      const struct rte_flow_item *item,
765 			      uint64_t item_flags __rte_unused)
766 {
767 	const struct rte_flow_item_udp *spec = item->spec;
768 	const struct rte_flow_item_udp *mask = item->mask;
769 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
770 	struct ibv_flow_spec_tcp_udp udp = {
771 		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
772 		.size = size,
773 	};
774 
775 	if (!mask)
776 		mask = &rte_flow_item_udp_mask;
777 	if (spec) {
778 		udp.val.dst_port = spec->hdr.dst_port;
779 		udp.val.src_port = spec->hdr.src_port;
780 		udp.mask.dst_port = mask->hdr.dst_port;
781 		udp.mask.src_port = mask->hdr.src_port;
782 		/* Remove unwanted bits from values. */
783 		udp.val.src_port &= udp.mask.src_port;
784 		udp.val.dst_port &= udp.mask.dst_port;
785 	}
786 	item++;
787 	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
788 		item++;
789 	if (!(udp.val.dst_port & udp.mask.dst_port)) {
790 		switch ((item)->type) {
791 		case RTE_FLOW_ITEM_TYPE_VXLAN:
792 			udp.val.dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
793 			udp.mask.dst_port = 0xffff;
794 			break;
795 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
796 			udp.val.dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
797 			udp.mask.dst_port = 0xffff;
798 			break;
799 		case RTE_FLOW_ITEM_TYPE_MPLS:
800 			udp.val.dst_port = RTE_BE16(MLX5_UDP_PORT_MPLS);
801 			udp.mask.dst_port = 0xffff;
802 			break;
803 		default:
804 			break;
805 		}
806 	}
807 
808 	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
809 }
810 
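/*
 * Example of the destination port defaulting above (illustrative): for the
 * pattern eth / ipv4 / udp / vxlan with no UDP destination port specified,
 * the UDP spec is completed with dst_port == MLX5_UDP_PORT_VXLAN (4789) so
 * the tunnel match remains unambiguous at the Verbs level.
 */
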
811 /**
812  * Convert the @p item into a Verbs specification. This function assumes that
813  * the input is valid and that there is space to insert the requested item
814  * into the flow.
815  *
816  * @param[in, out] dev_flow
817  *   Pointer to dev_flow structure.
818  * @param[in] item
819  *   Item specification.
820  * @param[in] item_flags
821  *   Parsed item flags.
822  */
823 static void
824 flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
825 				const struct rte_flow_item *item,
826 				uint64_t item_flags __rte_unused)
827 {
828 	const struct rte_flow_item_vxlan *spec = item->spec;
829 	const struct rte_flow_item_vxlan *mask = item->mask;
830 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
831 	struct ibv_flow_spec_tunnel vxlan = {
832 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
833 		.size = size,
834 	};
835 	union vni {
836 		uint32_t vlan_id;
837 		uint8_t vni[4];
838 	} id = { .vlan_id = 0, };
839 
840 	if (!mask)
841 		mask = &rte_flow_item_vxlan_mask;
842 	if (spec) {
843 		memcpy(&id.vni[1], spec->vni, 3);
844 		vxlan.val.tunnel_id = id.vlan_id;
845 		memcpy(&id.vni[1], mask->vni, 3);
846 		vxlan.mask.tunnel_id = id.vlan_id;
847 		/* Remove unwanted bits from values. */
848 		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
849 	}
850 	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
851 }
852 
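/*
 * The 3-byte network-order VNI is copied into bytes 1..3 of the temporary
 * union, so the resulting 32-bit tunnel_id equals the VNI when read as a
 * big-endian value (byte 0 stays zero). Sketch:
 *
 *	spec->vni = {0x12, 0x34, 0x56}  ->  tunnel_id (big endian) 0x00123456
 */
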
853 /**
854  * Convert the @p item into a Verbs specification. This function assumes that
855  * the input is valid and that there is space to insert the requested item
856  * into the flow.
857  *
858  * @param[in, out] dev_flow
859  *   Pointer to dev_flow structure.
860  * @param[in] item
861  *   Item specification.
862  * @param[in] item_flags
863  *   Parsed item flags.
864  */
865 static void
866 flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
867 				    const struct rte_flow_item *item,
868 				    uint64_t item_flags __rte_unused)
869 {
870 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
871 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
872 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
873 	struct ibv_flow_spec_tunnel vxlan_gpe = {
874 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
875 		.size = size,
876 	};
877 	union vni {
878 		uint32_t vlan_id;
879 		uint8_t vni[4];
880 	} id = { .vlan_id = 0, };
881 
882 	if (!mask)
883 		mask = &rte_flow_item_vxlan_gpe_mask;
884 	if (spec) {
885 		memcpy(&id.vni[1], spec->vni, 3);
886 		vxlan_gpe.val.tunnel_id = id.vlan_id;
887 		memcpy(&id.vni[1], mask->vni, 3);
888 		vxlan_gpe.mask.tunnel_id = id.vlan_id;
889 		/* Remove unwanted bits from values. */
890 		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
891 	}
892 	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
893 }
894 
895 /**
896  * Update the protocol in Verbs IPv4/IPv6 spec.
897  *
898  * @param[in, out] attr
899  *   Pointer to Verbs attributes structure.
900  * @param[in] search
901  *   Specification type to search in order to update the IP protocol.
902  * @param[in] protocol
903  *   Protocol value to set if none is present in the specification.
904  */
905 static void
906 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
907 				       enum ibv_flow_spec_type search,
908 				       uint8_t protocol)
909 {
910 	struct ibv_spec_header *hdr;
911 
912 	if (!attr)
913 		return;
914 	hdr = (struct ibv_spec_header *)((uint8_t *)attr + sizeof(*attr));
915 		return;
916 	for (i = 0; i != attr->num_of_specs; ++i) {
917 		if (hdr->type == search) {
918 			union {
919 				struct ibv_flow_spec_ipv4_ext *ipv4;
920 				struct ibv_flow_spec_ipv6 *ipv6;
921 			} ip;
922 
923 			switch (search) {
924 			case IBV_FLOW_SPEC_IPV4_EXT:
925 				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
926 				if (!ip.ipv4->val.proto) {
927 					ip.ipv4->val.proto = protocol;
928 					ip.ipv4->mask.proto = 0xff;
929 				}
930 				break;
931 			case IBV_FLOW_SPEC_IPV6:
932 				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
933 				if (!ip.ipv6->val.next_hdr) {
934 					ip.ipv6->val.next_hdr = protocol;
935 					ip.ipv6->mask.next_hdr = 0xff;
936 				}
937 				break;
938 			default:
939 				break;
940 			}
941 			break;
942 		}
943 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
944 	}
945 }
946 
947 /**
948  * Convert the @p item into a Verbs specification. This function assumes that
949  * the input is valid and that there is space to insert the requested item
950  * into the flow.
951  *
952  * @param[in, out] dev_flow
953  *   Pointer to dev_flow structure.
954  * @param[in] item
955  *   Item specification.
956  * @param[in] item_flags
957  *   Parsed item flags.
958  */
959 static void
960 flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
961 			      const struct rte_flow_item *item __rte_unused,
962 			      uint64_t item_flags)
963 {
964 	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
965 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
966 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
967 	struct ibv_flow_spec_tunnel tunnel = {
968 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
969 		.size = size,
970 	};
971 #else
972 	const struct rte_flow_item_gre *spec = item->spec;
973 	const struct rte_flow_item_gre *mask = item->mask;
974 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
975 	struct ibv_flow_spec_gre tunnel = {
976 		.type = IBV_FLOW_SPEC_GRE,
977 		.size = size,
978 	};
979 
980 	if (!mask)
981 		mask = &rte_flow_item_gre_mask;
982 	if (spec) {
983 		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
984 		tunnel.val.protocol = spec->protocol;
985 		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
986 		tunnel.mask.protocol = mask->protocol;
987 		/* Remove unwanted bits from values. */
988 		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
989 		tunnel.val.protocol &= tunnel.mask.protocol;
990 		tunnel.val.key &= tunnel.mask.key;
991 	}
992 #endif
993 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
994 		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
995 						       IBV_FLOW_SPEC_IPV4_EXT,
996 						       IPPROTO_GRE);
997 	else
998 		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
999 						       IBV_FLOW_SPEC_IPV6,
1000 						       IPPROTO_GRE);
1001 	flow_verbs_spec_add(verbs, &tunnel, size);
1002 }
1003 
1004 /**
1005  * Convert the @p item into a Verbs specification. This function assumes that
1006  * the input is valid and that there is space to insert the requested item
1007  * into the flow.
1008  *
1009  * @param[in, out] dev_flow
1010  *   Pointer to dev_flow structure.
1011  * @param[in] item
1012  *   Item specification.
1013  * @param[in] item_flags
1014  *   Parsed item flags.
1015  */
1016 static void
1017 flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
1018 			       const struct rte_flow_item *item __rte_unused,
1019 			       uint64_t item_flags __rte_unused)
1020 {
1021 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1022 	const struct rte_flow_item_mpls *spec = item->spec;
1023 	const struct rte_flow_item_mpls *mask = item->mask;
1024 	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
1025 	struct ibv_flow_spec_mpls mpls = {
1026 		.type = IBV_FLOW_SPEC_MPLS,
1027 		.size = size,
1028 	};
1029 
1030 	if (!mask)
1031 		mask = &rte_flow_item_mpls_mask;
1032 	if (spec) {
1033 		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
1034 		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
1035 		/* Remove unwanted bits from values.  */
1036 		mpls.val.label &= mpls.mask.label;
1037 	}
1038 	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
1039 #endif
1040 }
1041 
1042 /**
1043  * Convert the @p action into a Verbs specification. This function assumes that
1044  * the input is valid and that there is space to insert the requested action
1045  * into the flow.
1046  *
1047  * @param[in] dev_flow
1048  *   Pointer to mlx5_flow.
1049  * @param[in] action
1050  *   Action configuration.
1051  */
1052 static void
1053 flow_verbs_translate_action_drop
1054 	(struct mlx5_flow *dev_flow,
1055 	 const struct rte_flow_action *action __rte_unused)
1056 {
1057 	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1058 	struct ibv_flow_spec_action_drop drop = {
1059 			.type = IBV_FLOW_SPEC_ACTION_DROP,
1060 			.size = size,
1061 	};
1062 
1063 	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
1064 }
1065 
1066 /**
1067  * Convert the @p action into a Verbs specification. This function assumes that
1068  * the input is valid and that there is space to insert the requested action
1069  * into the flow.
1070  *
1071  * @param[in] rss_desc
1072  *   Pointer to mlx5_flow_rss_desc.
1073  * @param[in] action
1074  *   Action configuration.
1075  */
1076 static void
1077 flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
1078 				  const struct rte_flow_action *action)
1079 {
1080 	const struct rte_flow_action_queue *queue = action->conf;
1081 
1082 	rss_desc->queue[0] = queue->index;
1083 	rss_desc->queue_num = 1;
1084 }
1085 
1086 /**
1087  * Convert the @p action into a Verbs specification. This function assumes that
1088  * the input is valid and that there is space to insert the requested action
1089  * into the flow.
1090  *
1091  * @param[in] rss_desc
1092  *   Pointer to mlx5_flow_rss_desc.
1093  * @param[in] action
1094  *   Action configuration.
1095  */
1096 static void
1097 flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
1098 				const struct rte_flow_action *action)
1099 {
1100 	const struct rte_flow_action_rss *rss = action->conf;
1101 	const uint8_t *rss_key;
1102 
1103 	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
1104 	rss_desc->queue_num = rss->queue_num;
1105 	/* NULL RSS key indicates default RSS key. */
1106 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
1107 	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
1108 	/*
1109 	 * rss->level and rss->types should be set in advance when expanding
1110 	 * items for RSS.
1111 	 */
1112 }
1113 
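/*
 * Illustrative RSS action as consumed here (field names from the public
 * rte_flow API; a NULL key selects rss_hash_default_key):
 *
 *	uint16_t queues[] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *		.key = NULL,	(default RSS key)
 *		.key_len = 0,
 *	};
 */
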
1114 /**
1115  * Convert the @p action into a Verbs specification. This function assumes that
1116  * the input is valid and that there is space to insert the requested action
1117  * into the flow.
1118  *
1119  * @param[in] dev_flow
1120  *   Pointer to mlx5_flow.
1121  * @param[in] action
1122  *   Action configuration.
1123  */
1124 static void
1125 flow_verbs_translate_action_flag
1126 	(struct mlx5_flow *dev_flow,
1127 	 const struct rte_flow_action *action __rte_unused)
1128 {
1129 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1130 	struct ibv_flow_spec_action_tag tag = {
1131 		.type = IBV_FLOW_SPEC_ACTION_TAG,
1132 		.size = size,
1133 		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
1134 	};
1135 
1136 	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
1137 }
1138 
1139 /**
1140  * Convert the @p action into a Verbs specification. This function assumes that
1141  * the input is valid and that there is space to insert the requested action
1142  * into the flow.
1143  *
1144  * @param[in] dev_flow
1145  *   Pointer to mlx5_flow.
1146  * @param[in] action
1147  *   Action configuration.
1148  */
1149 static void
1150 flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
1151 				 const struct rte_flow_action *action)
1152 {
1153 	const struct rte_flow_action_mark *mark = action->conf;
1154 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1155 	struct ibv_flow_spec_action_tag tag = {
1156 		.type = IBV_FLOW_SPEC_ACTION_TAG,
1157 		.size = size,
1158 		.tag_id = mlx5_flow_mark_set(mark->id),
1159 	};
1160 
1161 	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
1162 }
1163 
1164 /**
1165  * Convert the @p action into a Verbs specification. This function assumes that
1166  * the input is valid and that there is space to insert the requested action
1167  * into the flow.
1168  *
1169  * @param[in] dev_flow
1170  *   Pointer to mlx5_flow.
1171  * @param[in] action
1172  *   Action configuration.
1173  * @param[in] dev
1174  *   Pointer to the Ethernet device structure.
1175  * @param[out] error
1176  *   Pointer to error structure.
1177  *
1178  * @return
1179  *   0 on success, otherwise a negative errno value is returned and rte_errno is set.
1180  */
1181 static int
1182 flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
1183 				  const struct rte_flow_action *action,
1184 				  struct rte_eth_dev *dev,
1185 				  struct rte_flow_error *error)
1186 {
1187 	const struct rte_flow_action_count *count = action->conf;
1188 	struct rte_flow *flow = dev_flow->flow;
1189 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1190 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1191 	struct mlx5_flow_counter_pool *pool;
1192 	struct mlx5_flow_counter *cnt = NULL;
1193 	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
1194 	struct ibv_flow_spec_counter_action counter = {
1195 		.type = IBV_FLOW_SPEC_ACTION_COUNT,
1196 		.size = size,
1197 	};
1198 #endif
1199 
1200 	if (!flow->counter) {
1201 		flow->counter = flow_verbs_counter_new(dev, count->shared,
1202 						       count->id);
1203 		if (!flow->counter)
1204 			return rte_flow_error_set(error, rte_errno,
1205 						  RTE_FLOW_ERROR_TYPE_ACTION,
1206 						  action,
1207 						  "cannot get counter"
1208 						  " context.");
1209 	}
1210 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
1211 	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
1212 	counter.counter_set_handle =
1213 		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
1214 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
1215 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1216 	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
1217 	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
1218 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
1219 #endif
1220 	return 0;
1221 }
1222 
1223 /**
1224  * Internal validation function for validating both actions and items.
1225  *
1226  * @param[in] dev
1227  *   Pointer to the Ethernet device structure.
1228  * @param[in] attr
1229  *   Pointer to the flow attributes.
1230  * @param[in] items
1231  *   Pointer to the list of items.
1232  * @param[in] actions
1233  *   Pointer to the list of actions.
1234  * @param[in] external
1235  *   This flow rule is created by a request external to the PMD.
1236  * @param[in] hairpin
1237  *   Number of hairpin TX actions, 0 means classic flow.
1238  * @param[out] error
1239  *   Pointer to the error structure.
1240  *
1241  * @return
1242  *   0 on success, a negative errno value otherwise and rte_errno is set.
1243  */
1244 static int
1245 flow_verbs_validate(struct rte_eth_dev *dev,
1246 		    const struct rte_flow_attr *attr,
1247 		    const struct rte_flow_item items[],
1248 		    const struct rte_flow_action actions[],
1249 		    bool external __rte_unused,
1250 		    int hairpin __rte_unused,
1251 		    struct rte_flow_error *error)
1252 {
1253 	int ret;
1254 	uint64_t action_flags = 0;
1255 	uint64_t item_flags = 0;
1256 	uint64_t last_item = 0;
1257 	uint8_t next_protocol = 0xff;
1258 	uint16_t ether_type = 0;
1259 	bool is_empty_vlan = false;
1260 
1261 	if (items == NULL)
1262 		return -1;
1263 	ret = mlx5_flow_validate_attributes(dev, attr, error);
1264 	if (ret < 0)
1265 		return ret;
1266 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1267 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1268 		int ret = 0;
1269 
1270 		switch (items->type) {
1271 		case RTE_FLOW_ITEM_TYPE_VOID:
1272 			break;
1273 		case RTE_FLOW_ITEM_TYPE_ETH:
1274 			ret = mlx5_flow_validate_item_eth(items, item_flags,
1275 							  false, error);
1276 			if (ret < 0)
1277 				return ret;
1278 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1279 					     MLX5_FLOW_LAYER_OUTER_L2;
1280 			if (items->mask != NULL && items->spec != NULL) {
1281 				ether_type =
1282 					((const struct rte_flow_item_eth *)
1283 					 items->spec)->type;
1284 				ether_type &=
1285 					((const struct rte_flow_item_eth *)
1286 					 items->mask)->type;
1287 				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
1288 					is_empty_vlan = true;
1289 				ether_type = rte_be_to_cpu_16(ether_type);
1290 			} else {
1291 				ether_type = 0;
1292 			}
1293 			break;
1294 		case RTE_FLOW_ITEM_TYPE_VLAN:
1295 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
1296 							   dev, error);
1297 			if (ret < 0)
1298 				return ret;
1299 			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1300 					      MLX5_FLOW_LAYER_INNER_VLAN) :
1301 					     (MLX5_FLOW_LAYER_OUTER_L2 |
1302 					      MLX5_FLOW_LAYER_OUTER_VLAN);
1303 			if (items->mask != NULL && items->spec != NULL) {
1304 				ether_type =
1305 					((const struct rte_flow_item_vlan *)
1306 					 items->spec)->inner_type;
1307 				ether_type &=
1308 					((const struct rte_flow_item_vlan *)
1309 					 items->mask)->inner_type;
1310 				ether_type = rte_be_to_cpu_16(ether_type);
1311 			} else {
1312 				ether_type = 0;
1313 			}
1314 			is_empty_vlan = false;
1315 			break;
1316 		case RTE_FLOW_ITEM_TYPE_IPV4:
1317 			ret = mlx5_flow_validate_item_ipv4
1318 						(items, item_flags,
1319 						 last_item, ether_type, NULL,
1320 						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
1321 						 error);
1322 			if (ret < 0)
1323 				return ret;
1324 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1325 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1326 			if (items->mask != NULL &&
1327 			    ((const struct rte_flow_item_ipv4 *)
1328 			     items->mask)->hdr.next_proto_id) {
1329 				next_protocol =
1330 					((const struct rte_flow_item_ipv4 *)
1331 					 (items->spec))->hdr.next_proto_id;
1332 				next_protocol &=
1333 					((const struct rte_flow_item_ipv4 *)
1334 					 (items->mask))->hdr.next_proto_id;
1335 			} else {
1336 				/* Reset for inner layer. */
1337 				next_protocol = 0xff;
1338 			}
1339 			break;
1340 		case RTE_FLOW_ITEM_TYPE_IPV6:
1341 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1342 							   last_item,
1343 							   ether_type, NULL,
1344 							   error);
1345 			if (ret < 0)
1346 				return ret;
1347 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1348 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1349 			if (items->mask != NULL &&
1350 			    ((const struct rte_flow_item_ipv6 *)
1351 			     items->mask)->hdr.proto) {
1352 				next_protocol =
1353 					((const struct rte_flow_item_ipv6 *)
1354 					 items->spec)->hdr.proto;
1355 				next_protocol &=
1356 					((const struct rte_flow_item_ipv6 *)
1357 					 items->mask)->hdr.proto;
1358 			} else {
1359 				/* Reset for inner layer. */
1360 				next_protocol = 0xff;
1361 			}
1362 			break;
1363 		case RTE_FLOW_ITEM_TYPE_UDP:
1364 			ret = mlx5_flow_validate_item_udp(items, item_flags,
1365 							  next_protocol,
1366 							  error);
1367 			if (ret < 0)
1368 				return ret;
1369 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1370 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
1371 			break;
1372 		case RTE_FLOW_ITEM_TYPE_TCP:
1373 			ret = mlx5_flow_validate_item_tcp
1374 						(items, item_flags,
1375 						 next_protocol,
1376 						 &rte_flow_item_tcp_mask,
1377 						 error);
1378 			if (ret < 0)
1379 				return ret;
1380 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1381 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
1382 			break;
1383 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1384 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1385 							    error);
1386 			if (ret < 0)
1387 				return ret;
1388 			last_item = MLX5_FLOW_LAYER_VXLAN;
1389 			break;
1390 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1391 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
1392 								item_flags,
1393 								dev, error);
1394 			if (ret < 0)
1395 				return ret;
1396 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1397 			break;
1398 		case RTE_FLOW_ITEM_TYPE_GRE:
1399 			ret = mlx5_flow_validate_item_gre(items, item_flags,
1400 							  next_protocol, error);
1401 			if (ret < 0)
1402 				return ret;
1403 			last_item = MLX5_FLOW_LAYER_GRE;
1404 			break;
1405 		case RTE_FLOW_ITEM_TYPE_MPLS:
1406 			ret = mlx5_flow_validate_item_mpls(dev, items,
1407 							   item_flags,
1408 							   last_item, error);
1409 			if (ret < 0)
1410 				return ret;
1411 			last_item = MLX5_FLOW_LAYER_MPLS;
1412 			break;
1413 		case RTE_FLOW_ITEM_TYPE_ICMP:
1414 		case RTE_FLOW_ITEM_TYPE_ICMP6:
1415 			return rte_flow_error_set(error, ENOTSUP,
1416 						  RTE_FLOW_ERROR_TYPE_ITEM,
1417 						  NULL, "ICMP/ICMP6 "
1418 						  "item not supported");
1419 		default:
1420 			return rte_flow_error_set(error, ENOTSUP,
1421 						  RTE_FLOW_ERROR_TYPE_ITEM,
1422 						  NULL, "item not supported");
1423 		}
1424 		item_flags |= last_item;
1425 	}
1426 	if (is_empty_vlan)
1427 		return rte_flow_error_set(error, ENOTSUP,
1428 						 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1429 		    "VLAN matching without vid specification is not supported");
1430 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1431 		switch (actions->type) {
1432 		case RTE_FLOW_ACTION_TYPE_VOID:
1433 			break;
1434 		case RTE_FLOW_ACTION_TYPE_FLAG:
1435 			ret = mlx5_flow_validate_action_flag(action_flags,
1436 							     attr,
1437 							     error);
1438 			if (ret < 0)
1439 				return ret;
1440 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1441 			break;
1442 		case RTE_FLOW_ACTION_TYPE_MARK:
1443 			ret = mlx5_flow_validate_action_mark(actions,
1444 							     action_flags,
1445 							     attr,
1446 							     error);
1447 			if (ret < 0)
1448 				return ret;
1449 			action_flags |= MLX5_FLOW_ACTION_MARK;
1450 			break;
1451 		case RTE_FLOW_ACTION_TYPE_DROP:
1452 			ret = mlx5_flow_validate_action_drop(action_flags,
1453 							     attr,
1454 							     error);
1455 			if (ret < 0)
1456 				return ret;
1457 			action_flags |= MLX5_FLOW_ACTION_DROP;
1458 			break;
1459 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1460 			ret = mlx5_flow_validate_action_queue(actions,
1461 							      action_flags, dev,
1462 							      attr,
1463 							      error);
1464 			if (ret < 0)
1465 				return ret;
1466 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1467 			break;
1468 		case RTE_FLOW_ACTION_TYPE_RSS:
1469 			ret = mlx5_flow_validate_action_rss(actions,
1470 							    action_flags, dev,
1471 							    attr, item_flags,
1472 							    error);
1473 			if (ret < 0)
1474 				return ret;
1475 			action_flags |= MLX5_FLOW_ACTION_RSS;
1476 			break;
1477 		case RTE_FLOW_ACTION_TYPE_COUNT:
1478 			ret = mlx5_flow_validate_action_count(dev, attr, error);
1479 			if (ret < 0)
1480 				return ret;
1481 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1482 			break;
1483 		default:
1484 			return rte_flow_error_set(error, ENOTSUP,
1485 						  RTE_FLOW_ERROR_TYPE_ACTION,
1486 						  actions,
1487 						  "action not supported");
1488 		}
1489 	}
1490 	/*
1491 	 * Validate the drop action mutual exclusion with other actions.
1492 	 * Drop action is mutually-exclusive with any other action, except for
1493 	 * Count action.
1494 	 */
1495 	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
1496 	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
1497 		return rte_flow_error_set(error, EINVAL,
1498 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1499 					  "Drop action is mutually-exclusive "
1500 					  "with any other action, except for "
1501 					  "Count action");
1502 	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1503 		return rte_flow_error_set(error, EINVAL,
1504 					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
1505 					  "no fate action is found");
1506 	return 0;
1507 }
1508 
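/*
 * Examples of the rules enforced above (illustrative): QUEUE + COUNT
 * validates, DROP + MARK fails with EINVAL because DROP combines only with
 * COUNT, and MARK alone fails with "no fate action is found" since MARK is
 * not a fate action.
 */
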
1509 /**
1510  * Calculate the number of bytes needed for the action part of the Verbs
1511  * flow.
1512  *
1513  * @param[in] actions
1514  *   Pointer to the list of actions.
1515  *
1516  * @return
1517  *   The size of the memory needed for all actions.
1518  */
1519 static int
1520 flow_verbs_get_actions_size(const struct rte_flow_action actions[])
1521 {
1522 	int size = 0;
1523 
1524 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1525 		switch (actions->type) {
1526 		case RTE_FLOW_ACTION_TYPE_VOID:
1527 			break;
1528 		case RTE_FLOW_ACTION_TYPE_FLAG:
1529 			size += sizeof(struct ibv_flow_spec_action_tag);
1530 			break;
1531 		case RTE_FLOW_ACTION_TYPE_MARK:
1532 			size += sizeof(struct ibv_flow_spec_action_tag);
1533 			break;
1534 		case RTE_FLOW_ACTION_TYPE_DROP:
1535 			size += sizeof(struct ibv_flow_spec_action_drop);
1536 			break;
1537 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1538 			break;
1539 		case RTE_FLOW_ACTION_TYPE_RSS:
1540 			break;
1541 		case RTE_FLOW_ACTION_TYPE_COUNT:
1542 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1543 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1544 			size += sizeof(struct ibv_flow_spec_counter_action);
1545 #endif
1546 			break;
1547 		default:
1548 			break;
1549 		}
1550 	}
1551 	return size;
1552 }
1553 
1554 /**
1555  * Calculate the number of bytes needed for the item part of the Verbs
1556  * flow.
1557  *
1558  * @param[in] items
1559  *   Pointer to the list of items.
1560  *
1561  * @return
1562  *   The size of the memory needed for all items.
1563  */
1564 static int
1565 flow_verbs_get_items_size(const struct rte_flow_item items[])
1566 {
1567 	int size = 0;
1568 
1569 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1570 		switch (items->type) {
1571 		case RTE_FLOW_ITEM_TYPE_VOID:
1572 			break;
1573 		case RTE_FLOW_ITEM_TYPE_ETH:
1574 			size += sizeof(struct ibv_flow_spec_eth);
1575 			break;
1576 		case RTE_FLOW_ITEM_TYPE_VLAN:
1577 			size += sizeof(struct ibv_flow_spec_eth);
1578 			break;
1579 		case RTE_FLOW_ITEM_TYPE_IPV4:
1580 			size += sizeof(struct ibv_flow_spec_ipv4_ext);
1581 			break;
1582 		case RTE_FLOW_ITEM_TYPE_IPV6:
1583 			size += sizeof(struct ibv_flow_spec_ipv6);
1584 			break;
1585 		case RTE_FLOW_ITEM_TYPE_UDP:
1586 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1587 			break;
1588 		case RTE_FLOW_ITEM_TYPE_TCP:
1589 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1590 			break;
1591 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1592 			size += sizeof(struct ibv_flow_spec_tunnel);
1593 			break;
1594 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1595 			size += sizeof(struct ibv_flow_spec_tunnel);
1596 			break;
1597 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1598 		case RTE_FLOW_ITEM_TYPE_GRE:
1599 			size += sizeof(struct ibv_flow_spec_gre);
1600 			break;
1601 		case RTE_FLOW_ITEM_TYPE_MPLS:
1602 			size += sizeof(struct ibv_flow_spec_mpls);
1603 			break;
1604 #else
1605 		case RTE_FLOW_ITEM_TYPE_GRE:
1606 			size += sizeof(struct ibv_flow_spec_tunnel);
1607 			break;
1608 #endif
1609 		default:
1610 			break;
1611 		}
1612 	}
1613 	return size;
1614 }
1615 
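/*
 * Worked size example (illustrative; sizes are those of the ibv_flow_spec_*
 * structs on the build target): the pattern eth / ipv4 / udp with a DROP
 * action needs
 *
 *	sizeof(struct ibv_flow_spec_eth) +
 *	sizeof(struct ibv_flow_spec_ipv4_ext) +
 *	sizeof(struct ibv_flow_spec_tcp_udp) +
 *	sizeof(struct ibv_flow_spec_action_drop)
 *
 * bytes, which flow_verbs_prepare() below checks against
 * MLX5_VERBS_MAX_SPEC_ACT_SIZE.
 */
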
1616 /**
1617  * Internal preparation function. Allocate mlx5_flow with the required size.
1618  * The required size is calculated based on the actions and items. The
1619  * returned mlx5_flow is filled later by flow_verbs_translate().
1620  *
1621  * @param[in] dev
1622  *   Pointer to Ethernet device.
1623  * @param[in] attr
1624  *   Pointer to the flow attributes.
1625  * @param[in] items
1626  *   Pointer to the list of items.
1627  * @param[in] actions
1628  *   Pointer to the list of actions.
1629  * @param[out] error
1630  *   Pointer to the error structure.
1631  *
1632  * @return
1633  *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1634  *   is set.
1635  */
1636 static struct mlx5_flow *
1637 flow_verbs_prepare(struct rte_eth_dev *dev,
1638 		   const struct rte_flow_attr *attr __rte_unused,
1639 		   const struct rte_flow_item items[],
1640 		   const struct rte_flow_action actions[],
1641 		   struct rte_flow_error *error)
1642 {
1643 	size_t size = 0;
1644 	uint32_t handle_idx = 0;
1645 	struct mlx5_flow *dev_flow;
1646 	struct mlx5_flow_handle *dev_handle;
1647 	struct mlx5_priv *priv = dev->data->dev_private;
1648 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1649 
1650 	MLX5_ASSERT(wks);
1651 	size += flow_verbs_get_actions_size(actions);
1652 	size += flow_verbs_get_items_size(items);
1653 	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
1654 		rte_flow_error_set(error, E2BIG,
1655 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1656 				   "Verbs spec/action size too large");
1657 		return NULL;
1658 	}
1659 	/* Guard against overflowing the temporary device flow array. */
1660 	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
1661 		rte_flow_error_set(error, ENOSPC,
1662 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1663 				   "not free temporary device flow");
1664 		return NULL;
1665 	}
1666 	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1667 				   &handle_idx);
1668 	if (!dev_handle) {
1669 		rte_flow_error_set(error, ENOMEM,
1670 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1671 				   "not enough memory to create flow handle");
1672 		return NULL;
1673 	}
1674 	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
1675 	dev_flow = &wks->flows[wks->flow_idx++];
1676 	dev_flow->handle = dev_handle;
1677 	dev_flow->handle_idx = handle_idx;
1678 	/* Memcpy is used, only size needs to be cleared to 0. */
1679 	dev_flow->verbs.size = 0;
1680 	dev_flow->verbs.attr.num_of_specs = 0;
1681 	dev_flow->ingress = attr->ingress;
1682 	dev_flow->hash_fields = 0;
1683 	/* Need to set transfer attribute: not supported in Verbs mode. */
1684 	return dev_flow;
1685 }
1686 
1687 /**
1688  * Fill the flow with Verbs specifications.
1689  *
1690  * @param[in] dev
1691  *   Pointer to Ethernet device.
1692  * @param[in, out] dev_flow
1693  *   Pointer to the mlx5 flow.
1694  * @param[in] attr
1695  *   Pointer to the flow attributes.
1696  * @param[in] items
1697  *   Pointer to the list of items.
1698  * @param[in] actions
1699  *   Pointer to the list of actions.
1700  * @param[out] error
1701  *   Pointer to the error structure.
1702  *
1703  * @return
1704  *   0 on success, a negative errno value otherwise and rte_errno is set.
1705  */
1706 static int
1707 flow_verbs_translate(struct rte_eth_dev *dev,
1708 		     struct mlx5_flow *dev_flow,
1709 		     const struct rte_flow_attr *attr,
1710 		     const struct rte_flow_item items[],
1711 		     const struct rte_flow_action actions[],
1712 		     struct rte_flow_error *error)
1713 {
1714 	uint64_t item_flags = 0;
1715 	uint64_t action_flags = 0;
1716 	uint64_t priority = attr->priority;
1717 	uint32_t subpriority = 0;
1718 	struct mlx5_priv *priv = dev->data->dev_private;
1719 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1720 	struct mlx5_flow_rss_desc *rss_desc;
1721 
1722 	MLX5_ASSERT(wks);
1723 	rss_desc = &wks->rss_desc;
1724 	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
1725 		priority = priv->config.flow_prio - 1;
1726 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1727 		int ret;
1728 
1729 		switch (actions->type) {
1730 		case RTE_FLOW_ACTION_TYPE_VOID:
1731 			break;
1732 		case RTE_FLOW_ACTION_TYPE_FLAG:
1733 			flow_verbs_translate_action_flag(dev_flow, actions);
1734 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1735 			dev_flow->handle->mark = 1;
1736 			break;
1737 		case RTE_FLOW_ACTION_TYPE_MARK:
1738 			flow_verbs_translate_action_mark(dev_flow, actions);
1739 			action_flags |= MLX5_FLOW_ACTION_MARK;
1740 			dev_flow->handle->mark = 1;
1741 			break;
1742 		case RTE_FLOW_ACTION_TYPE_DROP:
1743 			flow_verbs_translate_action_drop(dev_flow, actions);
1744 			action_flags |= MLX5_FLOW_ACTION_DROP;
1745 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
1746 			break;
1747 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1748 			flow_verbs_translate_action_queue(rss_desc, actions);
1749 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1750 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
1751 			break;
1752 		case RTE_FLOW_ACTION_TYPE_RSS:
1753 			flow_verbs_translate_action_rss(rss_desc, actions);
1754 			action_flags |= MLX5_FLOW_ACTION_RSS;
1755 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
1756 			break;
1757 		case RTE_FLOW_ACTION_TYPE_COUNT:
1758 			ret = flow_verbs_translate_action_count(dev_flow,
1759 								actions,
1760 								dev, error);
1761 			if (ret < 0)
1762 				return ret;
1763 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1764 			break;
1765 		default:
1766 			return rte_flow_error_set(error, ENOTSUP,
1767 						  RTE_FLOW_ERROR_TYPE_ACTION,
1768 						  actions,
1769 						  "action not supported");
1770 		}
1771 	}
1772 	dev_flow->act_flags = action_flags;
1773 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1774 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1775 
1776 		switch (items->type) {
1777 		case RTE_FLOW_ITEM_TYPE_VOID:
1778 			break;
1779 		case RTE_FLOW_ITEM_TYPE_ETH:
1780 			flow_verbs_translate_item_eth(dev_flow, items,
1781 						      item_flags);
1782 			subpriority = MLX5_PRIORITY_MAP_L2;
1783 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1784 					       MLX5_FLOW_LAYER_OUTER_L2;
1785 			break;
1786 		case RTE_FLOW_ITEM_TYPE_VLAN:
1787 			flow_verbs_translate_item_vlan(dev_flow, items,
1788 						       item_flags);
1789 			subpriority = MLX5_PRIORITY_MAP_L2;
1790 			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1791 						MLX5_FLOW_LAYER_INNER_VLAN) :
1792 					       (MLX5_FLOW_LAYER_OUTER_L2 |
1793 						MLX5_FLOW_LAYER_OUTER_VLAN);
1794 			break;
1795 		case RTE_FLOW_ITEM_TYPE_IPV4:
1796 			flow_verbs_translate_item_ipv4(dev_flow, items,
1797 						       item_flags);
1798 			subpriority = MLX5_PRIORITY_MAP_L3;
1799 			dev_flow->hash_fields |=
1800 				mlx5_flow_hashfields_adjust
1801 					(rss_desc, tunnel,
1802 					 MLX5_IPV4_LAYER_TYPES,
1803 					 MLX5_IPV4_IBV_RX_HASH);
1804 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1805 					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1806 			break;
1807 		case RTE_FLOW_ITEM_TYPE_IPV6:
1808 			flow_verbs_translate_item_ipv6(dev_flow, items,
1809 						       item_flags);
1810 			subpriority = MLX5_PRIORITY_MAP_L3;
1811 			dev_flow->hash_fields |=
1812 				mlx5_flow_hashfields_adjust
1813 					(rss_desc, tunnel,
1814 					 MLX5_IPV6_LAYER_TYPES,
1815 					 MLX5_IPV6_IBV_RX_HASH);
1816 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1817 					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1818 			break;
1819 		case RTE_FLOW_ITEM_TYPE_TCP:
1820 			flow_verbs_translate_item_tcp(dev_flow, items,
1821 						      item_flags);
1822 			subpriority = MLX5_PRIORITY_MAP_L4;
1823 			dev_flow->hash_fields |=
1824 				mlx5_flow_hashfields_adjust
1825 					(rss_desc, tunnel, ETH_RSS_TCP,
1826 					 (IBV_RX_HASH_SRC_PORT_TCP |
1827 					  IBV_RX_HASH_DST_PORT_TCP));
1828 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1829 					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
1830 			break;
1831 		case RTE_FLOW_ITEM_TYPE_UDP:
1832 			flow_verbs_translate_item_udp(dev_flow, items,
1833 						      item_flags);
1834 			subpriority = MLX5_PRIORITY_MAP_L4;
1835 			dev_flow->hash_fields |=
1836 				mlx5_flow_hashfields_adjust
1837 					(rss_desc, tunnel, ETH_RSS_UDP,
1838 					 (IBV_RX_HASH_SRC_PORT_UDP |
1839 					  IBV_RX_HASH_DST_PORT_UDP));
1840 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1841 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
1842 			break;
1843 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1844 			flow_verbs_translate_item_vxlan(dev_flow, items,
1845 							item_flags);
1846 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1847 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
1848 			break;
1849 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1850 			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1851 							    item_flags);
1852 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1853 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1854 			break;
1855 		case RTE_FLOW_ITEM_TYPE_GRE:
1856 			flow_verbs_translate_item_gre(dev_flow, items,
1857 						      item_flags);
1858 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1859 			item_flags |= MLX5_FLOW_LAYER_GRE;
1860 			break;
1861 		case RTE_FLOW_ITEM_TYPE_MPLS:
1862 			flow_verbs_translate_item_mpls(dev_flow, items,
1863 						       item_flags);
1864 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1865 			item_flags |= MLX5_FLOW_LAYER_MPLS;
1866 			break;
1867 		default:
1868 			return rte_flow_error_set(error, ENOTSUP,
1869 						  RTE_FLOW_ERROR_TYPE_ITEM,
1870 						  items, "item not supported");
1871 		}
1872 	}
1873 	dev_flow->handle->layers = item_flags;
1874 	/* Other members of attr will be ignored. */
1875 	dev_flow->verbs.attr.priority =
1876 		mlx5_flow_adjust_priority(dev, priority, subpriority);
1877 	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
1878 	return 0;
1879 }
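/*
 * Example (application-side sketch, hypothetical and not part of the
 * driver): a rule exercising the translation above -- an ingress
 * ETH/IPV4/UDP pattern spread by RSS over two queues. The caller is
 * assumed to own a started port "port_id" with queues 0 and 1.
 */
static __rte_unused struct rte_flow *
example_udp_rss_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static uint16_t queues[] = { 0, 1 };
	const struct rte_flow_attr flow_attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss_conf = {
		.types = ETH_RSS_UDP, /* Matches the UDP hash fields above. */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action rule_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &flow_attr, pattern, rule_actions, err);
}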
1880 
1881 /**
1882  * Remove the flow from the NIC but keep it in memory.
1883  *
1884  * @param[in] dev
1885  *   Pointer to the Ethernet device structure.
1886  * @param[in, out] flow
1887  *   Pointer to flow structure.
1888  */
1889 static void
1890 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1891 {
1892 	struct mlx5_priv *priv = dev->data->dev_private;
1893 	struct mlx5_flow_handle *handle;
1894 	uint32_t handle_idx;
1895 
1896 	if (!flow)
1897 		return;
1898 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
1899 		       handle_idx, handle, next) {
1900 		if (handle->drv_flow) {
1901 			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
1902 			handle->drv_flow = NULL;
1903 		}
1904 		/* hrxq is a union; only touch it when the QUEUE fate flag is set. */
1905 		if (handle->rix_hrxq &&
1906 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
1907 			mlx5_hrxq_release(dev, handle->rix_hrxq);
1908 			handle->rix_hrxq = 0;
1909 		}
1910 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
1911 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
1912 	}
1913 }
1914 
1915 /**
1916  * Remove the flow from the NIC and the memory.
1917  *
1918  * @param[in] dev
1919  *   Pointer to the Ethernet device structure.
1920  * @param[in, out] flow
1921  *   Pointer to flow structure.
1922  */
1923 static void
1924 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1925 {
1926 	struct mlx5_priv *priv = dev->data->dev_private;
1927 	struct mlx5_flow_handle *handle;
1928 
1929 	if (!flow)
1930 		return;
1931 	flow_verbs_remove(dev, flow);
1932 	while (flow->dev_handles) {
1933 		uint32_t tmp_idx = flow->dev_handles;
1934 
1935 		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1936 				   tmp_idx);
1937 		if (!handle)
1938 			return;
1939 		flow->dev_handles = handle->next.next;
1940 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
1941 			   tmp_idx);
1942 	}
1943 	if (flow->counter) {
1944 		flow_verbs_counter_release(dev, flow->counter);
1945 		flow->counter = 0;
1946 	}
1947 }
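/*
 * Lifecycle note (sketch, driver-internal): .remove() is the stop-time
 * half -- hardware rules and queue references go away but the rte_flow
 * and its handles stay allocated -- while .destroy() also returns every
 * handle to the ipool and releases the counter.
 */
#if 0
	flow_verbs_remove(dev, flow);	/* port stop: rule kept in memory */
	/* ... port start: the generic layer re-applies the rule ... */
	flow_verbs_destroy(dev, flow);	/* flow removal: full teardown */
#endif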
1948 
1949 /**
1950  * Apply the flow to the NIC.
1951  *
1952  * @param[in] dev
1953  *   Pointer to the Ethernet device structure.
1954  * @param[in, out] flow
1955  *   Pointer to flow structure.
1956  * @param[out] error
1957  *   Pointer to error structure.
1958  *
1959  * @return
1960  *   0 on success, a negative errno value otherwise and rte_errno is set.
1961  */
1962 static int
1963 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1964 		 struct rte_flow_error *error)
1965 {
1966 	struct mlx5_priv *priv = dev->data->dev_private;
1967 	struct mlx5_flow_handle *handle;
1968 	struct mlx5_flow *dev_flow;
1969 	struct mlx5_hrxq *hrxq;
1970 	uint32_t dev_handles;
1971 	int err;
1972 	int idx;
1973 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
1974 
1975 	MLX5_ASSERT(wks);
1976 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
1977 		dev_flow = &wks->flows[idx];
1978 		handle = dev_flow->handle;
1979 		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
1980 			MLX5_ASSERT(priv->drop_queue.hrxq);
1981 			hrxq = priv->drop_queue.hrxq;
1982 		} else {
1983 			uint32_t hrxq_idx;
1984 			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
1985 
1986 			MLX5_ASSERT(rss_desc->queue_num);
1987 			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
1988 			rss_desc->hash_fields = dev_flow->hash_fields;
1989 			rss_desc->tunnel = !!(handle->layers &
1990 					      MLX5_FLOW_LAYER_TUNNEL);
1991 			rss_desc->shared_rss = 0;
1992 			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
1993 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1994 					      hrxq_idx);
1995 			if (!hrxq) {
1996 				rte_flow_error_set
1997 					(error, rte_errno,
1998 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1999 					 "cannot get hash queue");
2000 				goto error;
2001 			}
2002 			handle->rix_hrxq = hrxq_idx;
2003 		}
2004 		MLX5_ASSERT(hrxq);
2005 		handle->drv_flow = mlx5_glue->create_flow
2006 					(hrxq->qp, &dev_flow->verbs.attr);
2007 		if (!handle->drv_flow) {
2008 			rte_flow_error_set(error, errno,
2009 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2010 					   NULL,
2011 					   "hardware refuses to create flow");
2012 			goto error;
2013 		}
2014 		if (priv->vmwa_context &&
2015 		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
2016 			/*
2017 			 * The rule contains the VLAN pattern.
2018 			 * For VF we are going to create VLAN
2019 			 * interface to make hypervisor set correct
2020 			 * e-Switch vport context.
2021 			 */
2022 			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
2023 		}
2024 	}
2025 	return 0;
2026 error:
2027 	err = rte_errno; /* Save rte_errno before cleanup. */
2028 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2029 		       dev_handles, handle, next) {
2030 		/* hrxq is a union; only touch it when the QUEUE fate flag is set. */
2031 		if (handle->rix_hrxq &&
2032 		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
2033 			mlx5_hrxq_release(dev, handle->rix_hrxq);
2034 			handle->rix_hrxq = 0;
2035 		}
2036 		if (handle->vf_vlan.tag && handle->vf_vlan.created)
2037 			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
2038 	}
2039 	rte_errno = err; /* Restore rte_errno. */
2040 	return -rte_errno;
2041 }
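/*
 * The error path above follows the save/restore-errno idiom: the
 * release helpers may themselves set rte_errno, so the original failure
 * code is latched first and restored before returning. In isolation
 * (hypothetical cleanup callee):
 */
#if 0
	err = rte_errno;		/* Latch the failure code. */
	release_resources(dev, flow);	/* May clobber rte_errno. */
	rte_errno = err;		/* Report the original error. */
	return -rte_errno;
#endif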
2042 
2043 /**
2044  * Query a flow.
2045  *
2046  * @see rte_flow_query()
2047  * @see rte_flow_ops
2048  */
2049 static int
2050 flow_verbs_query(struct rte_eth_dev *dev,
2051 		 struct rte_flow *flow,
2052 		 const struct rte_flow_action *actions,
2053 		 void *data,
2054 		 struct rte_flow_error *error)
2055 {
2056 	int ret = -EINVAL;
2057 
2058 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2059 		switch (actions->type) {
2060 		case RTE_FLOW_ACTION_TYPE_VOID:
2061 			break;
2062 		case RTE_FLOW_ACTION_TYPE_COUNT:
2063 			ret = flow_verbs_counter_query(dev, flow, data, error);
2064 			break;
2065 		default:
2066 			return rte_flow_error_set(error, ENOTSUP,
2067 						  RTE_FLOW_ERROR_TYPE_ACTION,
2068 						  actions,
2069 						  "action not supported");
2070 		}
2071 	}
2072 	return ret;
2073 }
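/*
 * Example (application-side sketch, hypothetical): reading the counter
 * of a flow created with a COUNT action; rte_flow_query() ends up in
 * flow_verbs_query() above for this driver.
 */
static __rte_unused int
example_read_counter(uint16_t port_id, struct rte_flow *flow,
		     struct rte_flow_query_count *count)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error err;

	count->reset = 1; /* Clear the counter after reading it. */
	return rte_flow_query(port_id, flow, &count_action, count, &err);
}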
2074 
2075 static int
2076 flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
2077 		       uint32_t flags)
2078 {
2079 	RTE_SET_USED(dev);
2080 	RTE_SET_USED(domains);
2081 	RTE_SET_USED(flags);
2082 
2083 	return 0;
2084 }
2085 
2086 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
2087 	.validate = flow_verbs_validate,
2088 	.prepare = flow_verbs_prepare,
2089 	.translate = flow_verbs_translate,
2090 	.apply = flow_verbs_apply,
2091 	.remove = flow_verbs_remove,
2092 	.destroy = flow_verbs_destroy,
2093 	.query = flow_verbs_query,
2094 	.sync_domain = flow_verbs_sync_domain,
2095 };
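/*
 * Dispatch note (simplified sketch; the real selection lives in
 * mlx5_flow.c and the config field name is assumed here): when DV flow
 * is not enabled, the generic mlx5 flow layer falls back to this Verbs
 * ops table.
 */
#if 0
	const struct mlx5_flow_driver_ops *fops =
		priv->config.dv_flow_en ? &mlx5_flow_dv_drv_ops :
					  &mlx5_flow_verbs_drv_ops;
#endif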
2096