xref: /dpdk/drivers/net/mlx5/mlx5_flow_verbs.c (revision 89b5642d0d45c22c0ceab57efe3fab3b49ff4324)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #include <netinet/in.h>
6 #include <sys/queue.h>
7 #include <stdalign.h>
8 #include <stdint.h>
9 #include <string.h>
10 
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_ip.h>
18 
19 #include <mlx5_glue.h>
20 #include <mlx5_prm.h>
21 #include <mlx5_malloc.h>
22 
23 #include "mlx5_defs.h"
24 #include "mlx5.h"
25 #include "mlx5_flow.h"
26 #include "mlx5_rx.h"
27 #include "mlx5_flow_os.h"
28 
29 #define VERBS_SPEC_INNER(item_flags) \
30 	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
31 
32 /* Verbs specification header. */
33 struct ibv_spec_header {
34 	enum ibv_flow_spec_type type;
35 	uint16_t size;
36 };
37 
38 /**
39  * Discover the maximum number of priorities available.
40  *
41  * @param[in] dev
42  *   Pointer to the Ethernet device structure.
43  * @param[in] vprio
44  *   Expected result variants.
45  * @param[in] vprio_n
46  *   Number of entries in @p vprio array.
47  * @return
48  *   Number of supported flow priorities on success, a negative errno
49  *   value otherwise and rte_errno is set.
50  */
51 static int
52 flow_verbs_discover_priorities(struct rte_eth_dev *dev,
53 			       const uint16_t *vprio, int vprio_n)
54 {
55 	struct mlx5_priv *priv = dev->data->dev_private;
56 	struct {
57 		struct ibv_flow_attr attr;
58 		struct ibv_flow_spec_eth eth;
59 		struct ibv_flow_spec_action_drop drop;
60 	} flow_attr = {
61 		.attr = {
62 			.num_of_specs = 2,
63 			.port = (uint8_t)priv->dev_port,
64 		},
65 		.eth = {
66 			.type = IBV_FLOW_SPEC_ETH,
67 			.size = sizeof(struct ibv_flow_spec_eth),
68 		},
69 		.drop = {
70 			.size = sizeof(struct ibv_flow_spec_action_drop),
71 			.type = IBV_FLOW_SPEC_ACTION_DROP,
72 		},
73 	};
74 	struct ibv_flow *flow;
75 	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
76 	int i;
77 	int priority = 0;
78 
79 #if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
80 	/* If DevX is supported, the driver supports 16 Verbs flow priorities. */
81 	priority = 16;
82 	goto out;
83 #endif
84 	if (!drop->qp) {
85 		rte_errno = ENOTSUP;
86 		return -rte_errno;
87 	}
88 	for (i = 0; i != vprio_n; i++) {
89 		flow_attr.attr.priority = vprio[i] - 1;
90 		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
91 		if (!flow)
92 			break;
93 		claim_zero(mlx5_glue->destroy_flow(flow));
94 		priority = vprio[i];
95 	}
96 #if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
97 out:
98 #endif
99 	DRV_LOG(INFO, "port %u supported flow priorities:"
100 		" 0-%d for ingress or egress root table,"
101 		" 0-%d for non-root table or transfer root table.",
102 		dev->data->port_id, priority - 2,
103 		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
104 	return priority;
105 }
106 
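/*
 * Illustrative sketch only, not part of the driver: how a caller could probe
 * the supported priority range with the function above. The candidate array
 * below is an assumption for demonstration purposes and the helper name is
 * hypothetical; the real probing logic lives in the generic flow layer.
 */
static int __rte_unused
flow_verbs_discover_priorities_example(struct rte_eth_dev *dev)
{
	static const uint16_t vprio[] = { 8, 16 };

	/* Returns e.g. 8 or 16 on success, a negative errno otherwise. */
	return flow_verbs_discover_priorities(dev, vprio, RTE_DIM(vprio));
}
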
107 /**
108  * Get Verbs flow counter by index.
109  *
110  * @param[in] dev
111  *   Pointer to the Ethernet device structure.
112  * @param[in] idx
113  *   mlx5 flow counter index in the container.
114  * @param[out] ppool
115  *   mlx5 flow counter pool in the container.
116  *
117  * @return
118  *   A pointer to the counter, NULL otherwise.
119  */
120 static struct mlx5_flow_counter *
121 flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
122 			      uint32_t idx,
123 			      struct mlx5_flow_counter_pool **ppool)
124 {
125 	struct mlx5_priv *priv = dev->data->dev_private;
126 	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
127 	struct mlx5_flow_counter_pool *pool;
128 
129 	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
130 	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
131 	MLX5_ASSERT(pool);
132 	if (ppool)
133 		*ppool = pool;
134 	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
135 }
136 
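/*
 * Illustrative helper (hypothetical name, unused by the driver): decode a
 * counter index into pool number and offset exactly as done above. Indices
 * are 1-based and may carry a shared-counter flag that the mask with
 * MLX5_CNT_SHARED_OFFSET - 1 strips; e.g. assuming 512 counters per pool,
 * index 515 maps to pool 1, offset 2.
 */
static void __rte_unused
flow_verbs_counter_idx_decode_example(uint32_t idx, uint32_t *pool_no,
				      uint32_t *offset)
{
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	*pool_no = idx / MLX5_COUNTERS_PER_POOL;
	*offset = idx % MLX5_COUNTERS_PER_POOL;
}
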
137 /**
138  * Create Verbs flow counter with Verbs library.
139  *
140  * @param[in] dev
141  *   Pointer to the Ethernet device structure.
142  * @param[in, out] counter
143  *   mlx5 flow counter object, contains the counter id,
144  *   handle of created Verbs flow counter is returned
145  *   in cs field (if counters are supported).
146  *
147  * @return
148  *   0 on success, otherwise a negative errno value is returned
149  *   and rte_errno is set.
150  */
151 static int
152 flow_verbs_counter_create(struct rte_eth_dev *dev,
153 			  struct mlx5_flow_counter *counter)
154 {
155 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
156 	struct mlx5_priv *priv = dev->data->dev_private;
157 	struct ibv_context *ctx = priv->sh->cdev->ctx;
158 	struct ibv_counter_set_init_attr init = {
159 			 .counter_set_id = counter->shared_info.id};
160 
161 	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
162 	if (!counter->dcs_when_free) {
163 		rte_errno = ENOTSUP;
164 		return -ENOTSUP;
165 	}
166 	return 0;
167 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
168 	struct mlx5_priv *priv = dev->data->dev_private;
169 	struct ibv_context *ctx = priv->sh->cdev->ctx;
170 	struct ibv_counters_init_attr init = {0};
171 	struct ibv_counter_attach_attr attach;
172 	int ret;
173 
174 	memset(&attach, 0, sizeof(attach));
175 	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
176 	if (!counter->dcs_when_free) {
177 		rte_errno = ENOTSUP;
178 		return -ENOTSUP;
179 	}
180 	attach.counter_desc = IBV_COUNTER_PACKETS;
181 	attach.index = 0;
182 	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
183 	if (!ret) {
184 		attach.counter_desc = IBV_COUNTER_BYTES;
185 		attach.index = 1;
186 		ret = mlx5_glue->attach_counters
187 					(counter->dcs_when_free, &attach, NULL);
188 	}
189 	if (ret) {
190 		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
191 		counter->dcs_when_free = NULL;
192 		rte_errno = ret;
193 		return -ret;
194 	}
195 	return 0;
196 #else
197 	(void)dev;
198 	(void)counter;
199 	rte_errno = ENOTSUP;
200 	return -ENOTSUP;
201 #endif
202 }
203 
204 /**
205  * Get a flow counter.
206  *
207  * @param[in] dev
208  *   Pointer to the Ethernet device structure.
209  * @param[in] id
210  *   Counter identifier.
211  *
212  * @return
213  *   Index to the counter on success, 0 otherwise and rte_errno is set.
214  */
215 static uint32_t
216 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
217 {
218 	struct mlx5_priv *priv = dev->data->dev_private;
219 	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
220 	struct mlx5_flow_counter_pool *pool = NULL;
221 	struct mlx5_flow_counter *cnt = NULL;
222 	uint32_t n_valid = cmng->n_valid;
223 	uint32_t pool_idx, cnt_idx;
224 	uint32_t i;
225 	int ret;
226 
227 	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
228 		pool = cmng->pools[pool_idx];
229 		if (!pool)
230 			continue;
231 		cnt = TAILQ_FIRST(&pool->counters[0]);
232 		if (cnt)
233 			break;
234 	}
235 	if (!cnt) {
236 		uint32_t size;
237 
238 		if (n_valid == MLX5_COUNTER_POOLS_MAX_NUM) {
239 			DRV_LOG(ERR, "All counters are in use, try again later.");
240 			rte_errno = EAGAIN;
241 			return 0;
242 		}
243 		/* Allocate memory for a new pool. */
244 		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
245 		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
246 		if (!pool)
247 			return 0;
248 		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
249 			cnt = MLX5_POOL_GET_CNT(pool, i);
250 			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
251 		}
252 		cnt = MLX5_POOL_GET_CNT(pool, 0);
253 		cmng->pools[n_valid] = pool;
254 		pool_idx = n_valid;
255 		cmng->n_valid++;
256 	}
257 	TAILQ_REMOVE(&pool->counters[0], cnt, next);
258 	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
259 	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
260 	/* Create counter with Verbs. */
261 	ret = flow_verbs_counter_create(dev, cnt);
262 	if (!ret) {
263 		cnt->dcs_when_active = cnt->dcs_when_free;
264 		cnt->hits = 0;
265 		cnt->bytes = 0;
266 		return cnt_idx;
267 	}
268 	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
269 	/* Some error occurred in Verbs library. */
270 	rte_errno = -ret;
271 	return 0;
272 }
273 
274 /**
275  * Release a flow counter.
276  *
277  * @param[in] dev
278  *   Pointer to the Ethernet device structure.
279  * @param[in] counter
280  *   Index to the counter handler.
281  */
282 static void
283 flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
284 {
285 	struct mlx5_flow_counter_pool *pool;
286 	struct mlx5_flow_counter *cnt;
287 
288 	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
289 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
290 	claim_zero(mlx5_glue->destroy_counter_set
291 			((struct ibv_counter_set *)cnt->dcs_when_active));
292 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
293 	claim_zero(mlx5_glue->destroy_counters
294 				((struct ibv_counters *)cnt->dcs_when_active));
295 #endif
296 	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
297 }
298 
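/*
 * Illustrative lifecycle sketch (hypothetical helper, not part of the
 * driver): acquire a counter index with the allocator above and hand it
 * back. On failure rte_errno is set, e.g. EAGAIN when all pools are full.
 */
static void __rte_unused
flow_verbs_counter_lifecycle_example(struct rte_eth_dev *dev)
{
	uint32_t cnt_idx = flow_verbs_counter_new(dev, 0);

	if (!cnt_idx)
		return;
	/* ... reference the counter from a flow, query it, ... */
	flow_verbs_counter_release(dev, cnt_idx);
}
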
299 /**
300  * Query a flow counter via Verbs library call.
301  *
302  * @see rte_flow_query()
303  * @see rte_flow_ops
304  */
305 static int
306 flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
307 			 struct rte_flow *flow, void *data,
308 			 struct rte_flow_error *error)
309 {
310 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
311 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
312 	if (flow->counter) {
313 		struct mlx5_flow_counter_pool *pool;
314 		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
315 						(dev, flow->counter, &pool);
316 		struct rte_flow_query_count *qc = data;
317 		uint64_t counters[2] = {0, 0};
318 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
319 		struct ibv_query_counter_set_attr query_cs_attr = {
320 			.dcs_when_free = (struct ibv_counter_set *)
321 						cnt->dcs_when_active,
322 			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
323 		};
324 		struct ibv_counter_set_data query_out = {
325 			.out = counters,
326 			.outlen = 2 * sizeof(uint64_t),
327 		};
328 		int err = mlx5_glue->query_counter_set(&query_cs_attr,
329 						       &query_out);
330 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
331 		int err = mlx5_glue->query_counters
332 			((struct ibv_counters *)cnt->dcs_when_active, counters,
333 				RTE_DIM(counters),
334 				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
335 #endif
336 		if (err)
337 			return rte_flow_error_set
338 				(error, err,
339 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
340 				 NULL,
341 				 "cannot read counter");
342 		qc->hits_set = 1;
343 		qc->bytes_set = 1;
344 		qc->hits = counters[0] - cnt->hits;
345 		qc->bytes = counters[1] - cnt->bytes;
346 		if (qc->reset) {
347 			cnt->hits = counters[0];
348 			cnt->bytes = counters[1];
349 		}
350 		return 0;
351 	}
352 	return rte_flow_error_set(error, EINVAL,
353 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
354 				  NULL,
355 				  "flow does not have counter");
356 #else
357 	(void)flow;
358 	(void)data;
359 	return rte_flow_error_set(error, ENOTSUP,
360 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
361 				  NULL,
362 				  "counters are not available");
363 #endif
364 }
365 
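/*
 * Illustrative application-side sketch (hypothetical helper name): the query
 * above is normally reached through the public rte_flow API. Assumes @p flow
 * was created on @p port_id with a COUNT action.
 */
static int __rte_unused
flow_verbs_query_count_example(uint16_t port_id, struct rte_flow *flow,
			       uint64_t *hits, uint64_t *bytes)
{
	struct rte_flow_query_count qc = { .reset = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;
	int ret = rte_flow_query(port_id, flow, &action, &qc, &error);

	if (ret)
		return ret;
	*hits = qc.hits;
	*bytes = qc.bytes;
	return 0;
}
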
366 /**
367  * Add a verbs item specification into @p verbs.
368  *
369  * @param[out] verbs
370  *   Pointer to verbs structure.
371  * @param[in] src
372  *   Create specification.
373  * @param[in] size
374  *   Size in bytes of the specification to copy.
375  */
376 static void
377 flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
378 		    void *src, unsigned int size)
379 {
380 	void *dst;
381 
382 	if (!verbs)
383 		return;
384 	MLX5_ASSERT(verbs->specs);
385 	dst = (void *)(verbs->specs + verbs->size);
386 	memcpy(dst, src, size);
387 	++verbs->attr.num_of_specs;
388 	verbs->size += size;
389 }
390 
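/*
 * Layout sketch (illustrative, hypothetical helper): successive calls append
 * specifications back to back, so after the two calls below the buffer holds
 * [ibv_flow_spec_eth][ibv_flow_spec_tcp_udp], verbs->size is the byte total
 * and attr.num_of_specs equals 2. @p verbs must point at an initialized
 * workspace with a valid specs buffer.
 */
static void __rte_unused
flow_verbs_spec_add_example(struct mlx5_flow_verbs_workspace *verbs)
{
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(struct ibv_flow_spec_eth),
	};
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(struct ibv_flow_spec_tcp_udp),
	};

	flow_verbs_spec_add(verbs, &eth, sizeof(eth));
	flow_verbs_spec_add(verbs, &udp, sizeof(udp));
}
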
391 /**
392  * Convert the @p item into a Verbs specification. This function assumes that
393  * the input is valid and that there is space to insert the requested item
394  * into the flow.
395  *
396  * @param[in, out] dev_flow
397  *   Pointer to dev_flow structure.
398  * @param[in] item
399  *   Item specification.
400  * @param[in] item_flags
401  *   Parsed item flags.
402  */
403 static void
404 flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
405 			      const struct rte_flow_item *item,
406 			      uint64_t item_flags)
407 {
408 	const struct rte_flow_item_eth *spec = item->spec;
409 	const struct rte_flow_item_eth *mask = item->mask;
410 	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
411 	struct ibv_flow_spec_eth eth = {
412 		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
413 		.size = size,
414 	};
415 
416 	if (!mask)
417 		mask = &rte_flow_item_eth_mask;
418 	if (spec) {
419 		unsigned int i;
420 
421 		memcpy(&eth.val.dst_mac, spec->hdr.dst_addr.addr_bytes,
422 			RTE_ETHER_ADDR_LEN);
423 		memcpy(&eth.val.src_mac, spec->hdr.src_addr.addr_bytes,
424 			RTE_ETHER_ADDR_LEN);
425 		eth.val.ether_type = spec->hdr.ether_type;
426 		memcpy(&eth.mask.dst_mac, mask->hdr.dst_addr.addr_bytes,
427 			RTE_ETHER_ADDR_LEN);
428 		memcpy(&eth.mask.src_mac, mask->hdr.src_addr.addr_bytes,
429 			RTE_ETHER_ADDR_LEN);
430 		eth.mask.ether_type = mask->hdr.ether_type;
431 		/* Remove unwanted bits from values. */
432 		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
433 			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
434 			eth.val.src_mac[i] &= eth.mask.src_mac[i];
435 		}
436 		eth.val.ether_type &= eth.mask.ether_type;
437 	}
438 	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
439 }
440 
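/*
 * Example pattern item (illustrative, hypothetical names) consumed by the
 * translator above: match one destination MAC exactly, ignore the source
 * MAC and EtherType.
 */
static const struct rte_flow_item_eth flow_verbs_eth_spec_example = {
	.hdr.dst_addr.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static const struct rte_flow_item_eth flow_verbs_eth_mask_example = {
	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
static const struct rte_flow_item __rte_unused flow_verbs_eth_item_example = {
	.type = RTE_FLOW_ITEM_TYPE_ETH,
	.spec = &flow_verbs_eth_spec_example,
	.mask = &flow_verbs_eth_mask_example,
};
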
441 /**
442  * Update the VLAN tag in the Verbs Ethernet specification.
443  * This function assumes that the input is valid and there is space to add
444  * the requested item.
445  *
446  * @param[in, out] attr
447  *   Pointer to Verbs attributes structure.
448  * @param[in] eth
449  *   Verbs structure containing the VLAN information to copy.
450  */
451 static void
452 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
453 			    struct ibv_flow_spec_eth *eth)
454 {
455 	unsigned int i;
456 	const enum ibv_flow_spec_type search = eth->type;
457 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
458 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
459 
460 	for (i = 0; i != attr->num_of_specs; ++i) {
461 		if (hdr->type == search) {
462 			struct ibv_flow_spec_eth *e =
463 				(struct ibv_flow_spec_eth *)hdr;
464 
465 			e->val.vlan_tag = eth->val.vlan_tag;
466 			e->mask.vlan_tag = eth->mask.vlan_tag;
467 			e->val.ether_type = eth->val.ether_type;
468 			e->mask.ether_type = eth->mask.ether_type;
469 			break;
470 		}
471 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
472 	}
473 }
474 
475 /**
476  * Convert the @p item into a Verbs specification. This function assumes that
477  * the input is valid and that there is space to insert the requested item
478  * into the flow.
479  *
480  * @param[in, out] dev_flow
481  *   Pointer to dev_flow structure.
482  * @param[in] item
483  *   Item specification.
484  * @param[in] item_flags
485  *   Parsed item flags.
486  */
487 static void
488 flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
489 			       const struct rte_flow_item *item,
490 			       uint64_t item_flags)
491 {
492 	const struct rte_flow_item_vlan *spec = item->spec;
493 	const struct rte_flow_item_vlan *mask = item->mask;
494 	unsigned int size = sizeof(struct ibv_flow_spec_eth);
495 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
496 	struct ibv_flow_spec_eth eth = {
497 		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
498 		.size = size,
499 	};
500 	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
501 				      MLX5_FLOW_LAYER_OUTER_L2;
502 
503 	if (!mask)
504 		mask = &rte_flow_item_vlan_mask;
505 	if (spec) {
506 		eth.val.vlan_tag = spec->hdr.vlan_tci;
507 		eth.mask.vlan_tag = mask->hdr.vlan_tci;
508 		eth.val.vlan_tag &= eth.mask.vlan_tag;
509 		eth.val.ether_type = spec->hdr.eth_proto;
510 		eth.mask.ether_type = mask->hdr.eth_proto;
511 		eth.val.ether_type &= eth.mask.ether_type;
512 	}
513 	if (!(item_flags & l2m))
514 		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
515 	else
516 		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
517 	if (!tunnel)
518 		dev_flow->handle->vf_vlan.tag =
519 			rte_be_to_cpu_16(spec->hdr.vlan_tci) & 0x0fff;
520 }
521 
522 /**
523  * Convert the @p item into a Verbs specification. This function assumes that
524  * the input is valid and that there is space to insert the requested item
525  * into the flow.
526  *
527  * @param[in, out] dev_flow
528  *   Pointer to dev_flow structure.
529  * @param[in] item
530  *   Item specification.
531  * @param[in] item_flags
532  *   Parsed item flags.
533  */
534 static void
535 flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
536 			       const struct rte_flow_item *item,
537 			       uint64_t item_flags)
538 {
539 	const struct rte_flow_item_ipv4 *spec = item->spec;
540 	const struct rte_flow_item_ipv4 *mask = item->mask;
541 	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
542 	struct ibv_flow_spec_ipv4_ext ipv4 = {
543 		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
544 		.size = size,
545 	};
546 
547 	if (!mask)
548 		mask = &rte_flow_item_ipv4_mask;
549 	if (spec) {
550 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
551 			.src_ip = spec->hdr.src_addr,
552 			.dst_ip = spec->hdr.dst_addr,
553 			.proto = spec->hdr.next_proto_id,
554 			.tos = spec->hdr.type_of_service,
555 		};
556 		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
557 			.src_ip = mask->hdr.src_addr,
558 			.dst_ip = mask->hdr.dst_addr,
559 			.proto = mask->hdr.next_proto_id,
560 			.tos = mask->hdr.type_of_service,
561 		};
562 		/* Remove unwanted bits from values. */
563 		ipv4.val.src_ip &= ipv4.mask.src_ip;
564 		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
565 		ipv4.val.proto &= ipv4.mask.proto;
566 		ipv4.val.tos &= ipv4.mask.tos;
567 	}
568 	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
569 }
570 
571 /**
572  * Convert the @p item into a Verbs specification. This function assumes that
573  * the input is valid and that there is space to insert the requested item
574  * into the flow.
575  *
576  * @param[in, out] dev_flow
577  *   Pointer to dev_flow structure.
578  * @param[in] item
579  *   Item specification.
580  * @param[in] item_flags
581  *   Parsed item flags.
582  */
583 static void
584 flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
585 			       const struct rte_flow_item *item,
586 			       uint64_t item_flags)
587 {
588 	const struct rte_flow_item_ipv6 *spec = item->spec;
589 	const struct rte_flow_item_ipv6 *mask = item->mask;
590 	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
591 	struct ibv_flow_spec_ipv6 ipv6 = {
592 		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
593 		.size = size,
594 	};
595 
596 	if (!mask)
597 		mask = &rte_flow_item_ipv6_mask;
598 	if (spec) {
599 		unsigned int i;
600 		uint32_t vtc_flow_val;
601 		uint32_t vtc_flow_mask;
602 
603 		memcpy(&ipv6.val.src_ip, &spec->hdr.src_addr,
604 		       RTE_DIM(ipv6.val.src_ip));
605 		memcpy(&ipv6.val.dst_ip, &spec->hdr.dst_addr,
606 		       RTE_DIM(ipv6.val.dst_ip));
607 		memcpy(&ipv6.mask.src_ip, &mask->hdr.src_addr,
608 		       RTE_DIM(ipv6.mask.src_ip));
609 		memcpy(&ipv6.mask.dst_ip, &mask->hdr.dst_addr,
610 		       RTE_DIM(ipv6.mask.dst_ip));
611 		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
612 		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
613 		ipv6.val.flow_label =
614 			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
615 					 RTE_IPV6_HDR_FL_SHIFT);
616 		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
617 					 RTE_IPV6_HDR_TC_SHIFT;
618 		ipv6.val.next_hdr = spec->hdr.proto;
619 		ipv6.mask.flow_label =
620 			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
621 					 RTE_IPV6_HDR_FL_SHIFT);
622 		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
623 					  RTE_IPV6_HDR_TC_SHIFT;
624 		ipv6.mask.next_hdr = mask->hdr.proto;
625 		/* Remove unwanted bits from values. */
626 		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
627 			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
628 			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
629 		}
630 		ipv6.val.flow_label &= ipv6.mask.flow_label;
631 		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
632 		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
633 	}
634 	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
635 }
636 
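/*
 * Illustrative helper (hypothetical name, unused by the driver): vtc_flow
 * packs [31:28] version, [27:20] traffic class and [19:0] flow label, so
 * e.g. vtc_flow = 0x60102345 yields traffic class 0x01 and flow label
 * 0x02345, matching the extraction performed by the translator above.
 */
static uint8_t __rte_unused
flow_verbs_ipv6_tc_example(rte_be32_t vtc_flow)
{
	return (rte_be_to_cpu_32(vtc_flow) & RTE_IPV6_HDR_TC_MASK) >>
	       RTE_IPV6_HDR_TC_SHIFT;
}
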
637 /**
638  * Convert the @p item into a Verbs specification. This function assumes that
639  * the input is valid and that there is space to insert the requested item
640  * into the flow.
641  *
642  * @param[in, out] dev_flow
643  *   Pointer to dev_flow structure.
644  * @param[in] item
645  *   Item specification.
646  * @param[in] item_flags
647  *   Parsed item flags.
648  */
649 static void
650 flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
651 			      const struct rte_flow_item *item,
652 			      uint64_t item_flags __rte_unused)
653 {
654 	const struct rte_flow_item_tcp *spec = item->spec;
655 	const struct rte_flow_item_tcp *mask = item->mask;
656 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
657 	struct ibv_flow_spec_tcp_udp tcp = {
658 		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
659 		.size = size,
660 	};
661 
662 	if (!mask)
663 		mask = &rte_flow_item_tcp_mask;
664 	if (spec) {
665 		tcp.val.dst_port = spec->hdr.dst_port;
666 		tcp.val.src_port = spec->hdr.src_port;
667 		tcp.mask.dst_port = mask->hdr.dst_port;
668 		tcp.mask.src_port = mask->hdr.src_port;
669 		/* Remove unwanted bits from values. */
670 		tcp.val.src_port &= tcp.mask.src_port;
671 		tcp.val.dst_port &= tcp.mask.dst_port;
672 	}
673 	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
674 }
675 
676 /**
677  * Convert the @p item into a Verbs specification. This function assumes that
678  * the input is valid and that there is space to insert the requested item
679  * into the flow.
680  *
681  * @param[in, out] dev_flow
682  *   Pointer to dev_flow structure.
683  * @param[in] item
684  *   Item specification.
685  * @param[in] item_flags
686  *   Parsed item flags.
687  */
688 #ifdef HAVE_IBV_FLOW_SPEC_ESP
689 static void
690 flow_verbs_translate_item_esp(struct mlx5_flow *dev_flow,
691 			      const struct rte_flow_item *item,
692 			      uint64_t item_flags __rte_unused)
693 {
694 	const struct rte_flow_item_esp *spec = item->spec;
695 	const struct rte_flow_item_esp *mask = item->mask;
696 	unsigned int size = sizeof(struct ibv_flow_spec_esp);
697 	struct ibv_flow_spec_esp esp = {
698 		.type = IBV_FLOW_SPEC_ESP | VERBS_SPEC_INNER(item_flags),
699 		.size = size,
700 	};
701 
702 	if (!mask)
703 		mask = &rte_flow_item_esp_mask;
704 	if (spec) {
705 		esp.val.spi = spec->hdr.spi & mask->hdr.spi;
706 		esp.mask.spi = mask->hdr.spi;
707 	}
708 	flow_verbs_spec_add(&dev_flow->verbs, &esp, size);
709 }
710 #endif
711 
712 /**
713  * Convert the @p item into a Verbs specification. This function assumes that
714  * the input is valid and that there is space to insert the requested item
715  * into the flow.
716  *
717  * @param[in, out] dev_flow
718  *   Pointer to dev_flow structure.
719  * @param[in] item
720  *   Item specification.
721  * @param[in] item_flags
722  *   Parsed item flags.
723  */
724 static void
725 flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
726 			      const struct rte_flow_item *item,
727 			      uint64_t item_flags __rte_unused)
728 {
729 	const struct rte_flow_item_udp *spec = item->spec;
730 	const struct rte_flow_item_udp *mask = item->mask;
731 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
732 	struct ibv_flow_spec_tcp_udp udp = {
733 		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
734 		.size = size,
735 	};
736 
737 	if (!mask)
738 		mask = &rte_flow_item_udp_mask;
739 	if (spec) {
740 		udp.val.dst_port = spec->hdr.dst_port;
741 		udp.val.src_port = spec->hdr.src_port;
742 		udp.mask.dst_port = mask->hdr.dst_port;
743 		udp.mask.src_port = mask->hdr.src_port;
744 		/* Remove unwanted bits from values. */
745 		udp.val.src_port &= udp.mask.src_port;
746 		udp.val.dst_port &= udp.mask.dst_port;
747 	}
748 	item++;
749 	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
750 		item++;
751 	if (!(udp.val.dst_port & udp.mask.dst_port)) {
752 		switch (item->type) {
753 		case RTE_FLOW_ITEM_TYPE_VXLAN:
754 			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
755 			udp.mask.dst_port = 0xffff;
756 			break;
757 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
758 			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
759 			udp.mask.dst_port = 0xffff;
760 			break;
761 		case RTE_FLOW_ITEM_TYPE_MPLS:
762 			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
763 			udp.mask.dst_port = 0xffff;
764 			break;
765 		default:
766 			break;
767 		}
768 	}
769 
770 	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
771 }
772 
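/*
 * Example pattern (illustrative, hypothetical name) that exercises the
 * defaulting above: with no explicit UDP destination port, the VXLAN item
 * that follows makes the translator match the IANA port 4789
 * (MLX5_UDP_PORT_VXLAN) with a full mask instead.
 */
static const struct rte_flow_item __rte_unused
flow_verbs_vxlan_pattern_example[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
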
773 /**
774  * Convert the @p item into a Verbs specification. This function assumes that
775  * the input is valid and that there is space to insert the requested item
776  * into the flow.
777  *
778  * @param[in, out] dev_flow
779  *   Pointer to dev_flow structure.
780  * @param[in] item
781  *   Item specification.
782  * @param[in] item_flags
783  *   Parsed item flags.
784  */
785 static void
786 flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
787 				const struct rte_flow_item *item,
788 				uint64_t item_flags __rte_unused)
789 {
790 	const struct rte_flow_item_vxlan *spec = item->spec;
791 	const struct rte_flow_item_vxlan *mask = item->mask;
792 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
793 	struct ibv_flow_spec_tunnel vxlan = {
794 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
795 		.size = size,
796 	};
797 	union vni {
798 		uint32_t vlan_id;
799 		uint8_t vni[4];
800 	} id = { .vlan_id = 0, };
801 
802 	if (!mask)
803 		mask = &rte_flow_item_vxlan_mask;
804 	if (spec) {
805 		memcpy(&id.vni[1], spec->hdr.vni, 3);
806 		vxlan.val.tunnel_id = id.vlan_id;
807 		memcpy(&id.vni[1], mask->hdr.vni, 3);
808 		vxlan.mask.tunnel_id = id.vlan_id;
809 		/* Remove unwanted bits from values. */
810 		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
811 	}
812 	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
813 }
814 
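/*
 * Illustrative helper (hypothetical name, unused by the driver): the 24-bit
 * VNI lands in bytes 1..3 of the 32-bit tunnel_id, exactly as the memcpy
 * above places it; the union is the same trick the translator uses, there
 * under the historical field name "vlan_id".
 */
static uint32_t __rte_unused
flow_verbs_vni_pack_example(const uint8_t vni[3])
{
	union {
		uint32_t tunnel_id;
		uint8_t bytes[4];
	} id = { .tunnel_id = 0, };

	memcpy(&id.bytes[1], vni, 3);
	return id.tunnel_id;
}
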
815 /**
816  * Convert the @p item into a Verbs specification. This function assumes that
817  * the input is valid and that there is space to insert the requested item
818  * into the flow.
819  *
820  * @param[in, out] dev_flow
821  *   Pointer to dev_flow structure.
822  * @param[in] item
823  *   Item specification.
824  * @param[in] item_flags
825  *   Parsed item flags.
826  */
827 static void
828 flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
829 				    const struct rte_flow_item *item,
830 				    uint64_t item_flags __rte_unused)
831 {
832 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
833 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
834 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
835 	struct ibv_flow_spec_tunnel vxlan_gpe = {
836 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
837 		.size = size,
838 	};
839 	union vni {
840 		uint32_t vlan_id;
841 		uint8_t vni[4];
842 	} id = { .vlan_id = 0, };
843 
844 	if (!mask)
845 		mask = &rte_flow_item_vxlan_gpe_mask;
846 	if (spec) {
847 		memcpy(&id.vni[1], spec->hdr.vni, 3);
848 		vxlan_gpe.val.tunnel_id = id.vlan_id;
849 		memcpy(&id.vni[1], mask->hdr.vni, 3);
850 		vxlan_gpe.mask.tunnel_id = id.vlan_id;
851 		/* Remove unwanted bits from values. */
852 		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
853 	}
854 	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
855 }
856 
857 /**
858  * Update the protocol in Verbs IPv4/IPv6 spec.
859  *
860  * @param[in, out] attr
861  *   Pointer to Verbs attributes structure.
862  * @param[in] search
863  *   Specification type to search in order to update the IP protocol.
864  * @param[in] protocol
865  *   Protocol value to set if none is present in the specification.
866  */
867 static void
868 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
869 				       enum ibv_flow_spec_type search,
870 				       uint8_t protocol)
871 {
872 	unsigned int i;
873 	struct ibv_spec_header *hdr;
874 
875 	if (!attr)
876 		return;
877 	hdr = (struct ibv_spec_header *)((uint8_t *)attr + sizeof(*attr));
878 	for (i = 0; i != attr->num_of_specs; ++i) {
879 		if (hdr->type == search) {
880 			union {
881 				struct ibv_flow_spec_ipv4_ext *ipv4;
882 				struct ibv_flow_spec_ipv6 *ipv6;
883 			} ip;
884 
885 			switch (search) {
886 			case IBV_FLOW_SPEC_IPV4_EXT:
887 				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
888 				if (!ip.ipv4->val.proto) {
889 					ip.ipv4->val.proto = protocol;
890 					ip.ipv4->mask.proto = 0xff;
891 				}
892 				break;
893 			case IBV_FLOW_SPEC_IPV6:
894 				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
895 				if (!ip.ipv6->val.next_hdr) {
896 					ip.ipv6->val.next_hdr = protocol;
897 					ip.ipv6->mask.next_hdr = 0xff;
898 				}
899 				break;
900 			default:
901 				break;
902 			}
903 			break;
904 		}
905 		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
906 	}
907 }
908 
909 /**
910  * Reserve space for GRE spec in spec buffer.
911  *
912  * @param[in,out] dev_flow
913  *   Pointer to dev_flow structure.
914  *
915  * @return
916  *   Pointer to reserved space in spec buffer.
917  */
918 static uint8_t *
919 flow_verbs_reserve_gre(struct mlx5_flow *dev_flow)
920 {
921 	uint8_t *buffer;
922 	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
923 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
924 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
925 	struct ibv_flow_spec_tunnel tunnel = {
926 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
927 		.size = size,
928 	};
929 #else
930 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
931 	struct ibv_flow_spec_gre tunnel = {
932 		.type = IBV_FLOW_SPEC_GRE,
933 		.size = size,
934 	};
935 #endif
936 
937 	buffer = verbs->specs + verbs->size;
938 	flow_verbs_spec_add(verbs, &tunnel, size);
939 	return buffer;
940 }
941 
942 /**
943  * Convert the @p item into a Verbs specification. This function assumes that
944  * the input is valid and that Verbs specification will be placed in
945  * the pre-reserved space.
946  *
947  * @param[in, out] dev_flow
948  *   Pointer to dev_flow structure.
949  * @param[in, out] gre_spec
950  *   Pointer to space reserved for GRE spec.
951  * @param[in] item
952  *   Item specification.
953  * @param[in] item_flags
954  *   Parsed item flags.
955  */
956 static void
957 flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
958 			      uint8_t *gre_spec,
959 			      const struct rte_flow_item *item __rte_unused,
960 			      uint64_t item_flags)
961 {
962 	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
963 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
964 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
965 	struct ibv_flow_spec_tunnel tunnel = {
966 		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
967 		.size = size,
968 	};
969 #else
970 	static const struct rte_flow_item_gre empty_gre = {0,};
971 	const struct rte_flow_item_gre *spec = item->spec;
972 	const struct rte_flow_item_gre *mask = item->mask;
973 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
974 	struct ibv_flow_spec_gre tunnel = {
975 		.type = IBV_FLOW_SPEC_GRE,
976 		.size = size,
977 	};
978 
979 	if (!spec) {
980 		spec = &empty_gre;
981 		mask = &empty_gre;
982 	} else {
983 		if (!mask)
984 			mask = &rte_flow_item_gre_mask;
985 	}
986 	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
987 	tunnel.val.protocol = spec->protocol;
988 	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
989 	tunnel.mask.protocol = mask->protocol;
990 	/* Remove unwanted bits from values. */
991 	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
992 	tunnel.val.key &= tunnel.mask.key;
993 	if (tunnel.mask.protocol) {
994 		tunnel.val.protocol &= tunnel.mask.protocol;
995 	} else {
996 		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
997 		if (tunnel.val.protocol) {
998 			tunnel.mask.protocol = 0xFFFF;
999 			tunnel.val.protocol =
1000 				rte_cpu_to_be_16(tunnel.val.protocol);
1001 		}
1002 	}
1003 #endif
1004 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
1005 		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
1006 						       IBV_FLOW_SPEC_IPV4_EXT,
1007 						       IPPROTO_GRE);
1008 	else
1009 		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
1010 						       IBV_FLOW_SPEC_IPV6,
1011 						       IPPROTO_GRE);
1012 	MLX5_ASSERT(gre_spec);
1013 	memcpy(gre_spec, &tunnel, size);
1014 }
1015 
1016 /**
1017  * Convert the @p item into a Verbs specification. This function assumes that
1018  * the input is valid and that there is space to insert the requested item
1019  * into the flow.
1020  *
1021  * @param[in, out] dev_flow
1022  *   Pointer to dev_flow structure.
1023  * @param[in] item
1024  *   Item specification.
1025  * @param[in] item_flags
1026  *   Parsed item flags.
1027  */
1028 static void
1029 flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
1030 			       const struct rte_flow_item *item __rte_unused,
1031 			       uint64_t item_flags __rte_unused)
1032 {
1033 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1034 	const struct rte_flow_item_mpls *spec = item->spec;
1035 	const struct rte_flow_item_mpls *mask = item->mask;
1036 	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
1037 	struct ibv_flow_spec_mpls mpls = {
1038 		.type = IBV_FLOW_SPEC_MPLS,
1039 		.size = size,
1040 	};
1041 
1042 	if (!mask)
1043 		mask = &rte_flow_item_mpls_mask;
1044 	if (spec) {
1045 		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
1046 		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
1047 		/* Remove unwanted bits from values.  */
1048 		mpls.val.label &= mpls.mask.label;
1049 	}
1050 	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
1051 #endif
1052 }
1053 
1054 /**
1055  * Convert the @p action into a Verbs specification. This function assumes that
1056  * the input is valid and that there is space to insert the requested action
1057  * into the flow.
1058  *
1059  * @param[in] dev_flow
1060  *   Pointer to mlx5_flow.
1061  * @param[in] action
1062  *   Action configuration.
1063  */
1064 static void
1065 flow_verbs_translate_action_drop
1066 	(struct mlx5_flow *dev_flow,
1067 	 const struct rte_flow_action *action __rte_unused)
1068 {
1069 	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1070 	struct ibv_flow_spec_action_drop drop = {
1071 			.type = IBV_FLOW_SPEC_ACTION_DROP,
1072 			.size = size,
1073 	};
1074 
1075 	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
1076 }
1077 
1078 /**
1079  * Convert the @p action into a Verbs specification. This function assumes that
1080  * the input is valid and that there is space to insert the requested action
1081  * into the flow.
1082  *
1083  * @param[in] rss_desc
1084  *   Pointer to mlx5_flow_rss_desc.
1085  * @param[in] action
1086  *   Action configuration.
1087  */
1088 static void
1089 flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
1090 				  const struct rte_flow_action *action)
1091 {
1092 	const struct rte_flow_action_queue *queue = action->conf;
1093 
1094 	rss_desc->queue[0] = queue->index;
1095 	rss_desc->queue_num = 1;
1096 }
1097 
1098 /**
1099  * Convert the @p action into a Verbs specification. This function assumes that
1100  * the input is valid and that there is space to insert the requested action
1101  * into the flow.
1102  *
1103  * @param[in] rss_desc
1104  *   Pointer to mlx5_flow_rss_desc.
1105  * @param[in] action
1106  *   Action configuration.
1107  */
1108 static void
1109 flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
1110 				const struct rte_flow_action *action)
1111 {
1112 	const struct rte_flow_action_rss *rss = action->conf;
1113 	const uint8_t *rss_key;
1114 
1115 	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
1116 	rss_desc->queue_num = rss->queue_num;
1117 	/* NULL RSS key indicates default RSS key. */
1118 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
1119 	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
1120 	/*
1121 	 * rss->level and rss->types should be set in advance when expanding
1122 	 * items for RSS.
1123 	 */
1124 }
1125 
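/*
 * Example action configuration (illustrative, hypothetical names) consumed
 * by the translator above: two queues, default key (NULL), hashing on IPv4
 * and UDP.
 */
static const uint16_t flow_verbs_rss_queues_example[] = { 0, 1 };
static const struct rte_flow_action_rss __rte_unused
flow_verbs_rss_conf_example = {
	.types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_UDP,
	.key = NULL, /* NULL selects rss_hash_default_key. */
	.queue_num = RTE_DIM(flow_verbs_rss_queues_example),
	.queue = flow_verbs_rss_queues_example,
};
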
1126 /**
1127  * Convert the @p action into a Verbs specification. This function assumes that
1128  * the input is valid and that there is space to insert the requested action
1129  * into the flow.
1130  *
1131  * @param[in] dev_flow
1132  *   Pointer to mlx5_flow.
1133  * @param[in] action
1134  *   Action configuration.
1135  */
1136 static void
1137 flow_verbs_translate_action_flag
1138 	(struct mlx5_flow *dev_flow,
1139 	 const struct rte_flow_action *action __rte_unused)
1140 {
1141 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1142 	struct ibv_flow_spec_action_tag tag = {
1143 		.type = IBV_FLOW_SPEC_ACTION_TAG,
1144 		.size = size,
1145 		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
1146 	};
1147 
1148 	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
1149 }
1150 
1151 /**
1152  * Convert the @p action into a Verbs specification. This function assumes that
1153  * the input is valid and that there is space to insert the requested action
1154  * into the flow.
1155  *
1156  * @param[in] dev_flow
1157  *   Pointer to mlx5_flow.
1158  * @param[in] action
1159  *   Action configuration.
1160  */
1161 static void
1162 flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
1163 				 const struct rte_flow_action *action)
1164 {
1165 	const struct rte_flow_action_mark *mark = action->conf;
1166 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1167 	struct ibv_flow_spec_action_tag tag = {
1168 		.type = IBV_FLOW_SPEC_ACTION_TAG,
1169 		.size = size,
1170 		.tag_id = mlx5_flow_mark_set(mark->id),
1171 	};
1172 
1173 	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
1174 }
1175 
1176 /**
1177  * Convert the @p action into a Verbs specification. This function assumes that
1178  * the input is valid and that there is space to insert the requested action
1179  * into the flow.
1180  *
1181  * @param[in] dev
1182  *   Pointer to the Ethernet device structure.
1183  * @param[in] action
1184  *   Action configuration.
1185  * @param[in] dev_flow
1186  *   Pointer to mlx5_flow.
1187  * @param[out] error
1188  *   Pointer to error structure.
1189  *
1190  * @return
1191  *   0 on success, otherwise a negative errno value is returned and rte_errno is set.
1192  */
1193 static int
1194 flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
1195 				  const struct rte_flow_action *action,
1196 				  struct rte_eth_dev *dev,
1197 				  struct rte_flow_error *error)
1198 {
1199 	const struct rte_flow_action_count *count = action->conf;
1200 	struct rte_flow *flow = dev_flow->flow;
1201 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1202 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1203 	struct mlx5_flow_counter_pool *pool;
1204 	struct mlx5_flow_counter *cnt = NULL;
1205 	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
1206 	struct ibv_flow_spec_counter_action counter = {
1207 		.type = IBV_FLOW_SPEC_ACTION_COUNT,
1208 		.size = size,
1209 	};
1210 #endif
1211 
1212 	if (!flow->counter) {
1213 		flow->counter = flow_verbs_counter_new(dev, count->id);
1214 		if (!flow->counter)
1215 			return rte_flow_error_set(error, rte_errno,
1216 						  RTE_FLOW_ERROR_TYPE_ACTION,
1217 						  action,
1218 						  "cannot get counter"
1219 						  " context.");
1220 	}
1221 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
1222 	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
1223 	counter.counter_set_handle =
1224 		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
1225 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
1226 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1227 	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
1228 	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
1229 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
1230 #endif
1231 	return 0;
1232 }
1233 
1234 /**
1235  * Validates @p attributes of the flow rule.
1236  *
1237  * This function is used only when the legacy Verbs flow engine is in use.
1238  *
1239  * @param[in] dev
1240  *   Pointer to the Ethernet device structure.
1241  * @param[in] attributes
1242  *   Pointer to flow attributes
1243  * @param[out] error
1244  *   Pointer to error structure.
1245  *
1246  * @return
1247  *   0 on success, a negative errno value otherwise and rte_errno is set.
1248  */
1249 static int
1250 flow_verbs_validate_attributes(struct rte_eth_dev *dev,
1251 			       const struct rte_flow_attr *attributes,
1252 			       struct rte_flow_error *error)
1253 {
1254 	struct mlx5_priv *priv = dev->data->dev_private;
1255 	uint32_t priority_max = priv->sh->flow_max_priority - 1;
1256 
1257 	if (attributes->group)
1258 		return rte_flow_error_set(error, ENOTSUP,
1259 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1260 					  NULL, "groups are not supported");
1261 	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
1262 	    attributes->priority >= priority_max)
1263 		return rte_flow_error_set(error, ENOTSUP,
1264 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1265 					  NULL, "priority out of range");
1266 	if (attributes->egress)
1267 		return rte_flow_error_set(error, ENOTSUP,
1268 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1269 					  "egress is not supported");
1270 	if (attributes->transfer)
1271 		return rte_flow_error_set(error, ENOTSUP,
1272 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1273 					  NULL, "transfer is not supported");
1274 	if (!attributes->ingress)
1275 		return rte_flow_error_set(error, EINVAL,
1276 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1277 					  NULL,
1278 					  "ingress attribute is mandatory");
1279 	return 0;
1280 }
1281 
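/*
 * Minimal attributes (illustrative, hypothetical name) accepted by the check
 * above: ingress only, group 0, default priority.
 */
static const struct rte_flow_attr __rte_unused flow_verbs_attr_example = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
};
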
1282 /**
1283  * Internal validation function. For validating both actions and items.
1284  *
1285  * @param[in] dev
1286  *   Pointer to the Ethernet device structure.
1287  * @param[in] attr
1288  *   Pointer to the flow attributes.
1289  * @param[in] items
1290  *   Pointer to the list of items.
1291  * @param[in] actions
1292  *   Pointer to the list of actions.
1293  * @param[in] external
1294  *   This flow rule is created by a request external to the PMD.
1295  * @param[in] hairpin
1296  *   Number of hairpin TX actions, 0 means classic flow.
1297  * @param[out] error
1298  *   Pointer to the error structure.
1299  *
1300  * @return
1301  *   0 on success, a negative errno value otherwise and rte_errno is set.
1302  */
1303 static int
1304 flow_verbs_validate(struct rte_eth_dev *dev,
1305 		    const struct rte_flow_attr *attr,
1306 		    const struct rte_flow_item items[],
1307 		    const struct rte_flow_action actions[],
1308 		    bool external __rte_unused,
1309 		    int hairpin __rte_unused,
1310 		    struct rte_flow_error *error)
1311 {
1312 	int ret;
1313 	uint64_t action_flags = 0;
1314 	uint64_t item_flags = 0;
1315 	uint64_t last_item = 0;
1316 	uint8_t next_protocol = 0xff;
1317 	uint16_t ether_type = 0;
1318 	bool is_empty_vlan = false;
1319 	uint16_t udp_dport = 0;
1320 	/* Verbs interface does not support groups higher than 0. */
1321 	bool is_root = true;
1322 
1323 	if (items == NULL)
1324 		return -1;
1325 	ret = flow_verbs_validate_attributes(dev, attr, error);
1326 	if (ret < 0)
1327 		return ret;
1328 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1329 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1330 		int ret = 0;
1331 
1332 		switch (items->type) {
1333 #ifdef HAVE_IBV_FLOW_SPEC_ESP
1334 		case RTE_FLOW_ITEM_TYPE_ESP:
1335 			ret = mlx5_flow_os_validate_item_esp(dev, items,
1336 							     item_flags,
1337 							     next_protocol,
1338 							     error);
1339 			if (ret < 0)
1340 				return ret;
1341 			last_item = MLX5_FLOW_ITEM_ESP;
1342 			break;
1343 #endif
1344 		case RTE_FLOW_ITEM_TYPE_VOID:
1345 			break;
1346 		case RTE_FLOW_ITEM_TYPE_ETH:
1347 			ret = mlx5_flow_validate_item_eth(dev, items, item_flags,
1348 							  false, error);
1349 			if (ret < 0)
1350 				return ret;
1351 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1352 					     MLX5_FLOW_LAYER_OUTER_L2;
1353 			if (items->mask != NULL && items->spec != NULL) {
1354 				ether_type =
1355 					((const struct rte_flow_item_eth *)
1356 					 items->spec)->hdr.ether_type;
1357 				ether_type &=
1358 					((const struct rte_flow_item_eth *)
1359 					 items->mask)->hdr.ether_type;
1360 				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
1361 					is_empty_vlan = true;
1362 				ether_type = rte_be_to_cpu_16(ether_type);
1363 			} else {
1364 				ether_type = 0;
1365 			}
1366 			break;
1367 		case RTE_FLOW_ITEM_TYPE_VLAN:
1368 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
1369 							   dev, error);
1370 			if (ret < 0)
1371 				return ret;
1372 			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1373 					      MLX5_FLOW_LAYER_INNER_VLAN) :
1374 					     (MLX5_FLOW_LAYER_OUTER_L2 |
1375 					      MLX5_FLOW_LAYER_OUTER_VLAN);
1376 			if (items->mask != NULL && items->spec != NULL) {
1377 				ether_type =
1378 					((const struct rte_flow_item_vlan *)
1379 					 items->spec)->hdr.eth_proto;
1380 				ether_type &=
1381 					((const struct rte_flow_item_vlan *)
1382 					 items->mask)->hdr.eth_proto;
1383 				ether_type = rte_be_to_cpu_16(ether_type);
1384 			} else {
1385 				ether_type = 0;
1386 			}
1387 			is_empty_vlan = false;
1388 			break;
1389 		case RTE_FLOW_ITEM_TYPE_IPV4:
1390 			ret = mlx5_flow_validate_item_ipv4
1391 						(dev, items, item_flags,
1392 						 last_item, ether_type, NULL,
1393 						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
1394 						 error);
1395 			if (ret < 0)
1396 				return ret;
1397 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1398 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1399 			if (items->mask != NULL &&
1400 			    ((const struct rte_flow_item_ipv4 *)
1401 			     items->mask)->hdr.next_proto_id) {
1402 				next_protocol =
1403 					((const struct rte_flow_item_ipv4 *)
1404 					 (items->spec))->hdr.next_proto_id;
1405 				next_protocol &=
1406 					((const struct rte_flow_item_ipv4 *)
1407 					 (items->mask))->hdr.next_proto_id;
1408 			} else {
1409 				/* Reset for inner layer. */
1410 				next_protocol = 0xff;
1411 			}
1412 			break;
1413 		case RTE_FLOW_ITEM_TYPE_IPV6:
1414 			ret = mlx5_flow_validate_item_ipv6(dev, items,
1415 							   item_flags,
1416 							   last_item,
1417 							   ether_type, NULL,
1418 							   error);
1419 			if (ret < 0)
1420 				return ret;
1421 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1422 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1423 			if (items->mask != NULL &&
1424 			    ((const struct rte_flow_item_ipv6 *)
1425 			     items->mask)->hdr.proto) {
1426 				next_protocol =
1427 					((const struct rte_flow_item_ipv6 *)
1428 					 items->spec)->hdr.proto;
1429 				next_protocol &=
1430 					((const struct rte_flow_item_ipv6 *)
1431 					 items->mask)->hdr.proto;
1432 			} else {
1433 				/* Reset for inner layer. */
1434 				next_protocol = 0xff;
1435 			}
1436 			break;
1437 		case RTE_FLOW_ITEM_TYPE_UDP:
1438 			ret = mlx5_flow_validate_item_udp(dev, items,
1439 							  item_flags,
1440 							  next_protocol,
1441 							  error);
1442 			if (ret < 0)
1443 				return ret;
1444 			const struct rte_flow_item_udp *spec = items->spec;
1445 			const struct rte_flow_item_udp *mask = items->mask;
1446 
1447 			if (!mask)
1448 				mask = &rte_flow_item_udp_mask;
1449 			if (spec != NULL)
1450 				udp_dport = rte_be_to_cpu_16
1451 						(spec->hdr.dst_port &
1452 						 mask->hdr.dst_port);
1453 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1454 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
1455 			break;
1456 		case RTE_FLOW_ITEM_TYPE_TCP:
1457 			ret = mlx5_flow_validate_item_tcp
1458 						(dev, items, item_flags,
1459 						 next_protocol,
1460 						 &rte_flow_item_tcp_mask,
1461 						 error);
1462 			if (ret < 0)
1463 				return ret;
1464 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1465 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
1466 			break;
1467 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1468 			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
1469 							    items, item_flags,
1470 							    is_root, error);
1471 			if (ret < 0)
1472 				return ret;
1473 			last_item = MLX5_FLOW_LAYER_VXLAN;
1474 			break;
1475 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1476 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
1477 								item_flags,
1478 								dev, error);
1479 			if (ret < 0)
1480 				return ret;
1481 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1482 			break;
1483 		case RTE_FLOW_ITEM_TYPE_GRE:
1484 			ret = mlx5_flow_validate_item_gre(dev, items, item_flags,
1485 							  next_protocol, error);
1486 			if (ret < 0)
1487 				return ret;
1488 			last_item = MLX5_FLOW_LAYER_GRE;
1489 			break;
1490 		case RTE_FLOW_ITEM_TYPE_MPLS:
1491 			ret = mlx5_flow_validate_item_mpls(dev, items,
1492 							   item_flags,
1493 							   last_item, error);
1494 			if (ret < 0)
1495 				return ret;
1496 			last_item = MLX5_FLOW_LAYER_MPLS;
1497 			break;
1498 		case RTE_FLOW_ITEM_TYPE_ICMP:
1499 		case RTE_FLOW_ITEM_TYPE_ICMP6:
1500 			return rte_flow_error_set(error, ENOTSUP,
1501 						  RTE_FLOW_ERROR_TYPE_ITEM,
1502 						  NULL, "ICMP/ICMP6 "
1503 						  "item not supported");
1504 		default:
1505 			return rte_flow_error_set(error, ENOTSUP,
1506 						  RTE_FLOW_ERROR_TYPE_ITEM,
1507 						  NULL, "item not supported");
1508 		}
1509 		item_flags |= last_item;
1510 	}
1511 	if (is_empty_vlan)
1512 		return rte_flow_error_set(error, ENOTSUP,
1513 						 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1514 		    "VLAN matching without vid specification is not supported");
1515 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1516 		switch (actions->type) {
1517 		case RTE_FLOW_ACTION_TYPE_VOID:
1518 			break;
1519 		case RTE_FLOW_ACTION_TYPE_FLAG:
1520 			ret = mlx5_flow_validate_action_flag(action_flags,
1521 							     attr,
1522 							     error);
1523 			if (ret < 0)
1524 				return ret;
1525 			action_flags |= MLX5_FLOW_ACTION_FLAG;
1526 			break;
1527 		case RTE_FLOW_ACTION_TYPE_MARK:
1528 			ret = mlx5_flow_validate_action_mark(dev, actions,
1529 							     action_flags,
1530 							     attr,
1531 							     error);
1532 			if (ret < 0)
1533 				return ret;
1534 			action_flags |= MLX5_FLOW_ACTION_MARK;
1535 			break;
1536 		case RTE_FLOW_ACTION_TYPE_DROP:
1537 			ret = mlx5_flow_validate_action_drop(dev,
1538 							     is_root,
1539 							     attr,
1540 							     error);
1541 			if (ret < 0)
1542 				return ret;
1543 			action_flags |= MLX5_FLOW_ACTION_DROP;
1544 			break;
1545 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1546 			ret = mlx5_flow_validate_action_queue(actions,
1547 							      action_flags, dev,
1548 							      attr,
1549 							      error);
1550 			if (ret < 0)
1551 				return ret;
1552 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
1553 			break;
1554 		case RTE_FLOW_ACTION_TYPE_RSS:
1555 			ret = mlx5_flow_validate_action_rss(actions,
1556 							    action_flags, dev,
1557 							    attr, item_flags,
1558 							    error);
1559 			if (ret < 0)
1560 				return ret;
1561 			action_flags |= MLX5_FLOW_ACTION_RSS;
1562 			break;
1563 		case RTE_FLOW_ACTION_TYPE_COUNT:
1564 			ret = mlx5_flow_validate_action_count(dev, attr, error);
1565 			if (ret < 0)
1566 				return ret;
1567 			action_flags |= MLX5_FLOW_ACTION_COUNT;
1568 			break;
1569 		default:
1570 			return rte_flow_error_set(error, ENOTSUP,
1571 						  RTE_FLOW_ERROR_TYPE_ACTION,
1572 						  actions,
1573 						  "action not supported");
1574 		}
1575 	}
1576 	/*
1577 	 * Validate the drop action mutual exclusion with other actions.
1578 	 * Drop action is mutually-exclusive with any other action, except for
1579 	 * Count action.
1580 	 */
1581 	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
1582 	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
1583 		return rte_flow_error_set(error, EINVAL,
1584 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1585 					  "Drop action is mutually-exclusive "
1586 					  "with any other action, except for "
1587 					  "Count action");
1588 	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1589 		return rte_flow_error_set(error, EINVAL,
1590 					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
1591 					  "no fate action is found");
1592 	return 0;
1593 }
1594 
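/*
 * End-to-end sketch (illustrative, application side, hypothetical name):
 * a rule that the validation above accepts - ingress, group 0, ETH/IPV4
 * pattern and QUEUE + COUNT actions - created through the public rte_flow
 * API.
 */
static struct rte_flow * __rte_unused
flow_verbs_create_example(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action_count count = { .id = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
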
1595 /**
1596  * Calculate the number of bytes needed for the action part of the Verbs
1597  * flow.
1598  *
1599  * @param[in] actions
1600  *   Pointer to the list of actions.
1601  *
1602  * @return
1603  *   The size of the memory needed for all actions.
1604  */
1605 static int
1606 flow_verbs_get_actions_size(const struct rte_flow_action actions[])
1607 {
1608 	int size = 0;
1609 
1610 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1611 		switch (actions->type) {
1612 		case RTE_FLOW_ACTION_TYPE_VOID:
1613 			break;
1614 		case RTE_FLOW_ACTION_TYPE_FLAG:
1615 			size += sizeof(struct ibv_flow_spec_action_tag);
1616 			break;
1617 		case RTE_FLOW_ACTION_TYPE_MARK:
1618 			size += sizeof(struct ibv_flow_spec_action_tag);
1619 			break;
1620 		case RTE_FLOW_ACTION_TYPE_DROP:
1621 			size += sizeof(struct ibv_flow_spec_action_drop);
1622 			break;
1623 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1624 			break;
1625 		case RTE_FLOW_ACTION_TYPE_RSS:
1626 			break;
1627 		case RTE_FLOW_ACTION_TYPE_COUNT:
1628 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1629 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1630 			size += sizeof(struct ibv_flow_spec_counter_action);
1631 #endif
1632 			break;
1633 		default:
1634 			break;
1635 		}
1636 	}
1637 	return size;
1638 }
1639 
1640 /**
1641  * Calculate the number of bytes needed for the item part of the Verbs
1642  * flow.
1643  *
1644  * @param[in] items
1645  *   Pointer to the list of items.
1646  *
1647  * @return
1648  *   The size of the memory needed for all items.
1649  */
1650 static int
1651 flow_verbs_get_items_size(const struct rte_flow_item items[])
1652 {
1653 	int size = 0;
1654 
1655 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1656 		switch (items->type) {
1657 		case RTE_FLOW_ITEM_TYPE_VOID:
1658 			break;
1659 		case RTE_FLOW_ITEM_TYPE_ETH:
1660 			size += sizeof(struct ibv_flow_spec_eth);
1661 			break;
1662 		case RTE_FLOW_ITEM_TYPE_VLAN:
1663 			size += sizeof(struct ibv_flow_spec_eth);
1664 			break;
1665 		case RTE_FLOW_ITEM_TYPE_IPV4:
1666 			size += sizeof(struct ibv_flow_spec_ipv4_ext);
1667 			break;
1668 		case RTE_FLOW_ITEM_TYPE_IPV6:
1669 			size += sizeof(struct ibv_flow_spec_ipv6);
1670 			break;
1671 		case RTE_FLOW_ITEM_TYPE_UDP:
1672 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1673 			break;
1674 		case RTE_FLOW_ITEM_TYPE_TCP:
1675 			size += sizeof(struct ibv_flow_spec_tcp_udp);
1676 			break;
1677 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1678 			size += sizeof(struct ibv_flow_spec_tunnel);
1679 			break;
1680 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1681 			size += sizeof(struct ibv_flow_spec_tunnel);
1682 			break;
1683 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1684 		case RTE_FLOW_ITEM_TYPE_GRE:
1685 			size += sizeof(struct ibv_flow_spec_gre);
1686 			break;
1687 		case RTE_FLOW_ITEM_TYPE_MPLS:
1688 			size += sizeof(struct ibv_flow_spec_mpls);
1689 			break;
1690 #else
1691 		case RTE_FLOW_ITEM_TYPE_GRE:
1692 			size += sizeof(struct ibv_flow_spec_tunnel);
1693 			break;
1694 #endif
1695 		default:
1696 			break;
1697 		}
1698 	}
1699 	return size;
1700 }
1701 
1702 /**
1703  * Internal preparation function. Allocate mlx5_flow with the required size.
1704  * The required size is calculated from the given actions and items. The
1705  * function also allocates and returns the flow handle to be filled later.
1706  *
1707  * @param[in] dev
1708  *   Pointer to Ethernet device.
1709  * @param[in] attr
1710  *   Pointer to the flow attributes.
1711  * @param[in] items
1712  *   Pointer to the list of items.
1713  * @param[in] actions
1714  *   Pointer to the list of actions.
1715  * @param[out] error
1716  *   Pointer to the error structure.
1717  *
1718  * @return
1719  *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1720  *   is set.
1721  */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overrunning the per-thread device flow array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				   &handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Specs are appended with memcpy(); only the sizes need clearing. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/*
	 * The transfer attribute would be set here,
	 * but it is not supported in Verbs mode.
	 */
	return dev_flow;
}

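/*
 * Context sketch (illustrative, not the literal caller): the generic mlx5
 * flow layer drives the ops registered at the end of this file roughly in
 * the order below when a flow is created. Error unwinding and the
 * multi-device-flow loop are omitted here.
 *
 * @code
 * const struct mlx5_flow_driver_ops *ops = &mlx5_flow_verbs_drv_ops;
 * struct mlx5_flow *dev_flow;
 *
 * ops->validate(...);	// reject unsupported items/actions early
 * dev_flow = ops->prepare(dev, attr, items, actions, error);
 * ops->translate(dev, dev_flow, attr, items, actions, error);
 * ops->apply(dev, flow, error);	// create the Verbs flow objects
 * @endcode
 */
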
/**
 * Fill the flow with verb spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	const struct rte_flow_item *tunnel_item = NULL;
	uint8_t *gre_spec = NULL;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->sh->flow_max_priority - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			if (dev_flow->hash_fields != 0)
				dev_flow->hash_fields |=
					mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
#ifdef HAVE_IBV_FLOW_SPEC_ESP
		case RTE_FLOW_ITEM_TYPE_ESP:
			flow_verbs_translate_item_esp(dev_flow, items,
						      item_flags);
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
				(rss_desc, tunnel,
				RTE_ETH_RSS_ESP,
				IBV_RX_HASH_IPSEC_SPI);
			item_flags |= MLX5_FLOW_ITEM_ESP;
			break;
#endif
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			/*
			 * Only reserve the spec space here; the GRE spec
			 * itself is filled in after the loop, once the
			 * inner layers are known from the following items.
			 */
			gre_spec = flow_verbs_reserve_gre(dev_flow);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			tunnel_item = items;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	if (item_flags & MLX5_FLOW_LAYER_GRE)
		flow_verbs_translate_item_gre(dev_flow, gre_spec,
					      tunnel_item, item_flags);
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}

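/*
 * Illustrative walk-through (not part of the driver): translating the
 * pattern ETH / IPV4 / UDP under an RSS action. Each layer raises the
 * match subpriority, and the L4 hash fields are only added once an L3
 * hash has been selected (the "hash_fields != 0" checks above).
 *
 * @code
 * // ETH item:  subpriority = MLX5_PRIORITY_MAP_L2
 * // IPV4 item: subpriority = MLX5_PRIORITY_MAP_L3,
 * //	hash_fields |= MLX5_IPV4_IBV_RX_HASH (if IPv4 RSS was requested)
 * // UDP item:  subpriority = MLX5_PRIORITY_MAP_L4,
 * //	hash_fields |= IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP
 * //	(only because hash_fields is already non-zero)
 * @endcode
 */
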
/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is a union; only touch it when the fate action is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

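/*
 * Note on the remove/destroy split: flow_verbs_remove() above only tears
 * down hardware state (Verbs flow objects, hash Rx queue references, VF
 * VLAN workaround interfaces) while the rte_flow bookkeeping stays
 * allocated, so the rule can be re-applied later, e.g. across a device
 * restart. flow_verbs_destroy() below calls it first, then frees the
 * handles and releases the counter as well.
 */
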
/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				   tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq = mlx5_hrxq_get(dev, rss_desc);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq->idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern: on a VF,
			 * create a VLAN interface so that the hypervisor
			 * programs the correct e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is a union; only touch it when the fate action is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

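/*
 * Sketch of the error-unwind idiom used above (illustrative): cleanup
 * helpers such as mlx5_hrxq_release() may overwrite rte_errno, so the
 * original failure cause is saved before the rollback and restored
 * afterwards, preserving the negative-errno convention of the flow API.
 *
 * @code
 * err = rte_errno;	// save the failure cause
 * // ... release hrxq references and VF VLAN interfaces acquired so far ...
 * rte_errno = err;	// restore it for the caller
 * return -rte_errno;
 * @endcode
 */
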
/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

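/*
 * Usage sketch (application side, illustrative): only the COUNT action
 * can be queried through this driver. The call goes through the generic
 * rte_flow_query() entry point, which dispatches here; an END-terminated
 * action list is passed since this handler iterates up to END.
 *
 * @code
 * struct rte_flow_query_count count = { .reset = 1 };
 * const struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error error;
 *
 * if (rte_flow_query(port_id, flow, actions, &count, &error) == 0)
 *	printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *	       count.hits, count.bytes);
 * @endcode
 */
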
/**
 * Sync the flow domains (no-op in Verbs mode).
 *
 * Rules created through Verbs take effect synchronously, so there is no
 * pending hardware state to flush; this stub only satisfies the driver
 * ops interface.
 *
 * @see rte_flow_ops
 */
static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);

	return 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.list_create = flow_legacy_list_create,
	.list_destroy = flow_legacy_list_destroy,
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
	.discover_priorities = flow_verbs_discover_priorities,
	.get_aged_flows = flow_null_get_aged_flows,
	.counter_alloc = flow_null_counter_allocate,
	.counter_free = flow_null_counter_free,
	.counter_query = flow_null_counter_query,
};
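
/*
 * The table above is what the generic mlx5 flow layer dispatches through
 * when the legacy Verbs engine is selected. The aged-flow and external
 * counter callbacks point at the flow_null_* stubs, so those features
 * are reported as unsupported when running over Verbs.
 */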
2205