/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() disallows multiple L3/L4 layers except in tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	/*
	 * If layers is already initialized, this dev_flow is the suffix
	 * flow and the layer flags were set by the prefix flow. Use the
	 * layer flags from the prefix flow, since the suffix flow may not
	 * contain the user-defined items once the flow is split.
	 */
	if (dev_flow->layers) {
		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

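/*
 * Illustrative example, not used by the driver: for a pattern of
 * ETH / IPV4 / UDP / END with no prefix flow, flow_dv_attr_init()
 * leaves attr as { .valid = 1, .ipv4 = 1, .udp = 1 }. When a tunnel
 * item (or an IPIP/IPv6-in-IP next protocol match) is seen and
 * tunnel_decap is set, attr is cleared so that only the layers
 * following the tunnel, i.e. the outermost ones after decap, are
 * accounted for.
 */
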
/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_color value to convert.
 *
 * @return
 *   mlx5 color value.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size here is in bits, unlike in the other tables! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

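/**
 * Detect IP-over-IP tunneling from the matched next protocol value
 * and update the item flags accordingly.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item (only used by assertions).
 * @param[in] next_protocol
 *   Matched IP protocol number.
 * @param[in,out] item_flags
 *   Item flags to update with MLX5_FLOW_LAYER_IPIP or
 *   MLX5_FLOW_LAYER_IPV6_ENCAP.
 * @param[in,out] tunnel
 *   Set to 1 when a tunnel is detected.
 */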
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared dv context. Locking occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and
 * representors are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		MLX5_ASSERT(!ret);
		(void)ret;
	}
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		MLX5_ASSERT(!ret);
		(void)ret;
	}
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;
	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}

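/*
 * Illustrative example, not used by the driver: the VLAN TCI is laid
 * out as PCP (bits 15:13), DEI (bit 12) and VID (bits 11:0). With
 * vlan->vlan_tci == 0x2005 (PCP 1, VID 5), applying OF_SET_VLAN_PCP
 * with vlan_pcp == 3 yields 0x6005, and then applying OF_SET_VLAN_VID
 * with vlan_vid == RTE_BE16(100) yields 0x6064.
 */
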
/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract, in bytes.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}

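/*
 * Illustrative example, not used by the driver: with
 * data = {0x12, 0x34, 0x56} and size == 3, flow_dv_fetch_field()
 * reads the first two bytes as a big-endian 16-bit value (0x1234),
 * shifts it left by 8 and ORs in the third byte, returning 0x123456
 * in host-endian format.
 */
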
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field
 * description and the item mask. Data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should also be presented in big-endian format.
	 * The mask must always be present; it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}

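/*
 * Illustrative example, not used by the driver: for a 4-byte field
 * with mask 0x00ffff00, flow_dv_convert_modify_action() computes
 * off_b = rte_bsf32(0x00ffff00) = 8 and size_b = 32 - 8 -
 * __builtin_clz(0x00ffff00) = 32 - 8 - 8 = 16, i.e. a 16-bit
 * modification starting at bit offset 8. A full 32-bit mask gives
 * size_b == 32, which is encoded as 0 in the command length field.
 */
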
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

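/*
 * Illustrative note, an assumption about the HW add semantics rather
 * than driver code: the decrement above is expressed as
 * MLX5_MODIFICATION_TYPE_ADD of 0xFF to the 8-bit TTL/hop-limit
 * field. Since the field is 8 bits wide, adding 0xFF is equivalent to
 * subtracting 1 modulo 256, e.g. a TTL of 64 becomes
 * (64 + 255) & 0xFF = 63.
 */
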
/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X using increments,
		 * add UINT32_MAX X times to Y: each addition of
		 * UINT32_MAX decrements Y by 1 (modulo 2^32).
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

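/*
 * Illustrative example, not used by the driver: to decrement the
 * sequence number by X, the code adds X * UINT32_MAX, which is
 * congruent to -X modulo 2^32. E.g. for X == 3 the added value is
 * 3 * 0xFFFFFFFF = 0x2FFFFFFFD, truncated to 0xFFFFFFFD == -3 in
 * 32-bit arithmetic. The same trick is used for the acknowledgment
 * number below.
 */
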
/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X using increments,
		 * add UINT32_MAX X times to Y: each addition of
		 * UINT32_MAX decrements Y by 1 (modulo 2^32).
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
	[REG_NONE] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NONE);
	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NONE);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores endianness because there is
			 * no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}

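/*
 * Illustrative example, not used by the driver: assume the reported
 * reg_c[0] mask dv_regc0_mask is 0x0000ff00. Copying to REG_C_0 then
 * uses reg_dst.offset = rte_bsf32(0x0000ff00) = 8, so the 8 available
 * bits are written at bit offset 8, and the source mask is narrowed
 * so only those 8 bits are copied. The bits outside the mask are
 * left untouched, as they are not available to the PMD.
 */
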
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		{4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
		{0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0].id = reg_to_field[reg];
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	int reg =
		mlx5_flow_get_reg_id(dev, attr->transfer ?
					  MLX5_METADATA_FDB :
					    attr->egress ?
					    MLX5_METADATA_TX :
					    MLX5_METADATA_RX, 0, error);
	if (reg < 0)
		return rte_flow_error_set(error,
					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "unavailable "
					  "metadata register");
	return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there are no endianness
	 * conversions for performance reasons; all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bits offset of IPv6 is not byte aligned,
	 * rdma-core only accepts the DSCP bits byte-aligned, starting
	 * from bit 0 to 5, to be compatible with IPv4. There is no need
	 * to shift the bits in the IPv6 case, as rdma-core expects a
	 * byte-aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

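/*
 * Illustrative note, not used by the driver: both DSCP conversions
 * above reduce the mask to the low six bits. For IPv4,
 * RTE_IPV4_HDR_DSCP_MASK >> 2 == 0xfc >> 2 == 0x3f, and for IPv6,
 * RTE_IPV6_HDR_DSCP_MASK >> 22 == 0x0fc00000 >> 22 == 0x3f, matching
 * the byte-aligned 6-bit DSCP value expected by rdma-core.
 */
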
/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	struct rte_flow_item_meta nic_mask = {
		.data = UINT32_MAX
	};
	int reg;
	int ret;

	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "data cannot be zero");
	if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		if (!mlx5_flow_ext_mreg_supported(dev))
			return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
		reg = flow_dv_get_metadata_reg(dev, attr, error);
		if (reg < 0)
			return reg;
		if (reg == REG_B)
			return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on reg_b "
					  "isn't supported");
		if (reg != REG_A)
			nic_mask.data = priv->sh->dv_meta_mask;
	}
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	return ret;
}

/**
 * Validate TAG item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
{
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
		.index = 0xff,
	};
	int ret;

	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extensive metadata register"
					  " isn't supported");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!mask)
		mask = &rte_flow_item_tag_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
					error);
	if (ret < 0)
		return ret;
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NONE);
	return 0;
}

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	int ret;

	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					   mask,
					   "no support for partial mask on"
					   " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 error);
	if (ret)
		return ret;
	if (!spec)
		return 0;
	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
	if (!esw_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}

/**
 * Validate GTP item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  uint64_t item_flags,
			  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_gtp *mask = item->mask;
	const struct rte_flow_item_gtp nic_mask = {
		.msg_type = 0xff,
		.teid = RTE_BE32(0xffffffff),
	};

	if (!priv->config.hca_attr.tunnel_stateless_gtp)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GTP support is not enabled");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
					  " supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
	if (!mask)
		mask = &rte_flow_item_gtp_mask;
	return mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gtp),
		 error);
}

1674 /**
1675  * Validate the pop VLAN action.
1676  *
1677  * @param[in] dev
1678  *   Pointer to the rte_eth_dev structure.
1679  * @param[in] action_flags
1680  *   Holds the actions detected until now.
1681  * @param[in] action
1682  *   Pointer to the pop vlan action.
1683  * @param[in] item_flags
1684  *   The items found in this flow rule.
1685  * @param[in] attr
1686  *   Pointer to flow attributes.
1687  * @param[out] error
1688  *   Pointer to error structure.
1689  *
1690  * @return
1691  *   0 on success, a negative errno value otherwise and rte_errno is set.
1692  */
1693 static int
1694 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1695 				 uint64_t action_flags,
1696 				 const struct rte_flow_action *action,
1697 				 uint64_t item_flags,
1698 				 const struct rte_flow_attr *attr,
1699 				 struct rte_flow_error *error)
1700 {
1701 	struct mlx5_priv *priv = dev->data->dev_private;
1702 
1705 	if (!priv->sh->pop_vlan_action)
1706 		return rte_flow_error_set(error, ENOTSUP,
1707 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1708 					  NULL,
1709 					  "pop vlan action is not supported");
1710 	if (attr->egress)
1711 		return rte_flow_error_set(error, ENOTSUP,
1712 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1713 					  NULL,
1714 					  "pop vlan action not supported for "
1715 					  "egress");
1716 	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1717 		return rte_flow_error_set(error, ENOTSUP,
1718 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1719 					  "no support for multiple VLAN "
1720 					  "actions");
1721 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1722 		return rte_flow_error_set(error, ENOTSUP,
1723 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1724 					  NULL,
1725 					  "cannot pop vlan without a "
1726 					  "match on (outer) vlan in the flow");
1727 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1728 		return rte_flow_error_set(error, EINVAL,
1729 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1730 					  "wrong action order, port_id should "
1731 					  "be after pop VLAN action");
1732 	return 0;
1733 }
1734 
1735 /**
1736  * Get VLAN default info from vlan match info.
1737  *
1738  * @param[in] items
1739  *   The list of item specifications.
1740  * @param[out] vlan
1741  *   Pointer to the VLAN info to fill in.
1745  */
1746 static void
1747 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1748 				  struct rte_vlan_hdr *vlan)
1749 {
1750 	const struct rte_flow_item_vlan nic_mask = {
1751 		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1752 				MLX5DV_FLOW_VLAN_VID_MASK),
1753 		.inner_type = RTE_BE16(0xffff),
1754 	};
1755 
1756 	if (items == NULL)
1757 		return;
1758 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1759 		int type = items->type;
1760 
1761 		if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1762 		    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1763 			break;
1764 	}
1765 	if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1766 		const struct rte_flow_item_vlan *vlan_m = items->mask;
1767 		const struct rte_flow_item_vlan *vlan_v = items->spec;
1768 
1769 		if (!vlan_m)
1770 			vlan_m = &nic_mask;
1771 		/* Only full match values are accepted */
1772 		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1773 		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1774 			vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
1775 			vlan->vlan_tci |=
1776 				rte_be_to_cpu_16(vlan_v->tci &
1777 						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1778 		}
1779 		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1780 		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1781 			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1782 			vlan->vlan_tci |=
1783 				rte_be_to_cpu_16(vlan_v->tci &
1784 						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
1785 		}
1786 		if (vlan_m->inner_type == nic_mask.inner_type)
1787 			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1788 							   vlan_m->inner_type);
1789 	}
1790 }
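
/*
 * Illustrative sketch, hypothetical helper: composing a host-order TCI
 * from the PCP/VID masks above -- the inverse of the extraction done by
 * flow_dev_get_vlan_info_from_items().
 */
static __rte_unused uint16_t
example_build_vlan_tci(uint8_t pcp, uint16_t vid)
{
	return (((uint16_t)pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) &
		MLX5DV_FLOW_VLAN_PCP_MASK) |
	       (vid & MLX5DV_FLOW_VLAN_VID_MASK);
}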
1791 
1792 /**
1793  * Validate the push VLAN action.
1794  *
1795  * @param[in] action_flags
1796  *   Holds the actions detected until now.
1797  * @param[in] item_flags
1798  *   The items found in this flow rule.
1799  * @param[in] action
1800  *   Pointer to the action structure.
1801  * @param[in] attr
1802  *   Pointer to flow attributes
1803  * @param[out] error
1804  *   Pointer to error structure.
1805  *
1806  * @return
1807  *   0 on success, a negative errno value otherwise and rte_errno is set.
1808  */
1809 static int
1810 flow_dv_validate_action_push_vlan(uint64_t action_flags,
1811 				  uint64_t item_flags __rte_unused,
1812 				  const struct rte_flow_action *action,
1813 				  const struct rte_flow_attr *attr,
1814 				  struct rte_flow_error *error)
1815 {
1816 	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1817 
1818 	if (!attr->transfer && attr->ingress)
1819 		return rte_flow_error_set(error, ENOTSUP,
1820 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1821 					  NULL,
1822 					  "push VLAN action not supported for "
1823 					  "ingress");
1824 	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1825 	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1826 		return rte_flow_error_set(error, EINVAL,
1827 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1828 					  "invalid vlan ethertype");
1829 	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1830 		return rte_flow_error_set(error, ENOTSUP,
1831 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1832 					  "no support for multiple VLAN "
1833 					  "actions");
1834 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1835 		return rte_flow_error_set(error, EINVAL,
1836 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1837 					  "wrong action order, port_id should "
1838 					  "be after push VLAN");
1840 	return 0;
1841 }
1842 
1843 /**
1844  * Validate the set VLAN PCP.
1845  *
1846  * @param[in] action_flags
1847  *   Holds the actions detected until now.
1848  * @param[in] actions
1849  *   Pointer to the list of actions remaining in the flow rule.
1850  * @param[out] error
1851  *   Pointer to error structure.
1852  *
1853  * @return
1854  *   0 on success, a negative errno value otherwise and rte_errno is set.
1855  */
1856 static int
1857 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1858 				     const struct rte_flow_action actions[],
1859 				     struct rte_flow_error *error)
1860 {
1861 	const struct rte_flow_action *action = actions;
1862 	const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
1863 
1864 	if (conf->vlan_pcp > 7)
1865 		return rte_flow_error_set(error, EINVAL,
1866 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1867 					  "VLAN PCP value is too big");
1868 	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1869 		return rte_flow_error_set(error, ENOTSUP,
1870 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1871 					  "set VLAN PCP action must follow "
1872 					  "the push VLAN action");
1873 	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
1874 		return rte_flow_error_set(error, ENOTSUP,
1875 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1876 					  "Multiple VLAN PCP modifications are "
1877 					  "not supported");
1878 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1879 		return rte_flow_error_set(error, EINVAL,
1880 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1881 					  "wrong action order, port_id should "
1882 					  "be after set VLAN PCP");
1883 	return 0;
1884 }
1885 
1886 /**
1887  * Validate the set VLAN VID.
1888  *
1889  * @param[in] item_flags
1890  *   Holds the items detected in this rule.
1891  * @param[in] action_flags
1892  *   Holds the actions detected until now.
1893  * @param[in] actions
1894  *   Pointer to the list of actions remaining in the flow rule.
1895  * @param[out] error
1896  *   Pointer to error structure.
1897  *
1898  * @return
1899  *   0 on success, a negative errno value otherwise and rte_errno is set.
1900  */
1901 static int
1902 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1903 				     uint64_t action_flags,
1904 				     const struct rte_flow_action actions[],
1905 				     struct rte_flow_error *error)
1906 {
1907 	const struct rte_flow_action *action = actions;
1908 	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1909 
1910 	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
1911 		return rte_flow_error_set(error, EINVAL,
1912 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1913 					  "VLAN VID value is too big");
1914 	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
1915 	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1916 		return rte_flow_error_set(error, ENOTSUP,
1917 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1918 					  "set VLAN VID action must follow push"
1919 					  " VLAN action or match on VLAN item");
1920 	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1921 		return rte_flow_error_set(error, ENOTSUP,
1922 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1923 					  "Multiple VLAN VID modifications are "
1924 					  "not supported");
1925 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1926 		return rte_flow_error_set(error, EINVAL,
1927 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
1928 					  "wrong action order, port_id should "
1929 					  "be after set VLAN VID");
1930 	return 0;
1931 }
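
/*
 * Illustrative sketch only, hypothetical values: an action list the VLAN
 * validators above accept on egress or transfer -- push VLAN first, then
 * the VID/PCP modifications, with port_id (if any) strictly afterwards.
 */
static __rte_unused void
example_vlan_action_order(struct rte_flow_action actions[4])
{
	static const struct rte_flow_action_of_push_vlan push = {
		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
	};
	static const struct rte_flow_action_of_set_vlan_vid vid = {
		.vlan_vid = RTE_BE16(100),
	};
	static const struct rte_flow_action_of_set_vlan_pcp pcp = {
		.vlan_pcp = 3,
	};

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push, };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid, };
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp, };
	actions[3] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END, };
}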
1932 
1933 /**
1934  * Validate the FLAG action.
1935  *
1936  * @param[in] dev
1937  *   Pointer to the rte_eth_dev structure.
1938  * @param[in] action_flags
1939  *   Holds the actions detected until now.
1940  * @param[in] attr
1941  *   Pointer to flow attributes
1942  * @param[out] error
1943  *   Pointer to error structure.
1944  *
1945  * @return
1946  *   0 on success, a negative errno value otherwise and rte_errno is set.
1947  */
1948 static int
1949 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
1950 			     uint64_t action_flags,
1951 			     const struct rte_flow_attr *attr,
1952 			     struct rte_flow_error *error)
1953 {
1954 	struct mlx5_priv *priv = dev->data->dev_private;
1955 	struct mlx5_dev_config *config = &priv->config;
1956 	int ret;
1957 
1958 	/* Fall back if no extended metadata register support. */
1959 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1960 		return mlx5_flow_validate_action_flag(action_flags, attr,
1961 						      error);
1962 	/* Extensive metadata mode requires registers. */
1963 	if (!mlx5_flow_ext_mreg_supported(dev))
1964 		return rte_flow_error_set(error, ENOTSUP,
1965 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1966 					  "no metadata registers "
1967 					  "to support flag action");
1968 	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
1969 		return rte_flow_error_set(error, ENOTSUP,
1970 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1971 					  "extended metadata register"
1972 					  " isn't available");
1973 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1974 	if (ret < 0)
1975 		return ret;
1976 	MLX5_ASSERT(ret > 0);
1977 	if (action_flags & MLX5_FLOW_ACTION_MARK)
1978 		return rte_flow_error_set(error, EINVAL,
1979 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1980 					  "can't mark and flag in same flow");
1981 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
1982 		return rte_flow_error_set(error, EINVAL,
1983 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1984 					  "can't have 2 flag"
1985 					  " actions in same flow");
1986 	return 0;
1987 }
1988 
1989 /**
1990  * Validate MARK action.
1991  *
1992  * @param[in] dev
1993  *   Pointer to the rte_eth_dev structure.
1994  * @param[in] action
1995  *   Pointer to action.
1996  * @param[in] action_flags
1997  *   Holds the actions detected until now.
1998  * @param[in] attr
1999  *   Pointer to flow attributes
2000  * @param[out] error
2001  *   Pointer to error structure.
2002  *
2003  * @return
2004  *   0 on success, a negative errno value otherwise and rte_errno is set.
2005  */
2006 static int
2007 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2008 			     const struct rte_flow_action *action,
2009 			     uint64_t action_flags,
2010 			     const struct rte_flow_attr *attr,
2011 			     struct rte_flow_error *error)
2012 {
2013 	struct mlx5_priv *priv = dev->data->dev_private;
2014 	struct mlx5_dev_config *config = &priv->config;
2015 	const struct rte_flow_action_mark *mark = action->conf;
2016 	int ret;
2017 
2018 	/* Fall back if no extended metadata register support. */
2019 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2020 		return mlx5_flow_validate_action_mark(action, action_flags,
2021 						      attr, error);
2022 	/* Extensive metadata mode requires registers. */
2023 	if (!mlx5_flow_ext_mreg_supported(dev))
2024 		return rte_flow_error_set(error, ENOTSUP,
2025 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2026 					  "no metadata registers "
2027 					  "to support mark action");
2028 	if (!priv->sh->dv_mark_mask)
2029 		return rte_flow_error_set(error, ENOTSUP,
2030 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2031 					  "extended metadata register"
2032 					  " isn't available");
2033 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2034 	if (ret < 0)
2035 		return ret;
2036 	MLX5_ASSERT(ret > 0);
2037 	if (!mark)
2038 		return rte_flow_error_set(error, EINVAL,
2039 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2040 					  "configuration cannot be null");
2041 	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2042 		return rte_flow_error_set(error, EINVAL,
2043 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2044 					  &mark->id,
2045 					  "mark id exceeds the limit");
2046 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
2047 		return rte_flow_error_set(error, EINVAL,
2048 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2049 					  "can't flag and mark in same flow");
2050 	if (action_flags & MLX5_FLOW_ACTION_MARK)
2051 		return rte_flow_error_set(error, EINVAL,
2052 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2053 					  "can't have 2 mark actions in same"
2054 					  " flow");
2055 	return 0;
2056 }
2057 
2058 /**
2059  * Validate SET_META action.
2060  *
2061  * @param[in] dev
2062  *   Pointer to the rte_eth_dev structure.
2063  * @param[in] action
2064  *   Pointer to the action structure.
2065  * @param[in] action_flags
2066  *   Holds the actions detected until now.
2067  * @param[in] attr
2068  *   Pointer to flow attributes
2069  * @param[out] error
2070  *   Pointer to error structure.
2071  *
2072  * @return
2073  *   0 on success, a negative errno value otherwise and rte_errno is set.
2074  */
2075 static int
2076 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2077 				 const struct rte_flow_action *action,
2078 				 uint64_t action_flags __rte_unused,
2079 				 const struct rte_flow_attr *attr,
2080 				 struct rte_flow_error *error)
2081 {
2082 	const struct rte_flow_action_set_meta *conf;
2083 	uint32_t nic_mask = UINT32_MAX;
2084 	int reg;
2085 
2086 	if (!mlx5_flow_ext_mreg_supported(dev))
2087 		return rte_flow_error_set(error, ENOTSUP,
2088 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2089 					  "extended metadata register"
2090 					  " isn't supported");
2091 	reg = flow_dv_get_metadata_reg(dev, attr, error);
2092 	if (reg < 0)
2093 		return reg;
2094 	if (reg != REG_A && reg != REG_B) {
2095 		struct mlx5_priv *priv = dev->data->dev_private;
2096 
2097 		nic_mask = priv->sh->dv_meta_mask;
2098 	}
2099 	if (!(action->conf))
2100 		return rte_flow_error_set(error, EINVAL,
2101 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2102 					  "configuration cannot be null");
2103 	conf = (const struct rte_flow_action_set_meta *)action->conf;
2104 	if (!conf->mask)
2105 		return rte_flow_error_set(error, EINVAL,
2106 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2107 					  "zero mask doesn't have any effect");
2108 	if (conf->mask & ~nic_mask)
2109 		return rte_flow_error_set(error, EINVAL,
2110 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2111 					  "metadata must be within reg C0");
2112 	if (!(conf->data & conf->mask))
2113 		return rte_flow_error_set(error, EINVAL,
2114 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2115 					  "zero value has no effect");
2116 	return 0;
2117 }
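
/*
 * Illustrative sketch, hypothetical values: a SET_META configuration the
 * validator above accepts -- a non-zero mask within the register width
 * and data bits covered by that mask.
 */
static __rte_unused const struct rte_flow_action_set_meta
example_set_meta_conf = {
	.data = 0x1234,
	.mask = 0xffff,
};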
2118 
2119 /**
2120  * Validate SET_TAG action.
2121  *
2122  * @param[in] dev
2123  *   Pointer to the rte_eth_dev structure.
2124  * @param[in] action
2125  *   Pointer to the action structure.
2126  * @param[in] action_flags
2127  *   Holds the actions detected until now.
2128  * @param[in] attr
2129  *   Pointer to flow attributes
2130  * @param[out] error
2131  *   Pointer to error structure.
2132  *
2133  * @return
2134  *   0 on success, a negative errno value otherwise and rte_errno is set.
2135  */
2136 static int
2137 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2138 				const struct rte_flow_action *action,
2139 				uint64_t action_flags,
2140 				const struct rte_flow_attr *attr,
2141 				struct rte_flow_error *error)
2142 {
2143 	const struct rte_flow_action_set_tag *conf;
2144 	const uint64_t terminal_action_flags =
2145 		MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2146 		MLX5_FLOW_ACTION_RSS;
2147 	int ret;
2148 
2149 	if (!mlx5_flow_ext_mreg_supported(dev))
2150 		return rte_flow_error_set(error, ENOTSUP,
2151 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2152 					  "extensive metadata register"
2153 					  " isn't supported");
2154 	if (!(action->conf))
2155 		return rte_flow_error_set(error, EINVAL,
2156 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2157 					  "configuration cannot be null");
2158 	conf = (const struct rte_flow_action_set_tag *)action->conf;
2159 	if (!conf->mask)
2160 		return rte_flow_error_set(error, EINVAL,
2161 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2162 					  "zero mask doesn't have any effect");
2163 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2164 	if (ret < 0)
2165 		return ret;
2166 	if (!attr->transfer && attr->ingress &&
2167 	    (action_flags & terminal_action_flags))
2168 		return rte_flow_error_set(error, EINVAL,
2169 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2170 					  "set_tag has no effect"
2171 					  " with terminal actions");
2172 	return 0;
2173 }
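
/*
 * Illustrative sketch, hypothetical values: a SET_TAG configuration the
 * validator above accepts -- a non-zero mask and a tag array index that
 * mlx5_flow_get_reg_id() can map to a metadata register.
 */
static __rte_unused const struct rte_flow_action_set_tag
example_set_tag_conf = {
	.data = 0x1234,
	.mask = 0xffff,
	.index = 0,
};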
2174 
2175 /**
2176  * Validate count action.
2177  *
2178  * @param[in] dev
2179  *   Pointer to rte_eth_dev structure.
2180  * @param[out] error
2181  *   Pointer to error structure.
2182  *
2183  * @return
2184  *   0 on success, a negative errno value otherwise and rte_errno is set.
2185  */
2186 static int
2187 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2188 			      struct rte_flow_error *error)
2189 {
2190 	struct mlx5_priv *priv = dev->data->dev_private;
2191 
2192 	if (!priv->config.devx)
2193 		goto notsup_err;
2194 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2195 	return 0;
2196 #endif
2197 notsup_err:
2198 	return rte_flow_error_set
2199 		      (error, ENOTSUP,
2200 		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2201 		       NULL,
2202 		       "count action not supported");
2203 }
2204 
2205 /**
2206  * Validate the L2 encap action.
2207  *
2208  * @param[in] action_flags
2209  *   Holds the actions detected until now.
2210  * @param[in] action
2211  *   Pointer to the action structure.
2212  * @param[out] error
2213  *   Pointer to error structure.
2214  *
2215  * @return
2216  *   0 on success, a negative errno value otherwise and rte_errno is set.
2217  */
2218 static int
2219 flow_dv_validate_action_l2_encap(uint64_t action_flags,
2220 				 const struct rte_flow_action *action,
2221 				 struct rte_flow_error *error)
2222 {
2223 	if (!(action->conf))
2224 		return rte_flow_error_set(error, EINVAL,
2225 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2226 					  "configuration cannot be null");
2227 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2228 		return rte_flow_error_set(error, EINVAL,
2229 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2230 					  "can only have a single encap action "
2231 					  "in a flow");
2232 	return 0;
2233 }
2234 
2235 /**
2236  * Validate a decap action.
2237  *
2238  * @param[in] action_flags
2239  *   Holds the actions detected until now.
2240  * @param[in] attr
2241  *   Pointer to flow attributes
2242  * @param[out] error
2243  *   Pointer to error structure.
2244  *
2245  * @return
2246  *   0 on success, a negative errno value otherwise and rte_errno is set.
2247  */
2248 static int
2249 flow_dv_validate_action_decap(uint64_t action_flags,
2250 				 const struct rte_flow_attr *attr,
2251 				 struct rte_flow_error *error)
2252 {
2253 	if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2254 		return rte_flow_error_set(error, ENOTSUP,
2255 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2256 					  action_flags &
2257 					  MLX5_FLOW_ACTION_DECAP ? "can only "
2258 					  "have a single decap action" : "decap "
2259 					  "after encap is not supported");
2260 	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2261 		return rte_flow_error_set(error, EINVAL,
2262 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2263 					  "can't have decap action after"
2264 					  " modify action");
2265 	if (attr->egress)
2266 		return rte_flow_error_set(error, ENOTSUP,
2267 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2268 					  NULL,
2269 					  "decap action not supported for "
2270 					  "egress");
2271 	return 0;
2272 }
2273 
2274 static const struct rte_flow_action_raw_decap empty_decap = {
	.data = NULL, .size = 0,
};
2275 
2276 /**
2277  * Validate the raw encap and decap actions.
2278  *
2279  * @param[in] decap
2280  *   Pointer to the decap action.
2281  * @param[in] encap
2282  *   Pointer to the encap action.
2283  * @param[in] attr
2284  *   Pointer to flow attributes.
2285  * @param[in, out] action_flags
2286  *   Holds the actions detected until now.
2287  * @param[out] actions_n
2288  *   Pointer to the number of actions counter.
2289  * @param[out] error
2290  *   Pointer to error structure.
2291  *
2292  * @return
2293  *   0 on success, a negative errno value otherwise and rte_errno is set.
2294  */
2295 static int
2296 flow_dv_validate_action_raw_encap_decap
2297 	(const struct rte_flow_action_raw_decap *decap,
2298 	 const struct rte_flow_action_raw_encap *encap,
2299 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
2300 	 int *actions_n, struct rte_flow_error *error)
2301 {
2302 	int ret;
2303 
2304 	if (encap && (!encap->size || !encap->data))
2305 		return rte_flow_error_set(error, EINVAL,
2306 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2307 					  "raw encap data cannot be empty");
2308 	if (decap && encap) {
2309 		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2310 		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2311 			/* L3 encap. */
2312 			decap = NULL;
2313 		else if (encap->size <=
2314 			   MLX5_ENCAPSULATION_DECISION_SIZE &&
2315 			   decap->size >
2316 			   MLX5_ENCAPSULATION_DECISION_SIZE)
2317 			/* L3 decap. */
2318 			encap = NULL;
2319 		else if (encap->size >
2320 			   MLX5_ENCAPSULATION_DECISION_SIZE &&
2321 			   decap->size >
2322 			   MLX5_ENCAPSULATION_DECISION_SIZE)
2323 			/* 2 L2 actions: encap and decap. */
2324 			;
2325 		else
2326 			return rte_flow_error_set(error,
2327 				ENOTSUP,
2328 				RTE_FLOW_ERROR_TYPE_ACTION,
2329 				NULL, "unsupported combination:"
2330 				" both raw decap and raw encap"
2331 				" sizes are too small");
2332 	}
2333 	if (decap) {
2334 		ret = flow_dv_validate_action_decap(*action_flags, attr, error);
2335 		if (ret < 0)
2336 			return ret;
2337 		*action_flags |= MLX5_FLOW_ACTION_DECAP;
2338 		++(*actions_n);
2339 	}
2340 	if (encap) {
2341 		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2342 			return rte_flow_error_set(error, ENOTSUP,
2343 						  RTE_FLOW_ERROR_TYPE_ACTION,
2344 						  NULL,
2345 						  "small raw encap size");
2346 		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2347 			return rte_flow_error_set(error, EINVAL,
2348 						  RTE_FLOW_ERROR_TYPE_ACTION,
2349 						  NULL,
2350 						  "more than one encap action");
2351 		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
2352 		++(*actions_n);
2353 	}
2354 	return 0;
2355 }
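
/*
 * Illustrative sketch, hypothetical helper: how the decision size above
 * classifies a raw decap/encap pair, mirroring the first branch of
 * flow_dv_validate_action_raw_encap_decap().
 */
static __rte_unused int
example_is_l3_encap(const struct rte_flow_action_raw_decap *decap,
		    const struct rte_flow_action_raw_encap *encap)
{
	/* A small decap plus a large encap means "strip L2, add an L3 tunnel". */
	return decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
	       encap->size > MLX5_ENCAPSULATION_DECISION_SIZE;
}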
2356 
2357 /**
2358  * Find existing encap/decap resource or create and register a new one.
2359  *
2360  * @param[in, out] dev
2361  *   Pointer to rte_eth_dev structure.
2362  * @param[in, out] resource
2363  *   Pointer to encap/decap resource.
2364  * @param[in, out] dev_flow
2365  *   Pointer to the dev_flow.
2366  * @param[out] error
2367  *   Pointer to error structure.
2368  *
2369  * @return
2370  *   0 on success, otherwise -errno and rte_errno is set.
2371  */
2372 static int
2373 flow_dv_encap_decap_resource_register
2374 			(struct rte_eth_dev *dev,
2375 			 struct mlx5_flow_dv_encap_decap_resource *resource,
2376 			 struct mlx5_flow *dev_flow,
2377 			 struct rte_flow_error *error)
2378 {
2379 	struct mlx5_priv *priv = dev->data->dev_private;
2380 	struct mlx5_ibv_shared *sh = priv->sh;
2381 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2382 	struct mlx5dv_dr_domain *domain;
2383 
2384 	resource->flags = dev_flow->group ? 0 : 1;
2385 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2386 		domain = sh->fdb_domain;
2387 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2388 		domain = sh->rx_domain;
2389 	else
2390 		domain = sh->tx_domain;
2391 	/* Lookup a matching resource from cache. */
2392 	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
2393 		if (resource->reformat_type == cache_resource->reformat_type &&
2394 		    resource->ft_type == cache_resource->ft_type &&
2395 		    resource->flags == cache_resource->flags &&
2396 		    resource->size == cache_resource->size &&
2397 		    !memcmp((const void *)resource->buf,
2398 			    (const void *)cache_resource->buf,
2399 			    resource->size)) {
2400 			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2401 				(void *)cache_resource,
2402 				rte_atomic32_read(&cache_resource->refcnt));
2403 			rte_atomic32_inc(&cache_resource->refcnt);
2404 			dev_flow->dv.encap_decap = cache_resource;
2405 			return 0;
2406 		}
2407 	}
2408 	/* Register new encap/decap resource. */
2409 	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2410 	if (!cache_resource)
2411 		return rte_flow_error_set(error, ENOMEM,
2412 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2413 					  "cannot allocate resource memory");
2414 	*cache_resource = *resource;
2415 	cache_resource->verbs_action =
2416 		mlx5_glue->dv_create_flow_action_packet_reformat
2417 			(sh->ctx, cache_resource->reformat_type,
2418 			 cache_resource->ft_type, domain, cache_resource->flags,
2419 			 cache_resource->size,
2420 			 (cache_resource->size ? cache_resource->buf : NULL));
2421 	if (!cache_resource->verbs_action) {
2422 		rte_free(cache_resource);
2423 		return rte_flow_error_set(error, ENOMEM,
2424 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2425 					  NULL, "cannot create action");
2426 	}
2427 	rte_atomic32_init(&cache_resource->refcnt);
2428 	rte_atomic32_inc(&cache_resource->refcnt);
2429 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
2430 	dev_flow->dv.encap_decap = cache_resource;
2431 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2432 		(void *)cache_resource,
2433 		rte_atomic32_read(&cache_resource->refcnt));
2434 	return 0;
2435 }
2436 
2437 /**
2438  * Find existing table jump resource or create and register a new one.
2439  *
2440  * @param[in, out] dev
2441  *   Pointer to rte_eth_dev structure.
2442  * @param[in, out] tbl
2443  *   Pointer to flow table resource.
2444  * @param[in, out] dev_flow
2445  *   Pointer to the dev_flow.
2446  * @param[out] error
2447  *   Pointer to error structure.
2448  *
2449  * @return
2450  *   0 on success, otherwise -errno and rte_errno is set.
2451  */
2452 static int
2453 flow_dv_jump_tbl_resource_register
2454 			(struct rte_eth_dev *dev __rte_unused,
2455 			 struct mlx5_flow_tbl_resource *tbl,
2456 			 struct mlx5_flow *dev_flow,
2457 			 struct rte_flow_error *error)
2458 {
2459 	struct mlx5_flow_tbl_data_entry *tbl_data =
2460 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2461 	int cnt;
2462 
2463 	MLX5_ASSERT(tbl);
2464 	cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2465 	if (!cnt) {
2466 		tbl_data->jump.action =
2467 			mlx5_glue->dr_create_flow_action_dest_flow_tbl
2468 			(tbl->obj);
2469 		if (!tbl_data->jump.action)
2470 			return rte_flow_error_set(error, ENOMEM,
2471 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2472 					NULL, "cannot create jump action");
2473 		DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2474 			(void *)&tbl_data->jump, cnt);
2475 	} else {
2476 		MLX5_ASSERT(tbl_data->jump.action);
2477 		DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
2478 			(void *)&tbl_data->jump, cnt);
2479 	}
2480 	rte_atomic32_inc(&tbl_data->jump.refcnt);
2481 	dev_flow->dv.jump = &tbl_data->jump;
2482 	return 0;
2483 }
2484 
2485 /**
2486  * Find existing table port ID resource or create and register a new one.
2487  *
2488  * @param[in, out] dev
2489  *   Pointer to rte_eth_dev structure.
2490  * @param[in, out] resource
2491  *   Pointer to port ID action resource.
2492  * @param[in, out] dev_flow
2493  *   Pointer to the dev_flow.
2494  * @param[out] error
2495  *   Pointer to error structure.
2496  *
2497  * @return
2498  *   0 on success, otherwise -errno and rte_errno is set.
2499  */
2500 static int
2501 flow_dv_port_id_action_resource_register
2502 			(struct rte_eth_dev *dev,
2503 			 struct mlx5_flow_dv_port_id_action_resource *resource,
2504 			 struct mlx5_flow *dev_flow,
2505 			 struct rte_flow_error *error)
2506 {
2507 	struct mlx5_priv *priv = dev->data->dev_private;
2508 	struct mlx5_ibv_shared *sh = priv->sh;
2509 	struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2510 
2511 	/* Lookup a matching resource from cache. */
2512 	LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
2513 		if (resource->port_id == cache_resource->port_id) {
2514 			DRV_LOG(DEBUG, "port id action resource %p: "
2515 				"refcnt %d++",
2516 				(void *)cache_resource,
2517 				rte_atomic32_read(&cache_resource->refcnt));
2518 			rte_atomic32_inc(&cache_resource->refcnt);
2519 			dev_flow->dv.port_id_action = cache_resource;
2520 			return 0;
2521 		}
2522 	}
2523 	/* Register new port id action resource. */
2524 	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2525 	if (!cache_resource)
2526 		return rte_flow_error_set(error, ENOMEM,
2527 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2528 					  "cannot allocate resource memory");
2529 	*cache_resource = *resource;
2530 	/*
2531 	 * Depending on rdma_core version the glue routine calls
2532 	 * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
2533 	 * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
2534 	 */
2535 	cache_resource->action =
2536 		mlx5_glue->dr_create_flow_action_dest_port
2537 			(priv->sh->fdb_domain, resource->port_id);
2538 	if (!cache_resource->action) {
2539 		rte_free(cache_resource);
2540 		return rte_flow_error_set(error, ENOMEM,
2541 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2542 					  NULL, "cannot create action");
2543 	}
2544 	rte_atomic32_init(&cache_resource->refcnt);
2545 	rte_atomic32_inc(&cache_resource->refcnt);
2546 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
2547 	dev_flow->dv.port_id_action = cache_resource;
2548 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2549 		(void *)cache_resource,
2550 		rte_atomic32_read(&cache_resource->refcnt));
2551 	return 0;
2552 }
2553 
2554 /**
2555  * Find existing push vlan resource or create and register a new one.
2556  *
2557  * @param[in, out] dev
2558  *   Pointer to rte_eth_dev structure.
2559  * @param[in, out] resource
2560  *   Pointer to push VLAN action resource.
2561  * @param[in, out] dev_flow
2562  *   Pointer to the dev_flow.
2563  * @param[out] error
2564  *   Pointer to error structure.
2565  *
2566  * @return
2567  *   0 on success, otherwise -errno and rte_errno is set.
2568  */
2569 static int
2570 flow_dv_push_vlan_action_resource_register
2571 		       (struct rte_eth_dev *dev,
2572 			struct mlx5_flow_dv_push_vlan_action_resource *resource,
2573 			struct mlx5_flow *dev_flow,
2574 			struct rte_flow_error *error)
2575 {
2576 	struct mlx5_priv *priv = dev->data->dev_private;
2577 	struct mlx5_ibv_shared *sh = priv->sh;
2578 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2579 	struct mlx5dv_dr_domain *domain;
2580 
2581 	/* Lookup a matching resource from cache. */
2582 	LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
2583 		if (resource->vlan_tag == cache_resource->vlan_tag &&
2584 		    resource->ft_type == cache_resource->ft_type) {
2585 			DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2586 				"refcnt %d++",
2587 				(void *)cache_resource,
2588 				rte_atomic32_read(&cache_resource->refcnt));
2589 			rte_atomic32_inc(&cache_resource->refcnt);
2590 			dev_flow->dv.push_vlan_res = cache_resource;
2591 			return 0;
2592 		}
2593 	}
2594 	/* Register new push_vlan action resource. */
2595 	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2596 	if (!cache_resource)
2597 		return rte_flow_error_set(error, ENOMEM,
2598 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2599 					  "cannot allocate resource memory");
2600 	*cache_resource = *resource;
2601 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2602 		domain = sh->fdb_domain;
2603 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2604 		domain = sh->rx_domain;
2605 	else
2606 		domain = sh->tx_domain;
2607 	cache_resource->action =
2608 		mlx5_glue->dr_create_flow_action_push_vlan(domain,
2609 							   resource->vlan_tag);
2610 	if (!cache_resource->action) {
2611 		rte_free(cache_resource);
2612 		return rte_flow_error_set(error, ENOMEM,
2613 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2614 					  NULL, "cannot create action");
2615 	}
2616 	rte_atomic32_init(&cache_resource->refcnt);
2617 	rte_atomic32_inc(&cache_resource->refcnt);
2618 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
2619 	dev_flow->dv.push_vlan_res = cache_resource;
2620 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2621 		(void *)cache_resource,
2622 		rte_atomic32_read(&cache_resource->refcnt));
2623 	return 0;
2624 }

2625 /**
2626  * Get the size of a specific rte_flow_item_type.
2627  *
2628  * @param[in] item_type
2629  *   Tested rte_flow_item_type.
2630  *
2631  * @return
2632  *   Size of the item type structure in bytes, 0 if void or irrelevant.
2633  */
2634 static size_t
2635 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2636 {
2637 	size_t retval;
2638 
2639 	switch (item_type) {
2640 	case RTE_FLOW_ITEM_TYPE_ETH:
2641 		retval = sizeof(struct rte_flow_item_eth);
2642 		break;
2643 	case RTE_FLOW_ITEM_TYPE_VLAN:
2644 		retval = sizeof(struct rte_flow_item_vlan);
2645 		break;
2646 	case RTE_FLOW_ITEM_TYPE_IPV4:
2647 		retval = sizeof(struct rte_flow_item_ipv4);
2648 		break;
2649 	case RTE_FLOW_ITEM_TYPE_IPV6:
2650 		retval = sizeof(struct rte_flow_item_ipv6);
2651 		break;
2652 	case RTE_FLOW_ITEM_TYPE_UDP:
2653 		retval = sizeof(struct rte_flow_item_udp);
2654 		break;
2655 	case RTE_FLOW_ITEM_TYPE_TCP:
2656 		retval = sizeof(struct rte_flow_item_tcp);
2657 		break;
2658 	case RTE_FLOW_ITEM_TYPE_VXLAN:
2659 		retval = sizeof(struct rte_flow_item_vxlan);
2660 		break;
2661 	case RTE_FLOW_ITEM_TYPE_GRE:
2662 		retval = sizeof(struct rte_flow_item_gre);
2663 		break;
2664 	case RTE_FLOW_ITEM_TYPE_NVGRE:
2665 		retval = sizeof(struct rte_flow_item_nvgre);
2666 		break;
2667 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2668 		retval = sizeof(struct rte_flow_item_vxlan_gpe);
2669 		break;
2670 	case RTE_FLOW_ITEM_TYPE_MPLS:
2671 		retval = sizeof(struct rte_flow_item_mpls);
2672 		break;
2673 	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2674 	default:
2675 		retval = 0;
2676 		break;
2677 	}
2678 	return retval;
2679 }
2680 
2681 #define MLX5_ENCAP_IPV4_VERSION		0x40
2682 #define MLX5_ENCAP_IPV4_IHL_MIN		0x05
2683 #define MLX5_ENCAP_IPV4_TTL_DEF		0x40
2684 #define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
2685 #define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
2686 #define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
2687 #define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
2688 
2689 /**
2690  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
2691  *
2692  * @param[in] items
2693  *   Pointer to rte_flow_item objects list.
2694  * @param[out] buf
2695  *   Pointer to the output buffer.
2696  * @param[out] size
2697  *   Pointer to the output buffer size.
2698  * @param[out] error
2699  *   Pointer to the error structure.
2700  *
2701  * @return
2702  *   0 on success, a negative errno value otherwise and rte_errno is set.
2703  */
2704 static int
2705 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2706 			   size_t *size, struct rte_flow_error *error)
2707 {
2708 	struct rte_ether_hdr *eth = NULL;
2709 	struct rte_vlan_hdr *vlan = NULL;
2710 	struct rte_ipv4_hdr *ipv4 = NULL;
2711 	struct rte_ipv6_hdr *ipv6 = NULL;
2712 	struct rte_udp_hdr *udp = NULL;
2713 	struct rte_vxlan_hdr *vxlan = NULL;
2714 	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2715 	struct rte_gre_hdr *gre = NULL;
2716 	size_t len;
2717 	size_t temp_size = 0;
2718 
2719 	if (!items)
2720 		return rte_flow_error_set(error, EINVAL,
2721 					  RTE_FLOW_ERROR_TYPE_ACTION,
2722 					  NULL, "invalid empty data");
2723 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2724 		len = flow_dv_get_item_len(items->type);
2725 		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2726 			return rte_flow_error_set(error, EINVAL,
2727 						  RTE_FLOW_ERROR_TYPE_ACTION,
2728 						  (void *)items->type,
2729 						  "items total size is too big"
2730 						  " for encap action");
2731 		rte_memcpy((void *)&buf[temp_size], items->spec, len);
2732 		switch (items->type) {
2733 		case RTE_FLOW_ITEM_TYPE_ETH:
2734 			eth = (struct rte_ether_hdr *)&buf[temp_size];
2735 			break;
2736 		case RTE_FLOW_ITEM_TYPE_VLAN:
2737 			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2738 			if (!eth)
2739 				return rte_flow_error_set(error, EINVAL,
2740 						RTE_FLOW_ERROR_TYPE_ACTION,
2741 						(void *)items->type,
2742 						"eth header not found");
2743 			if (!eth->ether_type)
2744 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2745 			break;
2746 		case RTE_FLOW_ITEM_TYPE_IPV4:
2747 			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2748 			if (!vlan && !eth)
2749 				return rte_flow_error_set(error, EINVAL,
2750 						RTE_FLOW_ERROR_TYPE_ACTION,
2751 						(void *)items->type,
2752 						"neither eth nor vlan"
2753 						" header found");
2754 			if (vlan && !vlan->eth_proto)
2755 				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2756 			else if (eth && !eth->ether_type)
2757 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2758 			if (!ipv4->version_ihl)
2759 				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
2760 						    MLX5_ENCAP_IPV4_IHL_MIN;
2761 			if (!ipv4->time_to_live)
2762 				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
2763 			break;
2764 		case RTE_FLOW_ITEM_TYPE_IPV6:
2765 			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
2766 			if (!vlan && !eth)
2767 				return rte_flow_error_set(error, EINVAL,
2768 						RTE_FLOW_ERROR_TYPE_ACTION,
2769 						(void *)items->type,
2770 						"neither eth nor vlan"
2771 						" header found");
2772 			if (vlan && !vlan->eth_proto)
2773 				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2774 			else if (eth && !eth->ether_type)
2775 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2776 			if (!ipv6->vtc_flow)
2777 				ipv6->vtc_flow =
2778 					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
2779 			if (!ipv6->hop_limits)
2780 				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
2781 			break;
2782 		case RTE_FLOW_ITEM_TYPE_UDP:
2783 			udp = (struct rte_udp_hdr *)&buf[temp_size];
2784 			if (!ipv4 && !ipv6)
2785 				return rte_flow_error_set(error, EINVAL,
2786 						RTE_FLOW_ERROR_TYPE_ACTION,
2787 						(void *)items->type,
2788 						"ip header not found");
2789 			if (ipv4 && !ipv4->next_proto_id)
2790 				ipv4->next_proto_id = IPPROTO_UDP;
2791 			else if (ipv6 && !ipv6->proto)
2792 				ipv6->proto = IPPROTO_UDP;
2793 			break;
2794 		case RTE_FLOW_ITEM_TYPE_VXLAN:
2795 			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
2796 			if (!udp)
2797 				return rte_flow_error_set(error, EINVAL,
2798 						RTE_FLOW_ERROR_TYPE_ACTION,
2799 						(void *)items->type,
2800 						"udp header not found");
2801 			if (!udp->dst_port)
2802 				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
2803 			if (!vxlan->vx_flags)
2804 				vxlan->vx_flags =
2805 					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
2806 			break;
2807 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2808 			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
2809 			if (!udp)
2810 				return rte_flow_error_set(error, EINVAL,
2811 						RTE_FLOW_ERROR_TYPE_ACTION,
2812 						(void *)items->type,
2813 						"udp header not found");
2814 			if (!vxlan_gpe->proto)
2815 				return rte_flow_error_set(error, EINVAL,
2816 						RTE_FLOW_ERROR_TYPE_ACTION,
2817 						(void *)items->type,
2818 						"next protocol not found");
2819 			if (!udp->dst_port)
2820 				udp->dst_port =
2821 					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
2822 			if (!vxlan_gpe->vx_flags)
2823 				vxlan_gpe->vx_flags =
2824 						MLX5_ENCAP_VXLAN_GPE_FLAGS;
2825 			break;
2826 		case RTE_FLOW_ITEM_TYPE_GRE:
2827 		case RTE_FLOW_ITEM_TYPE_NVGRE:
2828 			gre = (struct rte_gre_hdr *)&buf[temp_size];
2829 			if (!gre->proto)
2830 				return rte_flow_error_set(error, EINVAL,
2831 						RTE_FLOW_ERROR_TYPE_ACTION,
2832 						(void *)items->type,
2833 						"next protocol not found");
2834 			if (!ipv4 && !ipv6)
2835 				return rte_flow_error_set(error, EINVAL,
2836 						RTE_FLOW_ERROR_TYPE_ACTION,
2837 						(void *)items->type,
2838 						"ip header not found");
2839 			if (ipv4 && !ipv4->next_proto_id)
2840 				ipv4->next_proto_id = IPPROTO_GRE;
2841 			else if (ipv6 && !ipv6->proto)
2842 				ipv6->proto = IPPROTO_GRE;
2843 			break;
2844 		case RTE_FLOW_ITEM_TYPE_VOID:
2845 			break;
2846 		default:
2847 			return rte_flow_error_set(error, EINVAL,
2848 						  RTE_FLOW_ERROR_TYPE_ACTION,
2849 						  (void *)items->type,
2850 						  "unsupported item type");
2852 		}
2853 		temp_size += len;
2854 	}
2855 	*size = temp_size;
2856 	return 0;
2857 }
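
/*
 * Illustrative sketch only, hypothetical addresses: an item list accepted
 * by flow_dv_convert_encap_data() -- eth/ipv4/udp/vxlan in order, leaving
 * ether_type, next_proto_id, dst_port and the VXLAN flags zero so the
 * converter fills in the defaults shown above.
 */
static __rte_unused void
example_vxlan_encap_items(struct rte_flow_item items[5])
{
	static const struct rte_flow_item_eth eth = {
		.src.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.dst.addr_bytes = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },
	};
	static const struct rte_flow_item_ipv4 ipv4 = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
	};
	/* All-zero UDP spec: the converter sets the VXLAN dst_port. */
	static const struct rte_flow_item_udp udp = { .hdr.src_port = 0, };
	static const struct rte_flow_item_vxlan vxlan = {
		.vni = { 0x00, 0x00, 0x2a },
	};

	items[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth, };
	items[1] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4, };
	items[2] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp, };
	items[3] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan, };
	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}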
2858 
2859 static int
2860 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2861 {
2862 	struct rte_ether_hdr *eth = NULL;
2863 	struct rte_vlan_hdr *vlan = NULL;
2864 	struct rte_ipv6_hdr *ipv6 = NULL;
2865 	struct rte_udp_hdr *udp = NULL;
2866 	char *next_hdr;
2867 	uint16_t proto;
2868 
2869 	eth = (struct rte_ether_hdr *)data;
2870 	next_hdr = (char *)(eth + 1);
2871 	proto = rte_be_to_cpu_16(eth->ether_type);
2872 
2873 	/* VLAN skipping */
2874 	while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2875 		vlan = (struct rte_vlan_hdr *)next_hdr;
2876 		proto = rte_be_to_cpu_16(vlan->eth_proto);
2877 		next_hdr += sizeof(struct rte_vlan_hdr);
2878 	}
2879 
2880 	/* HW calculates the IPv4 checksum. No need to proceed. */
2881 	if (proto == RTE_ETHER_TYPE_IPV4)
2882 		return 0;
2883 
2884 	/* Non IPv4/IPv6 header. Not supported. */
2885 	if (proto != RTE_ETHER_TYPE_IPV6) {
2886 		return rte_flow_error_set(error, ENOTSUP,
2887 					  RTE_FLOW_ERROR_TYPE_ACTION,
2888 					  NULL, "Cannot offload non IPv4/IPv6");
2889 	}
2890 
2891 	ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2892 
2893 	/* ignore non UDP */
2894 	if (ipv6->proto != IPPROTO_UDP)
2895 		return 0;
2896 
2897 	udp = (struct rte_udp_hdr *)(ipv6 + 1);
2898 	udp->dgram_cksum = 0;
2899 
2900 	return 0;
2901 }
2902 
2903 /**
2904  * Convert L2 encap action to DV specification.
2905  *
2906  * @param[in] dev
2907  *   Pointer to rte_eth_dev structure.
2908  * @param[in] action
2909  *   Pointer to action structure.
2910  * @param[in, out] dev_flow
2911  *   Pointer to the mlx5_flow.
2912  * @param[in] transfer
2913  *   Mark if the flow is E-Switch flow.
2914  * @param[out] error
2915  *   Pointer to the error structure.
2916  *
2917  * @return
2918  *   0 on success, a negative errno value otherwise and rte_errno is set.
2919  */
2920 static int
2921 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2922 			       const struct rte_flow_action *action,
2923 			       struct mlx5_flow *dev_flow,
2924 			       uint8_t transfer,
2925 			       struct rte_flow_error *error)
2926 {
2927 	const struct rte_flow_item *encap_data;
2928 	const struct rte_flow_action_raw_encap *raw_encap_data;
2929 	struct mlx5_flow_dv_encap_decap_resource res = {
2930 		.reformat_type =
2931 			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2932 		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2933 				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2934 	};
2935 
2936 	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2937 		raw_encap_data =
2938 			(const struct rte_flow_action_raw_encap *)action->conf;
2939 		res.size = raw_encap_data->size;
2940 		memcpy(res.buf, raw_encap_data->data, res.size);
2941 	} else {
2942 		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2943 			encap_data =
2944 				((const struct rte_flow_action_vxlan_encap *)
2945 						action->conf)->definition;
2946 		else
2947 			encap_data =
2948 				((const struct rte_flow_action_nvgre_encap *)
2949 						action->conf)->definition;
2950 		if (flow_dv_convert_encap_data(encap_data, res.buf,
2951 					       &res.size, error))
2952 			return -rte_errno;
2953 	}
2954 	if (flow_dv_zero_encap_udp_csum(res.buf, error))
2955 		return -rte_errno;
2956 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2957 		return rte_flow_error_set(error, EINVAL,
2958 					  RTE_FLOW_ERROR_TYPE_ACTION,
2959 					  NULL, "can't create L2 encap action");
2960 	return 0;
2961 }
2962 
2963 /**
2964  * Convert L2 decap action to DV specification.
2965  *
2966  * @param[in] dev
2967  *   Pointer to rte_eth_dev structure.
2968  * @param[in, out] dev_flow
2969  *   Pointer to the mlx5_flow.
2970  * @param[in] transfer
2971  *   Mark if the flow is E-Switch flow.
2972  * @param[out] error
2973  *   Pointer to the error structure.
2974  *
2975  * @return
2976  *   0 on success, a negative errno value otherwise and rte_errno is set.
2977  */
2978 static int
2979 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2980 			       struct mlx5_flow *dev_flow,
2981 			       uint8_t transfer,
2982 			       struct rte_flow_error *error)
2983 {
2984 	struct mlx5_flow_dv_encap_decap_resource res = {
2985 		.size = 0,
2986 		.reformat_type =
2987 			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2988 		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2989 				      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2990 	};
2991 
2992 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2993 		return rte_flow_error_set(error, EINVAL,
2994 					  RTE_FLOW_ERROR_TYPE_ACTION,
2995 					  NULL, "can't create L2 decap action");
2996 	return 0;
2997 }
2998 
2999 /**
3000  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3001  *
3002  * @param[in] dev
3003  *   Pointer to rte_eth_dev structure.
3004  * @param[in] action
3005  *   Pointer to action structure.
3006  * @param[in, out] dev_flow
3007  *   Pointer to the mlx5_flow.
3008  * @param[in] attr
3009  *   Pointer to the flow attributes.
3010  * @param[out] error
3011  *   Pointer to the error structure.
3012  *
3013  * @return
3014  *   0 on success, a negative errno value otherwise and rte_errno is set.
3015  */
3016 static int
3017 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3018 				const struct rte_flow_action *action,
3019 				struct mlx5_flow *dev_flow,
3020 				const struct rte_flow_attr *attr,
3021 				struct rte_flow_error *error)
3022 {
3023 	const struct rte_flow_action_raw_encap *encap_data;
3024 	struct mlx5_flow_dv_encap_decap_resource res;
3025 
3026 	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3027 	res.size = encap_data->size;
3028 	memcpy(res.buf, encap_data->data, res.size);
3029 	res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3030 		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3031 		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3032 	if (attr->transfer)
3033 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3034 	else
3035 		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3036 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3037 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3038 		return rte_flow_error_set(error, EINVAL,
3039 					  RTE_FLOW_ERROR_TYPE_ACTION,
3040 					  NULL, "can't create encap action");
3041 	return 0;
3042 }
3043 
3044 /**
3045  * Create action push VLAN.
3046  *
3047  * @param[in] dev
3048  *   Pointer to rte_eth_dev structure.
3049  * @param[in] attr
3050  *   Pointer to the flow attributes.
3051  * @param[in] vlan
3052  *   Pointer to the vlan to push to the Ethernet header.
3053  * @param[in, out] dev_flow
3054  *   Pointer to the mlx5_flow.
3055  * @param[out] error
3056  *   Pointer to the error structure.
3057  *
3058  * @return
3059  *   0 on success, a negative errno value otherwise and rte_errno is set.
3060  */
3061 static int
3062 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3063 				const struct rte_flow_attr *attr,
3064 				const struct rte_vlan_hdr *vlan,
3065 				struct mlx5_flow *dev_flow,
3066 				struct rte_flow_error *error)
3067 {
3068 	struct mlx5_flow_dv_push_vlan_action_resource res;
3069 
3070 	res.vlan_tag =
3071 		rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3072 				 vlan->vlan_tci);
3073 	if (attr->transfer)
3074 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3075 	else
3076 		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3077 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3078 	return flow_dv_push_vlan_action_resource_register
3079 					    (dev, &res, dev_flow, error);
3080 }
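
/*
 * Illustrative sketch, hypothetical helper: the 32-bit push VLAN tag built
 * above carries the ethertype in the upper 16 bits and the TCI in the
 * lower 16, converted to network byte order as a single word.
 */
static __rte_unused rte_be32_t
example_pack_push_vlan_tag(uint16_t eth_proto, uint16_t vlan_tci)
{
	return rte_cpu_to_be_32(((uint32_t)eth_proto << 16) | vlan_tci);
}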
3081 
3082 /**
3083  * Validate the modify-header actions.
3084  *
3085  * @param[in] action_flags
3086  *   Holds the actions detected until now.
3087  * @param[in] action
3088  *   Pointer to the modify action.
3089  * @param[out] error
3090  *   Pointer to error structure.
3091  *
3092  * @return
3093  *   0 on success, a negative errno value otherwise and rte_errno is set.
3094  */
3095 static int
3096 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3097 				   const struct rte_flow_action *action,
3098 				   struct rte_flow_error *error)
3099 {
3100 	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3101 		return rte_flow_error_set(error, EINVAL,
3102 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3103 					  NULL, "action configuration not set");
3104 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3105 		return rte_flow_error_set(error, EINVAL,
3106 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3107 					  "can't have encap action before"
3108 					  " modify action");
3109 	return 0;
3110 }
3111 
3112 /**
3113  * Validate the modify-header MAC address actions.
3114  *
3115  * @param[in] action_flags
3116  *   Holds the actions detected until now.
3117  * @param[in] action
3118  *   Pointer to the modify action.
3119  * @param[in] item_flags
3120  *   Holds the items detected.
3121  * @param[out] error
3122  *   Pointer to error structure.
3123  *
3124  * @return
3125  *   0 on success, a negative errno value otherwise and rte_errno is set.
3126  */
3127 static int
3128 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3129 				   const struct rte_flow_action *action,
3130 				   const uint64_t item_flags,
3131 				   struct rte_flow_error *error)
3132 {
3133 	int ret = 0;
3134 
3135 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3136 	if (!ret) {
3137 		if (!(item_flags & MLX5_FLOW_LAYER_L2))
3138 			return rte_flow_error_set(error, EINVAL,
3139 						  RTE_FLOW_ERROR_TYPE_ACTION,
3140 						  NULL,
3141 						  "no L2 item in pattern");
3142 	}
3143 	return ret;
3144 }
3145 
3146 /**
3147  * Validate the modify-header IPv4 address actions.
3148  *
3149  * @param[in] action_flags
3150  *   Holds the actions detected until now.
3151  * @param[in] action
3152  *   Pointer to the modify action.
3153  * @param[in] item_flags
3154  *   Holds the items detected.
3155  * @param[out] error
3156  *   Pointer to error structure.
3157  *
3158  * @return
3159  *   0 on success, a negative errno value otherwise and rte_errno is set.
3160  */
3161 static int
3162 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3163 				    const struct rte_flow_action *action,
3164 				    const uint64_t item_flags,
3165 				    struct rte_flow_error *error)
3166 {
3167 	int ret = 0;
3168 	uint64_t layer;
3169 
3170 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3171 	if (!ret) {
3172 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3173 				 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3174 				 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3175 		if (!(item_flags & layer))
3176 			return rte_flow_error_set(error, EINVAL,
3177 						  RTE_FLOW_ERROR_TYPE_ACTION,
3178 						  NULL,
3179 						  "no ipv4 item in pattern");
3180 	}
3181 	return ret;
3182 }
3183 
3184 /**
3185  * Validate the modify-header IPv6 address actions.
3186  *
3187  * @param[in] action_flags
3188  *   Holds the actions detected until now.
3189  * @param[in] action
3190  *   Pointer to the modify action.
3191  * @param[in] item_flags
3192  *   Holds the items detected.
3193  * @param[out] error
3194  *   Pointer to error structure.
3195  *
3196  * @return
3197  *   0 on success, a negative errno value otherwise and rte_errno is set.
3198  */
3199 static int
3200 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3201 				    const struct rte_flow_action *action,
3202 				    const uint64_t item_flags,
3203 				    struct rte_flow_error *error)
3204 {
3205 	int ret = 0;
3206 	uint64_t layer;
3207 
3208 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3209 	if (!ret) {
3210 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3211 				 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3212 				 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3213 		if (!(item_flags & layer))
3214 			return rte_flow_error_set(error, EINVAL,
3215 						  RTE_FLOW_ERROR_TYPE_ACTION,
3216 						  NULL,
3217 						  "no ipv6 item in pattern");
3218 	}
3219 	return ret;
3220 }
3221 
3222 /**
3223  * Validate the modify-header TP actions.
3224  *
3225  * @param[in] action_flags
3226  *   Holds the actions detected until now.
3227  * @param[in] action
3228  *   Pointer to the modify action.
3229  * @param[in] item_flags
3230  *   Holds the items detected.
3231  * @param[out] error
3232  *   Pointer to error structure.
3233  *
3234  * @return
3235  *   0 on success, a negative errno value otherwise and rte_errno is set.
3236  */
3237 static int
3238 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3239 				  const struct rte_flow_action *action,
3240 				  const uint64_t item_flags,
3241 				  struct rte_flow_error *error)
3242 {
3243 	int ret = 0;
3244 	uint64_t layer;
3245 
3246 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3247 	if (!ret) {
3248 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3249 				 MLX5_FLOW_LAYER_INNER_L4 :
3250 				 MLX5_FLOW_LAYER_OUTER_L4;
3251 		if (!(item_flags & layer))
3252 			return rte_flow_error_set(error, EINVAL,
3253 						  RTE_FLOW_ERROR_TYPE_ACTION,
3254 						  NULL, "no transport layer "
3255 						  "in pattern");
3256 	}
3257 	return ret;
3258 }
3259 
3260 /**
3261  * Validate the modify-header actions of increment/decrement
3262  * TCP Sequence-number.
3263  *
3264  * @param[in] action_flags
3265  *   Holds the actions detected until now.
3266  * @param[in] action
3267  *   Pointer to the modify action.
3268  * @param[in] item_flags
3269  *   Holds the items detected.
3270  * @param[out] error
3271  *   Pointer to error structure.
3272  *
3273  * @return
3274  *   0 on success, a negative errno value otherwise and rte_errno is set.
3275  */
3276 static int
3277 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3278 				       const struct rte_flow_action *action,
3279 				       const uint64_t item_flags,
3280 				       struct rte_flow_error *error)
3281 {
3282 	int ret = 0;
3283 	uint64_t layer;
3284 
3285 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3286 	if (!ret) {
3287 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3288 				 MLX5_FLOW_LAYER_INNER_L4_TCP :
3289 				 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3290 		if (!(item_flags & layer))
3291 			return rte_flow_error_set(error, EINVAL,
3292 						  RTE_FLOW_ERROR_TYPE_ACTION,
3293 						  NULL, "no TCP item in"
3294 						  " pattern");
3295 		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3296 			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3297 		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3298 			(action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3299 			return rte_flow_error_set(error, EINVAL,
3300 						  RTE_FLOW_ERROR_TYPE_ACTION,
3301 						  NULL,
3302 						  "cannot decrease and increase"
3303 						  " TCP sequence number"
3304 						  " at the same time");
3305 	}
3306 	return ret;
3307 }
3308 
3309 /**
3310  * Validate the modify-header actions of increment/decrement
3311  * TCP Acknowledgment number.
3312  *
3313  * @param[in] action_flags
3314  *   Holds the actions detected until now.
3315  * @param[in] action
3316  *   Pointer to the modify action.
3317  * @param[in] item_flags
3318  *   Holds the items detected.
3319  * @param[out] error
3320  *   Pointer to error structure.
3321  *
3322  * @return
3323  *   0 on success, a negative errno value otherwise and rte_errno is set.
3324  */
3325 static int
3326 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3327 				       const struct rte_flow_action *action,
3328 				       const uint64_t item_flags,
3329 				       struct rte_flow_error *error)
3330 {
3331 	int ret = 0;
3332 	uint64_t layer;
3333 
3334 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3335 	if (!ret) {
3336 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3337 				 MLX5_FLOW_LAYER_INNER_L4_TCP :
3338 				 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3339 		if (!(item_flags & layer))
3340 			return rte_flow_error_set(error, EINVAL,
3341 						  RTE_FLOW_ERROR_TYPE_ACTION,
3342 						  NULL, "no TCP item in"
3343 						  " pattern");
3344 		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3345 			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3346 		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3347 			(action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3348 			return rte_flow_error_set(error, EINVAL,
3349 						  RTE_FLOW_ERROR_TYPE_ACTION,
3350 						  NULL,
3351 						  "cannot decrease and increase"
3352 						  " TCP acknowledgment number"
3353 						  " at the same time");
3354 	}
3355 	return ret;
3356 }
3357 
3358 /**
3359  * Validate the modify-header TTL actions.
3360  *
3361  * @param[in] action_flags
3362  *   Holds the actions detected until now.
3363  * @param[in] action
3364  *   Pointer to the modify action.
3365  * @param[in] item_flags
3366  *   Holds the items detected.
3367  * @param[out] error
3368  *   Pointer to error structure.
3369  *
3370  * @return
3371  *   0 on success, a negative errno value otherwise and rte_errno is set.
3372  */
3373 static int
3374 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3375 				   const struct rte_flow_action *action,
3376 				   const uint64_t item_flags,
3377 				   struct rte_flow_error *error)
3378 {
3379 	int ret = 0;
3380 	uint64_t layer;
3381 
3382 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3383 	if (!ret) {
3384 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3385 				 MLX5_FLOW_LAYER_INNER_L3 :
3386 				 MLX5_FLOW_LAYER_OUTER_L3;
3387 		if (!(item_flags & layer))
3388 			return rte_flow_error_set(error, EINVAL,
3389 						  RTE_FLOW_ERROR_TYPE_ACTION,
3390 						  NULL,
3391 						  "no IP protocol in pattern");
3392 	}
3393 	return ret;
3394 }
3395 
3396 /**
3397  * Validate jump action.
3398  *
3399  * @param[in] action
3400  *   Pointer to the jump action.
3401  * @param[in] action_flags
3402  *   Holds the actions detected until now.
3403  * @param[in] attributes
3404  *   Pointer to flow attributes
3405  * @param[in] external
3406  *   Action belongs to a flow rule created by a request external to the PMD.
3407  * @param[out] error
3408  *   Pointer to error structure.
3409  *
3410  * @return
3411  *   0 on success, a negative errno value otherwise and rte_errno is set.
3412  */
3413 static int
3414 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3415 			     uint64_t action_flags,
3416 			     const struct rte_flow_attr *attributes,
3417 			     bool external, struct rte_flow_error *error)
3418 {
3419 	uint32_t target_group, table;
3420 	int ret = 0;
3421 
3422 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3423 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3424 		return rte_flow_error_set(error, EINVAL,
3425 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3426 					  "can't have 2 fate actions in the"
3427 					  " same flow");
3428 	if (action_flags & MLX5_FLOW_ACTION_METER)
3429 		return rte_flow_error_set(error, ENOTSUP,
3430 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3431 					  "jump with meter not supported");
3432 	if (!action->conf)
3433 		return rte_flow_error_set(error, EINVAL,
3434 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3435 					  NULL, "action configuration not set");
3436 	target_group =
3437 		((const struct rte_flow_action_jump *)action->conf)->group;
3438 	ret = mlx5_flow_group_to_table(attributes, external, target_group,
3439 				       true, &table, error);
3440 	if (ret)
3441 		return ret;
3442 	if (attributes->group == target_group)
3443 		return rte_flow_error_set(error, EINVAL,
3444 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3445 					  "target group must be other than"
3446 					  " the current flow group");
3447 	return 0;
3448 }
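
/*
 * Illustrative sketch, excluded from the build: a minimal jump action as a
 * caller might construct it. The group number is hypothetical; the action
 * is accepted only if the group differs from the rule's own attr->group
 * and maps to a valid table.
 */
#if 0
	const struct rte_flow_action_jump jump_conf = { .group = 3 };
	const struct rte_flow_action jump_action = {
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = &jump_conf,
	};
#endif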
3449 
3450 /**
3451  * Validate the port_id action.
3452  *
3453  * @param[in] dev
3454  *   Pointer to rte_eth_dev structure.
3455  * @param[in] action_flags
3456  *   Bit-fields that hold the actions detected until now.
3457  * @param[in] action
3458  *   Port_id RTE action structure.
3459  * @param[in] attr
3460  *   Attributes of flow that includes this action.
3461  * @param[out] error
3462  *   Pointer to error structure.
3463  *
3464  * @return
3465  *   0 on success, a negative errno value otherwise and rte_errno is set.
3466  */
3467 static int
3468 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3469 				uint64_t action_flags,
3470 				const struct rte_flow_action *action,
3471 				const struct rte_flow_attr *attr,
3472 				struct rte_flow_error *error)
3473 {
3474 	const struct rte_flow_action_port_id *port_id;
3475 	struct mlx5_priv *act_priv;
3476 	struct mlx5_priv *dev_priv;
3477 	uint16_t port;
3478 
3479 	if (!attr->transfer)
3480 		return rte_flow_error_set(error, ENOTSUP,
3481 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3482 					  NULL,
3483 					  "port id action is valid in transfer"
3484 					  " mode only");
3485 	if (!action || !action->conf)
3486 		return rte_flow_error_set(error, ENOTSUP,
3487 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3488 					  NULL,
3489 					  "port id action parameters must be"
3490 					  " specified");
3491 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3492 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3493 		return rte_flow_error_set(error, EINVAL,
3494 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3495 					  "can have only one fate action in"
3496 					  " a flow");
3497 	dev_priv = mlx5_dev_to_eswitch_info(dev);
3498 	if (!dev_priv)
3499 		return rte_flow_error_set(error, rte_errno,
3500 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3501 					  NULL,
3502 					  "failed to obtain E-Switch info");
3503 	port_id = action->conf;
3504 	port = port_id->original ? dev->data->port_id : port_id->id;
3505 	act_priv = mlx5_port_to_eswitch_info(port, false);
3506 	if (!act_priv)
3507 		return rte_flow_error_set
3508 				(error, rte_errno,
3509 				 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3510 				 "failed to obtain E-Switch port id for port");
3511 	if (act_priv->domain_id != dev_priv->domain_id)
3512 		return rte_flow_error_set
3513 				(error, EINVAL,
3514 				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3515 				 "port does not belong to"
3516 				 " E-Switch being configured");
3517 	return 0;
3518 }
3519 
3520 /**
3521  * Get the maximum number of modify header actions.
3522  *
3523  * @param dev
3524  *   Pointer to rte_eth_dev structure.
3525  * @param flags
3526  *   Flags bits to check if root level.
3527  *
3528  * @return
3529  *   Max number of modify header actions device can support.
3530  */
3531 static unsigned int
3532 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev, uint64_t flags)
3533 {
3534 	/*
3535 	 * There is no way to query the maximum capability directly; it would
3536 	 * have to be acquired by iterative trial. It is a safe assumption that
3537 	 * more actions are supported by the firmware if the extensive metadata
3538 	 * register is supported (root table only).
3539 	 */
3540 	if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3541 		return MLX5_MAX_MODIFY_NUM;
3542 	else
3543 		return mlx5_flow_ext_mreg_supported(dev) ?
3544 					MLX5_ROOT_TBL_MODIFY_NUM :
3545 					MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG;
3546 }
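
/*
 * Worked example, assuming the build-time constants at the time of writing
 * (MLX5_MAX_MODIFY_NUM == 32, MLX5_ROOT_TBL_MODIFY_NUM == 16,
 * MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG == 8, see mlx5_defs.h): a non-root
 * table rule may carry up to 32 modify-header actions, while a root table
 * rule is limited to 16 with the extensive metadata register and to 8
 * without it.
 */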
3547 
3548 /**
3549  * Validate the meter action.
3550  *
3551  * @param[in] dev
3552  *   Pointer to rte_eth_dev structure.
3553  * @param[in] action_flags
3554  *   Bit-fields that hold the actions detected until now.
3555  * @param[in] action
3556  *   Pointer to the meter action.
3557  * @param[in] attr
3558  *   Attributes of flow that includes this action.
3559  * @param[out] error
3560  *   Pointer to error structure.
3561  *
3562  * @return
3563  *   0 on success, a negative errno value otherwise and rte_errno is set.
3564  */
3565 static int
3566 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3567 				uint64_t action_flags,
3568 				const struct rte_flow_action *action,
3569 				const struct rte_flow_attr *attr,
3570 				struct rte_flow_error *error)
3571 {
3572 	struct mlx5_priv *priv = dev->data->dev_private;
3573 	const struct rte_flow_action_meter *am = action->conf;
3574 	struct mlx5_flow_meter *fm;
3575 
3576 	if (!am)
3577 		return rte_flow_error_set(error, EINVAL,
3578 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3579 					  "meter action conf is NULL");
3580 
3581 	if (action_flags & MLX5_FLOW_ACTION_METER)
3582 		return rte_flow_error_set(error, ENOTSUP,
3583 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3584 					  "meter chaining not supported");
3585 	if (action_flags & MLX5_FLOW_ACTION_JUMP)
3586 		return rte_flow_error_set(error, ENOTSUP,
3587 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3588 					  "meter with jump not supported");
3589 	if (!priv->mtr_en)
3590 		return rte_flow_error_set(error, ENOTSUP,
3591 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3592 					  NULL,
3593 					  "meter action not supported");
3594 	fm = mlx5_flow_meter_find(priv, am->mtr_id);
3595 	if (!fm)
3596 		return rte_flow_error_set(error, EINVAL,
3597 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3598 					  "Meter not found");
3599 	if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
3600 	      (!fm->attr.ingress && !attr->ingress && attr->egress) ||
3601 	      (!fm->attr.egress && !attr->egress && attr->ingress))))
3602 		return rte_flow_error_set(error, EINVAL,
3603 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3604 					  "Flow attributes are either invalid "
3605 					  "or have a conflict with current "
3606 					  "meter attributes");
3607 	return 0;
3608 }
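
/*
 * Clarifying note on the ref_cnt check above: a meter already attached to
 * flows may be reused only if the new flow attributes are compatible with
 * the recorded meter attributes - the same transfer mode, or an
 * egress-only flow on a meter never used for ingress, or an ingress-only
 * flow on a meter never used for egress.
 */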
3609 
3610 /**
3611  * Validate the modify-header IPv4 DSCP actions.
3612  *
3613  * @param[in] action_flags
3614  *   Holds the actions detected until now.
3615  * @param[in] action
3616  *   Pointer to the modify action.
3617  * @param[in] item_flags
3618  *   Holds the items detected.
3619  * @param[out] error
3620  *   Pointer to error structure.
3621  *
3622  * @return
3623  *   0 on success, a negative errno value otherwise and rte_errno is set.
3624  */
3625 static int
3626 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3627 					 const struct rte_flow_action *action,
3628 					 const uint64_t item_flags,
3629 					 struct rte_flow_error *error)
3630 {
3631 	int ret = 0;
3632 
3633 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3634 	if (!ret) {
3635 		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3636 			return rte_flow_error_set(error, EINVAL,
3637 						  RTE_FLOW_ERROR_TYPE_ACTION,
3638 						  NULL,
3639 						  "no ipv4 item in pattern");
3640 	}
3641 	return ret;
3642 }
3643 
3644 /**
3645  * Validate the modify-header IPv6 DSCP actions.
3646  *
3647  * @param[in] action_flags
3648  *   Holds the actions detected until now.
3649  * @param[in] action
3650  *   Pointer to the modify action.
3651  * @param[in] item_flags
3652  *   Holds the items detected.
3653  * @param[out] error
3654  *   Pointer to error structure.
3655  *
3656  * @return
3657  *   0 on success, a negative errno value otherwise and rte_errno is set.
3658  */
3659 static int
3660 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3661 					 const struct rte_flow_action *action,
3662 					 const uint64_t item_flags,
3663 					 struct rte_flow_error *error)
3664 {
3665 	int ret = 0;
3666 
3667 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3668 	if (!ret) {
3669 		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3670 			return rte_flow_error_set(error, EINVAL,
3671 						  RTE_FLOW_ERROR_TYPE_ACTION,
3672 						  NULL,
3673 						  "no ipv6 item in pattern");
3674 	}
3675 	return ret;
3676 }
3677 
3678 /**
3679  * Find existing modify-header resource or create and register a new one.
3680  *
3681  * @param dev[in, out]
3682  *   Pointer to rte_eth_dev structure.
3683  * @param[in, out] resource
3684  *   Pointer to modify-header resource.
3685  * @parm[in, out] dev_flow
3686  *   Pointer to the dev_flow.
3687  * @param[out] error
3688  *   pointer to error structure.
3689  *
3690  * @return
3691  *   0 on success otherwise -errno and errno is set.
3692  */
3693 static int
3694 flow_dv_modify_hdr_resource_register
3695 			(struct rte_eth_dev *dev,
3696 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
3697 			 struct mlx5_flow *dev_flow,
3698 			 struct rte_flow_error *error)
3699 {
3700 	struct mlx5_priv *priv = dev->data->dev_private;
3701 	struct mlx5_ibv_shared *sh = priv->sh;
3702 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3703 	struct mlx5dv_dr_domain *ns;
3704 	uint32_t actions_len;
3705 
3706 	resource->flags =
3707 		dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3708 	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
3709 				    resource->flags))
3710 		return rte_flow_error_set(error, EOVERFLOW,
3711 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3712 					  "too many modify-header actions");
3713 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3714 		ns = sh->fdb_domain;
3715 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
3716 		ns = sh->tx_domain;
3717 	else
3718 		ns = sh->rx_domain;
3719 	/* Lookup a matching resource from cache. */
3720 	actions_len = resource->actions_num * sizeof(resource->actions[0]);
3721 	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
3722 		if (resource->ft_type == cache_resource->ft_type &&
3723 		    resource->actions_num == cache_resource->actions_num &&
3724 		    resource->flags == cache_resource->flags &&
3725 		    !memcmp((const void *)resource->actions,
3726 			    (const void *)cache_resource->actions,
3727 			    actions_len)) {
3728 			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
3729 				(void *)cache_resource,
3730 				rte_atomic32_read(&cache_resource->refcnt));
3731 			rte_atomic32_inc(&cache_resource->refcnt);
3732 			dev_flow->dv.modify_hdr = cache_resource;
3733 			return 0;
3734 		}
3735 	}
3736 	/* Register new modify-header resource. */
3737 	cache_resource = rte_calloc(__func__, 1,
3738 				    sizeof(*cache_resource) + actions_len, 0);
3739 	if (!cache_resource)
3740 		return rte_flow_error_set(error, ENOMEM,
3741 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3742 					  "cannot allocate resource memory");
3743 	*cache_resource = *resource;
3744 	rte_memcpy(cache_resource->actions, resource->actions, actions_len);
3745 	cache_resource->verbs_action =
3746 		mlx5_glue->dv_create_flow_action_modify_header
3747 					(sh->ctx, cache_resource->ft_type, ns,
3748 					 cache_resource->flags, actions_len,
3749 					 (uint64_t *)cache_resource->actions);
3750 	if (!cache_resource->verbs_action) {
3751 		rte_free(cache_resource);
3752 		return rte_flow_error_set(error, ENOMEM,
3753 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3754 					  NULL, "cannot create action");
3755 	}
3756 	rte_atomic32_init(&cache_resource->refcnt);
3757 	rte_atomic32_inc(&cache_resource->refcnt);
3758 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
3759 	dev_flow->dv.modify_hdr = cache_resource;
3760 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
3761 		(void *)cache_resource,
3762 		rte_atomic32_read(&cache_resource->refcnt));
3763 	return 0;
3764 }
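
/*
 * Note: the cache lookup above effectively uses the tuple
 * (ft_type, actions_num, flags, raw action bytes) as the resource key, so
 * flows performing an identical header rewrite in the same domain share a
 * single firmware object and only bump its reference count.
 */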
3765 
3766 #define MLX5_CNT_CONTAINER_RESIZE 64
3767 
3768 /**
3769  * Get or create a flow counter.
3770  *
3771  * @param[in] dev
3772  *   Pointer to the Ethernet device structure.
3773  * @param[in] shared
3774  *   Indicate if this counter is shared with other flows.
3775  * @param[in] id
3776  *   Counter identifier.
3777  *
3778  * @return
3779  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
3780  */
3781 static struct mlx5_flow_counter *
3782 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
3783 			       uint32_t id)
3784 {
3785 	struct mlx5_priv *priv = dev->data->dev_private;
3786 	struct mlx5_flow_counter *cnt = NULL;
3787 	struct mlx5_devx_obj *dcs = NULL;
3788 
3789 	if (!priv->config.devx) {
3790 		rte_errno = ENOTSUP;
3791 		return NULL;
3792 	}
3793 	if (shared) {
3794 		TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
3795 			if (cnt->shared && cnt->id == id) {
3796 				cnt->ref_cnt++;
3797 				return cnt;
3798 			}
3799 		}
3800 	}
3801 	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3802 	if (!dcs)
3803 		return NULL;
3804 	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
3805 	if (!cnt) {
3806 		claim_zero(mlx5_devx_cmd_destroy(dcs));
3807 		rte_errno = ENOMEM;
3808 		return NULL;
3809 	}
3810 	struct mlx5_flow_counter tmpl = {
3811 		.shared = shared,
3812 		.ref_cnt = 1,
3813 		.id = id,
3814 		.dcs = dcs,
3815 	};
3816 	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
3817 	if (!tmpl.action) {
3818 		claim_zero(mlx5_devx_cmd_destroy(dcs));
3819 		rte_errno = errno;
3820 		rte_free(cnt);
3821 		return NULL;
3822 	}
3823 	*cnt = tmpl;
3824 	TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
3825 	return cnt;
3826 }
3827 
3828 /**
3829  * Release a flow counter.
3830  *
3831  * @param[in] dev
3832  *   Pointer to the Ethernet device structure.
3833  * @param[in] counter
3834  *   Pointer to the counter handler.
3835  */
3836 static void
3837 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
3838 				 struct mlx5_flow_counter *counter)
3839 {
3840 	struct mlx5_priv *priv = dev->data->dev_private;
3841 
3842 	if (!counter)
3843 		return;
3844 	if (--counter->ref_cnt == 0) {
3845 		TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
3846 		claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
3847 		rte_free(counter);
3848 	}
3849 }
3850 
3851 /**
3852  * Query a devx flow counter.
3853  *
3854  * @param[in] dev
3855  *   Pointer to the Ethernet device structure.
3856  * @param[in] cnt
3857  *   Pointer to the flow counter.
3858  * @param[out] pkts
3859  *   The statistics value of packets.
3860  * @param[out] bytes
3861  *   The statistics value of bytes.
3862  *
3863  * @return
3864  *   0 on success, otherwise a negative errno value and rte_errno is set.
3865  */
3866 static inline int
3867 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
3868 		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
3869 		     uint64_t *bytes)
3870 {
3871 	return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
3872 						0, NULL, NULL, 0);
3873 }
3874 
3875 /**
3876  * Get a pool by a counter.
3877  *
3878  * @param[in] cnt
3879  *   Pointer to the counter.
3880  *
3881  * @return
3882  *   The counter pool.
3883  */
3884 static struct mlx5_flow_counter_pool *
3885 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
3886 {
3887 	if (!cnt->batch) {
3888 		cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
3889 		return (struct mlx5_flow_counter_pool *)cnt - 1;
3890 	}
3891 	return cnt->pool;
3892 }
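
/*
 * Layout note for the pointer arithmetic above, assuming
 * MLX5_COUNTERS_PER_POOL == 512: a pool is a single allocation holding a
 * struct mlx5_flow_counter_pool header immediately followed by its
 * counters_raw[] array. Batch counters store their pool pointer directly.
 * A single-mode counter with dcs->id == 1027 sits at index 1027 % 512 == 3
 * (see flow_dv_counter_pool_prepare), so stepping back 3 counters reaches
 * counters_raw[0] and one more pool-sized step back reaches the header.
 */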
3893 
3894 /**
3895  * Get a pool by devx counter ID.
3896  *
3897  * @param[in] cont
3898  *   Pointer to the counter container.
3899  * @param[in] id
3900  *   The counter devx ID.
3901  *
3902  * @return
3903  *   The counter pool pointer if it exists, NULL otherwise.
3904  */
3905 static struct mlx5_flow_counter_pool *
3906 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
3907 {
3908 	struct mlx5_flow_counter_pool *pool;
3909 
3910 	TAILQ_FOREACH(pool, &cont->pool_list, next) {
3911 		int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
3912 				MLX5_COUNTERS_PER_POOL;
3913 
3914 		if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
3915 			return pool;
3916 	}
3917 	return NULL;
3918 }
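
/*
 * Worked example for the base computation above, again assuming
 * MLX5_COUNTERS_PER_POOL == 512: a pool whose min_dcs->id is 1030 covers
 * the devx ID range [1024, 1536), since (1030 / 512) * 512 == 1024, so any
 * single-allocated counter ID inside that window maps to this pool.
 */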
3919 
3920 /**
3921  * Allocate new memory for the counter values, wrapped by all the needed
3922  * management structures.
3923  *
3924  * @param[in] dev
3925  *   Pointer to the Ethernet device structure.
3926  * @param[in] raws_n
3927  *   The number of raw memory areas, each one for MLX5_COUNTERS_PER_POOL counters.
3928  *
3929  * @return
3930  *   The new memory management pointer on success, otherwise NULL and rte_errno
3931  *   is set.
3932  */
3933 static struct mlx5_counter_stats_mem_mng *
3934 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
3935 {
3936 	struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
3937 					(dev->data->dev_private))->sh;
3938 	struct mlx5_devx_mkey_attr mkey_attr;
3939 	struct mlx5_counter_stats_mem_mng *mem_mng;
3940 	volatile struct flow_counter_stats *raw_data;
3941 	int size = (sizeof(struct flow_counter_stats) *
3942 			MLX5_COUNTERS_PER_POOL +
3943 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
3944 			sizeof(struct mlx5_counter_stats_mem_mng);
3945 	uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
3946 	int i;
3947 
3948 	if (!mem) {
3949 		rte_errno = ENOMEM;
3950 		return NULL;
3951 	}
3952 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
3953 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
3954 	mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
3955 						 IBV_ACCESS_LOCAL_WRITE);
3956 	if (!mem_mng->umem) {
3957 		rte_errno = errno;
3958 		rte_free(mem);
3959 		return NULL;
3960 	}
3961 	mkey_attr.addr = (uintptr_t)mem;
3962 	mkey_attr.size = size;
3963 	mkey_attr.umem_id = mem_mng->umem->umem_id;
3964 	mkey_attr.pd = sh->pdn;
3965 	mkey_attr.log_entity_size = 0;
3966 	mkey_attr.pg_access = 0;
3967 	mkey_attr.klm_array = NULL;
3968 	mkey_attr.klm_num = 0;
3969 	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
3970 	if (!mem_mng->dm) {
3971 		mlx5_glue->devx_umem_dereg(mem_mng->umem);
3972 		rte_errno = errno;
3973 		rte_free(mem);
3974 		return NULL;
3975 	}
3976 	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3977 	raw_data = (volatile struct flow_counter_stats *)mem;
3978 	for (i = 0; i < raws_n; ++i) {
3979 		mem_mng->raws[i].mem_mng = mem_mng;
3980 		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3981 	}
3982 	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3983 	return mem_mng;
3984 }
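
/*
 * Resulting memory layout (one page-aligned rte_calloc allocation):
 *
 *   +--------------------------------------------------------------+
 *   | raw stats: raws_n * MLX5_COUNTERS_PER_POOL entries            |
 *   |   (the only part registered as umem/mkey for device writes)   |
 *   +--------------------------------------------------------------+
 *   | raws_n * struct mlx5_counter_stats_raw (host descriptors)     |
 *   +--------------------------------------------------------------+
 *   | struct mlx5_counter_stats_mem_mng (placed at the very end)    |
 *   +--------------------------------------------------------------+
 */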
3985 
3986 /**
3987  * Resize a counter container.
3988  *
3989  * @param[in] dev
3990  *   Pointer to the Ethernet device structure.
3991  * @param[in] batch
3992  *   Whether the pool is for counter that was allocated by batch command.
3993  *
3994  * @return
3995  *   The new container pointer on success, otherwise NULL and rte_errno is set.
3996  */
3997 static struct mlx5_pools_container *
3998 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3999 {
4000 	struct mlx5_priv *priv = dev->data->dev_private;
4001 	struct mlx5_pools_container *cont =
4002 			MLX5_CNT_CONTAINER(priv->sh, batch, 0);
4003 	struct mlx5_pools_container *new_cont =
4004 			MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
4005 	struct mlx5_counter_stats_mem_mng *mem_mng;
4006 	uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
4007 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4008 	int i;
4009 
4010 	if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
4011 		/* The last resize has not been detected by the host thread yet. */
4012 		rte_errno = EAGAIN;
4013 		return NULL;
4014 	}
4015 	new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
4016 	if (!new_cont->pools) {
4017 		rte_errno = ENOMEM;
4018 		return NULL;
4019 	}
4020 	if (cont->n)
4021 		memcpy(new_cont->pools, cont->pools, cont->n *
4022 		       sizeof(struct mlx5_flow_counter_pool *));
4023 	mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4024 		MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4025 	if (!mem_mng) {
4026 		rte_free(new_cont->pools);
4027 		return NULL;
4028 	}
4029 	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4030 		LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4031 				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
4032 				 i, next);
4033 	new_cont->n = resize;
4034 	rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
4035 	TAILQ_INIT(&new_cont->pool_list);
4036 	TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
4037 	new_cont->init_mem_mng = mem_mng;
4038 	rte_cio_wmb();
4039 	/* Flip the master container. */
4040 	priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
4041 	return new_cont;
4042 }
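
/*
 * Design note: each batch type owns two containers used as a double
 * buffer, with the sh->cmng.mhi[batch] bit selecting the master one.
 * Resizing fills the unused container, publishes its memory with
 * rte_cio_wmb() and only then flips the bit, so the host query thread
 * never observes a half-initialized pools array. A second resize issued
 * before the host thread notices the previous flip fails with EAGAIN.
 */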
4043 
4044 /**
4045  * Query a devx flow counter.
4046  *
4047  * @param[in] dev
4048  *   Pointer to the Ethernet device structure.
4049  * @param[in] cnt
4050  *   Pointer to the flow counter.
4051  * @param[out] pkts
4052  *   The statistics value of packets.
4053  * @param[out] bytes
4054  *   The statistics value of bytes.
4055  *
4056  * @return
4057  *   0 on success, otherwise a negative errno value and rte_errno is set.
4058  */
4059 static inline int
4060 _flow_dv_query_count(struct rte_eth_dev *dev,
4061 		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
4062 		     uint64_t *bytes)
4063 {
4064 	struct mlx5_priv *priv = dev->data->dev_private;
4065 	struct mlx5_flow_counter_pool *pool =
4066 			flow_dv_counter_pool_get(cnt);
4067 	int offset = cnt - &pool->counters_raw[0];
4068 
4069 	if (priv->counter_fallback)
4070 		return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
4071 
4072 	rte_spinlock_lock(&pool->sl);
4073 	/*
4074 	 * A single-mode counter allocation may return a smaller ID than the
4075 	 * ones already allocated, in parallel with the host thread reading.
4076 	 * In this case the new counter values must be reported as 0.
4077 	 */
4078 	if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
4079 		*pkts = 0;
4080 		*bytes = 0;
4081 	} else {
4082 		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4083 		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4084 	}
4085 	rte_spinlock_unlock(&pool->sl);
4086 	return 0;
4087 }
4088 
4089 /**
4090  * Create and initialize a new counter pool.
4091  *
4092  * @param[in] dev
4093  *   Pointer to the Ethernet device structure.
4094  * @param[out] dcs
4095  *   The devX counter handle.
4096  * @param[in] batch
4097  *   Whether the pool is for counter that was allocated by batch command.
4098  *
4099  * @return
4100  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
4101  */
4102 static struct mlx5_flow_counter_pool *
4103 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4104 		    uint32_t batch)
4105 {
4106 	struct mlx5_priv *priv = dev->data->dev_private;
4107 	struct mlx5_flow_counter_pool *pool;
4108 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4109 							       0);
4110 	int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4111 	uint32_t size;
4112 
4113 	if (cont->n == n_valid) {
4114 		cont = flow_dv_container_resize(dev, batch);
4115 		if (!cont)
4116 			return NULL;
4117 	}
4118 	size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
4119 			sizeof(struct mlx5_flow_counter);
4120 	pool = rte_calloc(__func__, 1, size, 0);
4121 	if (!pool) {
4122 		rte_errno = ENOMEM;
4123 		return NULL;
4124 	}
4125 	pool->min_dcs = dcs;
4126 	pool->raw = cont->init_mem_mng->raws + n_valid %
4127 						     MLX5_CNT_CONTAINER_RESIZE;
4128 	pool->raw_hw = NULL;
4129 	rte_spinlock_init(&pool->sl);
4130 	/*
4131 	 * The newly allocated counters in this pool have generation 0; pool
4132 	 * query generation 2 thus makes all of them valid for allocation.
4133 	 */
4134 	rte_atomic64_set(&pool->query_gen, 0x2);
4135 	TAILQ_INIT(&pool->counters);
4136 	TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4137 	cont->pools[n_valid] = pool;
4138 	/* Pool initialization must be updated before host thread access. */
4139 	rte_cio_wmb();
4140 	rte_atomic16_add(&cont->n_valid, 1);
4141 	return pool;
4142 }
4143 
4144 /**
4145  * Prepare a new counter and/or a new counter pool.
4146  *
4147  * @param[in] dev
4148  *   Pointer to the Ethernet device structure.
4149  * @param[out] cnt_free
4150  *   Where to put the pointer of a new counter.
4151  * @param[in] batch
4152  *   Whether the pool is for counter that was allocated by batch command.
4153  *
4154  * @return
4155  *   The free counter pool pointer and @p cnt_free is set on success,
4156  *   NULL otherwise and rte_errno is set.
4157  */
4158 static struct mlx5_flow_counter_pool *
4159 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4160 			     struct mlx5_flow_counter **cnt_free,
4161 			     uint32_t batch)
4162 {
4163 	struct mlx5_priv *priv = dev->data->dev_private;
4164 	struct mlx5_flow_counter_pool *pool;
4165 	struct mlx5_devx_obj *dcs = NULL;
4166 	struct mlx5_flow_counter *cnt;
4167 	uint32_t i;
4168 
4169 	if (!batch) {
4170 		/* bulk_bitmap must be 0 for single counter allocation. */
4171 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4172 		if (!dcs)
4173 			return NULL;
4174 		pool = flow_dv_find_pool_by_id
4175 			(MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
4176 		if (!pool) {
4177 			pool = flow_dv_pool_create(dev, dcs, batch);
4178 			if (!pool) {
4179 				mlx5_devx_cmd_destroy(dcs);
4180 				return NULL;
4181 			}
4182 		} else if (dcs->id < pool->min_dcs->id) {
4183 			rte_atomic64_set(&pool->a64_dcs,
4184 					 (int64_t)(uintptr_t)dcs);
4185 		}
4186 		cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
4187 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4188 		cnt->dcs = dcs;
4189 		*cnt_free = cnt;
4190 		return pool;
4191 	}
4192 	/* bulk_bitmap is in units of 128 counters. */
4193 	if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4194 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4195 	if (!dcs) {
4196 		rte_errno = ENODATA;
4197 		return NULL;
4198 	}
4199 	pool = flow_dv_pool_create(dev, dcs, batch);
4200 	if (!pool) {
4201 		mlx5_devx_cmd_destroy(dcs);
4202 		return NULL;
4203 	}
4204 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4205 		cnt = &pool->counters_raw[i];
4206 		cnt->pool = pool;
4207 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4208 	}
4209 	*cnt_free = &pool->counters_raw[0];
4210 	return pool;
4211 }
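
/*
 * Note on the batch size: the bulk allocation bitmap is expressed in
 * units of 128 counters, so checking bit 0x4 and passing 0x4 to
 * mlx5_devx_cmd_flow_counter_alloc() above requests 4 * 128 = 512
 * counters - one full pool (MLX5_COUNTERS_PER_POOL) behind a single
 * min_dcs object.
 */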
4212 
4213 /**
4214  * Search for an existing shared counter.
4215  *
4216  * @param[in] cont
4217  *   Pointer to the relevant counter pool container.
4218  * @param[in] id
4219  *   The shared counter ID to search.
4220  *
4221  * @return
4222  *   NULL if it does not exist, otherwise a pointer to the shared counter.
4223  */
4224 static struct mlx5_flow_counter *
4225 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
4226 			      uint32_t id)
4227 {
4228 	struct mlx5_flow_counter *cnt;
4229 	struct mlx5_flow_counter_pool *pool;
4230 	int i;
4231 
4232 	TAILQ_FOREACH(pool, &cont->pool_list, next) {
4233 		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4234 			cnt = &pool->counters_raw[i];
4235 			if (cnt->ref_cnt && cnt->shared && cnt->id == id)
4236 				return cnt;
4237 		}
4238 	}
4239 	return NULL;
4240 }
4241 
4242 /**
4243  * Allocate a flow counter.
4244  *
4245  * @param[in] dev
4246  *   Pointer to the Ethernet device structure.
4247  * @param[in] shared
4248  *   Indicate if this counter is shared with other flows.
4249  * @param[in] id
4250  *   Counter identifier.
4251  * @param[in] group
4252  *   Counter flow group.
4253  *
4254  * @return
4255  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
4256  */
4257 static struct mlx5_flow_counter *
4258 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4259 		      uint16_t group)
4260 {
4261 	struct mlx5_priv *priv = dev->data->dev_private;
4262 	struct mlx5_flow_counter_pool *pool = NULL;
4263 	struct mlx5_flow_counter *cnt_free = NULL;
4264 	/*
4265 	 * Currently group 0 flow counter cannot be assigned to a flow if it is
4266 	 * not the first one in the batch counter allocation, so it is better
4267 	 * to allocate counters one by one for these flows in a separate
4268 	 * container.
4269 	 * A counter can be shared between different groups, so shared
4270 	 * counters must be taken from the single-allocation container.
4271 	 */
4272 	uint32_t batch = (group && !shared) ? 1 : 0;
4273 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4274 							       0);
4275 
4276 	if (priv->counter_fallback)
4277 		return flow_dv_counter_alloc_fallback(dev, shared, id);
4278 	if (!priv->config.devx) {
4279 		rte_errno = ENOTSUP;
4280 		return NULL;
4281 	}
4282 	if (shared) {
4283 		cnt_free = flow_dv_counter_shared_search(cont, id);
4284 		if (cnt_free) {
4285 			if (cnt_free->ref_cnt + 1 == 0) {
4286 				rte_errno = E2BIG;
4287 				return NULL;
4288 			}
4289 			cnt_free->ref_cnt++;
4290 			return cnt_free;
4291 		}
4292 	}
4293 	/* Pools that have free counters are at the start of the list. */
4294 	TAILQ_FOREACH(pool, &cont->pool_list, next) {
4295 		/*
4296 		 * The reset values of a freed counter must be updated
4297 		 * between its release and its next allocation, so at least
4298 		 * one query must happen in that window. Ensure this by
4299 		 * saving the query generation at release time.
4300 		 * The free list is sorted according to the generation, so
4301 		 * if the first counter is not updated, neither are all the
4302 		 * others.
4303 		 */
4304 		cnt_free = TAILQ_FIRST(&pool->counters);
4305 		if (cnt_free && cnt_free->query_gen + 1 <
4306 		    rte_atomic64_read(&pool->query_gen))
4307 			break;
4308 		cnt_free = NULL;
4309 	}
4310 	if (!cnt_free) {
4311 		pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
4312 		if (!pool)
4313 			return NULL;
4314 	}
4315 	cnt_free->batch = batch;
4316 	/* Create a DV counter action only in the first time usage. */
4317 	if (!cnt_free->action) {
4318 		uint16_t offset;
4319 		struct mlx5_devx_obj *dcs;
4320 
4321 		if (batch) {
4322 			offset = cnt_free - &pool->counters_raw[0];
4323 			dcs = pool->min_dcs;
4324 		} else {
4325 			offset = 0;
4326 			dcs = cnt_free->dcs;
4327 		}
4328 		cnt_free->action = mlx5_glue->dv_create_flow_action_counter
4329 					(dcs->obj, offset);
4330 		if (!cnt_free->action) {
4331 			rte_errno = errno;
4332 			return NULL;
4333 		}
4334 	}
4335 	/* Update the counter reset values. */
4336 	if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
4337 				 &cnt_free->bytes))
4338 		return NULL;
4339 	cnt_free->shared = shared;
4340 	cnt_free->ref_cnt = 1;
4341 	cnt_free->id = id;
4342 	if (!priv->sh->cmng.query_thread_on)
4343 		/* Start the asynchronous batch query by the host thread. */
4344 		mlx5_set_query_alarm(priv->sh);
4345 	TAILQ_REMOVE(&pool->counters, cnt_free, next);
4346 	if (TAILQ_EMPTY(&pool->counters)) {
4347 		/* Move the pool to the end of the container pool list. */
4348 		TAILQ_REMOVE(&cont->pool_list, pool, next);
4349 		TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4350 	}
4351 	return cnt_free;
4352 }
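
/*
 * Usage sketch, excluded from the build: allocating a shared counter,
 * reading it and releasing it. The ID and group values are hypothetical.
 */
#if 0
	struct mlx5_flow_counter *cnt;
	uint64_t pkts = 0;
	uint64_t bytes = 0;

	cnt = flow_dv_counter_alloc(dev, 1 /* shared */, 42 /* id */,
				    0 /* group */);
	if (cnt && !_flow_dv_query_count(dev, cnt, &pkts, &bytes)) {
		/*
		 * pkts/bytes hold the raw hardware values; the values
		 * saved in cnt->hits/cnt->bytes at allocation time serve
		 * as the reset baseline.
		 */
	}
	flow_dv_counter_release(dev, cnt);
#endif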
4353 
4354 /**
4355  * Release a flow counter.
4356  *
4357  * @param[in] dev
4358  *   Pointer to the Ethernet device structure.
4359  * @param[in] counter
4360  *   Pointer to the counter handler.
4361  */
4362 static void
4363 flow_dv_counter_release(struct rte_eth_dev *dev,
4364 			struct mlx5_flow_counter *counter)
4365 {
4366 	struct mlx5_priv *priv = dev->data->dev_private;
4367 
4368 	if (!counter)
4369 		return;
4370 	if (priv->counter_fallback) {
4371 		flow_dv_counter_release_fallback(dev, counter);
4372 		return;
4373 	}
4374 	if (--counter->ref_cnt == 0) {
4375 		struct mlx5_flow_counter_pool *pool =
4376 				flow_dv_counter_pool_get(counter);
4377 
4378 		/* Put the counter at the end - the most recently updated one. */
4379 		TAILQ_INSERT_TAIL(&pool->counters, counter, next);
4380 		counter->query_gen = rte_atomic64_read(&pool->query_gen);
4381 	}
4382 }
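
/*
 * Note: in the non-fallback path a released counter is not destroyed; it
 * is parked at the tail of its pool's free list with the current query
 * generation recorded, so flow_dv_counter_alloc() can tell whether the
 * counter's reset values have been refreshed by the host thread since the
 * release.
 */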
4383 
4384 /**
4385  * Verify the @p attributes will be correctly understood by the NIC and store
4386  * them in the @p flow if everything is correct.
4387  *
4388  * @param[in] dev
4389  *   Pointer to dev struct.
4390  * @param[in] attributes
4391  *   Pointer to flow attributes
4392  * @param[in] external
4393  *   This flow rule is created by a request external to the PMD.
4394  * @param[out] error
4395  *   Pointer to error structure.
4396  *
4397  * @return
4398  *   0 on success, a negative errno value otherwise and rte_errno is set.
4399  */
4400 static int
4401 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4402 			    const struct rte_flow_attr *attributes,
4403 			    bool external __rte_unused,
4404 			    struct rte_flow_error *error)
4405 {
4406 	struct mlx5_priv *priv = dev->data->dev_private;
4407 	uint32_t priority_max = priv->config.flow_prio - 1;
4408 
4409 #ifndef HAVE_MLX5DV_DR
4410 	if (attributes->group)
4411 		return rte_flow_error_set(error, ENOTSUP,
4412 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4413 					  NULL,
4414 					  "groups are not supported");
4415 #else
4416 	uint32_t table;
4417 	int ret;
4418 
4419 	ret = mlx5_flow_group_to_table(attributes, external,
4420 				       attributes->group, !!priv->fdb_def_rule,
4421 				       &table, error);
4422 	if (ret)
4423 		return ret;
4424 #endif
4425 	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4426 	    attributes->priority >= priority_max)
4427 		return rte_flow_error_set(error, ENOTSUP,
4428 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4429 					  NULL,
4430 					  "priority out of range");
4431 	if (attributes->transfer) {
4432 		if (!priv->config.dv_esw_en)
4433 			return rte_flow_error_set
4434 				(error, ENOTSUP,
4435 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4436 				 "E-Switch dr is not supported");
4437 		if (!(priv->representor || priv->master))
4438 			return rte_flow_error_set
4439 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4440 				 NULL, "E-Switch configuration can only be"
4441 				 " done by a master or a representor device");
4442 		if (attributes->egress)
4443 			return rte_flow_error_set
4444 				(error, ENOTSUP,
4445 				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4446 				 "egress is not supported");
4447 	}
4448 	if (!(attributes->egress ^ attributes->ingress))
4449 		return rte_flow_error_set(error, ENOTSUP,
4450 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4451 					  "must specify exactly one of "
4452 					  "ingress or egress");
4453 	return 0;
4454 }
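
/*
 * Illustrative sketch, excluded from the build: attribute combinations as
 * seen by the final XOR check above - exactly one direction bit must be
 * set.
 */
#if 0
	const struct rte_flow_attr attr_ok = { .ingress = 1 };	/* accepted */
	const struct rte_flow_attr attr_bad = {
		.ingress = 1,
		.egress = 1,	/* rejected: both directions set */
	};
#endif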
4455 
4456 /**
4457  * Internal validation function. For validating both actions and items.
4458  *
4459  * @param[in] dev
4460  *   Pointer to the rte_eth_dev structure.
4461  * @param[in] attr
4462  *   Pointer to the flow attributes.
4463  * @param[in] items
4464  *   Pointer to the list of items.
4465  * @param[in] actions
4466  *   Pointer to the list of actions.
4467  * @param[in] external
4468  *   This flow rule is created by a request external to the PMD.
4469  * @param[out] error
4470  *   Pointer to the error structure.
4471  *
4472  * @return
4473  *   0 on success, a negative errno value otherwise and rte_errno is set.
4474  */
4475 static int
4476 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4477 		 const struct rte_flow_item items[],
4478 		 const struct rte_flow_action actions[],
4479 		 bool external, struct rte_flow_error *error)
4480 {
4481 	int ret;
4482 	uint64_t action_flags = 0;
4483 	uint64_t item_flags = 0;
4484 	uint64_t last_item = 0;
4485 	uint8_t next_protocol = 0xff;
4486 	uint16_t ether_type = 0;
4487 	int actions_n = 0;
4488 	uint8_t item_ipv6_proto = 0;
4489 	const struct rte_flow_item *gre_item = NULL;
4490 	const struct rte_flow_action_raw_decap *decap;
4491 	const struct rte_flow_action_raw_encap *encap;
4492 	const struct rte_flow_action_rss *rss;
4493 	struct rte_flow_item_tcp nic_tcp_mask = {
4494 		.hdr = {
4495 			.tcp_flags = 0xFF,
4496 			.src_port = RTE_BE16(UINT16_MAX),
4497 			.dst_port = RTE_BE16(UINT16_MAX),
4498 		}
4499 	};
4500 	struct mlx5_priv *priv = dev->data->dev_private;
4501 	struct mlx5_dev_config *dev_conf = &priv->config;
4502 	uint16_t queue_index = 0xFFFF;
4503 
4504 	if (items == NULL)
4505 		return -1;
4506 	ret = flow_dv_validate_attributes(dev, attr, external, error);
4507 	if (ret < 0)
4508 		return ret;
4509 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4510 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4511 		int type = items->type;
4512 
4513 		switch (type) {
4514 		case RTE_FLOW_ITEM_TYPE_VOID:
4515 			break;
4516 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
4517 			ret = flow_dv_validate_item_port_id
4518 					(dev, items, attr, item_flags, error);
4519 			if (ret < 0)
4520 				return ret;
4521 			last_item = MLX5_FLOW_ITEM_PORT_ID;
4522 			break;
4523 		case RTE_FLOW_ITEM_TYPE_ETH:
4524 			ret = mlx5_flow_validate_item_eth(items, item_flags,
4525 							  error);
4526 			if (ret < 0)
4527 				return ret;
4528 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4529 					     MLX5_FLOW_LAYER_OUTER_L2;
4530 			if (items->mask != NULL && items->spec != NULL) {
4531 				ether_type =
4532 					((const struct rte_flow_item_eth *)
4533 					 items->spec)->type;
4534 				ether_type &=
4535 					((const struct rte_flow_item_eth *)
4536 					 items->mask)->type;
4537 				ether_type = rte_be_to_cpu_16(ether_type);
4538 			} else {
4539 				ether_type = 0;
4540 			}
4541 			break;
4542 		case RTE_FLOW_ITEM_TYPE_VLAN:
4543 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
4544 							   dev, error);
4545 			if (ret < 0)
4546 				return ret;
4547 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4548 					     MLX5_FLOW_LAYER_OUTER_VLAN;
4549 			if (items->mask != NULL && items->spec != NULL) {
4550 				ether_type =
4551 					((const struct rte_flow_item_vlan *)
4552 					 items->spec)->inner_type;
4553 				ether_type &=
4554 					((const struct rte_flow_item_vlan *)
4555 					 items->mask)->inner_type;
4556 				ether_type = rte_be_to_cpu_16(ether_type);
4557 			} else {
4558 				ether_type = 0;
4559 			}
4560 			break;
4561 		case RTE_FLOW_ITEM_TYPE_IPV4:
4562 			mlx5_flow_tunnel_ip_check(items, next_protocol,
4563 						  &item_flags, &tunnel);
4564 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
4565 							   last_item,
4566 							   ether_type, NULL,
4567 							   error);
4568 			if (ret < 0)
4569 				return ret;
4570 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4571 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4572 			if (items->mask != NULL &&
4573 			    ((const struct rte_flow_item_ipv4 *)
4574 			     items->mask)->hdr.next_proto_id) {
4575 				next_protocol =
4576 					((const struct rte_flow_item_ipv4 *)
4577 					 (items->spec))->hdr.next_proto_id;
4578 				next_protocol &=
4579 					((const struct rte_flow_item_ipv4 *)
4580 					 (items->mask))->hdr.next_proto_id;
4581 			} else {
4582 				/* Reset for inner layer. */
4583 				next_protocol = 0xff;
4584 			}
4585 			break;
4586 		case RTE_FLOW_ITEM_TYPE_IPV6:
4587 			mlx5_flow_tunnel_ip_check(items, next_protocol,
4588 						  &item_flags, &tunnel);
4589 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
4590 							   last_item,
4591 							   ether_type, NULL,
4592 							   error);
4593 			if (ret < 0)
4594 				return ret;
4595 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4596 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4597 			if (items->mask != NULL &&
4598 			    ((const struct rte_flow_item_ipv6 *)
4599 			     items->mask)->hdr.proto) {
4600 				item_ipv6_proto =
4601 					((const struct rte_flow_item_ipv6 *)
4602 					 items->spec)->hdr.proto;
4603 				next_protocol =
4604 					((const struct rte_flow_item_ipv6 *)
4605 					 items->spec)->hdr.proto;
4606 				next_protocol &=
4607 					((const struct rte_flow_item_ipv6 *)
4608 					 items->mask)->hdr.proto;
4609 			} else {
4610 				/* Reset for inner layer. */
4611 				next_protocol = 0xff;
4612 			}
4613 			break;
4614 		case RTE_FLOW_ITEM_TYPE_TCP:
4615 			ret = mlx5_flow_validate_item_tcp
4616 						(items, item_flags,
4617 						 next_protocol,
4618 						 &nic_tcp_mask,
4619 						 error);
4620 			if (ret < 0)
4621 				return ret;
4622 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4623 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
4624 			break;
4625 		case RTE_FLOW_ITEM_TYPE_UDP:
4626 			ret = mlx5_flow_validate_item_udp(items, item_flags,
4627 							  next_protocol,
4628 							  error);
4629 			if (ret < 0)
4630 				return ret;
4631 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4632 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
4633 			break;
4634 		case RTE_FLOW_ITEM_TYPE_GRE:
4635 			ret = mlx5_flow_validate_item_gre(items, item_flags,
4636 							  next_protocol, error);
4637 			if (ret < 0)
4638 				return ret;
4639 			gre_item = items;
4640 			last_item = MLX5_FLOW_LAYER_GRE;
4641 			break;
4642 		case RTE_FLOW_ITEM_TYPE_NVGRE:
4643 			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
4644 							    next_protocol,
4645 							    error);
4646 			if (ret < 0)
4647 				return ret;
4648 			last_item = MLX5_FLOW_LAYER_NVGRE;
4649 			break;
4650 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4651 			ret = mlx5_flow_validate_item_gre_key
4652 				(items, item_flags, gre_item, error);
4653 			if (ret < 0)
4654 				return ret;
4655 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
4656 			break;
4657 		case RTE_FLOW_ITEM_TYPE_VXLAN:
4658 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
4659 							    error);
4660 			if (ret < 0)
4661 				return ret;
4662 			last_item = MLX5_FLOW_LAYER_VXLAN;
4663 			break;
4664 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4665 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
4666 								item_flags, dev,
4667 								error);
4668 			if (ret < 0)
4669 				return ret;
4670 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4671 			break;
4672 		case RTE_FLOW_ITEM_TYPE_GENEVE:
4673 			ret = mlx5_flow_validate_item_geneve(items,
4674 							     item_flags, dev,
4675 							     error);
4676 			if (ret < 0)
4677 				return ret;
4678 			last_item = MLX5_FLOW_LAYER_GENEVE;
4679 			break;
4680 		case RTE_FLOW_ITEM_TYPE_MPLS:
4681 			ret = mlx5_flow_validate_item_mpls(dev, items,
4682 							   item_flags,
4683 							   last_item, error);
4684 			if (ret < 0)
4685 				return ret;
4686 			last_item = MLX5_FLOW_LAYER_MPLS;
4687 			break;
4688 
4689 		case RTE_FLOW_ITEM_TYPE_MARK:
4690 			ret = flow_dv_validate_item_mark(dev, items, attr,
4691 							 error);
4692 			if (ret < 0)
4693 				return ret;
4694 			last_item = MLX5_FLOW_ITEM_MARK;
4695 			break;
4696 		case RTE_FLOW_ITEM_TYPE_META:
4697 			ret = flow_dv_validate_item_meta(dev, items, attr,
4698 							 error);
4699 			if (ret < 0)
4700 				return ret;
4701 			last_item = MLX5_FLOW_ITEM_METADATA;
4702 			break;
4703 		case RTE_FLOW_ITEM_TYPE_ICMP:
4704 			ret = mlx5_flow_validate_item_icmp(items, item_flags,
4705 							   next_protocol,
4706 							   error);
4707 			if (ret < 0)
4708 				return ret;
4709 			last_item = MLX5_FLOW_LAYER_ICMP;
4710 			break;
4711 		case RTE_FLOW_ITEM_TYPE_ICMP6:
4712 			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
4713 							    next_protocol,
4714 							    error);
4715 			if (ret < 0)
4716 				return ret;
4717 			item_ipv6_proto = IPPROTO_ICMPV6;
4718 			last_item = MLX5_FLOW_LAYER_ICMP6;
4719 			break;
4720 		case RTE_FLOW_ITEM_TYPE_TAG:
4721 			ret = flow_dv_validate_item_tag(dev, items,
4722 							attr, error);
4723 			if (ret < 0)
4724 				return ret;
4725 			last_item = MLX5_FLOW_ITEM_TAG;
4726 			break;
4727 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
4728 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
4729 			break;
4730 		case RTE_FLOW_ITEM_TYPE_GTP:
4731 			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
4732 							error);
4733 			if (ret < 0)
4734 				return ret;
4735 			last_item = MLX5_FLOW_LAYER_GTP;
4736 			break;
4737 		default:
4738 			return rte_flow_error_set(error, ENOTSUP,
4739 						  RTE_FLOW_ERROR_TYPE_ITEM,
4740 						  NULL, "item not supported");
4741 		}
4742 		item_flags |= last_item;
4743 	}
4744 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4745 		int type = actions->type;
4746 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4747 			return rte_flow_error_set(error, ENOTSUP,
4748 						  RTE_FLOW_ERROR_TYPE_ACTION,
4749 						  actions, "too many actions");
4750 		switch (type) {
4751 		case RTE_FLOW_ACTION_TYPE_VOID:
4752 			break;
4753 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
4754 			ret = flow_dv_validate_action_port_id(dev,
4755 							      action_flags,
4756 							      actions,
4757 							      attr,
4758 							      error);
4759 			if (ret)
4760 				return ret;
4761 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4762 			++actions_n;
4763 			break;
4764 		case RTE_FLOW_ACTION_TYPE_FLAG:
4765 			ret = flow_dv_validate_action_flag(dev, action_flags,
4766 							   attr, error);
4767 			if (ret < 0)
4768 				return ret;
4769 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4770 				/* Count all modify-header actions as one. */
4771 				if (!(action_flags &
4772 				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
4773 					++actions_n;
4774 				action_flags |= MLX5_FLOW_ACTION_FLAG |
4775 						MLX5_FLOW_ACTION_MARK_EXT;
4776 			} else {
4777 				action_flags |= MLX5_FLOW_ACTION_FLAG;
4778 				++actions_n;
4779 			}
4780 			break;
4781 		case RTE_FLOW_ACTION_TYPE_MARK:
4782 			ret = flow_dv_validate_action_mark(dev, actions,
4783 							   action_flags,
4784 							   attr, error);
4785 			if (ret < 0)
4786 				return ret;
4787 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4788 				/* Count all modify-header actions as one. */
4789 				if (!(action_flags &
4790 				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
4791 					++actions_n;
4792 				action_flags |= MLX5_FLOW_ACTION_MARK |
4793 						MLX5_FLOW_ACTION_MARK_EXT;
4794 			} else {
4795 				action_flags |= MLX5_FLOW_ACTION_MARK;
4796 				++actions_n;
4797 			}
4798 			break;
4799 		case RTE_FLOW_ACTION_TYPE_SET_META:
4800 			ret = flow_dv_validate_action_set_meta(dev, actions,
4801 							       action_flags,
4802 							       attr, error);
4803 			if (ret < 0)
4804 				return ret;
4805 			/* Count all modify-header actions as one action. */
4806 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4807 				++actions_n;
4808 			action_flags |= MLX5_FLOW_ACTION_SET_META;
4809 			break;
4810 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
4811 			ret = flow_dv_validate_action_set_tag(dev, actions,
4812 							      action_flags,
4813 							      attr, error);
4814 			if (ret < 0)
4815 				return ret;
4816 			/* Count all modify-header actions as one action. */
4817 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4818 				++actions_n;
4819 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
4820 			break;
4821 		case RTE_FLOW_ACTION_TYPE_DROP:
4822 			ret = mlx5_flow_validate_action_drop(action_flags,
4823 							     attr, error);
4824 			if (ret < 0)
4825 				return ret;
4826 			action_flags |= MLX5_FLOW_ACTION_DROP;
4827 			++actions_n;
4828 			break;
4829 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4830 			ret = mlx5_flow_validate_action_queue(actions,
4831 							      action_flags, dev,
4832 							      attr, error);
4833 			if (ret < 0)
4834 				return ret;
4835 			queue_index = ((const struct rte_flow_action_queue *)
4836 							(actions->conf))->index;
4837 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
4838 			++actions_n;
4839 			break;
4840 		case RTE_FLOW_ACTION_TYPE_RSS:
4841 			rss = actions->conf;
4842 			ret = mlx5_flow_validate_action_rss(actions,
4843 							    action_flags, dev,
4844 							    attr, item_flags,
4845 							    error);
4846 			if (ret < 0)
4847 				return ret;
4848 			if (rss != NULL && rss->queue_num)
4849 				queue_index = rss->queue[0];
4850 			action_flags |= MLX5_FLOW_ACTION_RSS;
4851 			++actions_n;
4852 			break;
4853 		case RTE_FLOW_ACTION_TYPE_COUNT:
4854 			ret = flow_dv_validate_action_count(dev, error);
4855 			if (ret < 0)
4856 				return ret;
4857 			action_flags |= MLX5_FLOW_ACTION_COUNT;
4858 			++actions_n;
4859 			break;
4860 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4861 			if (flow_dv_validate_action_pop_vlan(dev,
4862 							     action_flags,
4863 							     actions,
4864 							     item_flags, attr,
4865 							     error))
4866 				return -rte_errno;
4867 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
4868 			++actions_n;
4869 			break;
4870 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4871 			ret = flow_dv_validate_action_push_vlan(action_flags,
4872 								item_flags,
4873 								actions, attr,
4874 								error);
4875 			if (ret < 0)
4876 				return ret;
4877 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
4878 			++actions_n;
4879 			break;
4880 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4881 			ret = flow_dv_validate_action_set_vlan_pcp
4882 						(action_flags, actions, error);
4883 			if (ret < 0)
4884 				return ret;
4885 			/* Count PCP with push_vlan command. */
4886 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
4887 			break;
4888 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4889 			ret = flow_dv_validate_action_set_vlan_vid
4890 						(item_flags, action_flags,
4891 						 actions, error);
4892 			if (ret < 0)
4893 				return ret;
4894 			/* Count VID with push_vlan command. */
4895 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
4896 			break;
4897 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4898 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4899 			ret = flow_dv_validate_action_l2_encap(action_flags,
4900 							       actions, error);
4901 			if (ret < 0)
4902 				return ret;
4903 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
4904 			++actions_n;
4905 			break;
4906 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4907 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4908 			ret = flow_dv_validate_action_decap(action_flags, attr,
4909 							    error);
4910 			if (ret < 0)
4911 				return ret;
4912 			action_flags |= MLX5_FLOW_ACTION_DECAP;
4913 			++actions_n;
4914 			break;
4915 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4916 			ret = flow_dv_validate_action_raw_encap_decap
4917 				(NULL, actions->conf, attr, &action_flags,
4918 				 &actions_n, error);
4919 			if (ret < 0)
4920 				return ret;
4921 			break;
4922 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4923 			decap = actions->conf;
4924 			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
4925 				;
4926 			if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4927 				encap = NULL;
4928 				actions--;
4929 			} else {
4930 				encap = actions->conf;
4931 			}
4932 			ret = flow_dv_validate_action_raw_encap_decap
4933 					   (decap ? decap : &empty_decap, encap,
4934 					    attr, &action_flags, &actions_n,
4935 					    error);
4936 			if (ret < 0)
4937 				return ret;
4938 			break;
4939 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4940 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4941 			ret = flow_dv_validate_action_modify_mac(action_flags,
4942 								 actions,
4943 								 item_flags,
4944 								 error);
4945 			if (ret < 0)
4946 				return ret;
4947 			/* Count all modify-header actions as one action. */
4948 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4949 				++actions_n;
4950 			action_flags |= actions->type ==
4951 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4952 						MLX5_FLOW_ACTION_SET_MAC_SRC :
4953 						MLX5_FLOW_ACTION_SET_MAC_DST;
4954 			break;
4955 
4956 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4957 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4958 			ret = flow_dv_validate_action_modify_ipv4(action_flags,
4959 								  actions,
4960 								  item_flags,
4961 								  error);
4962 			if (ret < 0)
4963 				return ret;
4964 			/* Count all modify-header actions as one action. */
4965 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4966 				++actions_n;
4967 			action_flags |= actions->type ==
4968 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4969 						MLX5_FLOW_ACTION_SET_IPV4_SRC :
4970 						MLX5_FLOW_ACTION_SET_IPV4_DST;
4971 			break;
4972 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4973 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4974 			ret = flow_dv_validate_action_modify_ipv6(action_flags,
4975 								  actions,
4976 								  item_flags,
4977 								  error);
4978 			if (ret < 0)
4979 				return ret;
4980 			if (item_ipv6_proto == IPPROTO_ICMPV6)
4981 				return rte_flow_error_set(error, ENOTSUP,
4982 					RTE_FLOW_ERROR_TYPE_ACTION,
4983 					actions,
4984 					"Can't change header "
4985 					"with ICMPv6 proto");
4986 			/* Count all modify-header actions as one action. */
4987 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4988 				++actions_n;
4989 			action_flags |= actions->type ==
4990 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4991 						MLX5_FLOW_ACTION_SET_IPV6_SRC :
4992 						MLX5_FLOW_ACTION_SET_IPV6_DST;
4993 			break;
4994 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4995 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4996 			ret = flow_dv_validate_action_modify_tp(action_flags,
4997 								actions,
4998 								item_flags,
4999 								error);
5000 			if (ret < 0)
5001 				return ret;
5002 			/* Count all modify-header actions as one action. */
5003 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5004 				++actions_n;
5005 			action_flags |= actions->type ==
5006 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5007 						MLX5_FLOW_ACTION_SET_TP_SRC :
5008 						MLX5_FLOW_ACTION_SET_TP_DST;
5009 			break;
5010 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5011 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
5012 			ret = flow_dv_validate_action_modify_ttl(action_flags,
5013 								 actions,
5014 								 item_flags,
5015 								 error);
5016 			if (ret < 0)
5017 				return ret;
5018 			/* Count all modify-header actions as one action. */
5019 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5020 				++actions_n;
5021 			action_flags |= actions->type ==
5022 					RTE_FLOW_ACTION_TYPE_SET_TTL ?
5023 						MLX5_FLOW_ACTION_SET_TTL :
5024 						MLX5_FLOW_ACTION_DEC_TTL;
5025 			break;
5026 		case RTE_FLOW_ACTION_TYPE_JUMP:
5027 			ret = flow_dv_validate_action_jump(actions,
5028 							   action_flags,
5029 							   attr, external,
5030 							   error);
5031 			if (ret)
5032 				return ret;
5033 			++actions_n;
5034 			action_flags |= MLX5_FLOW_ACTION_JUMP;
5035 			break;
5036 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5037 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5038 			ret = flow_dv_validate_action_modify_tcp_seq
5039 								(action_flags,
5040 								 actions,
5041 								 item_flags,
5042 								 error);
5043 			if (ret < 0)
5044 				return ret;
5045 			/* Count all modify-header actions as one action. */
5046 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5047 				++actions_n;
5048 			action_flags |= actions->type ==
5049 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5050 						MLX5_FLOW_ACTION_INC_TCP_SEQ :
5051 						MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5052 			break;
5053 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5054 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5055 			ret = flow_dv_validate_action_modify_tcp_ack
5056 								(action_flags,
5057 								 actions,
5058 								 item_flags,
5059 								 error);
5060 			if (ret < 0)
5061 				return ret;
5062 			/* Count all modify-header actions as one action. */
5063 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5064 				++actions_n;
5065 			action_flags |= actions->type ==
5066 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5067 						MLX5_FLOW_ACTION_INC_TCP_ACK :
5068 						MLX5_FLOW_ACTION_DEC_TCP_ACK;
5069 			break;
5070 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5071 		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5072 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5073 			break;
5074 		case RTE_FLOW_ACTION_TYPE_METER:
5075 			ret = mlx5_flow_validate_action_meter(dev,
5076 							      action_flags,
5077 							      actions, attr,
5078 							      error);
5079 			if (ret < 0)
5080 				return ret;
5081 			action_flags |= MLX5_FLOW_ACTION_METER;
5082 			++actions_n;
5083 			break;
5084 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5085 			ret = flow_dv_validate_action_modify_ipv4_dscp
5086 							 (action_flags,
5087 							  actions,
5088 							  item_flags,
5089 							  error);
5090 			if (ret < 0)
5091 				return ret;
5092 			/* Count all modify-header actions as one action. */
5093 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5094 				++actions_n;
5095 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5096 			break;
5097 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5098 			ret = flow_dv_validate_action_modify_ipv6_dscp
5099 								(action_flags,
5100 								 actions,
5101 								 item_flags,
5102 								 error);
5103 			if (ret < 0)
5104 				return ret;
5105 			/* Count all modify-header actions as one action. */
5106 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5107 				++actions_n;
5108 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5109 			break;
5110 		default:
5111 			return rte_flow_error_set(error, ENOTSUP,
5112 						  RTE_FLOW_ERROR_TYPE_ACTION,
5113 						  actions,
5114 						  "action not supported");
5115 		}
5116 	}
5117 	/*
5118 	 * Validate the drop action mutual exclusion with other actions.
5119 	 * Drop action is mutually exclusive with any other action, except for
5120 	 * Count action.
5121 	 */
5122 	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5123 	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5124 		return rte_flow_error_set(error, EINVAL,
5125 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5126 					  "Drop action is mutually-exclusive "
5127 					  "with any other action, except for "
5128 					  "Count action");
5129 	/* Eswitch has a few restrictions on using items and actions. */
5130 	if (attr->transfer) {
5131 		if (!mlx5_flow_ext_mreg_supported(dev) &&
5132 		    action_flags & MLX5_FLOW_ACTION_FLAG)
5133 			return rte_flow_error_set(error, ENOTSUP,
5134 						  RTE_FLOW_ERROR_TYPE_ACTION,
5135 						  NULL,
5136 						  "unsupported action FLAG");
5137 		if (!mlx5_flow_ext_mreg_supported(dev) &&
5138 		    action_flags & MLX5_FLOW_ACTION_MARK)
5139 			return rte_flow_error_set(error, ENOTSUP,
5140 						  RTE_FLOW_ERROR_TYPE_ACTION,
5141 						  NULL,
5142 						  "unsupported action MARK");
5143 		if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5144 			return rte_flow_error_set(error, ENOTSUP,
5145 						  RTE_FLOW_ERROR_TYPE_ACTION,
5146 						  NULL,
5147 						  "unsupported action QUEUE");
5148 		if (action_flags & MLX5_FLOW_ACTION_RSS)
5149 			return rte_flow_error_set(error, ENOTSUP,
5150 						  RTE_FLOW_ERROR_TYPE_ACTION,
5151 						  NULL,
5152 						  "unsupported action RSS");
5153 		if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5154 			return rte_flow_error_set(error, EINVAL,
5155 						  RTE_FLOW_ERROR_TYPE_ACTION,
5156 						  actions,
5157 						  "no fate action is found");
5158 	} else {
5159 		if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5160 			return rte_flow_error_set(error, EINVAL,
5161 						  RTE_FLOW_ERROR_TYPE_ACTION,
5162 						  actions,
5163 						  "no fate action is found");
5164 	}
5165 	/* Continue validation for Xcap (encap and decap) actions. */
5166 	if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
5167 	    mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5168 		if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5169 		    MLX5_FLOW_XCAP_ACTIONS)
5170 			return rte_flow_error_set(error, ENOTSUP,
5171 						  RTE_FLOW_ERROR_TYPE_ACTION,
5172 						  NULL, "encap and decap "
5173 						  "combination is not supported");
5174 		if (!attr->transfer && attr->ingress && (action_flags &
5175 							MLX5_FLOW_ACTION_ENCAP))
5176 			return rte_flow_error_set(error, ENOTSUP,
5177 						  RTE_FLOW_ERROR_TYPE_ACTION,
5178 						  NULL, "encap is not supported"
5179 						  " for ingress traffic");
5180 	}
5181 	return 0;
5182 }
5183 
5184 /**
5185  * Internal preparation function. Allocates the DV flow structure,
5186  * whose size is constant.
5187  *
5188  * @param[in] attr
5189  *   Pointer to the flow attributes.
5190  * @param[in] items
5191  *   Pointer to the list of items.
5192  * @param[in] actions
5193  *   Pointer to the list of actions.
5194  * @param[out] error
5195  *   Pointer to the error structure.
5196  *
5197  * @return
5198  *   Pointer to mlx5_flow object on success,
5199  *   otherwise NULL and rte_errno is set.
5200  */
5201 static struct mlx5_flow *
5202 flow_dv_prepare(const struct rte_flow_attr *attr,
5203 		const struct rte_flow_item items[] __rte_unused,
5204 		const struct rte_flow_action actions[] __rte_unused,
5205 		struct rte_flow_error *error)
5206 {
5207 	size_t size = sizeof(struct mlx5_flow);
5208 	struct mlx5_flow *dev_flow;
5209 
5210 	dev_flow = rte_calloc(__func__, 1, size, 0);
5211 	if (!dev_flow) {
5212 		rte_flow_error_set(error, ENOMEM,
5213 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5214 				   "not enough memory to create flow");
5215 		return NULL;
5216 	}
5217 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
5218 	dev_flow->ingress = attr->ingress;
5219 	dev_flow->transfer = attr->transfer;
5220 	return dev_flow;
5221 }
5222 
5223 #ifdef RTE_LIBRTE_MLX5_DEBUG
5224 /**
5225  * Sanity check for match mask and value. Similar to check_valid_spec() in
5226  * kernel driver. If an unmasked bit is present in the value, it returns failure.
5227  *
5228  * @param match_mask
5229  *   Pointer to match mask buffer.
5230  * @param match_value
5231  *   Pointer to match value buffer.
5232  *
5233  * @return
5234  *   0 if valid, -EINVAL otherwise.
5235  */
5236 static int
5237 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5238 {
5239 	uint8_t *m = match_mask;
5240 	uint8_t *v = match_value;
5241 	unsigned int i;
5242 
5243 	for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5244 		if (v[i] & ~m[i]) {
5245 			DRV_LOG(ERR,
5246 				"match_value differs from match_criteria"
5247 				" %p[%u] != %p[%u]",
5248 				match_value, i, match_mask, i);
5249 			return -EINVAL;
5250 		}
5251 	}
5252 	return 0;
5253 }
5254 #endif
5255 
5256 /**
5257  * Add Ethernet item to matcher and to the value.
5258  *
5259  * @param[in, out] matcher
5260  *   Flow matcher.
5261  * @param[in, out] key
5262  *   Flow matcher value.
5263  * @param[in] item
5264  *   Flow pattern to translate.
5265  * @param[in] inner
5266  *   Item is inner pattern.
5267  */
5268 static void
5269 flow_dv_translate_item_eth(void *matcher, void *key,
5270 			   const struct rte_flow_item *item, int inner)
5271 {
5272 	const struct rte_flow_item_eth *eth_m = item->mask;
5273 	const struct rte_flow_item_eth *eth_v = item->spec;
5274 	const struct rte_flow_item_eth nic_mask = {
5275 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5276 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5277 		.type = RTE_BE16(0xffff),
5278 	};
5279 	void *headers_m;
5280 	void *headers_v;
5281 	char *l24_v;
5282 	unsigned int i;
5283 
5284 	if (!eth_v)
5285 		return;
5286 	if (!eth_m)
5287 		eth_m = &nic_mask;
5288 	if (inner) {
5289 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5290 					 inner_headers);
5291 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5292 	} else {
5293 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5294 					 outer_headers);
5295 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5296 	}
5297 	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5298 	       &eth_m->dst, sizeof(eth_m->dst));
5299 	/* The value must be in the range of the mask. */
5300 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5301 	for (i = 0; i < sizeof(eth_m->dst); ++i)
5302 		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5303 	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5304 	       &eth_m->src, sizeof(eth_m->src));
5305 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5306 	/* The value must be in the range of the mask. */
5307 	for (i = 0; i < sizeof(eth_m->src); ++i)
5308 		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5309 	if (eth_v->type) {
5310 		/* When ethertype is present set mask for tagged VLAN. */
5311 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5312 		/* Set value for tagged VLAN if ethertype is 802.1Q. */
5313 		if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
5314 		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
5315 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
5316 				 1);
5317 			/* Return here to avoid setting match on ethertype. */
5318 			return;
5319 		}
5320 	}
5321 	/*
5322 	 * HW supports match on one Ethertype, the Ethertype following the last
5323 	 * VLAN tag of the packet (see PRM).
5324 	 * Set match on ethertype only if ETH header is not followed by VLAN.
5325 	 */
5326 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5327 		 rte_be_to_cpu_16(eth_m->type));
5328 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
5329 	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5330 }
5331 
5332 /**
5333  * Add VLAN item to matcher and to the value.
5334  *
5335  * @param[in, out] dev_flow
5336  *   Flow descriptor.
5337  * @param[in, out] matcher
5338  *   Flow matcher.
5339  * @param[in, out] key
5340  *   Flow matcher value.
5341  * @param[in] item
5342  *   Flow pattern to translate.
5343  * @param[in] inner
5344  *   Item is inner pattern.
5345  */
5346 static void
5347 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5348 			    void *matcher, void *key,
5349 			    const struct rte_flow_item *item,
5350 			    int inner)
5351 {
5352 	const struct rte_flow_item_vlan *vlan_m = item->mask;
5353 	const struct rte_flow_item_vlan *vlan_v = item->spec;
5354 	void *headers_m;
5355 	void *headers_v;
5356 	uint16_t tci_m;
5357 	uint16_t tci_v;
5358 
5359 	if (!vlan_v)
5360 		return;
5361 	if (!vlan_m)
5362 		vlan_m = &rte_flow_item_vlan_mask;
5363 	if (inner) {
5364 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5365 					 inner_headers);
5366 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5367 	} else {
5368 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5369 					 outer_headers);
5370 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5371 		/*
5372 		 * This is a workaround: masks are not supported
5373 		 * and are pre-validated.
5374 		 */
5375 		dev_flow->dv.vf_vlan.tag =
5376 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5377 	}
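	/* TCI layout: PCP (3 bits) | DEI/CFI (1 bit) | VID (12 bits). */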
5378 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
5379 	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
5380 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5381 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5382 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5383 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5384 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5385 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5386 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5387 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
5388 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5389 		 rte_be_to_cpu_16(vlan_m->inner_type));
5390 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
5391 		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
5392 }
5393 
5394 /**
5395  * Add IPV4 item to matcher and to the value.
5396  *
5397  * @param[in, out] matcher
5398  *   Flow matcher.
5399  * @param[in, out] key
5400  *   Flow matcher value.
5401  * @param[in] item
5402  *   Flow pattern to translate.
5403  * @param[in] item_flags
5404  *   Bit-fields that holds the items detected until now.
5405  * @param[in] inner
5406  *   Item is inner pattern.
5407  * @param[in] group
5408  *   The group to insert the rule.
5409  */
5410 static void
5411 flow_dv_translate_item_ipv4(void *matcher, void *key,
5412 			    const struct rte_flow_item *item,
5413 			    const uint64_t item_flags,
5414 			    int inner, uint32_t group)
5415 {
5416 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
5417 	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
5418 	const struct rte_flow_item_ipv4 nic_mask = {
5419 		.hdr = {
5420 			.src_addr = RTE_BE32(0xffffffff),
5421 			.dst_addr = RTE_BE32(0xffffffff),
5422 			.type_of_service = 0xff,
5423 			.next_proto_id = 0xff,
5424 		},
5425 	};
5426 	void *headers_m;
5427 	void *headers_v;
5428 	char *l24_m;
5429 	char *l24_v;
5430 	uint8_t tos;
5431 
5432 	if (inner) {
5433 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5434 					 inner_headers);
5435 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5436 	} else {
5437 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5438 					 outer_headers);
5439 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5440 	}
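	/*
	 * The root table (group 0) only supports a fully set ip_version
	 * mask (0xf); non-root tables use the reduced mask set below.
	 */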
5441 	if (group == 0)
5442 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5443 	else
5444 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
5445 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
5446 	if (!ipv4_v)
5447 		return;
5448 	if (!ipv4_m)
5449 		ipv4_m = &nic_mask;
5450 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5451 			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5452 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5453 			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5454 	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
5455 	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
5456 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5457 			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
5458 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5459 			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
5460 	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
5461 	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
5462 	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
5463 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
5464 		 ipv4_m->hdr.type_of_service);
5465 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
5466 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
5467 		 ipv4_m->hdr.type_of_service >> 2);
5468 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
5469 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5470 		 ipv4_m->hdr.next_proto_id);
5471 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5472 		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
5473 	/*
5474 	 * On the outer header (which must contain L2), or an inner header
5475 	 * with L2, set the cvlan_tag mask bit to mark the packet as untagged.
5476 	 */
5477 	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
5478 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5479 }
5480 
5481 /**
5482  * Add IPV6 item to matcher and to the value.
5483  *
5484  * @param[in, out] matcher
5485  *   Flow matcher.
5486  * @param[in, out] key
5487  *   Flow matcher value.
5488  * @param[in] item
5489  *   Flow pattern to translate.
5490  * @param[in] item_flags
5491  *   Bit-fields that holds the items detected until now.
5492  * @param[in] inner
5493  *   Item is inner pattern.
5494  * @param[in] group
5495  *   The group to insert the rule.
5496  */
5497 static void
5498 flow_dv_translate_item_ipv6(void *matcher, void *key,
5499 			    const struct rte_flow_item *item,
5500 			    const uint64_t item_flags,
5501 			    int inner, uint32_t group)
5502 {
5503 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
5504 	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
5505 	const struct rte_flow_item_ipv6 nic_mask = {
5506 		.hdr = {
5507 			.src_addr =
5508 				"\xff\xff\xff\xff\xff\xff\xff\xff"
5509 				"\xff\xff\xff\xff\xff\xff\xff\xff",
5510 			.dst_addr =
5511 				"\xff\xff\xff\xff\xff\xff\xff\xff"
5512 				"\xff\xff\xff\xff\xff\xff\xff\xff",
5513 			.vtc_flow = RTE_BE32(0xffffffff),
5514 			.proto = 0xff,
5515 			.hop_limits = 0xff,
5516 		},
5517 	};
5518 	void *headers_m;
5519 	void *headers_v;
5520 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5521 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5522 	char *l24_m;
5523 	char *l24_v;
5524 	uint32_t vtc_m;
5525 	uint32_t vtc_v;
5526 	int i;
5527 	int size;
5528 
5529 	if (inner) {
5530 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5531 					 inner_headers);
5532 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5533 	} else {
5534 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5535 					 outer_headers);
5536 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5537 	}
5538 	if (group == 0)
5539 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5540 	else
5541 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
5542 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
5543 	if (!ipv6_v)
5544 		return;
5545 	if (!ipv6_m)
5546 		ipv6_m = &nic_mask;
5547 	size = sizeof(ipv6_m->hdr.dst_addr);
5548 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5549 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5550 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5551 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5552 	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
5553 	for (i = 0; i < size; ++i)
5554 		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
5555 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5556 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
5557 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5558 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
5559 	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
5560 	for (i = 0; i < size; ++i)
5561 		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
5562 	/* TOS: vtc_flow = version(4) | traffic class(8) | flow label(20). */
5563 	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
5564 	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
5565 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
5566 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
5567 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
5568 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
5569 	/* Label. */
5570 	if (inner) {
5571 		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
5572 			 vtc_m);
5573 		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
5574 			 vtc_v);
5575 	} else {
5576 		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
5577 			 vtc_m);
5578 		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
5579 			 vtc_v);
5580 	}
5581 	/* Protocol. */
5582 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5583 		 ipv6_m->hdr.proto);
5584 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5585 		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
5586 	/*
5587 	 * On the outer header (which must contain L2), or an inner header
5588 	 * with L2, set the cvlan_tag mask bit to mark the packet as untagged.
5589 	 */
5590 	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
5591 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5592 }
5593 
5594 /**
5595  * Add TCP item to matcher and to the value.
5596  *
5597  * @param[in, out] matcher
5598  *   Flow matcher.
5599  * @param[in, out] key
5600  *   Flow matcher value.
5601  * @param[in] item
5602  *   Flow pattern to translate.
5603  * @param[in] inner
5604  *   Item is inner pattern.
5605  */
5606 static void
5607 flow_dv_translate_item_tcp(void *matcher, void *key,
5608 			   const struct rte_flow_item *item,
5609 			   int inner)
5610 {
5611 	const struct rte_flow_item_tcp *tcp_m = item->mask;
5612 	const struct rte_flow_item_tcp *tcp_v = item->spec;
5613 	void *headers_m;
5614 	void *headers_v;
5615 
5616 	if (inner) {
5617 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5618 					 inner_headers);
5619 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5620 	} else {
5621 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5622 					 outer_headers);
5623 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5624 	}
5625 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5626 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
5627 	if (!tcp_v)
5628 		return;
5629 	if (!tcp_m)
5630 		tcp_m = &rte_flow_item_tcp_mask;
5631 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
5632 		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
5633 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
5634 		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
5635 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
5636 		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
5637 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
5638 		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
5639 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
5640 		 tcp_m->hdr.tcp_flags);
5641 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
5642 		 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
5643 }
5644 
5645 /**
5646  * Add UDP item to matcher and to the value.
5647  *
5648  * @param[in, out] matcher
5649  *   Flow matcher.
5650  * @param[in, out] key
5651  *   Flow matcher value.
5652  * @param[in] item
5653  *   Flow pattern to translate.
5654  * @param[in] inner
5655  *   Item is inner pattern.
5656  */
5657 static void
5658 flow_dv_translate_item_udp(void *matcher, void *key,
5659 			   const struct rte_flow_item *item,
5660 			   int inner)
5661 {
5662 	const struct rte_flow_item_udp *udp_m = item->mask;
5663 	const struct rte_flow_item_udp *udp_v = item->spec;
5664 	void *headers_m;
5665 	void *headers_v;
5666 
5667 	if (inner) {
5668 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5669 					 inner_headers);
5670 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5671 	} else {
5672 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5673 					 outer_headers);
5674 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5675 	}
5676 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5677 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
5678 	if (!udp_v)
5679 		return;
5680 	if (!udp_m)
5681 		udp_m = &rte_flow_item_udp_mask;
5682 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
5683 		 rte_be_to_cpu_16(udp_m->hdr.src_port));
5684 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
5685 		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
5686 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
5687 		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
5688 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5689 		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
5690 }
5691 
5692 /**
5693  * Add GRE optional Key item to matcher and to the value.
5694  *
5695  * @param[in, out] matcher
5696  *   Flow matcher.
5697  * @param[in, out] key
5698  *   Flow matcher value.
5699  * @param[in] item
5702  *   Item is inner pattern.
5703  */
5704 static void
5705 flow_dv_translate_item_gre_key(void *matcher, void *key,
5706 			       const struct rte_flow_item *item)
5707 {
5708 	const rte_be32_t *key_m = item->mask;
5709 	const rte_be32_t *key_v = item->spec;
5710 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5711 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5712 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5713 
5714 	/* GRE K bit must be on and should already be validated. */
5715 	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
5716 	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
5717 	if (!key_v)
5718 		return;
5719 	if (!key_m)
5720 		key_m = &gre_key_default_mask;
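	/*
	 * The 32-bit GRE key is split across two fields: the upper 24 bits
	 * go to gre_key_h, the lower 8 bits to gre_key_l.
	 */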
5721 	MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
5722 		 rte_be_to_cpu_32(*key_m) >> 8);
5723 	MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
5724 		 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
5725 	MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
5726 		 rte_be_to_cpu_32(*key_m) & 0xFF);
5727 	MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
5728 		 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
5729 }
5730 
5731 /**
5732  * Add GRE item to matcher and to the value.
5733  *
5734  * @param[in, out] matcher
5735  *   Flow matcher.
5736  * @param[in, out] key
5737  *   Flow matcher value.
5738  * @param[in] item
5739  *   Flow pattern to translate.
5740  * @param[in] inner
5741  *   Item is inner pattern.
5742  */
5743 static void
5744 flow_dv_translate_item_gre(void *matcher, void *key,
5745 			   const struct rte_flow_item *item,
5746 			   int inner)
5747 {
5748 	const struct rte_flow_item_gre *gre_m = item->mask;
5749 	const struct rte_flow_item_gre *gre_v = item->spec;
5750 	void *headers_m;
5751 	void *headers_v;
5752 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5753 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
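	/* Host-order decomposition of the GRE C/R/K/S/version halfword. */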
5754 	struct {
5755 		union {
5756 			__extension__
5757 			struct {
5758 				uint16_t version:3;
5759 				uint16_t rsvd0:9;
5760 				uint16_t s_present:1;
5761 				uint16_t k_present:1;
5762 				uint16_t rsvd_bit1:1;
5763 				uint16_t c_present:1;
5764 			};
5765 			uint16_t value;
5766 		};
5767 	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
5768 
5769 	if (inner) {
5770 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5771 					 inner_headers);
5772 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5773 	} else {
5774 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5775 					 outer_headers);
5776 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5777 	}
5778 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5779 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
5780 	if (!gre_v)
5781 		return;
5782 	if (!gre_m)
5783 		gre_m = &rte_flow_item_gre_mask;
5784 	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
5785 		 rte_be_to_cpu_16(gre_m->protocol));
5786 	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5787 		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
5788 	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
5789 	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
5790 	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
5791 		 gre_crks_rsvd0_ver_m.c_present);
5792 	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
5793 		 gre_crks_rsvd0_ver_v.c_present &
5794 		 gre_crks_rsvd0_ver_m.c_present);
5795 	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
5796 		 gre_crks_rsvd0_ver_m.k_present);
5797 	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
5798 		 gre_crks_rsvd0_ver_v.k_present &
5799 		 gre_crks_rsvd0_ver_m.k_present);
5800 	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
5801 		 gre_crks_rsvd0_ver_m.s_present);
5802 	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
5803 		 gre_crks_rsvd0_ver_v.s_present &
5804 		 gre_crks_rsvd0_ver_m.s_present);
5805 }
5806 
5807 /**
5808  * Add NVGRE item to matcher and to the value.
5809  *
5810  * @param[in, out] matcher
5811  *   Flow matcher.
5812  * @param[in, out] key
5813  *   Flow matcher value.
5814  * @param[in] item
5815  *   Flow pattern to translate.
5816  * @param[in] inner
5817  *   Item is inner pattern.
5818  */
5819 static void
5820 flow_dv_translate_item_nvgre(void *matcher, void *key,
5821 			     const struct rte_flow_item *item,
5822 			     int inner)
5823 {
5824 	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
5825 	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
5826 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5827 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5828 	const char *tni_flow_id_m = (const char *)nvgre_m->tni;
5829 	const char *tni_flow_id_v = (const char *)nvgre_v->tni;
5830 	char *gre_key_m;
5831 	char *gre_key_v;
5832 	int size;
5833 	int i;
5834 
5835 	/* For NVGRE, GRE header fields must be set with defined values. */
5836 	const struct rte_flow_item_gre gre_spec = {
5837 		.c_rsvd0_ver = RTE_BE16(0x2000),
5838 		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
5839 	};
5840 	const struct rte_flow_item_gre gre_mask = {
5841 		.c_rsvd0_ver = RTE_BE16(0xB000),
5842 		.protocol = RTE_BE16(UINT16_MAX),
5843 	};
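	/*
	 * 0x2000 sets the GRE K bit only; the 0xB000 mask covers the C, K
	 * and S bits, so NVGRE is matched as GRE with just a key present
	 * and the TEB protocol.
	 */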
5844 	const struct rte_flow_item gre_item = {
5845 		.spec = &gre_spec,
5846 		.mask = &gre_mask,
5847 		.last = NULL,
5848 	};
5849 	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
5850 	if (!nvgre_v)
5851 		return;
5852 	if (!nvgre_m)
5853 		nvgre_m = &rte_flow_item_nvgre_mask;
5854 	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
5855 	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
5856 	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
5857 	memcpy(gre_key_m, tni_flow_id_m, size);
5858 	for (i = 0; i < size; ++i)
5859 		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
5860 }
5861 
5862 /**
5863  * Add VXLAN item to matcher and to the value.
5864  *
5865  * @param[in, out] matcher
5866  *   Flow matcher.
5867  * @param[in, out] key
5868  *   Flow matcher value.
5869  * @param[in] item
5870  *   Flow pattern to translate.
5871  * @param[in] inner
5872  *   Item is inner pattern.
5873  */
5874 static void
5875 flow_dv_translate_item_vxlan(void *matcher, void *key,
5876 			     const struct rte_flow_item *item,
5877 			     int inner)
5878 {
5879 	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
5880 	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
5881 	void *headers_m;
5882 	void *headers_v;
5883 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5884 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5885 	char *vni_m;
5886 	char *vni_v;
5887 	uint16_t dport;
5888 	int size;
5889 	int i;
5890 
5891 	if (inner) {
5892 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5893 					 inner_headers);
5894 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5895 	} else {
5896 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5897 					 outer_headers);
5898 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5899 	}
5900 	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
5901 		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
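	/*
	 * Imply the well-known tunnel UDP destination port only when the
	 * preceding UDP item did not already set an explicit match.
	 */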
5902 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5903 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5904 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
5905 	}
5906 	if (!vxlan_v)
5907 		return;
5908 	if (!vxlan_m)
5909 		vxlan_m = &rte_flow_item_vxlan_mask;
5910 	size = sizeof(vxlan_m->vni);
5911 	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
5912 	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
5913 	memcpy(vni_m, vxlan_m->vni, size);
5914 	for (i = 0; i < size; ++i)
5915 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
5916 }
5917 
5918 /**
5919  * Add VXLAN-GPE item to matcher and to the value.
5920  *
5921  * @param[in, out] matcher
5922  *   Flow matcher.
5923  * @param[in, out] key
5924  *   Flow matcher value.
5925  * @param[in] item
5926  *   Flow pattern to translate.
5927  * @param[in] inner
5928  *   Item is inner pattern.
5929  */
5931 static void
5932 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
5933 				 const struct rte_flow_item *item, int inner)
5934 {
5935 	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
5936 	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
5937 	void *headers_m;
5938 	void *headers_v;
5939 	void *misc_m =
5940 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
5941 	void *misc_v =
5942 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5943 	char *vni_m;
5944 	char *vni_v;
5945 	uint16_t dport;
5946 	int size;
5947 	int i;
5948 	uint8_t flags_m = 0xff;
5949 	uint8_t flags_v = 0xc;
5950 
5951 	if (inner) {
5952 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5953 					 inner_headers);
5954 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5955 	} else {
5956 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5957 					 outer_headers);
5958 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5959 	}
5960 	/* Only the VXLAN-GPE item lands here, match its well-known port. */
5961 	dport = MLX5_UDP_PORT_VXLAN_GPE;
5962 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5963 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5964 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
5965 	}
5966 	if (!vxlan_v)
5967 		return;
5968 	if (!vxlan_m)
5969 		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
5970 	size = sizeof(vxlan_m->vni);
5971 	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
5972 	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
5973 	memcpy(vni_m, vxlan_m->vni, size);
5974 	for (i = 0; i < size; ++i)
5975 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
5976 	if (vxlan_m->flags) {
5977 		flags_m = vxlan_m->flags;
5978 		flags_v = vxlan_v->flags;
5979 	}
5980 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
5981 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
5982 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
5983 		 vxlan_m->protocol);
5984 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
5985 		 vxlan_v->protocol);
5986 }
5987 
5988 /**
5989  * Add Geneve item to matcher and to the value.
5990  *
5991  * @param[in, out] matcher
5992  *   Flow matcher.
5993  * @param[in, out] key
5994  *   Flow matcher value.
5995  * @param[in] item
5996  *   Flow pattern to translate.
5997  * @param[in] inner
5998  *   Item is inner pattern.
5999  */
6001 static void
6002 flow_dv_translate_item_geneve(void *matcher, void *key,
6003 			      const struct rte_flow_item *item, int inner)
6004 {
6005 	const struct rte_flow_item_geneve *geneve_m = item->mask;
6006 	const struct rte_flow_item_geneve *geneve_v = item->spec;
6007 	void *headers_m;
6008 	void *headers_v;
6009 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6010 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6011 	uint16_t dport;
6012 	uint16_t gbhdr_m;
6013 	uint16_t gbhdr_v;
6014 	char *vni_m;
6015 	char *vni_v;
6016 	size_t size, i;
6017 
6018 	if (inner) {
6019 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6020 					 inner_headers);
6021 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6022 	} else {
6023 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6024 					 outer_headers);
6025 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6026 	}
6027 	dport = MLX5_UDP_PORT_GENEVE;
6028 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6029 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6030 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6031 	}
6032 	if (!geneve_v)
6033 		return;
6034 	if (!geneve_m)
6035 		geneve_m = &rte_flow_item_geneve_mask;
6036 	size = sizeof(geneve_m->vni);
6037 	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6038 	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6039 	memcpy(vni_m, geneve_m->vni, size);
6040 	for (i = 0; i < size; ++i)
6041 		vni_v[i] = vni_m[i] & geneve_v->vni[i];
6042 	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6043 		 rte_be_to_cpu_16(geneve_m->protocol));
6044 	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6045 		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6046 	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6047 	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
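	/*
	 * ver_opt_len_o_c_rsvd0 packs the Geneve version, option length,
	 * OAM and critical-option bits; the helpers below extract the OAM
	 * flag and the option length fields.
	 */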
6048 	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6049 		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6050 	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6051 		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6052 	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6053 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6054 	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6055 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6056 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6057 }
6058 
6059 /**
6060  * Add MPLS item to matcher and to the value.
6061  *
6062  * @param[in, out] matcher
6063  *   Flow matcher.
6064  * @param[in, out] key
6065  *   Flow matcher value.
6066  * @param[in] item
6067  *   Flow pattern to translate.
6068  * @param[in] prev_layer
6069  *   The protocol layer indicated in previous item.
6070  * @param[in] inner
6071  *   Item is inner pattern.
6072  */
6073 static void
6074 flow_dv_translate_item_mpls(void *matcher, void *key,
6075 			    const struct rte_flow_item *item,
6076 			    uint64_t prev_layer,
6077 			    int inner)
6078 {
6079 	const uint32_t *in_mpls_m = item->mask;
6080 	const uint32_t *in_mpls_v = item->spec;
6081 	uint32_t *out_mpls_m = NULL;
6082 	uint32_t *out_mpls_v = NULL;
6083 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6084 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6085 	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6086 				     misc_parameters_2);
6087 	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6088 	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6089 	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6090 
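	/*
	 * Pin the encapsulating protocol implied by the previous layer:
	 * MPLS over UDP, MPLS over GRE, or MPLS directly over IP.
	 */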
6091 	switch (prev_layer) {
6092 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6093 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6094 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6095 			 MLX5_UDP_PORT_MPLS);
6096 		break;
6097 	case MLX5_FLOW_LAYER_GRE:
6098 		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6099 		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6100 			 RTE_ETHER_TYPE_MPLS);
6101 		break;
6102 	default:
6103 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6104 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6105 			 IPPROTO_MPLS);
6106 		break;
6107 	}
6108 	if (!in_mpls_v)
6109 		return;
6110 	if (!in_mpls_m)
6111 		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6112 	switch (prev_layer) {
6113 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6114 		out_mpls_m =
6115 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6116 						 outer_first_mpls_over_udp);
6117 		out_mpls_v =
6118 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6119 						 outer_first_mpls_over_udp);
6120 		break;
6121 	case MLX5_FLOW_LAYER_GRE:
6122 		out_mpls_m =
6123 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6124 						 outer_first_mpls_over_gre);
6125 		out_mpls_v =
6126 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6127 						 outer_first_mpls_over_gre);
6128 		break;
6129 	default:
6130 		/* Inner MPLS not over GRE is not supported. */
6131 		if (!inner) {
6132 			out_mpls_m =
6133 				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6134 							 misc2_m,
6135 							 outer_first_mpls);
6136 			out_mpls_v =
6137 				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6138 							 misc2_v,
6139 							 outer_first_mpls);
6140 		}
6141 		break;
6142 	}
6143 	if (out_mpls_m && out_mpls_v) {
6144 		*out_mpls_m = *in_mpls_m;
6145 		*out_mpls_v = *in_mpls_v & *in_mpls_m;
6146 	}
6147 }
6148 
6149 /**
6150  * Add metadata register item to matcher
6151  *
6152  * @param[in, out] matcher
6153  *   Flow matcher.
6154  * @param[in, out] key
6155  *   Flow matcher value.
6156  * @param[in] reg_type
6157  *   Type of device metadata register
6158  * @param[in] data
6159  *   Register value
6160  * @param[in] mask
6161  *   Register mask
6162  */
6163 static void
6164 flow_dv_match_meta_reg(void *matcher, void *key,
6165 		       enum modify_reg reg_type,
6166 		       uint32_t data, uint32_t mask)
6167 {
6168 	void *misc2_m =
6169 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6170 	void *misc2_v =
6171 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6172 	uint32_t temp;
6173 
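	/* Bits outside the mask can never match, clear them up front. */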
6174 	data &= mask;
6175 	switch (reg_type) {
6176 	case REG_A:
6177 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
6178 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
6179 		break;
6180 	case REG_B:
6181 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
6182 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6183 		break;
6184 	case REG_C_0:
6185 		/*
6186 		 * The metadata register C0 field might be divided into
6187 		 * source vport index and META item value, we should set
6188 		 * this field according to the specified mask, not as a whole.
6189 		 */
6190 		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6191 		temp |= mask;
6192 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6193 		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6194 		temp &= ~mask;
6195 		temp |= data;
6196 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6197 		break;
6198 	case REG_C_1:
6199 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6200 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6201 		break;
6202 	case REG_C_2:
6203 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6204 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6205 		break;
6206 	case REG_C_3:
6207 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6208 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6209 		break;
6210 	case REG_C_4:
6211 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6212 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6213 		break;
6214 	case REG_C_5:
6215 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6216 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6217 		break;
6218 	case REG_C_6:
6219 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6220 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6221 		break;
6222 	case REG_C_7:
6223 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6224 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6225 		break;
6226 	default:
6227 		MLX5_ASSERT(false);
6228 		break;
6229 	}
6230 }
6231 
6232 /**
6233  * Add MARK item to matcher
6234  *
6235  * @param[in] dev
6236  *   The device to configure through.
6237  * @param[in, out] matcher
6238  *   Flow matcher.
6239  * @param[in, out] key
6240  *   Flow matcher value.
6241  * @param[in] item
6242  *   Flow pattern to translate.
6243  */
6244 static void
6245 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6246 			    void *matcher, void *key,
6247 			    const struct rte_flow_item *item)
6248 {
6249 	struct mlx5_priv *priv = dev->data->dev_private;
6250 	const struct rte_flow_item_mark *mark;
6251 	uint32_t value;
6252 	uint32_t mask;
6253 
6254 	mark = item->mask ? (const void *)item->mask :
6255 			    &rte_flow_item_mark_mask;
6256 	mask = mark->id & priv->sh->dv_mark_mask;
6257 	mark = (const void *)item->spec;
6258 	MLX5_ASSERT(mark);
6259 	value = mark->id & priv->sh->dv_mark_mask & mask;
6260 	if (mask) {
6261 		enum modify_reg reg;
6262 
6263 		/* Get the metadata register index for the mark. */
6264 		reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6265 		MLX5_ASSERT(reg > 0);
6266 		if (reg == REG_C_0) {
6267 			struct mlx5_priv *priv = dev->data->dev_private;
6268 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6269 			uint32_t shl_c0 = rte_bsf32(msk_c0);
6270 
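			/*
			 * REG_C_0 may be shared with vport metadata; only
			 * the bits in dv_regc0_mask are available for MARK,
			 * so align the value and mask to that sub-field
			 * (starting at the lowest set mask bit).
			 */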
6271 			mask &= msk_c0;
6272 			mask <<= shl_c0;
6273 			value <<= shl_c0;
6274 		}
6275 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6276 	}
6277 }
6278 
6279 /**
6280  * Add META item to matcher
6281  *
6282  * @param[in] dev
6283  *   The device to configure through.
6284  * @param[in, out] matcher
6285  *   Flow matcher.
6286  * @param[in, out] key
6287  *   Flow matcher value.
6288  * @param[in] attr
6289  *   Attributes of flow that includes this item.
6290  * @param[in] item
6291  *   Flow pattern to translate.
6292  */
6293 static void
6294 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6295 			    void *matcher, void *key,
6296 			    const struct rte_flow_attr *attr,
6297 			    const struct rte_flow_item *item)
6298 {
6299 	const struct rte_flow_item_meta *meta_m;
6300 	const struct rte_flow_item_meta *meta_v;
6301 
6302 	meta_m = (const void *)item->mask;
6303 	if (!meta_m)
6304 		meta_m = &rte_flow_item_meta_mask;
6305 	meta_v = (const void *)item->spec;
6306 	if (meta_v) {
6307 		int reg;
6308 		uint32_t value = meta_v->data;
6309 		uint32_t mask = meta_m->data;
6310 
6311 		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6312 		if (reg < 0)
6313 			return;
6314 		/*
6315 		 * In datapath code there are no endianness
6316 		 * conversions for performance reasons, all
6317 		 * pattern conversions are done in rte_flow.
6318 		 */
6319 		value = rte_cpu_to_be_32(value);
6320 		mask = rte_cpu_to_be_32(mask);
6321 		if (reg == REG_C_0) {
6322 			struct mlx5_priv *priv = dev->data->dev_private;
6323 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6324 			uint32_t shl_c0 = rte_bsf32(msk_c0);
6325 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6326 			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
6327 
6328 			value >>= shr_c0;
6329 			mask >>= shr_c0;
6330 #endif
6331 			value <<= shl_c0;
6332 			mask <<= shl_c0;
6333 			MLX5_ASSERT(msk_c0);
6334 			MLX5_ASSERT(!(~msk_c0 & mask));
6335 		}
6336 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6337 	}
6338 }
6339 
6340 /**
6341  * Add vport metadata Reg C0 item to matcher
6342  *
6343  * @param[in, out] matcher
6344  *   Flow matcher.
6345  * @param[in, out] key
6346  *   Flow matcher value.
6347  * @param[in] value
6348  *   REG_C_0 value to match.
 * @param[in] mask
 *   REG_C_0 mask.
6349  */
6350 static void
6351 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6352 				  uint32_t value, uint32_t mask)
6353 {
6354 	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6355 }
6356 
6357 /**
6358  * Add tag item to matcher
6359  *
6360  * @param[in] dev
6361  *   The device to configure through.
6362  * @param[in, out] matcher
6363  *   Flow matcher.
6364  * @param[in, out] key
6365  *   Flow matcher value.
6366  * @param[in] item
6367  *   Flow pattern to translate.
6368  */
6369 static void
6370 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6371 				void *matcher, void *key,
6372 				const struct rte_flow_item *item)
6373 {
6374 	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
6375 	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
6376 	uint32_t mask, value;
6377 
6378 	MLX5_ASSERT(tag_v);
6379 	value = tag_v->data;
6380 	mask = tag_m ? tag_m->data : UINT32_MAX;
6381 	if (tag_v->id == REG_C_0) {
6382 		struct mlx5_priv *priv = dev->data->dev_private;
6383 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6384 		uint32_t shl_c0 = rte_bsf32(msk_c0);
6385 
6386 		mask &= msk_c0;
6387 		mask <<= shl_c0;
6388 		value <<= shl_c0;
6389 	}
6390 	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
6391 }
6392 
6393 /**
6394  * Add TAG item to matcher
6395  *
6396  * @param[in] dev
6397  *   The device to configure through.
6398  * @param[in, out] matcher
6399  *   Flow matcher.
6400  * @param[in, out] key
6401  *   Flow matcher value.
6402  * @param[in] item
6403  *   Flow pattern to translate.
6404  */
6405 static void
6406 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
6407 			   void *matcher, void *key,
6408 			   const struct rte_flow_item *item)
6409 {
6410 	const struct rte_flow_item_tag *tag_v = item->spec;
6411 	const struct rte_flow_item_tag *tag_m = item->mask;
6412 	enum modify_reg reg;
6413 
6414 	MLX5_ASSERT(tag_v);
6415 	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
6416 	/* Get the metadata register index for the tag. */
6417 	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
6418 	MLX5_ASSERT(reg > 0);
6419 	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
6420 }
6421 
6422 /**
6423  * Add source vport match to the specified matcher.
6424  *
6425  * @param[in, out] matcher
6426  *   Flow matcher.
6427  * @param[in, out] key
6428  *   Flow matcher value.
6429  * @param[in] port
6430  *   Source vport value to match
6431  * @param[in] mask
6432  *   Mask
6433  */
6434 static void
6435 flow_dv_translate_item_source_vport(void *matcher, void *key,
6436 				    int16_t port, uint16_t mask)
6437 {
6438 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6439 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6440 
6441 	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
6442 	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
6443 }
6444 
6445 /**
6446  * Translate port-id item to eswitch match on port-id.
6447  *
6448  * @param[in] dev
6449  *   The device to configure through.
6450  * @param[in, out] matcher
6451  *   Flow matcher.
6452  * @param[in, out] key
6453  *   Flow matcher value.
6454  * @param[in] item
6455  *   Flow pattern to translate.
6456  *
6457  * @return
6458  *   0 on success, a negative errno value otherwise.
6459  */
6460 static int
6461 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
6462 			       void *key, const struct rte_flow_item *item)
6463 {
6464 	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
6465 	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
6466 	struct mlx5_priv *priv;
6467 	uint16_t mask, id;
6468 
6469 	mask = pid_m ? pid_m->id : 0xffff;
6470 	id = pid_v ? pid_v->id : dev->data->port_id;
6471 	priv = mlx5_port_to_eswitch_info(id, item == NULL);
6472 	if (!priv)
6473 		return -rte_errno;
6474 	/* Translate to vport field or to metadata, depending on mode. */
6475 	if (priv->vport_meta_mask)
6476 		flow_dv_translate_item_meta_vport(matcher, key,
6477 						  priv->vport_meta_tag,
6478 						  priv->vport_meta_mask);
6479 	else
6480 		flow_dv_translate_item_source_vport(matcher, key,
6481 						    priv->vport_id, mask);
6482 	return 0;
6483 }
6484 
6485 /**
6486  * Add ICMP6 item to matcher and to the value.
6487  *
6488  * @param[in, out] matcher
6489  *   Flow matcher.
6490  * @param[in, out] key
6491  *   Flow matcher value.
6492  * @param[in] item
6493  *   Flow pattern to translate.
6494  * @param[in] inner
6495  *   Item is inner pattern.
6496  */
6497 static void
6498 flow_dv_translate_item_icmp6(void *matcher, void *key,
6499 			      const struct rte_flow_item *item,
6500 			      int inner)
6501 {
6502 	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
6503 	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
6504 	void *headers_m;
6505 	void *headers_v;
6506 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6507 				     misc_parameters_3);
6508 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6509 	if (inner) {
6510 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6511 					 inner_headers);
6512 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6513 	} else {
6514 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6515 					 outer_headers);
6516 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6517 	}
6518 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6519 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
6520 	if (!icmp6_v)
6521 		return;
6522 	if (!icmp6_m)
6523 		icmp6_m = &rte_flow_item_icmp6_mask;
6524 	/*
6525 	 * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
6526 	 * If only the protocol is specified, no need to match the frag.
6527 	 */
6528 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6529 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
6530 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
6531 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
6532 		 icmp6_v->type & icmp6_m->type);
6533 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
6534 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
6535 		 icmp6_v->code & icmp6_m->code);
6536 }
6537 
6538 /**
6539  * Add ICMP item to matcher and to the value.
6540  *
6541  * @param[in, out] matcher
6542  *   Flow matcher.
6543  * @param[in, out] key
6544  *   Flow matcher value.
6545  * @param[in] item
6546  *   Flow pattern to translate.
6547  * @param[in] inner
6548  *   Item is inner pattern.
6549  */
6550 static void
6551 flow_dv_translate_item_icmp(void *matcher, void *key,
6552 			    const struct rte_flow_item *item,
6553 			    int inner)
6554 {
6555 	const struct rte_flow_item_icmp *icmp_m = item->mask;
6556 	const struct rte_flow_item_icmp *icmp_v = item->spec;
6557 	void *headers_m;
6558 	void *headers_v;
6559 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6560 				     misc_parameters_3);
6561 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6562 	if (inner) {
6563 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6564 					 inner_headers);
6565 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6566 	} else {
6567 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6568 					 outer_headers);
6569 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6570 	}
6571 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6572 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
6573 	if (!icmp_v)
6574 		return;
6575 	if (!icmp_m)
6576 		icmp_m = &rte_flow_item_icmp_mask;
6577 	/*
6578 	 * Force flow only to match the non-fragmented IPv4 ICMP packets.
6579 	 * If only the protocol is specified, no need to match the frag.
6580 	 */
6581 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6582 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
6583 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
6584 		 icmp_m->hdr.icmp_type);
6585 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
6586 		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
6587 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
6588 		 icmp_m->hdr.icmp_code);
6589 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
6590 		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
6591 }
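
/*
 * Usage sketch (illustrative only, not part of the driver): unlike the
 * ICMPv6 item, the ICMPv4 item wraps the full protocol header, so type and
 * code live under hdr. Matching an echo request (type 8) is a hypothetical
 * example choice.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static const struct rte_flow_item_icmp example_icmp_spec = {
	.hdr.icmp_type = 8, /* Hypothetical: ICMP echo request. */
	.hdr.icmp_code = 0,
};
static const struct rte_flow_item_icmp example_icmp_mask = {
	.hdr.icmp_type = 0xff,
	.hdr.icmp_code = 0xff,
};
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */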
6592 
6593 /**
6594  * Add GTP item to matcher and to the value.
6595  *
6596  * @param[in, out] matcher
6597  *   Flow matcher.
6598  * @param[in, out] key
6599  *   Flow matcher value.
6600  * @param[in] item
6601  *   Flow pattern to translate.
6602  * @param[in] inner
6603  *   Item is inner pattern.
6604  */
6605 static void
6606 flow_dv_translate_item_gtp(void *matcher, void *key,
6607 			   const struct rte_flow_item *item, int inner)
6608 {
6609 	const struct rte_flow_item_gtp *gtp_m = item->mask;
6610 	const struct rte_flow_item_gtp *gtp_v = item->spec;
6611 	void *headers_m;
6612 	void *headers_v;
6613 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6614 				     misc_parameters_3);
6615 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6616 	uint16_t dport = RTE_GTPU_UDP_PORT;
6617 
6618 	if (inner) {
6619 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6620 					 inner_headers);
6621 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6622 	} else {
6623 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6624 					 outer_headers);
6625 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6626 	}
6627 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6628 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6629 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6630 	}
6631 	if (!gtp_v)
6632 		return;
6633 	if (!gtp_m)
6634 		gtp_m = &rte_flow_item_gtp_mask;
6635 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
6636 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
6637 		 gtp_v->msg_type & gtp_m->msg_type);
6638 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
6639 		 rte_be_to_cpu_32(gtp_m->teid));
6640 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
6641 		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
6642 }
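
/*
 * Usage sketch (illustrative only, not part of the driver): a GTP-U item as
 * this translator consumes it. When the pattern leaves the UDP destination
 * port unspecified, the translator pins it to RTE_GTPU_UDP_PORT. The message
 * type and TEID below are hypothetical values.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static const struct rte_flow_item_gtp example_gtp_spec = {
	.msg_type = 0xff, /* Hypothetical: G-PDU. */
	.teid = RTE_BE32(0x1234), /* Hypothetical tunnel endpoint id. */
};
static const struct rte_flow_item_gtp example_gtp_mask = {
	.msg_type = 0xff,
	.teid = RTE_BE32(0xffffffff),
};
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */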
6643 
6644 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
6645 
6646 #define HEADER_IS_ZERO(match_criteria, headers)				     \
6647 	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
6648 		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
6649 
6650 /**
6651  * Calculate flow matcher enable bitmap.
6652  *
6653  * @param match_criteria
6654  *   Pointer to flow matcher criteria.
6655  *
6656  * @return
6657  *   Bitmap of enabled fields.
6658  */
6659 static uint8_t
6660 flow_dv_matcher_enable(uint32_t *match_criteria)
6661 {
6662 	uint8_t match_criteria_enable;
6663 
6664 	match_criteria_enable =
6665 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
6666 		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
6667 	match_criteria_enable |=
6668 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
6669 		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
6670 	match_criteria_enable |=
6671 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
6672 		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
6673 	match_criteria_enable |=
6674 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
6675 		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
6676 	match_criteria_enable |=
6677 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
6678 		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
6679 	return match_criteria_enable;
6680 }
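
/*
 * Worked example (informal sketch): a matcher whose mask touches only the
 * outer headers and misc_parameters_3 (e.g. outer IPv4 plus ICMP) enables
 * exactly the OUTER and MISC3 bits. Assuming the conventional PRM bit order
 * (outer = 0, misc = 1, inner = 2, misc2 = 3, misc3 = 4) the result is 0x11.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static uint8_t
example_outer_misc3_enable(void)
{
	return (1u << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
	       (1u << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT); /* 0x11 */
}
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */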
6681 
6682 
6683 /**
6684  * Get a flow table.
6685  *
6686  * @param[in, out] dev
6687  *   Pointer to rte_eth_dev structure.
6688  * @param[in] table_id
6689  *   Table id to use.
6690  * @param[in] egress
6691  *   Direction of the table.
6692  * @param[in] transfer
6693  *   E-Switch or NIC flow.
6694  * @param[out] error
6695  *   Pointer to error structure.
6696  *
6697  * @return
6698  *   Returns the table resource based on the index, NULL in case of failure.
6699  */
6700 static struct mlx5_flow_tbl_resource *
6701 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
6702 			 uint32_t table_id, uint8_t egress,
6703 			 uint8_t transfer,
6704 			 struct rte_flow_error *error)
6705 {
6706 	struct mlx5_priv *priv = dev->data->dev_private;
6707 	struct mlx5_ibv_shared *sh = priv->sh;
6708 	struct mlx5_flow_tbl_resource *tbl;
6709 	union mlx5_flow_tbl_key table_key = {
6710 		{
6711 			.table_id = table_id,
6712 			.reserved = 0,
6713 			.domain = !!transfer,
6714 			.direction = !!egress,
6715 		}
6716 	};
6717 	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
6718 							 table_key.v64);
6719 	struct mlx5_flow_tbl_data_entry *tbl_data;
6720 	int ret;
6721 	void *domain;
6722 
6723 	if (pos) {
6724 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
6725 					entry);
6726 		tbl = &tbl_data->tbl;
6727 		rte_atomic32_inc(&tbl->refcnt);
6728 		return tbl;
6729 	}
6730 	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
6731 	if (!tbl_data) {
6732 		rte_flow_error_set(error, ENOMEM,
6733 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6734 				   NULL,
6735 				   "cannot allocate flow table data entry");
6736 		return NULL;
6737 	}
6738 	tbl = &tbl_data->tbl;
6739 	pos = &tbl_data->entry;
6740 	if (transfer)
6741 		domain = sh->fdb_domain;
6742 	else if (egress)
6743 		domain = sh->tx_domain;
6744 	else
6745 		domain = sh->rx_domain;
6746 	tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
6747 	if (!tbl->obj) {
6748 		rte_flow_error_set(error, ENOMEM,
6749 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6750 				   NULL, "cannot create flow table object");
6751 		rte_free(tbl_data);
6752 		return NULL;
6753 	}
6754 	/*
6755 	 * No multi-threading for now, but it is still better to initialize
6756 	 * the reference count before inserting it into the hash list.
6757 	 */
6758 	rte_atomic32_init(&tbl->refcnt);
6759 	/* Jump action reference count is initialized here. */
6760 	rte_atomic32_init(&tbl_data->jump.refcnt);
6761 	pos->key = table_key.v64;
6762 	ret = mlx5_hlist_insert(sh->flow_tbls, pos);
6763 	if (ret < 0) {
6764 		rte_flow_error_set(error, -ret,
6765 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6766 				   "cannot insert flow table data entry");
6767 		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6768 		rte_free(tbl_data);
		/* Do not hand the freed entry back to the caller. */
		return NULL;
6769 	}
6770 	rte_atomic32_inc(&tbl->refcnt);
6771 	return tbl;
6772 }
6773 
6774 /**
6775  * Release a flow table.
6776  *
6777  * @param[in] dev
6778  *   Pointer to rte_eth_dev structure.
6779  * @param[in] tbl
6780  *   Table resource to be released.
6781  *
6782  * @return
6783  *   Returns 0 if the table was released, 1 otherwise.
6784  */
6785 static int
6786 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
6787 			     struct mlx5_flow_tbl_resource *tbl)
6788 {
6789 	struct mlx5_priv *priv = dev->data->dev_private;
6790 	struct mlx5_ibv_shared *sh = priv->sh;
6791 	struct mlx5_flow_tbl_data_entry *tbl_data =
6792 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6793 
6794 	if (!tbl)
6795 		return 0;
6796 	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
6797 		struct mlx5_hlist_entry *pos = &tbl_data->entry;
6798 
6799 		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6800 		tbl->obj = NULL;
6801 		/* remove the entry from the hash list and free memory. */
6802 		mlx5_hlist_remove(sh->flow_tbls, pos);
6803 		rte_free(tbl_data);
6804 		return 0;
6805 	}
6806 	return 1;
6807 }
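
/*
 * Usage sketch (illustrative only, not part of the driver): table resources
 * are reference counted, so every successful flow_dv_tbl_resource_get() must
 * be balanced by one flow_dv_tbl_resource_release(). The table id below is
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static int
example_touch_table(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_flow_tbl_resource *tbl;

	/* Hypothetical NIC Rx table 1: egress = 0, transfer = 0. */
	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, error);
	if (!tbl)
		return -rte_errno;
	/* ... use tbl->obj while the reference is held ... */
	return flow_dv_tbl_resource_release(dev, tbl);
}
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */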
6808 
6809 /**
6810  * Register the flow matcher.
6811  *
6812  * @param[in, out] dev
6813  *   Pointer to rte_eth_dev structure.
6814  * @param[in, out] matcher
6815  *   Pointer to flow matcher.
6816  * @param[in, out] key
6817  *   Pointer to flow table key.
6818  * @param[in, out] dev_flow
6819  *   Pointer to the dev_flow.
6820  * @param[out] error
6821  *   Pointer to error structure.
6822  *
6823  * @return
6824  *   0 on success, a negative errno value otherwise and rte_errno is set.
6825  */
6826 static int
6827 flow_dv_matcher_register(struct rte_eth_dev *dev,
6828 			 struct mlx5_flow_dv_matcher *matcher,
6829 			 union mlx5_flow_tbl_key *key,
6830 			 struct mlx5_flow *dev_flow,
6831 			 struct rte_flow_error *error)
6832 {
6833 	struct mlx5_priv *priv = dev->data->dev_private;
6834 	struct mlx5_ibv_shared *sh = priv->sh;
6835 	struct mlx5_flow_dv_matcher *cache_matcher;
6836 	struct mlx5dv_flow_matcher_attr dv_attr = {
6837 		.type = IBV_FLOW_ATTR_NORMAL,
6838 		.match_mask = (void *)&matcher->mask,
6839 	};
6840 	struct mlx5_flow_tbl_resource *tbl;
6841 	struct mlx5_flow_tbl_data_entry *tbl_data;
6842 
6843 	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
6844 				       key->domain, error);
6845 	if (!tbl)
6846 		return -rte_errno;	/* No need to refill the error info */
6847 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6848 	/* Lookup from cache. */
6849 	LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
6850 		if (matcher->crc == cache_matcher->crc &&
6851 		    matcher->priority == cache_matcher->priority &&
6852 		    !memcmp((const void *)matcher->mask.buf,
6853 			    (const void *)cache_matcher->mask.buf,
6854 			    cache_matcher->mask.size)) {
6855 			DRV_LOG(DEBUG,
6856 				"%s group %u priority %hd use %s "
6857 				"matcher %p: refcnt %d++",
6858 				key->domain ? "FDB" : "NIC", key->table_id,
6859 				cache_matcher->priority,
6860 				key->direction ? "tx" : "rx",
6861 				(void *)cache_matcher,
6862 				rte_atomic32_read(&cache_matcher->refcnt));
6863 			rte_atomic32_inc(&cache_matcher->refcnt);
6864 			dev_flow->dv.matcher = cache_matcher;
6865 			/* old matcher should not make the table ref++. */
6866 			flow_dv_tbl_resource_release(dev, tbl);
6867 			return 0;
6868 		}
6869 	}
6870 	/* Register new matcher. */
6871 	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
6872 	if (!cache_matcher) {
6873 		flow_dv_tbl_resource_release(dev, tbl);
6874 		return rte_flow_error_set(error, ENOMEM,
6875 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6876 					  "cannot allocate matcher memory");
6877 	}
6878 	*cache_matcher = *matcher;
6879 	dv_attr.match_criteria_enable =
6880 		flow_dv_matcher_enable(cache_matcher->mask.buf);
6881 	dv_attr.priority = matcher->priority;
6882 	if (key->direction)
6883 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
6884 	cache_matcher->matcher_object =
6885 		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
6886 	if (!cache_matcher->matcher_object) {
6887 		rte_free(cache_matcher);
6888 #ifdef HAVE_MLX5DV_DR
6889 		flow_dv_tbl_resource_release(dev, tbl);
6890 #endif
6891 		return rte_flow_error_set(error, ENOMEM,
6892 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6893 					  NULL, "cannot create matcher");
6894 	}
6895 	/* Save the table information */
6896 	cache_matcher->tbl = tbl;
6897 	rte_atomic32_init(&cache_matcher->refcnt);
6898 	/* Only matcher ref++; table ref++ was already done in the get API. */
6899 	rte_atomic32_inc(&cache_matcher->refcnt);
6900 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
6901 	dev_flow->dv.matcher = cache_matcher;
6902 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
6903 		key->domain ? "FDB" : "NIC", key->table_id,
6904 		cache_matcher->priority,
6905 		key->direction ? "tx" : "rx", (void *)cache_matcher,
6906 		rte_atomic32_read(&cache_matcher->refcnt));
6907 	return 0;
6908 }
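
/*
 * Usage sketch (illustrative only): how __flow_dv_translate() below prepares
 * the lookup key before calling flow_dv_matcher_register(). The CRC of the
 * mask plus the priority identify a matcher inside its table.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static void
example_prepare_matcher_key(struct mlx5_flow_dv_matcher *matcher,
			    union mlx5_flow_tbl_key *tbl_key,
			    const struct rte_flow_attr *attr, uint32_t group)
{
	matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
				     matcher->mask.size);
	tbl_key->domain = attr->transfer;
	tbl_key->direction = attr->egress;
	tbl_key->table_id = group;
}
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */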
6909 
6910 /**
6911  * Find existing tag resource or create and register a new one.
6912  *
6913  * @param[in, out] dev
6914  *   Pointer to rte_eth_dev structure.
6915  * @param[in] tag_be24
6916  *   Tag value in big-endian, then right-shifted by 8 bits.
6917  * @param[in, out] dev_flow
6918  *   Pointer to the dev_flow.
6919  * @param[out] error
6920  *   Pointer to error structure.
6921  *
6922  * @return
6923  *   0 on success, a negative errno value otherwise and rte_errno is set.
6924  */
6925 static int
6926 flow_dv_tag_resource_register
6927 			(struct rte_eth_dev *dev,
6928 			 uint32_t tag_be24,
6929 			 struct mlx5_flow *dev_flow,
6930 			 struct rte_flow_error *error)
6931 {
6932 	struct mlx5_priv *priv = dev->data->dev_private;
6933 	struct mlx5_ibv_shared *sh = priv->sh;
6934 	struct mlx5_flow_dv_tag_resource *cache_resource;
6935 	struct mlx5_hlist_entry *entry;
6936 
6937 	/* Lookup a matching resource from cache. */
6938 	entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
6939 	if (entry) {
6940 		cache_resource = container_of
6941 			(entry, struct mlx5_flow_dv_tag_resource, entry);
6942 		rte_atomic32_inc(&cache_resource->refcnt);
6943 		dev_flow->dv.tag_resource = cache_resource;
6944 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
6945 			(void *)cache_resource,
6946 			rte_atomic32_read(&cache_resource->refcnt));
6947 		return 0;
6948 	}
6949 	/* Register new resource. */
6950 	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
6951 	if (!cache_resource)
6952 		return rte_flow_error_set(error, ENOMEM,
6953 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6954 					  "cannot allocate resource memory");
6955 	cache_resource->entry.key = (uint64_t)tag_be24;
6956 	cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
6957 	if (!cache_resource->action) {
6958 		rte_free(cache_resource);
6959 		return rte_flow_error_set(error, ENOMEM,
6960 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6961 					  NULL, "cannot create action");
6962 	}
6963 	rte_atomic32_init(&cache_resource->refcnt);
6964 	rte_atomic32_inc(&cache_resource->refcnt);
6965 	if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
6966 		mlx5_glue->destroy_flow_action(cache_resource->action);
6967 		rte_free(cache_resource);
6968 		return rte_flow_error_set(error, EEXIST,
6969 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6970 					  NULL, "cannot insert tag");
6971 	}
6972 	dev_flow->dv.tag_resource = cache_resource;
6973 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
6974 		(void *)cache_resource,
6975 		rte_atomic32_read(&cache_resource->refcnt));
6976 	return 0;
6977 }
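
/*
 * Usage sketch (illustrative only): this mirrors how the MARK translation in
 * __flow_dv_translate() below feeds this function - the mark id is converted
 * by mlx5_flow_mark_set() and the resulting tag is registered once per
 * unique value, then shared via the reference count.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static int
example_register_mark_tag(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
			  struct rte_flow_error *error)
{
	uint32_t tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);

	return flow_dv_tag_resource_register(dev, tag_be, dev_flow, error);
}
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */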
6978 
6979 /**
6980  * Release the tag.
6981  *
6982  * @param dev
6983  *   Pointer to Ethernet device.
6984  * @param flow
6985  *   Pointer to mlx5_flow.
6986  *
6987  * @return
6988  *   1 while a reference on it exists, 0 when freed.
6989  */
6990 static int
6991 flow_dv_tag_release(struct rte_eth_dev *dev,
6992 		    struct mlx5_flow_dv_tag_resource *tag)
6993 {
6994 	struct mlx5_priv *priv = dev->data->dev_private;
6995 	struct mlx5_ibv_shared *sh = priv->sh;
6996 
6997 	MLX5_ASSERT(tag);
6998 	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
6999 		dev->data->port_id, (void *)tag,
7000 		rte_atomic32_read(&tag->refcnt));
7001 	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7002 		claim_zero(mlx5_glue->destroy_flow_action(tag->action));
7003 		mlx5_hlist_remove(sh->tag_table, &tag->entry);
7004 		DRV_LOG(DEBUG, "port %u tag %p: removed",
7005 			dev->data->port_id, (void *)tag);
7006 		rte_free(tag);
7007 		return 0;
7008 	}
7009 	return 1;
7010 }
7011 
7012 /**
7013  * Translate port ID action to vport.
7014  *
7015  * @param[in] dev
7016  *   Pointer to rte_eth_dev structure.
7017  * @param[in] action
7018  *   Pointer to the port ID action.
7019  * @param[out] dst_port_id
7020  *   The target port ID.
7021  * @param[out] error
7022  *   Pointer to the error structure.
7023  *
7024  * @return
7025  *   0 on success, a negative errno value otherwise and rte_errno is set.
7026  */
7027 static int
7028 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7029 				 const struct rte_flow_action *action,
7030 				 uint32_t *dst_port_id,
7031 				 struct rte_flow_error *error)
7032 {
7033 	uint32_t port;
7034 	struct mlx5_priv *priv;
7035 	const struct rte_flow_action_port_id *conf =
7036 			(const struct rte_flow_action_port_id *)action->conf;
7037 
7038 	port = conf->original ? dev->data->port_id : conf->id;
7039 	priv = mlx5_port_to_eswitch_info(port, false);
7040 	if (!priv)
7041 		return rte_flow_error_set(error, -rte_errno,
7042 					  RTE_FLOW_ERROR_TYPE_ACTION,
7043 					  NULL,
7044 					  "No eswitch info was found for port");
7045 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7046 	/*
7047 	 * This parameter is transferred to
7048 	 * mlx5dv_dr_action_create_dest_ib_port().
7049 	 */
7050 	*dst_port_id = priv->ibv_port;
7051 #else
7052 	/*
7053 	 * Legacy mode, no LAG configuration is supported.
7054 	 * This parameter is transferred to
7055 	 * mlx5dv_dr_action_create_dest_vport().
7056 	 */
7057 	*dst_port_id = priv->vport_id;
7058 #endif
7059 	return 0;
7060 }
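
/*
 * Usage sketch (illustrative only, not part of the driver): the application-
 * side PORT_ID action that this helper resolves to a vport. Port 1 is a
 * hypothetical DPDK port id.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static const struct rte_flow_action_port_id example_port_id_conf = {
	.original = 0, /* Use the id field below, not the incoming port. */
	.id = 1, /* Hypothetical destination DPDK port. */
};
static const struct rte_flow_action example_port_id_action = {
	.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
	.conf = &example_port_id_conf,
};
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */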
7061 
7062 /**
7063  * Add Tx queue item to matcher and to the value.
7064  *
7065  * @param[in] dev
7066  *   Pointer to the dev struct.
7067  * @param[in, out] matcher
7068  *   Flow matcher.
7069  * @param[in, out] key
7070  *   Flow matcher value.
7071  * @param[in] item
7072  *   Flow pattern to translate.
7075  */
7076 static void
7077 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
7078 				void *matcher, void *key,
7079 				const struct rte_flow_item *item)
7080 {
7081 	const struct mlx5_rte_flow_item_tx_queue *queue_m;
7082 	const struct mlx5_rte_flow_item_tx_queue *queue_v;
7083 	void *misc_m =
7084 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7085 	void *misc_v =
7086 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7087 	struct mlx5_txq_ctrl *txq;
7088 	uint32_t queue;
7089 
7091 	queue_m = (const void *)item->mask;
7092 	if (!queue_m)
7093 		return;
7094 	queue_v = (const void *)item->spec;
7095 	if (!queue_v)
7096 		return;
7097 	txq = mlx5_txq_get(dev, queue_v->queue);
7098 	if (!txq)
7099 		return;
7100 	queue = txq->obj->sq->id;
7101 	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
7102 	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
7103 		 queue & queue_m->queue);
7104 	mlx5_txq_release(dev, queue_v->queue);
7105 }
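
/*
 * Usage sketch (illustrative only): the Tx queue item is mlx5-internal
 * (MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE); the queue index is mapped to the
 * underlying SQ number at translation time. Queue 0 and the full mask are
 * hypothetical example values.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static const struct mlx5_rte_flow_item_tx_queue example_txq_spec = {
	.queue = 0, /* Hypothetical Tx queue index. */
};
static const struct mlx5_rte_flow_item_tx_queue example_txq_mask = {
	.queue = UINT32_MAX, /* Match the queue index exactly. */
};
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */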
7106 
7107 /**
7108  * Set the hash fields according to the @p flow information.
7109  *
7110  * @param[in] dev_flow
7111  *   Pointer to the mlx5_flow.
7112  */
7113 static void
7114 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
7115 {
7116 	struct rte_flow *flow = dev_flow->flow;
7117 	uint64_t items = dev_flow->layers;
7118 	int rss_inner = 0;
7119 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
7120 
7121 	dev_flow->hash_fields = 0;
7122 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
7123 	if (flow->rss.level >= 2) {
7124 		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
7125 		rss_inner = 1;
7126 	}
7127 #endif
7128 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
7129 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
7130 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
7131 			if (rss_types & ETH_RSS_L3_SRC_ONLY)
7132 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
7133 			else if (rss_types & ETH_RSS_L3_DST_ONLY)
7134 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
7135 			else
7136 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
7137 		}
7138 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
7139 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
7140 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
7141 			if (rss_types & ETH_RSS_L3_SRC_ONLY)
7142 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
7143 			else if (rss_types & ETH_RSS_L3_DST_ONLY)
7144 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
7145 			else
7146 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
7147 		}
7148 	}
7149 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
7150 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
7151 		if (rss_types & ETH_RSS_UDP) {
7152 			if (rss_types & ETH_RSS_L4_SRC_ONLY)
7153 				dev_flow->hash_fields |=
7154 						IBV_RX_HASH_SRC_PORT_UDP;
7155 			else if (rss_types & ETH_RSS_L4_DST_ONLY)
7156 				dev_flow->hash_fields |=
7157 						IBV_RX_HASH_DST_PORT_UDP;
7158 			else
7159 				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
7160 		}
7161 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
7162 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
7163 		if (rss_types & ETH_RSS_TCP) {
7164 			if (rss_types & ETH_RSS_L4_SRC_ONLY)
7165 				dev_flow->hash_fields |=
7166 						IBV_RX_HASH_SRC_PORT_TCP;
7167 			else if (rss_types & ETH_RSS_L4_DST_ONLY)
7168 				dev_flow->hash_fields |=
7169 						IBV_RX_HASH_DST_PORT_TCP;
7170 			else
7171 				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
7172 		}
7173 	}
7174 }
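
/*
 * Worked example (informal sketch): with rss.level < 2 (outer hashing),
 * outer IPv4 + UDP layers and rss.types = ETH_RSS_IP | ETH_RSS_UDP without
 * any SRC_ONLY/DST_ONLY modifier, the function selects both address halves
 * and both port halves.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
/* Expected selection for the case described above. */
static const uint64_t example_outer_ipv4_udp_hash =
	MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH;
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */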
7175 
7176 /**
7177  * Fill the flow with DV spec, lock free
7178  * (mutex should be acquired by caller).
7179  *
7180  * @param[in] dev
7181  *   Pointer to rte_eth_dev structure.
7182  * @param[in, out] dev_flow
7183  *   Pointer to the sub flow.
7184  * @param[in] attr
7185  *   Pointer to the flow attributes.
7186  * @param[in] items
7187  *   Pointer to the list of items.
7188  * @param[in] actions
7189  *   Pointer to the list of actions.
7190  * @param[out] error
7191  *   Pointer to the error structure.
7192  *
7193  * @return
7194  *   0 on success, a negative errno value otherwise and rte_errno is set.
7195  */
7196 static int
7197 __flow_dv_translate(struct rte_eth_dev *dev,
7198 		    struct mlx5_flow *dev_flow,
7199 		    const struct rte_flow_attr *attr,
7200 		    const struct rte_flow_item items[],
7201 		    const struct rte_flow_action actions[],
7202 		    struct rte_flow_error *error)
7203 {
7204 	struct mlx5_priv *priv = dev->data->dev_private;
7205 	struct mlx5_dev_config *dev_conf = &priv->config;
7206 	struct rte_flow *flow = dev_flow->flow;
7207 	uint64_t item_flags = 0;
7208 	uint64_t last_item = 0;
7209 	uint64_t action_flags = 0;
7210 	uint64_t priority = attr->priority;
7211 	struct mlx5_flow_dv_matcher matcher = {
7212 		.mask = {
7213 			.size = sizeof(matcher.mask.buf),
7214 		},
7215 	};
7216 	int actions_n = 0;
7217 	bool actions_end = false;
7218 	union {
7219 		struct mlx5_flow_dv_modify_hdr_resource res;
7220 		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
7221 			    sizeof(struct mlx5_modification_cmd) *
7222 			    (MLX5_MAX_MODIFY_NUM + 1)];
7223 	} mhdr_dummy;
7224 	struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
7225 	union flow_dv_attr flow_attr = { .attr = 0 };
7226 	uint32_t tag_be;
7227 	union mlx5_flow_tbl_key tbl_key;
7228 	uint32_t modify_action_position = UINT32_MAX;
7229 	void *match_mask = matcher.mask.buf;
7230 	void *match_value = dev_flow->dv.value.buf;
7231 	uint8_t next_protocol = 0xff;
7232 	struct rte_vlan_hdr vlan = { 0 };
7233 	uint32_t table;
7234 	int ret = 0;
7235 
7236 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
7237 					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
7238 	ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
7239 				       !!priv->fdb_def_rule, &table, error);
7240 	if (ret)
7241 		return ret;
7242 	dev_flow->group = table;
7243 	if (attr->transfer)
7244 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
7245 	if (priority == MLX5_FLOW_PRIO_RSVD)
7246 		priority = dev_conf->flow_prio - 1;
7247 	/* The number of actions must be set to 0 in case of a dirty stack. */
7248 	mhdr_res->actions_num = 0;
7249 	for (; !actions_end ; actions++) {
7250 		const struct rte_flow_action_queue *queue;
7251 		const struct rte_flow_action_rss *rss;
7252 		const struct rte_flow_action *action = actions;
7253 		const struct rte_flow_action_count *count = action->conf;
7254 		const uint8_t *rss_key;
7255 		const struct rte_flow_action_jump *jump_data;
7256 		const struct rte_flow_action_meter *mtr;
7257 		struct mlx5_flow_tbl_resource *tbl;
7258 		uint32_t port_id = 0;
7259 		struct mlx5_flow_dv_port_id_action_resource port_id_resource;
7260 		int action_type = actions->type;
7261 		const struct rte_flow_action *found_action = NULL;
7262 
7263 		switch (action_type) {
7264 		case RTE_FLOW_ACTION_TYPE_VOID:
7265 			break;
7266 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
7267 			if (flow_dv_translate_action_port_id(dev, action,
7268 							     &port_id, error))
7269 				return -rte_errno;
7270 			port_id_resource.port_id = port_id;
7271 			if (flow_dv_port_id_action_resource_register
7272 			    (dev, &port_id_resource, dev_flow, error))
7273 				return -rte_errno;
7274 			dev_flow->dv.actions[actions_n++] =
7275 				dev_flow->dv.port_id_action->action;
7276 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7277 			break;
7278 		case RTE_FLOW_ACTION_TYPE_FLAG:
7279 			action_flags |= MLX5_FLOW_ACTION_FLAG;
7280 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7281 				struct rte_flow_action_mark mark = {
7282 					.id = MLX5_FLOW_MARK_DEFAULT,
7283 				};
7284 
7285 				if (flow_dv_convert_action_mark(dev, &mark,
7286 								mhdr_res,
7287 								error))
7288 					return -rte_errno;
7289 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7290 				break;
7291 			}
7292 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
7293 			if (!dev_flow->dv.tag_resource)
7294 				if (flow_dv_tag_resource_register
7295 				    (dev, tag_be, dev_flow, error))
7296 					return -rte_errno;
7297 			dev_flow->dv.actions[actions_n++] =
7298 				dev_flow->dv.tag_resource->action;
7299 			break;
7300 		case RTE_FLOW_ACTION_TYPE_MARK:
7301 			action_flags |= MLX5_FLOW_ACTION_MARK;
7302 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7303 				const struct rte_flow_action_mark *mark =
7304 					(const struct rte_flow_action_mark *)
7305 						actions->conf;
7306 
7307 				if (flow_dv_convert_action_mark(dev, mark,
7308 								mhdr_res,
7309 								error))
7310 					return -rte_errno;
7311 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7312 				break;
7313 			}
7314 			/* Fall-through */
7315 		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7316 			/* Legacy (non-extensive) MARK action. */
7317 			tag_be = mlx5_flow_mark_set
7318 			      (((const struct rte_flow_action_mark *)
7319 			       (actions->conf))->id);
7320 			if (!dev_flow->dv.tag_resource)
7321 				if (flow_dv_tag_resource_register
7322 				    (dev, tag_be, dev_flow, error))
7323 					return -rte_errno;
7324 			dev_flow->dv.actions[actions_n++] =
7325 				dev_flow->dv.tag_resource->action;
7326 			break;
7327 		case RTE_FLOW_ACTION_TYPE_SET_META:
7328 			if (flow_dv_convert_action_set_meta
7329 				(dev, mhdr_res, attr,
7330 				 (const struct rte_flow_action_set_meta *)
7331 				  actions->conf, error))
7332 				return -rte_errno;
7333 			action_flags |= MLX5_FLOW_ACTION_SET_META;
7334 			break;
7335 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
7336 			if (flow_dv_convert_action_set_tag
7337 				(dev, mhdr_res,
7338 				 (const struct rte_flow_action_set_tag *)
7339 				  actions->conf, error))
7340 				return -rte_errno;
7341 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7342 			break;
7343 		case RTE_FLOW_ACTION_TYPE_DROP:
7344 			action_flags |= MLX5_FLOW_ACTION_DROP;
7345 			break;
7346 		case RTE_FLOW_ACTION_TYPE_QUEUE:
7347 			MLX5_ASSERT(flow->rss.queue);
7348 			queue = actions->conf;
7349 			flow->rss.queue_num = 1;
7350 			(*flow->rss.queue)[0] = queue->index;
7351 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
7352 			break;
7353 		case RTE_FLOW_ACTION_TYPE_RSS:
7354 			MLX5_ASSERT(flow->rss.queue);
7355 			rss = actions->conf;
7356 			if (flow->rss.queue)
7357 				memcpy((*flow->rss.queue), rss->queue,
7358 				       rss->queue_num * sizeof(uint16_t));
7359 			flow->rss.queue_num = rss->queue_num;
7360 			/* NULL RSS key indicates default RSS key. */
7361 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
7362 			memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
7363 			/*
7364 			 * rss->level and rss->types should be set in advance
7365 			 * when expanding items for RSS.
7366 			 */
7367 			action_flags |= MLX5_FLOW_ACTION_RSS;
7368 			break;
7369 		case RTE_FLOW_ACTION_TYPE_COUNT:
7370 			if (!dev_conf->devx) {
7371 				rte_errno = ENOTSUP;
7372 				goto cnt_err;
7373 			}
7374 			flow->counter = flow_dv_counter_alloc(dev,
7375 							      count->shared,
7376 							      count->id,
7377 							      dev_flow->group);
7378 			if (flow->counter == NULL)
7379 				goto cnt_err;
7380 			dev_flow->dv.actions[actions_n++] =
7381 				flow->counter->action;
7382 			action_flags |= MLX5_FLOW_ACTION_COUNT;
7383 			break;
7384 cnt_err:
7385 			if (rte_errno == ENOTSUP)
7386 				return rte_flow_error_set
7387 					      (error, ENOTSUP,
7388 					       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7389 					       NULL,
7390 					       "count action not supported");
7391 			else
7392 				return rte_flow_error_set
7393 						(error, rte_errno,
7394 						 RTE_FLOW_ERROR_TYPE_ACTION,
7395 						 action,
7396 						 "cannot create counter"
7397 						  " object.");
7398 			break;
7399 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7400 			dev_flow->dv.actions[actions_n++] =
7401 						priv->sh->pop_vlan_action;
7402 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7403 			break;
7404 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7405 			flow_dev_get_vlan_info_from_items(items, &vlan);
7406 			vlan.eth_proto = rte_be_to_cpu_16
7407 			     ((((const struct rte_flow_action_of_push_vlan *)
7408 						   actions->conf)->ethertype));
7409 			found_action = mlx5_flow_find_action
7410 					(actions + 1,
7411 					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
7412 			if (found_action)
7413 				mlx5_update_vlan_vid_pcp(found_action, &vlan);
7414 			found_action = mlx5_flow_find_action
7415 					(actions + 1,
7416 					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
7417 			if (found_action)
7418 				mlx5_update_vlan_vid_pcp(found_action, &vlan);
7419 			if (flow_dv_create_action_push_vlan
7420 					    (dev, attr, &vlan, dev_flow, error))
7421 				return -rte_errno;
7422 			dev_flow->dv.actions[actions_n++] =
7423 					   dev_flow->dv.push_vlan_res->action;
7424 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7425 			break;
7426 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7427 			/* of_vlan_push action handled this action */
7428 			MLX5_ASSERT(action_flags &
7429 				    MLX5_FLOW_ACTION_OF_PUSH_VLAN);
7430 			break;
7431 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7432 			if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7433 				break;
7434 			flow_dev_get_vlan_info_from_items(items, &vlan);
7435 			mlx5_update_vlan_vid_pcp(actions, &vlan);
7436 			/* If no VLAN push - this is a modify header action */
7437 			if (flow_dv_convert_action_modify_vlan_vid
7438 						(mhdr_res, actions, error))
7439 				return -rte_errno;
7440 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7441 			break;
7442 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7443 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7444 			if (flow_dv_create_action_l2_encap(dev, actions,
7445 							   dev_flow,
7446 							   attr->transfer,
7447 							   error))
7448 				return -rte_errno;
7449 			dev_flow->dv.actions[actions_n++] =
7450 				dev_flow->dv.encap_decap->verbs_action;
7451 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
7452 			break;
7453 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7454 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7455 			if (flow_dv_create_action_l2_decap(dev, dev_flow,
7456 							   attr->transfer,
7457 							   error))
7458 				return -rte_errno;
7459 			dev_flow->dv.actions[actions_n++] =
7460 				dev_flow->dv.encap_decap->verbs_action;
7461 			action_flags |= MLX5_FLOW_ACTION_DECAP;
7462 			break;
7463 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7464 			/* Handle encap with preceding decap. */
7465 			if (action_flags & MLX5_FLOW_ACTION_DECAP) {
7466 				if (flow_dv_create_action_raw_encap
7467 					(dev, actions, dev_flow, attr, error))
7468 					return -rte_errno;
7469 				dev_flow->dv.actions[actions_n++] =
7470 					dev_flow->dv.encap_decap->verbs_action;
7471 			} else {
7472 				/* Handle encap without preceding decap. */
7473 				if (flow_dv_create_action_l2_encap
7474 				    (dev, actions, dev_flow, attr->transfer,
7475 				     error))
7476 					return -rte_errno;
7477 				dev_flow->dv.actions[actions_n++] =
7478 					dev_flow->dv.encap_decap->verbs_action;
7479 			}
7480 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
7481 			break;
7482 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7483 			while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
7484 				;
7485 			if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7486 				if (flow_dv_create_action_l2_decap
7487 				    (dev, dev_flow, attr->transfer, error))
7488 					return -rte_errno;
7489 				dev_flow->dv.actions[actions_n++] =
7490 					dev_flow->dv.encap_decap->verbs_action;
7491 			}
7492 			/* If decap is followed by encap, handle it at encap. */
7493 			action_flags |= MLX5_FLOW_ACTION_DECAP;
7494 			break;
7495 		case RTE_FLOW_ACTION_TYPE_JUMP:
7496 			jump_data = action->conf;
7497 			ret = mlx5_flow_group_to_table(attr, dev_flow->external,
7498 						       jump_data->group,
7499 						       !!priv->fdb_def_rule,
7500 						       &table, error);
7501 			if (ret)
7502 				return ret;
7503 			tbl = flow_dv_tbl_resource_get(dev, table,
7504 						       attr->egress,
7505 						       attr->transfer, error);
7506 			if (!tbl)
7507 				return rte_flow_error_set
7508 						(error, errno,
7509 						 RTE_FLOW_ERROR_TYPE_ACTION,
7510 						 NULL,
7511 						 "cannot create jump action.");
7512 			if (flow_dv_jump_tbl_resource_register
7513 			    (dev, tbl, dev_flow, error)) {
7514 				flow_dv_tbl_resource_release(dev, tbl);
7515 				return rte_flow_error_set
7516 						(error, errno,
7517 						 RTE_FLOW_ERROR_TYPE_ACTION,
7518 						 NULL,
7519 						 "cannot create jump action.");
7520 			}
7521 			dev_flow->dv.actions[actions_n++] =
7522 				dev_flow->dv.jump->action;
7523 			action_flags |= MLX5_FLOW_ACTION_JUMP;
7524 			break;
7525 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7526 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7527 			if (flow_dv_convert_action_modify_mac
7528 					(mhdr_res, actions, error))
7529 				return -rte_errno;
7530 			action_flags |= actions->type ==
7531 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7532 					MLX5_FLOW_ACTION_SET_MAC_SRC :
7533 					MLX5_FLOW_ACTION_SET_MAC_DST;
7534 			break;
7535 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7536 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7537 			if (flow_dv_convert_action_modify_ipv4
7538 					(mhdr_res, actions, error))
7539 				return -rte_errno;
7540 			action_flags |= actions->type ==
7541 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7542 					MLX5_FLOW_ACTION_SET_IPV4_SRC :
7543 					MLX5_FLOW_ACTION_SET_IPV4_DST;
7544 			break;
7545 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7546 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7547 			if (flow_dv_convert_action_modify_ipv6
7548 					(mhdr_res, actions, error))
7549 				return -rte_errno;
7550 			action_flags |= actions->type ==
7551 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7552 					MLX5_FLOW_ACTION_SET_IPV6_SRC :
7553 					MLX5_FLOW_ACTION_SET_IPV6_DST;
7554 			break;
7555 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7556 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7557 			if (flow_dv_convert_action_modify_tp
7558 					(mhdr_res, actions, items,
7559 					 &flow_attr, dev_flow, !!(action_flags &
7560 					 MLX5_FLOW_ACTION_DECAP), error))
7561 				return -rte_errno;
7562 			action_flags |= actions->type ==
7563 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7564 					MLX5_FLOW_ACTION_SET_TP_SRC :
7565 					MLX5_FLOW_ACTION_SET_TP_DST;
7566 			break;
7567 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7568 			if (flow_dv_convert_action_modify_dec_ttl
7569 					(mhdr_res, items, &flow_attr, dev_flow,
7570 					 !!(action_flags &
7571 					 MLX5_FLOW_ACTION_DECAP), error))
7572 				return -rte_errno;
7573 			action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
7574 			break;
7575 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
7576 			if (flow_dv_convert_action_modify_ttl
7577 					(mhdr_res, actions, items, &flow_attr,
7578 					 dev_flow, !!(action_flags &
7579 					 MLX5_FLOW_ACTION_DECAP), error))
7580 				return -rte_errno;
7581 			action_flags |= MLX5_FLOW_ACTION_SET_TTL;
7582 			break;
7583 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7584 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7585 			if (flow_dv_convert_action_modify_tcp_seq
7586 					(mhdr_res, actions, error))
7587 				return -rte_errno;
7588 			action_flags |= actions->type ==
7589 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7590 					MLX5_FLOW_ACTION_INC_TCP_SEQ :
7591 					MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7592 			break;
7593 
7594 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7595 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7596 			if (flow_dv_convert_action_modify_tcp_ack
7597 					(mhdr_res, actions, error))
7598 				return -rte_errno;
7599 			action_flags |= actions->type ==
7600 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7601 					MLX5_FLOW_ACTION_INC_TCP_ACK :
7602 					MLX5_FLOW_ACTION_DEC_TCP_ACK;
7603 			break;
7604 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7605 			if (flow_dv_convert_action_set_reg
7606 					(mhdr_res, actions, error))
7607 				return -rte_errno;
7608 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7609 			break;
7610 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7611 			if (flow_dv_convert_action_copy_mreg
7612 					(dev, mhdr_res, actions, error))
7613 				return -rte_errno;
7614 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7615 			break;
7616 		case RTE_FLOW_ACTION_TYPE_METER:
7617 			mtr = actions->conf;
7618 			if (!flow->meter) {
7619 				flow->meter = mlx5_flow_meter_attach(priv,
7620 							mtr->mtr_id, attr,
7621 							error);
7622 				if (!flow->meter)
7623 					return rte_flow_error_set(error,
7624 						rte_errno,
7625 						RTE_FLOW_ERROR_TYPE_ACTION,
7626 						NULL,
7627 						"meter not found "
7628 						"or invalid parameters");
7629 			}
7630 			/* Set the meter action. */
7631 			dev_flow->dv.actions[actions_n++] =
7632 				flow->meter->mfts->meter_action;
7633 			action_flags |= MLX5_FLOW_ACTION_METER;
7634 			break;
7635 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7636 			if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
7637 							      actions, error))
7638 				return -rte_errno;
7639 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7640 			break;
7641 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7642 			if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
7643 							      actions, error))
7644 				return -rte_errno;
7645 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7646 			break;
7647 		case RTE_FLOW_ACTION_TYPE_END:
7648 			actions_end = true;
7649 			if (mhdr_res->actions_num) {
7650 				/* create modify action if needed. */
7651 				if (flow_dv_modify_hdr_resource_register
7652 					(dev, mhdr_res, dev_flow, error))
7653 					return -rte_errno;
7654 				dev_flow->dv.actions[modify_action_position] =
7655 					dev_flow->dv.modify_hdr->verbs_action;
7656 			}
7657 			break;
7658 		default:
7659 			break;
7660 		}
7661 		if (mhdr_res->actions_num &&
7662 		    modify_action_position == UINT32_MAX)
7663 			modify_action_position = actions_n++;
7664 	}
7665 	dev_flow->dv.actions_n = actions_n;
7666 	dev_flow->actions = action_flags;
7667 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7668 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7669 		int item_type = items->type;
7670 
7671 		switch (item_type) {
7672 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
7673 			flow_dv_translate_item_port_id(dev, match_mask,
7674 						       match_value, items);
7675 			last_item = MLX5_FLOW_ITEM_PORT_ID;
7676 			break;
7677 		case RTE_FLOW_ITEM_TYPE_ETH:
7678 			flow_dv_translate_item_eth(match_mask, match_value,
7679 						   items, tunnel);
7680 			matcher.priority = MLX5_PRIORITY_MAP_L2;
7681 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7682 					     MLX5_FLOW_LAYER_OUTER_L2;
7683 			break;
7684 		case RTE_FLOW_ITEM_TYPE_VLAN:
7685 			flow_dv_translate_item_vlan(dev_flow,
7686 						    match_mask, match_value,
7687 						    items, tunnel);
7688 			matcher.priority = MLX5_PRIORITY_MAP_L2;
7689 			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
7690 					      MLX5_FLOW_LAYER_INNER_VLAN) :
7691 					     (MLX5_FLOW_LAYER_OUTER_L2 |
7692 					      MLX5_FLOW_LAYER_OUTER_VLAN);
7693 			break;
7694 		case RTE_FLOW_ITEM_TYPE_IPV4:
7695 			mlx5_flow_tunnel_ip_check(items, next_protocol,
7696 						  &item_flags, &tunnel);
7697 			flow_dv_translate_item_ipv4(match_mask, match_value,
7698 						    items, item_flags, tunnel,
7699 						    dev_flow->group);
7700 			matcher.priority = MLX5_PRIORITY_MAP_L3;
7701 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7702 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7703 			if (items->mask != NULL &&
7704 			    ((const struct rte_flow_item_ipv4 *)
7705 			     items->mask)->hdr.next_proto_id) {
7706 				next_protocol =
7707 					((const struct rte_flow_item_ipv4 *)
7708 					 (items->spec))->hdr.next_proto_id;
7709 				next_protocol &=
7710 					((const struct rte_flow_item_ipv4 *)
7711 					 (items->mask))->hdr.next_proto_id;
7712 			} else {
7713 				/* Reset for inner layer. */
7714 				next_protocol = 0xff;
7715 			}
7716 			break;
7717 		case RTE_FLOW_ITEM_TYPE_IPV6:
7718 			mlx5_flow_tunnel_ip_check(items, next_protocol,
7719 						  &item_flags, &tunnel);
7720 			flow_dv_translate_item_ipv6(match_mask, match_value,
7721 						    items, item_flags, tunnel,
7722 						    dev_flow->group);
7723 			matcher.priority = MLX5_PRIORITY_MAP_L3;
7724 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7725 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7726 			if (items->mask != NULL &&
7727 			    ((const struct rte_flow_item_ipv6 *)
7728 			     items->mask)->hdr.proto) {
7729 				next_protocol =
7730 					((const struct rte_flow_item_ipv6 *)
7731 					 items->spec)->hdr.proto;
7732 				next_protocol &=
7733 					((const struct rte_flow_item_ipv6 *)
7734 					 items->mask)->hdr.proto;
7735 			} else {
7736 				/* Reset for inner layer. */
7737 				next_protocol = 0xff;
7738 			}
7739 			break;
7740 		case RTE_FLOW_ITEM_TYPE_TCP:
7741 			flow_dv_translate_item_tcp(match_mask, match_value,
7742 						   items, tunnel);
7743 			matcher.priority = MLX5_PRIORITY_MAP_L4;
7744 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7745 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
7746 			break;
7747 		case RTE_FLOW_ITEM_TYPE_UDP:
7748 			flow_dv_translate_item_udp(match_mask, match_value,
7749 						   items, tunnel);
7750 			matcher.priority = MLX5_PRIORITY_MAP_L4;
7751 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7752 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
7753 			break;
7754 		case RTE_FLOW_ITEM_TYPE_GRE:
7755 			flow_dv_translate_item_gre(match_mask, match_value,
7756 						   items, tunnel);
7757 			matcher.priority = flow->rss.level >= 2 ?
7758 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7759 			last_item = MLX5_FLOW_LAYER_GRE;
7760 			break;
7761 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7762 			flow_dv_translate_item_gre_key(match_mask,
7763 						       match_value, items);
7764 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
7765 			break;
7766 		case RTE_FLOW_ITEM_TYPE_NVGRE:
7767 			flow_dv_translate_item_nvgre(match_mask, match_value,
7768 						     items, tunnel);
7769 			matcher.priority = flow->rss.level >= 2 ?
7770 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7771 			last_item = MLX5_FLOW_LAYER_GRE;
7772 			break;
7773 		case RTE_FLOW_ITEM_TYPE_VXLAN:
7774 			flow_dv_translate_item_vxlan(match_mask, match_value,
7775 						     items, tunnel);
7776 			matcher.priority = flow->rss.level >= 2 ?
7777 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7778 			last_item = MLX5_FLOW_LAYER_VXLAN;
7779 			break;
7780 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7781 			flow_dv_translate_item_vxlan_gpe(match_mask,
7782 							 match_value, items,
7783 							 tunnel);
7784 			matcher.priority = flow->rss.level >= 2 ?
7785 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7786 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7787 			break;
7788 		case RTE_FLOW_ITEM_TYPE_GENEVE:
7789 			flow_dv_translate_item_geneve(match_mask, match_value,
7790 						      items, tunnel);
7791 			matcher.priority = flow->rss.level >= 2 ?
7792 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7793 			last_item = MLX5_FLOW_LAYER_GENEVE;
7794 			break;
7795 		case RTE_FLOW_ITEM_TYPE_MPLS:
7796 			flow_dv_translate_item_mpls(match_mask, match_value,
7797 						    items, last_item, tunnel);
7798 			matcher.priority = flow->rss.level >= 2 ?
7799 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7800 			last_item = MLX5_FLOW_LAYER_MPLS;
7801 			break;
7802 		case RTE_FLOW_ITEM_TYPE_MARK:
7803 			flow_dv_translate_item_mark(dev, match_mask,
7804 						    match_value, items);
7805 			last_item = MLX5_FLOW_ITEM_MARK;
7806 			break;
7807 		case RTE_FLOW_ITEM_TYPE_META:
7808 			flow_dv_translate_item_meta(dev, match_mask,
7809 						    match_value, attr, items);
7810 			last_item = MLX5_FLOW_ITEM_METADATA;
7811 			break;
7812 		case RTE_FLOW_ITEM_TYPE_ICMP:
7813 			flow_dv_translate_item_icmp(match_mask, match_value,
7814 						    items, tunnel);
7815 			last_item = MLX5_FLOW_LAYER_ICMP;
7816 			break;
7817 		case RTE_FLOW_ITEM_TYPE_ICMP6:
7818 			flow_dv_translate_item_icmp6(match_mask, match_value,
7819 						      items, tunnel);
7820 			last_item = MLX5_FLOW_LAYER_ICMP6;
7821 			break;
7822 		case RTE_FLOW_ITEM_TYPE_TAG:
7823 			flow_dv_translate_item_tag(dev, match_mask,
7824 						   match_value, items);
7825 			last_item = MLX5_FLOW_ITEM_TAG;
7826 			break;
7827 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7828 			flow_dv_translate_mlx5_item_tag(dev, match_mask,
7829 							match_value, items);
7830 			last_item = MLX5_FLOW_ITEM_TAG;
7831 			break;
7832 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7833 			flow_dv_translate_item_tx_queue(dev, match_mask,
7834 							match_value,
7835 							items);
7836 			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7837 			break;
7838 		case RTE_FLOW_ITEM_TYPE_GTP:
7839 			flow_dv_translate_item_gtp(match_mask, match_value,
7840 						   items, tunnel);
7841 			matcher.priority = flow->rss.level >= 2 ?
7842 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7843 			last_item = MLX5_FLOW_LAYER_GTP;
7844 			break;
7845 		default:
7846 			break;
7847 		}
7848 		item_flags |= last_item;
7849 	}
7850 	/*
7851 	 * When E-Switch mode is enabled, we have two cases where we need to
7852 	 * set the source port manually.
7853 	 * The first one is the case of a NIC steering rule, and the second is
7854 	 * an E-Switch rule where no port_id item was found. In both cases
7855 	 * the source port is set according to the current port in use.
7856 	 */
7857 	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
7858 	    (priv->representor || priv->master)) {
7859 		if (flow_dv_translate_item_port_id(dev, match_mask,
7860 						   match_value, NULL))
7861 			return -rte_errno;
7862 	}
7863 #ifdef RTE_LIBRTE_MLX5_DEBUG
7864 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
7865 					      dev_flow->dv.value.buf));
7866 #endif
7867 	/*
7868 	 * Layers may be already initialized from prefix flow if this dev_flow
7869 	 * is the suffix flow.
7870 	 */
7871 	dev_flow->layers |= item_flags;
7872 	if (action_flags & MLX5_FLOW_ACTION_RSS)
7873 		flow_dv_hashfields_set(dev_flow);
7874 	/* Register matcher. */
7875 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
7876 				    matcher.mask.size);
7877 	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
7878 						     matcher.priority);
7879 	/* The reserved field does not need to be set to 0 here. */
7880 	tbl_key.domain = attr->transfer;
7881 	tbl_key.direction = attr->egress;
7882 	tbl_key.table_id = dev_flow->group;
7883 	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
7884 		return -rte_errno;
7885 	return 0;
7886 }
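
/*
 * End-to-end usage sketch (illustrative only, not part of the driver): the
 * kind of public rte_flow call that reaches this translation path. All the
 * attribute, pattern and action values are hypothetical.
 */
#ifdef MLX5_FLOW_DV_USAGE_SKETCHES
static struct rte_flow *
example_create_queue_flow(struct rte_eth_dev *dev,
			  struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = {
		.group = 0,
		.priority = 0,
		.ingress = 1,
	};
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(dev->data->port_id, &attr, pattern, actions,
			       error);
}
#endif /* MLX5_FLOW_DV_USAGE_SKETCHES */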
7887 
7888 /**
7889  * Apply the flow to the NIC, lock free
7890  * (mutex should be acquired by caller).
7891  *
7892  * @param[in] dev
7893  *   Pointer to the Ethernet device structure.
7894  * @param[in, out] flow
7895  *   Pointer to flow structure.
7896  * @param[out] error
7897  *   Pointer to error structure.
7898  *
7899  * @return
7900  *   0 on success, a negative errno value otherwise and rte_errno is set.
7901  */
7902 static int
7903 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
7904 		struct rte_flow_error *error)
7905 {
7906 	struct mlx5_flow_dv *dv;
7907 	struct mlx5_flow *dev_flow;
7908 	struct mlx5_priv *priv = dev->data->dev_private;
7909 	int n;
7910 	int err;
7911 
7912 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7913 		dv = &dev_flow->dv;
7914 		n = dv->actions_n;
7915 		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
7916 			if (dev_flow->transfer) {
7917 				dv->actions[n++] = priv->sh->esw_drop_action;
7918 			} else {
7919 				dv->hrxq = mlx5_hrxq_drop_new(dev);
7920 				if (!dv->hrxq) {
7921 					rte_flow_error_set
7922 						(error, errno,
7923 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7924 						 NULL,
7925 						 "cannot get drop hash queue");
7926 					goto error;
7927 				}
7928 				dv->actions[n++] = dv->hrxq->action;
7929 			}
7930 		} else if (dev_flow->actions &
7931 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
7932 			struct mlx5_hrxq *hrxq;
7933 
7934 			MLX5_ASSERT(flow->rss.queue);
7935 			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
7936 					     MLX5_RSS_HASH_KEY_LEN,
7937 					     dev_flow->hash_fields,
7938 					     (*flow->rss.queue),
7939 					     flow->rss.queue_num);
7940 			if (!hrxq) {
7941 				hrxq = mlx5_hrxq_new
7942 					(dev, flow->rss.key,
7943 					 MLX5_RSS_HASH_KEY_LEN,
7944 					 dev_flow->hash_fields,
7945 					 (*flow->rss.queue),
7946 					 flow->rss.queue_num,
7947 					 !!(dev_flow->layers &
7948 					    MLX5_FLOW_LAYER_TUNNEL));
7949 			}
7950 			if (!hrxq) {
7951 				rte_flow_error_set
7952 					(error, rte_errno,
7953 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7954 					 "cannot get hash queue");
7955 				goto error;
7956 			}
7957 			dv->hrxq = hrxq;
7958 			dv->actions[n++] = dv->hrxq->action;
7959 		}
7960 		dv->flow =
7961 			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
7962 						  (void *)&dv->value, n,
7963 						  dv->actions);
7964 		if (!dv->flow) {
7965 			rte_flow_error_set(error, errno,
7966 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7967 					   NULL,
7968 					   "hardware refuses to create flow");
7969 			goto error;
7970 		}
7971 		if (priv->vmwa_context &&
7972 		    dev_flow->dv.vf_vlan.tag &&
7973 		    !dev_flow->dv.vf_vlan.created) {
7974 			/*
7975 			 * The rule contains the VLAN pattern.
7976 			 * For VF we are going to create a VLAN
7977 			 * interface to make the hypervisor set the
7978 			 * correct e-Switch vport context.
7979 			 */
7980 			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
7981 		}
7982 	}
7983 	return 0;
7984 error:
7985 	err = rte_errno; /* Save rte_errno before cleanup. */
7986 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7987 		struct mlx5_flow_dv *dv = &dev_flow->dv;
7988 		if (dv->hrxq) {
7989 			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
7990 				mlx5_hrxq_drop_release(dev);
7991 			else
7992 				mlx5_hrxq_release(dev, dv->hrxq);
7993 			dv->hrxq = NULL;
7994 		}
7995 		if (dev_flow->dv.vf_vlan.tag &&
7996 		    dev_flow->dv.vf_vlan.created)
7997 			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
7998 	}
7999 	rte_errno = err; /* Restore rte_errno. */
8000 	return -rte_errno;
8001 }
8002 
8003 /**
8004  * Release the flow matcher.
8005  *
8006  * @param dev
8007  *   Pointer to Ethernet device.
8008  * @param flow
8009  *   Pointer to mlx5_flow.
8010  *
8011  * @return
8012  *   1 while a reference on it exists, 0 when freed.
8013  */
8014 static int
8015 flow_dv_matcher_release(struct rte_eth_dev *dev,
8016 			struct mlx5_flow *flow)
8017 {
8018 	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
8019 
8020 	MLX5_ASSERT(matcher->matcher_object);
8021 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
8022 		dev->data->port_id, (void *)matcher,
8023 		rte_atomic32_read(&matcher->refcnt));
8024 	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
8025 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8026 			   (matcher->matcher_object));
8027 		LIST_REMOVE(matcher, next);
8028 		/* table ref-- in release interface. */
8029 		flow_dv_tbl_resource_release(dev, matcher->tbl);
8030 		rte_free(matcher);
8031 		DRV_LOG(DEBUG, "port %u matcher %p: removed",
8032 			dev->data->port_id, (void *)matcher);
8033 		return 0;
8034 	}
8035 	return 1;
8036 }
8037 
8038 /**
8039  * Release an encap/decap resource.
8040  *
8041  * @param flow
8042  *   Pointer to mlx5_flow.
8043  *
8044  * @return
8045  *   1 while a reference on it exists, 0 when freed.
8046  */
8047 static int
8048 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
8049 {
8050 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
8051 						flow->dv.encap_decap;
8052 
8053 	MLX5_ASSERT(cache_resource->verbs_action);
8054 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
8055 		(void *)cache_resource,
8056 		rte_atomic32_read(&cache_resource->refcnt));
8057 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8058 		claim_zero(mlx5_glue->destroy_flow_action
8059 				(cache_resource->verbs_action));
8060 		LIST_REMOVE(cache_resource, next);
8061 		rte_free(cache_resource);
8062 		DRV_LOG(DEBUG, "encap/decap resource %p: removed",
8063 			(void *)cache_resource);
8064 		return 0;
8065 	}
8066 	return 1;
8067 }
8068 
8069 /**
8070  * Release a jump to table action resource.
8071  *
8072  * @param dev
8073  *   Pointer to Ethernet device.
8074  * @param flow
8075  *   Pointer to mlx5_flow.
8076  *
8077  * @return
8078  *   1 while a reference on it exists, 0 when freed.
8079  */
8080 static int
8081 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
8082 				  struct mlx5_flow *flow)
8083 {
8084 	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
8085 	struct mlx5_flow_tbl_data_entry *tbl_data =
8086 			container_of(cache_resource,
8087 				     struct mlx5_flow_tbl_data_entry, jump);
8088 
8089 	MLX5_ASSERT(cache_resource->action);
8090 	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
8091 		(void *)cache_resource,
8092 		rte_atomic32_read(&cache_resource->refcnt));
8093 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8094 		claim_zero(mlx5_glue->destroy_flow_action
8095 				(cache_resource->action));
8096 		/* jump action memory free is inside the table release. */
8097 		flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
8098 		DRV_LOG(DEBUG, "jump table resource %p: removed",
8099 			(void *)cache_resource);
8100 		return 0;
8101 	}
8102 	return 1;
8103 }
8104 
8105 /**
8106  * Release a modify-header resource.
8107  *
8108  * @param flow
8109  *   Pointer to mlx5_flow.
8110  *
8111  * @return
8112  *   1 while a reference on it exists, 0 when freed.
8113  */
8114 static int
8115 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
8116 {
8117 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
8118 						flow->dv.modify_hdr;
8119 
8120 	MLX5_ASSERT(cache_resource->verbs_action);
8121 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
8122 		(void *)cache_resource,
8123 		rte_atomic32_read(&cache_resource->refcnt));
8124 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8125 		claim_zero(mlx5_glue->destroy_flow_action
8126 				(cache_resource->verbs_action));
8127 		LIST_REMOVE(cache_resource, next);
8128 		rte_free(cache_resource);
8129 		DRV_LOG(DEBUG, "modify-header resource %p: removed",
8130 			(void *)cache_resource);
8131 		return 0;
8132 	}
8133 	return 1;
8134 }
8135 
8136 /**
8137  * Release port ID action resource.
8138  *
8139  * @param flow
8140  *   Pointer to mlx5_flow.
8141  *
8142  * @return
8143  *   1 while a reference on it exists, 0 when freed.
8144  */
8145 static int
8146 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
8147 {
8148 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
8149 		flow->dv.port_id_action;
8150 
8151 	MLX5_ASSERT(cache_resource->action);
8152 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
8153 		(void *)cache_resource,
8154 		rte_atomic32_read(&cache_resource->refcnt));
8155 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8156 		claim_zero(mlx5_glue->destroy_flow_action
8157 				(cache_resource->action));
8158 		LIST_REMOVE(cache_resource, next);
8159 		rte_free(cache_resource);
8160 		DRV_LOG(DEBUG, "port id action resource %p: removed",
8161 			(void *)cache_resource);
8162 		return 0;
8163 	}
8164 	return 1;
8165 }
8166 
8167 /**
8168  * Release push vlan action resource.
8169  *
8170  * @param flow
8171  *   Pointer to mlx5_flow.
8172  *
8173  * @return
8174  *   1 while a reference on it exists, 0 when freed.
8175  */
8176 static int
8177 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
8178 {
8179 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
8180 		flow->dv.push_vlan_res;
8181 
8182 	MLX5_ASSERT(cache_resource->action);
8183 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
8184 		(void *)cache_resource,
8185 		rte_atomic32_read(&cache_resource->refcnt));
8186 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8187 		claim_zero(mlx5_glue->destroy_flow_action
8188 				(cache_resource->action));
8189 		LIST_REMOVE(cache_resource, next);
8190 		rte_free(cache_resource);
8191 		DRV_LOG(DEBUG, "push vlan action resource %p: removed",
8192 			(void *)cache_resource);
8193 		return 0;
8194 	}
8195 	return 1;
8196 }
8197 
8198 /**
8199  * Remove the flow from the NIC but keep it in memory.
8200  * Lock free, the mutex should be acquired by the caller.
8201  *
8202  * @param[in] dev
8203  *   Pointer to Ethernet device.
8204  * @param[in, out] flow
8205  *   Pointer to flow structure.
8206  */
8207 static void
8208 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
8209 {
8210 	struct mlx5_flow_dv *dv;
8211 	struct mlx5_flow *dev_flow;
8212 
8213 	if (!flow)
8214 		return;
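	/*
	 * Tear down the hardware objects of each sub-flow but keep the
	 * software structures, so the flow can be re-applied later.
	 */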
8215 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
8216 		dv = &dev_flow->dv;
8217 		if (dv->flow) {
8218 			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
8219 			dv->flow = NULL;
8220 		}
8221 		if (dv->hrxq) {
8222 			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
8223 				mlx5_hrxq_drop_release(dev);
8224 			else
8225 				mlx5_hrxq_release(dev, dv->hrxq);
8226 			dv->hrxq = NULL;
8227 		}
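		/* Release the VF VLAN workaround resource, if it was created. */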
8228 		if (dev_flow->dv.vf_vlan.tag &&
8229 		    dev_flow->dv.vf_vlan.created)
8230 			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
8231 	}
8232 }
8233 
8234 /**
8235  * Remove the flow from the NIC and the memory.
8236  * Lock free, the mutex should be acquired by the caller.
8237  *
8238  * @param[in] dev
8239  *   Pointer to the Ethernet device structure.
8240  * @param[in, out] flow
8241  *   Pointer to flow structure.
8242  */
8243 static void
8244 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
8245 {
8246 	struct mlx5_flow *dev_flow;
8247 
8248 	if (!flow)
8249 		return;
8250 	__flow_dv_remove(dev, flow);
8251 	if (flow->counter) {
8252 		flow_dv_counter_release(dev, flow->counter);
8253 		flow->counter = NULL;
8254 	}
8255 	if (flow->meter) {
8256 		mlx5_flow_meter_detach(flow->meter);
8257 		flow->meter = NULL;
8258 	}
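	/*
	 * Drop the references each sub-flow holds on cached resources,
	 * then free the sub-flow structures themselves.
	 */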
8259 	while (!LIST_EMPTY(&flow->dev_flows)) {
8260 		dev_flow = LIST_FIRST(&flow->dev_flows);
8261 		LIST_REMOVE(dev_flow, next);
8262 		if (dev_flow->dv.matcher)
8263 			flow_dv_matcher_release(dev, dev_flow);
8264 		if (dev_flow->dv.encap_decap)
8265 			flow_dv_encap_decap_resource_release(dev_flow);
8266 		if (dev_flow->dv.modify_hdr)
8267 			flow_dv_modify_hdr_resource_release(dev_flow);
8268 		if (dev_flow->dv.jump)
8269 			flow_dv_jump_tbl_resource_release(dev, dev_flow);
8270 		if (dev_flow->dv.port_id_action)
8271 			flow_dv_port_id_action_resource_release(dev_flow);
8272 		if (dev_flow->dv.push_vlan_res)
8273 			flow_dv_push_vlan_action_resource_release(dev_flow);
8274 		if (dev_flow->dv.tag_resource)
8275 			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
8276 		rte_free(dev_flow);
8277 	}
8278 }
8279 
8280 /**
8281  * Query a DV flow rule for its statistics via DevX.
8282  *
8283  * @param[in] dev
8284  *   Pointer to Ethernet device.
8285  * @param[in] flow
8286  *   Pointer to the sub flow.
8287  * @param[out] data
8288  *   Data retrieved by the query.
8289  * @param[out] error
8290  *   Perform verbose error reporting if not NULL.
8291  *
8292  * @return
8293  *   0 on success, a negative errno value otherwise and rte_errno is set.
8294  */
8295 static int
8296 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
8297 		    void *data, struct rte_flow_error *error)
8298 {
8299 	struct mlx5_priv *priv = dev->data->dev_private;
8300 	struct rte_flow_query_count *qc = data;
8301 
8302 	if (!priv->config.devx)
8303 		return rte_flow_error_set(error, ENOTSUP,
8304 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8305 					  NULL,
8306 					  "counters are not supported");
8307 	if (flow->counter) {
8308 		uint64_t pkts, bytes;
8309 		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
8310 					       &bytes);
8311 
8312 		if (err)
8313 			return rte_flow_error_set(error, -err,
8314 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8315 					NULL, "cannot read counters");
8316 		qc->hits_set = 1;
8317 		qc->bytes_set = 1;
8318 		qc->hits = pkts - flow->counter->hits;
8319 		qc->bytes = bytes - flow->counter->bytes;
8320 		if (qc->reset) {
8321 			flow->counter->hits = pkts;
8322 			flow->counter->bytes = bytes;
8323 		}
8324 		return 0;
8325 	}
8326 	return rte_flow_error_set(error, EINVAL,
8327 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8328 				  NULL,
8329 				  "counters are not available");
8330 }
8331 
8332 /**
8333  * Query a flow.
8334  *
8335  * @see rte_flow_query()
8336  * @see rte_flow_ops
8337  */
8338 static int
8339 flow_dv_query(struct rte_eth_dev *dev,
8340 	      struct rte_flow *flow,
8341 	      const struct rte_flow_action *actions,
8342 	      void *data,
8343 	      struct rte_flow_error *error)
8344 {
8345 	int ret = -EINVAL;
8346 
8347 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
8348 		switch (actions->type) {
8349 		case RTE_FLOW_ACTION_TYPE_VOID:
8350 			break;
8351 		case RTE_FLOW_ACTION_TYPE_COUNT:
8352 			ret = flow_dv_query_count(dev, flow, data, error);
8353 			break;
8354 		default:
8355 			return rte_flow_error_set(error, ENOTSUP,
8356 						  RTE_FLOW_ERROR_TYPE_ACTION,
8357 						  actions,
8358 						  "action not supported");
8359 		}
8360 	}
8361 	return ret;
8362 }
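
/*
 * Illustrative application-level usage of the count query path above;
 * a sketch only, assuming port_id and flow already exist:
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, actions, &qc, &err) &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */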
8363 
8364 /**
8365  * Destroy the meter table set.
8366  * Lock free, the mutex should be acquired by the caller.
8367  *
8368  * @param[in] dev
8369  *   Pointer to Ethernet device.
8370  * @param[in] tbl
8371  *   Pointer to the meter table set.
8372  *
8373  * @return
8374  *   Always 0.
8375  */
8376 static int
8377 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
8378 			struct mlx5_meter_domains_infos *tbl)
8379 {
8380 	struct mlx5_priv *priv = dev->data->dev_private;
8381 	struct mlx5_meter_domains_infos *mtd = tbl;
8383 
8384 	if (!mtd || !priv->config.dv_flow_en)
8385 		return 0;
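	/* Destroy the policer rules first, then the matchers and tables. */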
8386 	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
8387 		claim_zero(mlx5_glue->dv_destroy_flow
8388 			  (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
8389 	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
8390 		claim_zero(mlx5_glue->dv_destroy_flow
8391 			  (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
8392 	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
8393 		claim_zero(mlx5_glue->dv_destroy_flow
8394 			  (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
8395 	if (mtd->egress.color_matcher)
8396 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8397 			  (mtd->egress.color_matcher));
8398 	if (mtd->egress.any_matcher)
8399 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8400 			  (mtd->egress.any_matcher));
8401 	if (mtd->egress.tbl)
8402 		claim_zero(flow_dv_tbl_resource_release(dev,
8403 							mtd->egress.tbl));
8404 	if (mtd->ingress.color_matcher)
8405 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8406 			  (mtd->ingress.color_matcher));
8407 	if (mtd->ingress.any_matcher)
8408 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8409 			  (mtd->ingress.any_matcher));
8410 	if (mtd->ingress.tbl)
8411 		claim_zero(flow_dv_tbl_resource_release(dev,
8412 							mtd->ingress.tbl));
8413 	if (mtd->transfer.color_matcher)
8414 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8415 			  (mtd->transfer.color_matcher));
8416 	if (mtd->transfer.any_matcher)
8417 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
8418 			  (mtd->transfer.any_matcher));
8419 	if (mtd->transfer.tbl)
8420 		claim_zero(flow_dv_tbl_resource_release(dev,
8421 							mtd->transfer.tbl));
8422 	if (mtd->drop_actn)
8423 		claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
8424 	rte_free(mtd);
8425 	return 0;
8426 }
8427 
8428 /* Number of meter flow actions, count and jump or count and drop. */
8429 #define METER_ACTIONS 2
8430 
8431 /**
8432  * Create the specified domain meter table and suffix table.
8433  *
8434  * @param[in] dev
8435  *   Pointer to Ethernet device.
8436  * @param[in,out] mtb
8437  *   Pointer to DV meter table set.
8438  * @param[in] egress
8439  *   Table attribute, nonzero to create the egress domain tables.
8440  * @param[in] transfer
8441  *   Table attribute, nonzero to create the FDB (transfer) domain tables.
8442  * @param[in] color_reg_c_idx
8443  *   Reg C index for color match.
8444  *
8445  * @return
8446  *   0 on success, -1 otherwise and rte_errno is set.
8447  */
8448 static int
8449 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
8450 			   struct mlx5_meter_domains_infos *mtb,
8451 			   uint8_t egress, uint8_t transfer,
8452 			   uint32_t color_reg_c_idx)
8453 {
8454 	struct mlx5_priv *priv = dev->data->dev_private;
8455 	struct mlx5_ibv_shared *sh = priv->sh;
8456 	struct mlx5_flow_dv_match_params mask = {
8457 		.size = sizeof(mask.buf),
8458 	};
8459 	struct mlx5_flow_dv_match_params value = {
8460 		.size = sizeof(value.buf),
8461 	};
8462 	struct mlx5dv_flow_matcher_attr dv_attr = {
8463 		.type = IBV_FLOW_ATTR_NORMAL,
8464 		.priority = 0,
8465 		.match_criteria_enable = 0,
8466 		.match_mask = (void *)&mask,
8467 	};
8468 	void *actions[METER_ACTIONS];
8469 	struct mlx5_flow_tbl_resource **sfx_tbl;
8470 	struct mlx5_meter_domain_info *dtb;
8471 	struct rte_flow_error error;
8472 	int i = 0;
8473 
8474 	if (transfer) {
8475 		sfx_tbl = &sh->fdb_mtr_sfx_tbl;
8476 		dtb = &mtb->transfer;
8477 	} else if (egress) {
8478 		sfx_tbl = &sh->tx_mtr_sfx_tbl;
8479 		dtb = &mtb->egress;
8480 	} else {
8481 		sfx_tbl = &sh->rx_mtr_sfx_tbl;
8482 		dtb = &mtb->ingress;
8483 	}
8484 	/* If the suffix table is missing, create it. */
8485 	if (!(*sfx_tbl)) {
8486 		*sfx_tbl = flow_dv_tbl_resource_get(dev,
8487 						MLX5_FLOW_TABLE_LEVEL_SUFFIX,
8488 						egress, transfer, &error);
8489 		if (!(*sfx_tbl)) {
8490 			DRV_LOG(ERR, "Failed to create meter suffix table.");
8491 			return -1;
8492 		}
8493 	}
8494 	/* Create the meter table with METER level. */
8495 	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
8496 					    egress, transfer, &error);
8497 	if (!dtb->tbl) {
8498 		DRV_LOG(ERR, "Failed to create meter policer table.");
8499 		return -1;
8500 	}
8501 	/* Create matchers, Any and Color. */
8502 	dv_attr.priority = 3;
8503 	dv_attr.match_criteria_enable = 0;
8504 	dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8505 							     &dv_attr,
8506 							     dtb->tbl->obj);
8507 	if (!dtb->any_matcher) {
8508 		DRV_LOG(ERR, "Failed to create meter"
8509 			     " policer default matcher.");
8510 		goto error_exit;
8511 	}
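	/* Color matcher: priority 0 (highest), matches the color REG_C. */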
8512 	dv_attr.priority = 0;
8513 	dv_attr.match_criteria_enable =
8514 				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8515 	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
8516 			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
8517 	dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8518 							       &dv_attr,
8519 							       dtb->tbl->obj);
8520 	if (!dtb->color_matcher) {
8521 		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
8522 		goto error_exit;
8523 	}
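	/* Assemble the default-rule actions: optional counter, then drop. */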
8524 	if (mtb->count_actns[RTE_MTR_DROPPED])
8525 		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
8526 	actions[i++] = mtb->drop_actn;
8527 	/* Default rule: lowest priority, match any, actions: drop. */
8528 	dtb->policer_rules[RTE_MTR_DROPPED] =
8529 			mlx5_glue->dv_create_flow(dtb->any_matcher,
8530 						 (void *)&value, i, actions);
8531 	if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
8532 		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
8533 		goto error_exit;
8534 	}
8535 	return 0;
8536 error_exit:
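	/*
	 * No rollback here, the caller releases any partially created
	 * objects through flow_dv_destroy_mtr_tbl().
	 */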
8537 	return -1;
8538 }
8539 
8540 /**
8541  * Create the needed meter and suffix tables.
8542  * Lock free, the mutex should be acquired by the caller.
8543  *
8544  * @param[in] dev
8545  *   Pointer to Ethernet device.
8546  * @param[in] fm
8547  *   Pointer to the flow meter.
8548  *
8549  * @return
8550  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
8551  */
8552 static struct mlx5_meter_domains_infos *
8553 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
8554 		       const struct mlx5_flow_meter *fm)
8555 {
8556 	struct mlx5_priv *priv = dev->data->dev_private;
8557 	struct mlx5_meter_domains_infos *mtb;
8558 	int ret;
8559 	int i;
8560 
8561 	if (!priv->mtr_en) {
8562 		rte_errno = ENOTSUP;
8563 		return NULL;
8564 	}
8565 	mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
8566 	if (!mtb) {
8567 		DRV_LOG(ERR, "Failed to allocate memory for meter.");
8568 		return NULL;
8569 	}
8570 	/* Create the meter count actions. */
8571 	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
8572 		if (!fm->policer_stats.cnt[i])
8573 			continue;
8574 		mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
8575 	}
8576 	/* Create drop action. */
8577 	mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
8578 	if (!mtb->drop_actn) {
8579 		DRV_LOG(ERR, "Failed to create drop action.");
8580 		goto error_exit;
8581 	}
8582 	/* Egress meter table. */
8583 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
8584 	if (ret) {
8585 		DRV_LOG(ERR, "Failed to prepare egress meter table.");
8586 		goto error_exit;
8587 	}
8588 	/* Ingress meter table. */
8589 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
8590 	if (ret) {
8591 		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
8592 		goto error_exit;
8593 	}
8594 	/* FDB meter table. */
8595 	if (priv->config.dv_esw_en) {
8596 		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
8597 						 priv->mtr_color_reg);
8598 		if (ret) {
8599 			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
8600 			goto error_exit;
8601 		}
8602 	}
8603 	return mtb;
8604 error_exit:
8605 	flow_dv_destroy_mtr_tbl(dev, mtb);
8606 	return NULL;
8607 }
8608 
8609 /**
8610  * Destroy domain policer rule.
8611  *
8612  * @param[in] dt
8613  *   Pointer to domain table.
8614  */
8615 static void
8616 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
8617 {
8618 	int i;
8619 
8620 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
8621 		if (dt->policer_rules[i]) {
8622 			claim_zero(mlx5_glue->dv_destroy_flow
8623 				  (dt->policer_rules[i]));
8624 			dt->policer_rules[i] = NULL;
8625 		}
8626 	}
8627 	if (dt->jump_actn) {
8628 		claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
8629 		dt->jump_actn = NULL;
8630 	}
8631 }
8632 
8633 /**
8634  * Destroy policer rules.
8635  *
8636  * @param[in] dev
8637  *   Pointer to Ethernet device.
8638  * @param[in] fm
8639  *   Pointer to flow meter structure.
8640  * @param[in] attr
8641  *   Pointer to flow attributes.
8642  *
8643  * @return
8644  *   Always 0.
8645  */
8646 static int
8647 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
8648 			      const struct mlx5_flow_meter *fm,
8649 			      const struct rte_flow_attr *attr)
8650 {
8651 	struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
8652 
8653 	if (!mtb)
8654 		return 0;
8655 	if (attr->egress)
8656 		flow_dv_destroy_domain_policer_rule(&mtb->egress);
8657 	if (attr->ingress)
8658 		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
8659 	if (attr->transfer)
8660 		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
8661 	return 0;
8662 }
8663 
8664 /**
8665  * Create the specified domain meter policer rule.
8666  *
8667  * @param[in] fm
8668  *   Pointer to flow meter structure.
8669  * @param[in] dtb
8670  *   Pointer to the meter domain table info.
8671  * @param[in] sfx_tb
8672  *   Pointer to suffix table.
8673  * @param[in] mtr_reg_c
8674  *   Color match REG_C.
8675  *
8676  * @return
8677  *   0 on success, -1 otherwise.
8678  */
8679 static int
8680 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
8681 				    struct mlx5_meter_domain_info *dtb,
8682 				    struct mlx5_flow_tbl_resource *sfx_tb,
8683 				    uint8_t mtr_reg_c)
8684 {
8685 	struct mlx5_flow_dv_match_params matcher = {
8686 		.size = sizeof(matcher.buf),
8687 	};
8688 	struct mlx5_flow_dv_match_params value = {
8689 		.size = sizeof(value.buf),
8690 	};
8691 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
8692 	void *actions[METER_ACTIONS];
8693 	int i;
8694 
8695 	/* Validate the suffix table and create the jump action to it. */
8696 	if (!sfx_tb)
8697 		return -1;
8698 	if (!dtb->jump_actn)
8699 		dtb->jump_actn =
8700 			mlx5_glue->dr_create_flow_action_dest_flow_tbl
8701 							(sfx_tb->obj);
8702 	if (!dtb->jump_actn) {
8703 		DRV_LOG(ERR, "Failed to create policer jump action.");
8704 		goto error;
8705 	}
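	/*
	 * Create one rule per color: match the color value in REG_C and
	 * count (if enabled), then either drop or jump to the suffix table.
	 */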
8706 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
8707 		int j = 0;
8708 
8709 		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
8710 				       rte_col_2_mlx5_col(i), UINT8_MAX);
8711 		if (mtb->count_actns[i])
8712 			actions[j++] = mtb->count_actns[i];
8713 		if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
8714 			actions[j++] = mtb->drop_actn;
8715 		else
8716 			actions[j++] = dtb->jump_actn;
8717 		dtb->policer_rules[i] =
8718 			mlx5_glue->dv_create_flow(dtb->color_matcher,
8719 						 (void *)&value,
8720 						  j, actions);
8721 		if (!dtb->policer_rules[i]) {
8722 			DRV_LOG(ERR, "Failed to create policer rule.");
8723 			goto error;
8724 		}
8725 	}
8726 	return 0;
8727 error:
8728 	rte_errno = errno;
8729 	return -1;
8730 }
8731 
8732 /**
8733  * Create policer rules.
8734  *
8735  * @param[in] dev
8736  *   Pointer to Ethernet device.
8737  * @param[in] fm
8738  *   Pointer to flow meter structure.
8739  * @param[in] attr
8740  *   Pointer to flow attributes.
8741  *
8742  * @return
8743  *   0 on success, -1 otherwise.
8744  */
8745 static int
8746 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
8747 			     struct mlx5_flow_meter *fm,
8748 			     const struct rte_flow_attr *attr)
8749 {
8750 	struct mlx5_priv *priv = dev->data->dev_private;
8751 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
8752 	int ret;
8753 
8754 	if (attr->egress) {
8755 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
8756 						priv->sh->tx_mtr_sfx_tbl,
8757 						priv->mtr_color_reg);
8758 		if (ret) {
8759 			DRV_LOG(ERR, "Failed to create egress policer.");
8760 			goto error;
8761 		}
8762 	}
8763 	if (attr->ingress) {
8764 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
8765 						priv->sh->rx_mtr_sfx_tbl,
8766 						priv->mtr_color_reg);
8767 		if (ret) {
8768 			DRV_LOG(ERR, "Failed to create ingress policer.");
8769 			goto error;
8770 		}
8771 	}
8772 	if (attr->transfer) {
8773 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
8774 						priv->sh->fdb_mtr_sfx_tbl,
8775 						priv->mtr_color_reg);
8776 		if (ret) {
8777 			DRV_LOG(ERR, "Failed to create transfer policer.");
8778 			goto error;
8779 		}
8780 	}
8781 	return 0;
8782 error:
8783 	flow_dv_destroy_policer_rules(dev, fm, attr);
8784 	return -1;
8785 }
8786 
8787 /**
8788  * Query a devx counter.
8789  *
8790  * @param[in] dev
8791  *   Pointer to the Ethernet device structure.
8792  * @param[in] cnt
8793  *   Pointer to the flow counter.
8794  * @param[in] clear
8795  *   Set to clear the counter statistics.
8796  * @param[out] pkts
8797  *   The statistics value of packets.
8798  * @param[out] bytes
8799  *   The statistics value of bytes.
8800  *
8801  * @return
8802  *   0 on success, otherwise return -1.
8803  */
8804 static int
8805 flow_dv_counter_query(struct rte_eth_dev *dev,
8806 		      struct mlx5_flow_counter *cnt, bool clear,
8807 		      uint64_t *pkts, uint64_t *bytes)
8808 {
8809 	struct mlx5_priv *priv = dev->data->dev_private;
8810 	uint64_t inn_pkts, inn_bytes;
8811 	int ret;
8812 
8813 	if (!priv->config.devx)
8814 		return -1;
8815 	ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
8816 	if (ret)
8817 		return -1;
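	/* Report deltas relative to the last reset, rebase when clearing. */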
8818 	*pkts = inn_pkts - cnt->hits;
8819 	*bytes = inn_bytes - cnt->bytes;
8820 	if (clear) {
8821 		cnt->hits = inn_pkts;
8822 		cnt->bytes = inn_bytes;
8823 	}
8824 	return 0;
8825 }
8826 
8827 /*
8828  * Mutex-protected thunk to lock-free __flow_dv_translate().
8829  */
8830 static int
8831 flow_dv_translate(struct rte_eth_dev *dev,
8832 		  struct mlx5_flow *dev_flow,
8833 		  const struct rte_flow_attr *attr,
8834 		  const struct rte_flow_item items[],
8835 		  const struct rte_flow_action actions[],
8836 		  struct rte_flow_error *error)
8837 {
8838 	int ret;
8839 
8840 	flow_dv_shared_lock(dev);
8841 	ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
8842 	flow_dv_shared_unlock(dev);
8843 	return ret;
8844 }
8845 
8846 /*
8847  * Mutex-protected thunk to lock-free __flow_dv_apply().
8848  */
8849 static int
8850 flow_dv_apply(struct rte_eth_dev *dev,
8851 	      struct rte_flow *flow,
8852 	      struct rte_flow_error *error)
8853 {
8854 	int ret;
8855 
8856 	flow_dv_shared_lock(dev);
8857 	ret = __flow_dv_apply(dev, flow, error);
8858 	flow_dv_shared_unlock(dev);
8859 	return ret;
8860 }
8861 
8862 /*
8863  * Mutex-protected thunk to lock-free __flow_dv_remove().
8864  */
8865 static void
8866 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
8867 {
8868 	flow_dv_shared_lock(dev);
8869 	__flow_dv_remove(dev, flow);
8870 	flow_dv_shared_unlock(dev);
8871 }
8872 
8873 /*
8874  * Mutex-protected thunk to lock-free __flow_dv_destroy().
8875  */
8876 static void
8877 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
8878 {
8879 	flow_dv_shared_lock(dev);
8880 	__flow_dv_destroy(dev, flow);
8881 	flow_dv_shared_unlock(dev);
8882 }
8883 
8884 /*
8885  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
8886  */
8887 static struct mlx5_flow_counter *
8888 flow_dv_counter_allocate(struct rte_eth_dev *dev)
8889 {
8890 	struct mlx5_flow_counter *cnt;
8891 
8892 	flow_dv_shared_lock(dev);
8893 	cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
8894 	flow_dv_shared_unlock(dev);
8895 	return cnt;
8896 }
8897 
8898 /*
8899  * Mutex-protected thunk to lock-free flow_dv_counter_release().
8900  */
8901 static void
8902 flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
8903 {
8904 	flow_dv_shared_lock(dev);
8905 	flow_dv_counter_release(dev, cnt);
8906 	flow_dv_shared_unlock(dev);
8907 }
8908 
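/*
 * Flow engine callbacks for the DV backend; the generic mlx5 flow layer
 * dispatches to these when the DV flow engine is selected.
 */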
8909 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
8910 	.validate = flow_dv_validate,
8911 	.prepare = flow_dv_prepare,
8912 	.translate = flow_dv_translate,
8913 	.apply = flow_dv_apply,
8914 	.remove = flow_dv_remove,
8915 	.destroy = flow_dv_destroy,
8916 	.query = flow_dv_query,
8917 	.create_mtr_tbls = flow_dv_create_mtr_tbl,
8918 	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
8919 	.create_policer_rules = flow_dv_create_policer_rules,
8920 	.destroy_policer_rules = flow_dv_destroy_policer_rules,
8921 	.counter_alloc = flow_dv_counter_allocate,
8922 	.counter_free = flow_dv_counter_free,
8923 	.counter_query = flow_dv_counter_query,
8924 };
8925 
8926 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
8927