/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
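/*
 * The masks above follow the standard 802.1Q TCI layout:
 * bits 15-13 PCP, bit 12 DEI, bits 11-0 VID.
 */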

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};
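/*
 * The "attr" overlay allows all flag bits to be set or cleared at once,
 * e.g. "attr->attr = 0" resets the whole structure when a tunnel item
 * restarts the outer-layer scan in flow_dv_attr_init() below.
 */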

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  uint32_t rix_jump);

/**
 * Initialize the flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layer combinations except in
 * tunnel mode. In tunnel mode, the items to be modified are the outermost
 * ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, this dev_flow is the suffix
	 * flow and its layer flags were set by the prefix flow. Use the
	 * layer flags from the prefix flow, as the suffix flow may not
	 * contain the user-defined items since the flow is split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
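/*
 * For example (illustrative), an ETH / IPV4 / UDP / END pattern sets
 * attr->ipv4, attr->udp and attr->valid, while a trailing VXLAN item
 * with tunnel_decap == true resets the accumulated flags so that the
 * inner headers are described instead.
 */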

/**
 * Convert rte_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_color value.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits, unlike the other tables! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;

	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}
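/*
 * For example (illustrative), an OF_SET_VLAN_PCP action with
 * vlan_pcp = 5 rewrites bits 15-13 of vlan->vlan_tci to 5, and an
 * OF_SET_VLAN_VID action with vlan_vid = RTE_BE16(100) rewrites
 * bits 11-0 to 100; all other TCI bits are preserved.
 */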

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}
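/*
 * For example (illustrative), data = {0x12, 0x34, 0x56} with size = 3
 * returns 0x123456 on any host endianness: the first two bytes are
 * converted as a big-endian 16-bit word and the third byte is appended
 * in the low 8 bits.
 */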

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description, while the data bit offset and width of each action are
 * deduced from the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item spec and mask are provided in big-endian format, so the
	 * fields are presented as big-endian ones as well. The mask must
	 * always be present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
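		/*
		 * Worked example (illustrative): mask = 0x00fff000 gives
		 * off_b = rte_bsf32(mask) = 12 and size_b = 32 - 12 -
		 * __builtin_clz(mask) = 32 - 12 - 8 = 12, i.e. a 12-bit
		 * field starting at bit 12. A length of 0 encodes the
		 * full 32 bits.
		 */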
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			++dcopy;
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
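/*
 * Usage sketch (illustrative only; the locals below are hypothetical,
 * a union reserving room behind the flexible actions[] array):
 *
 *	union {
 *		struct mlx5_flow_dv_modify_hdr_resource res;
 *		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
 *			    sizeof(struct mlx5_modification_cmd) *
 *			    (MLX5_MAX_MODIFY_NUM + 1)];
 *	} mhdr_dummy = { .res = { .actions_num = 0 } };
 *	struct rte_flow_action_set_ipv4 conf = {
 *		.ipv4_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *		.conf = &conf,
 *	};
 *	struct rte_flow_error err;
 *
 *	flow_dv_convert_action_modify_ipv4(&mhdr_dummy.res, &act, &err);
 *
 * On success this appends one MLX5_MODIFICATION_TYPE_SET command on
 * MLX5_MODI_OUT_SIPV4 whose 32-bit data is the address 10.0.0.1.
 */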

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
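	/*
	 * Note (illustrative): conf->vlan_vid is big-endian. On a
	 * little-endian host, shifting it left by 16 in host order
	 * leaves the VID already laid out in memory as the big-endian
	 * dword expected in data1, so no explicit rte_cpu_to_be_32()
	 * is applied here.
	 */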
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only an increment
		 * operation. To simulate decrementing Y by X with the
		 * increment operation, we add UINT32_MAX to Y X times;
		 * each addition of UINT32_MAX decrements Y by 1 modulo 2^32.
		 */
		value *= UINT32_MAX;
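	/*
	 * Illustrative check: to decrement by 2, value becomes
	 * 2 * UINT32_MAX = 0x1fffffffe; its low 32 bits are 0xfffffffe,
	 * and adding 0xfffffffe modulo 2^32 equals subtracting 2.
	 */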
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only an increment
		 * operation. To simulate decrementing Y by X with the
		 * increment operation, we add UINT32_MAX to Y X times;
		 * each addition of UINT32_MAX decrements Y by 1 modulo 2^32.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};

	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores the endianness because
			 * there is no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
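	/*
	 * For example (illustrative), with dv_regc0_mask == 0xffff0000 on
	 * a little-endian host, copying to REG_C_0 sets reg_dst.offset =
	 * rte_bsf32(0xffff0000) = 16 and keeps mask = 0xffff0000, so only
	 * the upper half of reg_c[0] is written.
	 */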
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires the metadata register to be
 * handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	int reg =
		mlx5_flow_get_reg_id(dev, attr->transfer ?
					  MLX5_METADATA_FDB :
					    attr->egress ?
					    MLX5_METADATA_TX :
					    MLX5_METADATA_RX, 0, error);
	if (reg < 0)
		return rte_flow_error_set(error,
					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "unavailable "
					  "metadata register");
	return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In the datapath code there are no endianness conversions
	 * for performance reasons; all pattern conversions are done
	 * in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
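	/*
	 * For example (illustrative), msk_c0 = 0x00ffff00 on a
	 * little-endian host gives shl_c0 = 32 - rte_fls_u32(msk_c0) =
	 * 32 - 24 = 8, so mask and data are shifted into the bits that
	 * reg_c[0] actually exposes.
	 */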
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bits of IPv6 are not byte aligned,
	 * rdma-core only accepts the DSCP value byte aligned, in bits
	 * 0 to 5, to be compatible with IPv4. There is no need to shift
	 * the bits in the IPv6 case, as rdma-core requires the
	 * byte-aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
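/*
 * For example (illustrative), on a little-endian host a SET_IPV6_DSCP
 * action with conf->dscp = 0x2e (the EF codepoint) produces a single
 * 6-bit MLX5_MODIFICATION_TYPE_SET command on MLX5_MODI_OUT_IP_DSCP
 * with data 0x2e.
 */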
1327 
1328 static void
1329 mlx5_flow_field_id_to_modify_info
1330 		(const struct rte_flow_action_modify_data *data,
1331 		 struct field_modify_info *info,
1332 		 uint32_t *mask, uint32_t *value, uint32_t width,
1333 		 struct rte_eth_dev *dev,
1334 		 const struct rte_flow_attr *attr,
1335 		 struct rte_flow_error *error)
1336 {
1337 	uint32_t idx = 0;
1338 	switch (data->field) {
1339 	case RTE_FLOW_FIELD_START:
1340 		/* not supported yet */
1341 		MLX5_ASSERT(false);
1342 		break;
1343 	case RTE_FLOW_FIELD_MAC_DST:
1344 		if (mask) {
1345 			if (data->offset < 32) {
1346 				info[idx] = (struct field_modify_info){4, 0,
1347 						MLX5_MODI_OUT_DMAC_47_16};
1348 				if (width < 32) {
1349 					mask[idx] =
1350 						rte_cpu_to_be_32(0xffffffff >>
1351 								 (32 - width));
1352 					width = 0;
1353 				} else {
1354 					mask[idx] = RTE_BE32(0xffffffff);
1355 					width -= 32;
1356 				}
1357 				if (!width)
1358 					break;
1359 				++idx;
1360 			}
1361 			info[idx] = (struct field_modify_info){2, 4 * idx,
1362 						MLX5_MODI_OUT_DMAC_15_0};
1363 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1364 						     (16 - width));
1365 		} else {
1366 			if (data->offset < 32)
1367 				info[idx++] = (struct field_modify_info){4, 0,
1368 						MLX5_MODI_OUT_DMAC_47_16};
1369 			info[idx] = (struct field_modify_info){2, 0,
1370 						MLX5_MODI_OUT_DMAC_15_0};
1371 		}
1372 		break;
1373 	case RTE_FLOW_FIELD_MAC_SRC:
1374 		if (mask) {
1375 			if (data->offset < 32) {
1376 				info[idx] = (struct field_modify_info){4, 0,
1377 						MLX5_MODI_OUT_SMAC_47_16};
1378 				if (width < 32) {
1379 					mask[idx] =
1380 						rte_cpu_to_be_32(0xffffffff >>
1381 								(32 - width));
1382 					width = 0;
1383 				} else {
1384 					mask[idx] = RTE_BE32(0xffffffff);
1385 					width -= 32;
1386 				}
1387 				if (!width)
1388 					break;
1389 				++idx;
1390 			}
1391 			info[idx] = (struct field_modify_info){2, 4 * idx,
1392 						MLX5_MODI_OUT_SMAC_15_0};
1393 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1394 						     (16 - width));
1395 		} else {
1396 			if (data->offset < 32)
1397 				info[idx++] = (struct field_modify_info){4, 0,
1398 						MLX5_MODI_OUT_SMAC_47_16};
1399 			info[idx] = (struct field_modify_info){2, 0,
1400 						MLX5_MODI_OUT_SMAC_15_0};
1401 		}
1402 		break;
1403 	case RTE_FLOW_FIELD_VLAN_TYPE:
1404 		/* not supported yet */
1405 		break;
1406 	case RTE_FLOW_FIELD_VLAN_ID:
1407 		info[idx] = (struct field_modify_info){2, 0,
1408 					MLX5_MODI_OUT_FIRST_VID};
1409 		if (mask)
1410 			mask[idx] = rte_cpu_to_be_32(0x00000fff >>
1411 						     (12 - width));
1412 		break;
1413 	case RTE_FLOW_FIELD_MAC_TYPE:
1414 		info[idx] = (struct field_modify_info){2, 0,
1415 					MLX5_MODI_OUT_ETHERTYPE};
1416 		if (mask)
1417 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1418 						     (16 - width));
1419 		break;
1420 	case RTE_FLOW_FIELD_IPV4_DSCP:
1421 		info[idx] = (struct field_modify_info){1, 0,
1422 					MLX5_MODI_OUT_IP_DSCP};
1423 		if (mask)
1424 			mask[idx] = rte_cpu_to_be_32(0x0000003f >>
1425 						     (6 - width));
1426 		break;
1427 	case RTE_FLOW_FIELD_IPV4_TTL:
1428 		info[idx] = (struct field_modify_info){1, 0,
1429 					MLX5_MODI_OUT_IPV4_TTL};
1430 		if (mask)
1431 			mask[idx] = rte_cpu_to_be_32(0x000000ff >>
1432 						     (8 - width));
1433 		break;
1434 	case RTE_FLOW_FIELD_IPV4_SRC:
1435 		info[idx] = (struct field_modify_info){4, 0,
1436 					MLX5_MODI_OUT_SIPV4};
1437 		if (mask)
1438 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1439 						     (32 - width));
1440 		break;
1441 	case RTE_FLOW_FIELD_IPV4_DST:
1442 		info[idx] = (struct field_modify_info){4, 0,
1443 					MLX5_MODI_OUT_DIPV4};
1444 		if (mask)
1445 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1446 						     (32 - width));
1447 		break;
1448 	case RTE_FLOW_FIELD_IPV6_DSCP:
1449 		info[idx] = (struct field_modify_info){1, 0,
1450 					MLX5_MODI_OUT_IP_DSCP};
1451 		if (mask)
1452 			mask[idx] = rte_cpu_to_be_32(0x0000003f >>
1453 						     (6 - width));
1454 		break;
1455 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1456 		info[idx] = (struct field_modify_info){1, 0,
1457 					MLX5_MODI_OUT_IPV6_HOPLIMIT};
1458 		if (mask)
1459 			mask[idx] = rte_cpu_to_be_32(0x000000ff >>
1460 						     (8 - width));
1461 		break;
1462 	case RTE_FLOW_FIELD_IPV6_SRC:
1463 		if (mask) {
1464 			if (data->offset < 32) {
1465 				info[idx] = (struct field_modify_info){4, 0,
1466 						MLX5_MODI_OUT_SIPV6_127_96};
1467 				if (width < 32) {
1468 					mask[idx] =
1469 						rte_cpu_to_be_32(0xffffffff >>
1470 								 (32 - width));
1471 					width = 0;
1472 				} else {
1473 					mask[idx] = RTE_BE32(0xffffffff);
1474 					width -= 32;
1475 				}
1476 				if (!width)
1477 					break;
1478 				++idx;
1479 			}
1480 			if (data->offset < 64) {
1481 				info[idx] = (struct field_modify_info){4,
1482 						4 * idx,
1483 						MLX5_MODI_OUT_SIPV6_95_64};
1484 				if (width < 32) {
1485 					mask[idx] =
1486 						rte_cpu_to_be_32(0xffffffff >>
1487 								 (32 - width));
1488 					width = 0;
1489 				} else {
1490 					mask[idx] = RTE_BE32(0xffffffff);
1491 					width -= 32;
1492 				}
1493 				if (!width)
1494 					break;
1495 				++idx;
1496 			}
1497 			if (data->offset < 96) {
1498 				info[idx] = (struct field_modify_info){4,
1499 						8 * idx,
1500 						MLX5_MODI_OUT_SIPV6_63_32};
1501 				if (width < 32) {
1502 					mask[idx] =
1503 						rte_cpu_to_be_32(0xffffffff >>
1504 								 (32 - width));
1505 					width = 0;
1506 				} else {
1507 					mask[idx] = RTE_BE32(0xffffffff);
1508 					width -= 32;
1509 				}
1510 				if (!width)
1511 					break;
1512 				++idx;
1513 			}
1514 			info[idx] = (struct field_modify_info){4, 12 * idx,
1515 						MLX5_MODI_OUT_SIPV6_31_0};
1516 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1517 						     (32 - width));
1518 		} else {
1519 			if (data->offset < 32)
1520 				info[idx++] = (struct field_modify_info){4, 0,
1521 						MLX5_MODI_OUT_SIPV6_127_96};
1522 			if (data->offset < 64)
1523 				info[idx++] = (struct field_modify_info){4, 0,
1524 						MLX5_MODI_OUT_SIPV6_95_64};
1525 			if (data->offset < 96)
1526 				info[idx++] = (struct field_modify_info){4, 0,
1527 						MLX5_MODI_OUT_SIPV6_63_32};
1528 			if (data->offset < 128)
1529 				info[idx++] = (struct field_modify_info){4, 0,
1530 						MLX5_MODI_OUT_SIPV6_31_0};
1531 		}
1532 		break;
1533 	case RTE_FLOW_FIELD_IPV6_DST:
1534 		if (mask) {
1535 			if (data->offset < 32) {
1536 				info[idx] = (struct field_modify_info){4, 0,
1537 						MLX5_MODI_OUT_DIPV6_127_96};
1538 				if (width < 32) {
1539 					mask[idx] =
1540 						rte_cpu_to_be_32(0xffffffff >>
1541 								 (32 - width));
1542 					width = 0;
1543 				} else {
1544 					mask[idx] = RTE_BE32(0xffffffff);
1545 					width -= 32;
1546 				}
1547 				if (!width)
1548 					break;
1549 				++idx;
1550 			}
1551 			if (data->offset < 64) {
1552 				info[idx] = (struct field_modify_info){4,
1553 						4 * idx,
1554 						MLX5_MODI_OUT_DIPV6_95_64};
1555 				if (width < 32) {
1556 					mask[idx] =
1557 						rte_cpu_to_be_32(0xffffffff >>
1558 								 (32 - width));
1559 					width = 0;
1560 				} else {
1561 					mask[idx] = RTE_BE32(0xffffffff);
1562 					width -= 32;
1563 				}
1564 				if (!width)
1565 					break;
1566 				++idx;
1567 			}
1568 			if (data->offset < 96) {
1569 				info[idx] = (struct field_modify_info){4,
1570 						8 * idx,
1571 						MLX5_MODI_OUT_DIPV6_63_32};
1572 				if (width < 32) {
1573 					mask[idx] =
1574 						rte_cpu_to_be_32(0xffffffff >>
1575 								 (32 - width));
1576 					width = 0;
1577 				} else {
1578 					mask[idx] = RTE_BE32(0xffffffff);
1579 					width -= 32;
1580 				}
1581 				if (!width)
1582 					break;
1583 				++idx;
1584 			}
1585 			info[idx] = (struct field_modify_info){4, 12 * idx,
1586 						MLX5_MODI_OUT_DIPV6_31_0};
1587 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1588 						     (32 - width));
1589 		} else {
1590 			if (data->offset < 32)
1591 				info[idx++] = (struct field_modify_info){4, 0,
1592 						MLX5_MODI_OUT_DIPV6_127_96};
1593 			if (data->offset < 64)
1594 				info[idx++] = (struct field_modify_info){4, 0,
1595 						MLX5_MODI_OUT_DIPV6_95_64};
1596 			if (data->offset < 96)
1597 				info[idx++] = (struct field_modify_info){4, 0,
1598 						MLX5_MODI_OUT_DIPV6_63_32};
1599 			if (data->offset < 128)
1600 				info[idx++] = (struct field_modify_info){4, 0,
1601 						MLX5_MODI_OUT_DIPV6_31_0};
1602 		}
1603 		break;
1604 	case RTE_FLOW_FIELD_TCP_PORT_SRC:
1605 		info[idx] = (struct field_modify_info){2, 0,
1606 					MLX5_MODI_OUT_TCP_SPORT};
1607 		if (mask)
1608 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1609 						     (16 - width));
1610 		break;
1611 	case RTE_FLOW_FIELD_TCP_PORT_DST:
1612 		info[idx] = (struct field_modify_info){2, 0,
1613 					MLX5_MODI_OUT_TCP_DPORT};
1614 		if (mask)
1615 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1616 						     (16 - width));
1617 		break;
1618 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1619 		info[idx] = (struct field_modify_info){4, 0,
1620 					MLX5_MODI_OUT_TCP_SEQ_NUM};
1621 		if (mask)
1622 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1623 						     (32 - width));
1624 		break;
1625 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
1626 		info[idx] = (struct field_modify_info){4, 0,
1627 					MLX5_MODI_OUT_TCP_ACK_NUM};
1628 		if (mask)
1629 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1630 						     (32 - width));
1631 		break;
1632 	case RTE_FLOW_FIELD_TCP_FLAGS:
1633 		info[idx] = (struct field_modify_info){1, 0,
1634 					MLX5_MODI_OUT_TCP_FLAGS};
1635 		if (mask)
1636 			mask[idx] = rte_cpu_to_be_32(0x0000003f >>
1637 						     (6 - width));
1638 		break;
1639 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
1640 		info[idx] = (struct field_modify_info){2, 0,
1641 					MLX5_MODI_OUT_UDP_SPORT};
1642 		if (mask)
1643 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1644 						     (16 - width));
1645 		break;
1646 	case RTE_FLOW_FIELD_UDP_PORT_DST:
1647 		info[idx] = (struct field_modify_info){2, 0,
1648 					MLX5_MODI_OUT_UDP_DPORT};
1649 		if (mask)
1650 			mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
1651 						     (16 - width));
1652 		break;
1653 	case RTE_FLOW_FIELD_VXLAN_VNI:
1654 		/* not supported yet */
1655 		break;
1656 	case RTE_FLOW_FIELD_GENEVE_VNI:
1657 		/* not supported yet*/
1658 		break;
1659 	case RTE_FLOW_FIELD_GTP_TEID:
1660 		info[idx] = (struct field_modify_info){4, 0,
1661 					MLX5_MODI_GTP_TEID};
1662 		if (mask)
1663 			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1664 						     (32 - width));
1665 		break;
1666 	case RTE_FLOW_FIELD_TAG:
1667 		{
1668 			int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1669 						   data->level, error);
1670 			if (reg < 0)
1671 				return;
1672 			MLX5_ASSERT(reg != REG_NON);
1673 			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1674 			info[idx] = (struct field_modify_info){4, 0,
1675 						reg_to_field[reg]};
1676 			if (mask)
1677 				mask[idx] =
1678 					rte_cpu_to_be_32(0xffffffff >>
1679 							 (32 - width));
1680 		}
1681 		break;
1682 	case RTE_FLOW_FIELD_MARK:
1683 		{
1684 			int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1685 						       0, error);
1686 			if (reg < 0)
1687 				return;
1688 			MLX5_ASSERT(reg != REG_NON);
1689 			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1690 			info[idx] = (struct field_modify_info){4, 0,
1691 						reg_to_field[reg]};
1692 			if (mask)
1693 				mask[idx] =
1694 					rte_cpu_to_be_32(0xffffffff >>
1695 							 (32 - width));
1696 		}
1697 		break;
1698 	case RTE_FLOW_FIELD_META:
1699 		{
1700 			int reg = flow_dv_get_metadata_reg(dev, attr, error);
1701 			if (reg < 0)
1702 				return;
1703 			MLX5_ASSERT(reg != REG_NON);
1704 			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1705 			info[idx] = (struct field_modify_info){4, 0,
1706 						reg_to_field[reg]};
1707 			if (mask)
1708 				mask[idx] =
1709 					rte_cpu_to_be_32(0xffffffff >>
1710 							 (32 - width));
1711 		}
1712 		break;
1713 	case RTE_FLOW_FIELD_POINTER:
1714 		for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1715 			if (mask[idx]) {
1716 				memcpy(&value[idx], (void *)(uintptr_t)
1717 					data->value, sizeof(uint32_t));
1718 				value[idx] = rte_cpu_to_be_32(value[idx]);
1719 				break;
1720 			}
1721 		}
1722 		break;
1723 	case RTE_FLOW_FIELD_VALUE:
1724 		for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1725 			if (mask[idx]) {
1726 				value[idx] =
1727 					rte_cpu_to_be_32((uint32_t)data->value);
1728 				break;
1729 			}
1730 		}
1731 		break;
1732 	default:
1733 		MLX5_ASSERT(false);
1734 		break;
1735 	}
1736 }
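
/*
 * Illustration of the width arithmetic above (reference only, not part of
 * the driver logic): for a 16-bit field such as the TCP source port with
 * width = 5, the expression 0x0000ffff >> (16 - 5) yields 0x001f, i.e. a
 * mask covering the 5 least significant bits requested by the modify_field
 * action, which rte_cpu_to_be_32() then stores in big-endian form.
 */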
1737 
1738 /**
1739  * Convert modify_field action to DV specification.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in,out] resource
1744  *   Pointer to the modify-header resource.
1745  * @param[in] action
1746  *   Pointer to action specification.
1747  * @param[in] attr
1748  *   Attributes of flow that includes this item.
1749  * @param[out] error
1750  *   Pointer to the error structure.
1751  *
1752  * @return
1753  *   0 on success, a negative errno value otherwise and rte_errno is set.
1754  */
1755 static int
1756 flow_dv_convert_action_modify_field
1757 			(struct rte_eth_dev *dev,
1758 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
1759 			 const struct rte_flow_action *action,
1760 			 const struct rte_flow_attr *attr,
1761 			 struct rte_flow_error *error)
1762 {
1763 	const struct rte_flow_action_modify_field *conf =
1764 		(const struct rte_flow_action_modify_field *)(action->conf);
1765 	struct rte_flow_item item;
1766 	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1767 								{0, 0, 0} };
1768 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1769 								{0, 0, 0} };
1770 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1771 	uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1772 	uint32_t type;
1773 
1774 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1775 		conf->src.field == RTE_FLOW_FIELD_VALUE) {
1776 		type = MLX5_MODIFICATION_TYPE_SET;
1777 		/* For SET, fill the destination field (field) first. */
1778 		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1779 					  value, conf->width, dev, attr, error);
1780 		/* Then copy the immediate value from the source, as per the mask. */
1781 		mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1782 					  value, conf->width, dev, attr, error);
1783 		item.spec = &value;
1784 	} else {
1785 		type = MLX5_MODIFICATION_TYPE_COPY;
1786 		/* For COPY, fill the destination field (dcopy) without a mask. */
1787 		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1788 					  value, conf->width, dev, attr, error);
1789 		/* Then construct the source field (field) with a mask. */
1790 		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1791 					  value, conf->width, dev, attr, error);
1792 	}
1793 	item.mask = &mask;
1794 	return flow_dv_convert_modify_action(&item,
1795 			field, dcopy, resource, type, error);
1796 }
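
/*
 * Example configuration handled above (an illustrative sketch, not part of
 * the driver): setting the TCP destination port to an immediate value takes
 * the MLX5_MODIFICATION_TYPE_SET path, with dst.field =
 * RTE_FLOW_FIELD_TCP_PORT_DST, src.field = RTE_FLOW_FIELD_VALUE,
 * src.value = 443 and width = 16. Field-to-field copies instead take the
 * MLX5_MODIFICATION_TYPE_COPY path with both dst and src naming fields.
 */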
1797 
1798 /**
1799  * Validate MARK item.
1800  *
1801  * @param[in] dev
1802  *   Pointer to the rte_eth_dev structure.
1803  * @param[in] item
1804  *   Item specification.
1805  * @param[in] attr
1806  *   Attributes of flow that includes this item.
1807  * @param[out] error
1808  *   Pointer to error structure.
1809  *
1810  * @return
1811  *   0 on success, a negative errno value otherwise and rte_errno is set.
1812  */
1813 static int
1814 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1815 			   const struct rte_flow_item *item,
1816 			   const struct rte_flow_attr *attr __rte_unused,
1817 			   struct rte_flow_error *error)
1818 {
1819 	struct mlx5_priv *priv = dev->data->dev_private;
1820 	struct mlx5_dev_config *config = &priv->config;
1821 	const struct rte_flow_item_mark *spec = item->spec;
1822 	const struct rte_flow_item_mark *mask = item->mask;
1823 	const struct rte_flow_item_mark nic_mask = {
1824 		.id = priv->sh->dv_mark_mask,
1825 	};
1826 	int ret;
1827 
1828 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1829 		return rte_flow_error_set(error, ENOTSUP,
1830 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1831 					  "extended metadata feature"
1832 					  " isn't enabled");
1833 	if (!mlx5_flow_ext_mreg_supported(dev))
1834 		return rte_flow_error_set(error, ENOTSUP,
1835 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1836 					  "extended metadata register"
1837 					  " isn't supported");
1838 	if (!nic_mask.id)
1839 		return rte_flow_error_set(error, ENOTSUP,
1840 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1841 					  "extended metadata register"
1842 					  " isn't available");
1843 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1844 	if (ret < 0)
1845 		return ret;
1846 	if (!spec)
1847 		return rte_flow_error_set(error, EINVAL,
1848 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1849 					  item->spec,
1850 					  "data cannot be empty");
1851 	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1852 		return rte_flow_error_set(error, EINVAL,
1853 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1854 					  &spec->id,
1855 					  "mark id exceeds the limit");
1856 	if (!mask)
1857 		mask = &nic_mask;
1858 	if (!mask->id)
1859 		return rte_flow_error_set(error, EINVAL,
1860 					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1861 					"mask cannot be zero");
1862 
1863 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1864 					(const uint8_t *)&nic_mask,
1865 					sizeof(struct rte_flow_item_mark),
1866 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1867 	if (ret < 0)
1868 		return ret;
1869 	return 0;
1870 }
1871 
1872 /**
1873  * Validate META item.
1874  *
1875  * @param[in] dev
1876  *   Pointer to the rte_eth_dev structure.
1877  * @param[in] item
1878  *   Item specification.
1879  * @param[in] attr
1880  *   Attributes of flow that includes this item.
1881  * @param[out] error
1882  *   Pointer to error structure.
1883  *
1884  * @return
1885  *   0 on success, a negative errno value otherwise and rte_errno is set.
1886  */
1887 static int
1888 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1889 			   const struct rte_flow_item *item,
1890 			   const struct rte_flow_attr *attr,
1891 			   struct rte_flow_error *error)
1892 {
1893 	struct mlx5_priv *priv = dev->data->dev_private;
1894 	struct mlx5_dev_config *config = &priv->config;
1895 	const struct rte_flow_item_meta *spec = item->spec;
1896 	const struct rte_flow_item_meta *mask = item->mask;
1897 	struct rte_flow_item_meta nic_mask = {
1898 		.data = UINT32_MAX
1899 	};
1900 	int reg;
1901 	int ret;
1902 
1903 	if (!spec)
1904 		return rte_flow_error_set(error, EINVAL,
1905 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1906 					  item->spec,
1907 					  "data cannot be empty");
1908 	if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1909 		if (!mlx5_flow_ext_mreg_supported(dev))
1910 			return rte_flow_error_set(error, ENOTSUP,
1911 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1912 					  "extended metadata register"
1913 					  " isn't supported");
1914 		reg = flow_dv_get_metadata_reg(dev, attr, error);
1915 		if (reg < 0)
1916 			return reg;
1917 		if (reg == REG_NON)
1918 			return rte_flow_error_set(error, ENOTSUP,
1919 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1920 					"unavailable extended metadata register");
1921 		if (reg == REG_B)
1922 			return rte_flow_error_set(error, ENOTSUP,
1923 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1924 					  "match on reg_b "
1925 					  "isn't supported");
1926 		if (reg != REG_A)
1927 			nic_mask.data = priv->sh->dv_meta_mask;
1928 	} else {
1929 		if (attr->transfer)
1930 			return rte_flow_error_set(error, ENOTSUP,
1931 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1932 					"extended metadata feature "
1933 					"should be enabled when "
1934 					"meta item is requested "
1935 					"with e-switch mode");
1936 		if (attr->ingress)
1937 			return rte_flow_error_set(error, ENOTSUP,
1938 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1939 					"match on metadata for ingress "
1940 					"is not supported in legacy "
1941 					"metadata mode");
1942 	}
1943 	if (!mask)
1944 		mask = &rte_flow_item_meta_mask;
1945 	if (!mask->data)
1946 		return rte_flow_error_set(error, EINVAL,
1947 					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1948 					"mask cannot be zero");
1949 
1950 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1951 					(const uint8_t *)&nic_mask,
1952 					sizeof(struct rte_flow_item_meta),
1953 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1954 	return ret;
1955 }
1956 
1957 /**
1958  * Validate TAG item.
1959  *
1960  * @param[in] dev
1961  *   Pointer to the rte_eth_dev structure.
1962  * @param[in] item
1963  *   Item specification.
1964  * @param[in] attr
1965  *   Attributes of flow that includes this item.
1966  * @param[out] error
1967  *   Pointer to error structure.
1968  *
1969  * @return
1970  *   0 on success, a negative errno value otherwise and rte_errno is set.
1971  */
1972 static int
1973 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1974 			  const struct rte_flow_item *item,
1975 			  const struct rte_flow_attr *attr __rte_unused,
1976 			  struct rte_flow_error *error)
1977 {
1978 	const struct rte_flow_item_tag *spec = item->spec;
1979 	const struct rte_flow_item_tag *mask = item->mask;
1980 	const struct rte_flow_item_tag nic_mask = {
1981 		.data = RTE_BE32(UINT32_MAX),
1982 		.index = 0xff,
1983 	};
1984 	int ret;
1985 
1986 	if (!mlx5_flow_ext_mreg_supported(dev))
1987 		return rte_flow_error_set(error, ENOTSUP,
1988 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1989 					  "extensive metadata register"
1990 					  " isn't supported");
1991 	if (!spec)
1992 		return rte_flow_error_set(error, EINVAL,
1993 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1994 					  item->spec,
1995 					  "data cannot be empty");
1996 	if (!mask)
1997 		mask = &rte_flow_item_tag_mask;
1998 	if (!mask->data)
1999 		return rte_flow_error_set(error, EINVAL,
2000 					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2001 					"mask cannot be zero");
2002 
2003 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2004 					(const uint8_t *)&nic_mask,
2005 					sizeof(struct rte_flow_item_tag),
2006 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2007 	if (ret < 0)
2008 		return ret;
2009 	if (mask->index != 0xff)
2010 		return rte_flow_error_set(error, EINVAL,
2011 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2012 					  "partial mask for tag index"
2013 					  " is not supported");
2014 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2015 	if (ret < 0)
2016 		return ret;
2017 	MLX5_ASSERT(ret != REG_NON);
2018 	return 0;
2019 }
2020 
2021 /**
2022  * Validate port_id item.
2023  *
2024  * @param[in] dev
2025  *   Pointer to the rte_eth_dev structure.
2026  * @param[in] item
2027  *   Item specification.
2028  * @param[in] attr
2029  *   Attributes of flow that includes this item.
2030  * @param[in] item_flags
2031  *   Bit-fields that hold the items detected until now.
2032  * @param[out] error
2033  *   Pointer to error structure.
2034  *
2035  * @return
2036  *   0 on success, a negative errno value otherwise and rte_errno is set.
2037  */
2038 static int
2039 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2040 			      const struct rte_flow_item *item,
2041 			      const struct rte_flow_attr *attr,
2042 			      uint64_t item_flags,
2043 			      struct rte_flow_error *error)
2044 {
2045 	const struct rte_flow_item_port_id *spec = item->spec;
2046 	const struct rte_flow_item_port_id *mask = item->mask;
2047 	const struct rte_flow_item_port_id switch_mask = {
2048 			.id = 0xffffffff,
2049 	};
2050 	struct mlx5_priv *esw_priv;
2051 	struct mlx5_priv *dev_priv;
2052 	int ret;
2053 
2054 	if (!attr->transfer)
2055 		return rte_flow_error_set(error, EINVAL,
2056 					  RTE_FLOW_ERROR_TYPE_ITEM,
2057 					  NULL,
2058 					  "match on port id is valid only"
2059 					  " when transfer flag is enabled");
2060 	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2061 		return rte_flow_error_set(error, ENOTSUP,
2062 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2063 					  "multiple source ports are not"
2064 					  " supported");
2065 	if (!mask)
2066 		mask = &switch_mask;
2067 	if (mask->id != 0xffffffff)
2068 		return rte_flow_error_set(error, ENOTSUP,
2069 					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2070 					   mask,
2071 					   "no support for partial mask on"
2072 					   " \"id\" field");
2073 	ret = mlx5_flow_item_acceptable
2074 				(item, (const uint8_t *)mask,
2075 				 (const uint8_t *)&rte_flow_item_port_id_mask,
2076 				 sizeof(struct rte_flow_item_port_id),
2077 				 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2078 	if (ret)
2079 		return ret;
2080 	if (!spec)
2081 		return 0;
2082 	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2083 	if (!esw_priv)
2084 		return rte_flow_error_set(error, rte_errno,
2085 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2086 					  "failed to obtain E-Switch info for"
2087 					  " port");
2088 	dev_priv = mlx5_dev_to_eswitch_info(dev);
2089 	if (!dev_priv)
2090 		return rte_flow_error_set(error, rte_errno,
2091 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2092 					  NULL,
2093 					  "failed to obtain E-Switch info");
2094 	if (esw_priv->domain_id != dev_priv->domain_id)
2095 		return rte_flow_error_set(error, EINVAL,
2096 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2097 					  "cannot match on a port from a"
2098 					  " different E-Switch");
2099 	return 0;
2100 }
2101 
2102 /**
2103  * Validate VLAN item.
2104  *
2105  * @param[in] item
2106  *   Item specification.
2107  * @param[in] item_flags
2108  *   Bit-fields that hold the items detected until now.
2109  * @param[in] dev
2110  *   Ethernet device flow is being created on.
2111  * @param[out] error
2112  *   Pointer to error structure.
2113  *
2114  * @return
2115  *   0 on success, a negative errno value otherwise and rte_errno is set.
2116  */
2117 static int
2118 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2119 			   uint64_t item_flags,
2120 			   struct rte_eth_dev *dev,
2121 			   struct rte_flow_error *error)
2122 {
2123 	const struct rte_flow_item_vlan *mask = item->mask;
2124 	const struct rte_flow_item_vlan nic_mask = {
2125 		.tci = RTE_BE16(UINT16_MAX),
2126 		.inner_type = RTE_BE16(UINT16_MAX),
2127 		.has_more_vlan = 1,
2128 	};
2129 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2130 	int ret;
2131 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2132 					MLX5_FLOW_LAYER_INNER_L4) :
2133 				       (MLX5_FLOW_LAYER_OUTER_L3 |
2134 					MLX5_FLOW_LAYER_OUTER_L4);
2135 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2136 					MLX5_FLOW_LAYER_OUTER_VLAN;
2137 
2138 	if (item_flags & vlanm)
2139 		return rte_flow_error_set(error, EINVAL,
2140 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2141 					  "multiple VLAN layers not supported");
2142 	else if ((item_flags & l34m) != 0)
2143 		return rte_flow_error_set(error, EINVAL,
2144 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2145 					  "VLAN cannot follow L3/L4 layer");
2146 	if (!mask)
2147 		mask = &rte_flow_item_vlan_mask;
2148 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2149 					(const uint8_t *)&nic_mask,
2150 					sizeof(struct rte_flow_item_vlan),
2151 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2152 	if (ret)
2153 		return ret;
2154 	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2155 		struct mlx5_priv *priv = dev->data->dev_private;
2156 
2157 		if (priv->vmwa_context) {
2158 			/*
2159 			 * A non-NULL context means a virtual machine with
2160 			 * SR-IOV enabled; a VLAN interface must be created
2161 			 * so the hypervisor sets up the E-Switch vport
2162 			 * context correctly. We avoid creating multiple
2163 			 * VLAN interfaces, so a VLAN tag mask is not supported.
2164 			 */
2165 			return rte_flow_error_set(error, EINVAL,
2166 						  RTE_FLOW_ERROR_TYPE_ITEM,
2167 						  item,
2168 						  "VLAN tag mask is not"
2169 						  " supported in virtual"
2170 						  " environment");
2171 		}
2172 	}
2173 	return 0;
2174 }
2175 
2176 /*
2177  * GTP flags are contained in 1 byte of the format:
2178  * -------------------------------------------
2179  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2180  * |-----------------------------------------|
2181  * | value | Version | PT | Res | E | S | PN |
2182  * -------------------------------------------
2183  *
2184  * Matching is supported only for GTP flags E, S, PN.
2185  */
2186 #define MLX5_GTP_FLAGS_MASK	0x07
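
/*
 * For illustration: within the masked bits above, E = 0x04, S = 0x02 and
 * PN = 0x01, so matching packets with only the S flag set would use
 * v_pt_rsv_flags = 0x02 in both the GTP item spec and the item mask.
 */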
2187 
2188 /**
2189  * Validate GTP item.
2190  *
2191  * @param[in] dev
2192  *   Pointer to the rte_eth_dev structure.
2193  * @param[in] item
2194  *   Item specification.
2195  * @param[in] item_flags
2196  *   Bit-fields that hold the items detected until now.
2197  * @param[out] error
2198  *   Pointer to error structure.
2199  *
2200  * @return
2201  *   0 on success, a negative errno value otherwise and rte_errno is set.
2202  */
2203 static int
2204 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2205 			  const struct rte_flow_item *item,
2206 			  uint64_t item_flags,
2207 			  struct rte_flow_error *error)
2208 {
2209 	struct mlx5_priv *priv = dev->data->dev_private;
2210 	const struct rte_flow_item_gtp *spec = item->spec;
2211 	const struct rte_flow_item_gtp *mask = item->mask;
2212 	const struct rte_flow_item_gtp nic_mask = {
2213 		.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2214 		.msg_type = 0xff,
2215 		.teid = RTE_BE32(0xffffffff),
2216 	};
2217 
2218 	if (!priv->config.hca_attr.tunnel_stateless_gtp)
2219 		return rte_flow_error_set(error, ENOTSUP,
2220 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2221 					  "GTP support is not enabled");
2222 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2223 		return rte_flow_error_set(error, ENOTSUP,
2224 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2225 					  "multiple tunnel layers not"
2226 					  " supported");
2227 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2228 		return rte_flow_error_set(error, EINVAL,
2229 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2230 					  "no outer UDP layer found");
2231 	if (!mask)
2232 		mask = &rte_flow_item_gtp_mask;
2233 	if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2234 		return rte_flow_error_set(error, ENOTSUP,
2235 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2236 					  "Match is supported for GTP"
2237 					  " flags only");
2238 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2239 					 (const uint8_t *)&nic_mask,
2240 					 sizeof(struct rte_flow_item_gtp),
2241 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2242 }
2243 
2244 /**
2245  * Validate GTP PSC item.
2246  *
2247  * @param[in] item
2248  *   Item specification.
2249  * @param[in] last_item
2250  *   Previous validated item in the pattern items.
2251  * @param[in] gtp_item
2252  *   Previous GTP item specification.
2253  * @param[in] attr
2254  *   Pointer to flow attributes.
2255  * @param[out] error
2256  *   Pointer to error structure.
2257  *
2258  * @return
2259  *   0 on success, a negative errno value otherwise and rte_errno is set.
2260  */
2261 static int
2262 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2263 			      uint64_t last_item,
2264 			      const struct rte_flow_item *gtp_item,
2265 			      const struct rte_flow_attr *attr,
2266 			      struct rte_flow_error *error)
2267 {
2268 	const struct rte_flow_item_gtp *gtp_spec;
2269 	const struct rte_flow_item_gtp *gtp_mask;
2270 	const struct rte_flow_item_gtp_psc *spec;
2271 	const struct rte_flow_item_gtp_psc *mask;
2272 	const struct rte_flow_item_gtp_psc nic_mask = {
2273 		.pdu_type = 0xFF,
2274 		.qfi = 0xFF,
2275 	};
2276 
2277 	if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2278 		return rte_flow_error_set
2279 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2280 			 "GTP PSC item must be preceded with GTP item");
2281 	gtp_spec = gtp_item->spec;
2282 	gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2283 	/* The GTP spec is present and the E flag is requested to match zero. */
2284 	if (gtp_spec &&
2285 		(gtp_mask->v_pt_rsv_flags &
2286 		~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2287 		return rte_flow_error_set
2288 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2289 			 "GTP E flag must be 1 to match GTP PSC");
2290 	/* Check the flow is not created in group zero. */
2291 	if (!attr->transfer && !attr->group)
2292 		return rte_flow_error_set
2293 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2294 			 "GTP PSC is not supported for group 0");
2295 	/* The GTP PSC spec is optional; nothing more to validate without it. */
2296 	if (!item->spec)
2297 		return 0;
2298 	spec = item->spec;
2299 	mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2300 	if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2301 		return rte_flow_error_set
2302 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2303 			 "PDU type should be smaller than 16");
2304 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2305 					 (const uint8_t *)&nic_mask,
2306 					 sizeof(struct rte_flow_item_gtp_psc),
2307 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2308 }
2309 
2310 /**
2311  * Validate IPV4 item.
2312  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2313  * add specific validation of the fragment_offset field.
2314  *
2315  * @param[in] item
2316  *   Item specification.
2317  * @param[in] item_flags
2318  *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
2319  * @param[out] error
2320  *   Pointer to error structure.
2321  *
2322  * @return
2323  *   0 on success, a negative errno value otherwise and rte_errno is set.
2324  */
2325 static int
2326 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2327 			   uint64_t item_flags,
2328 			   uint64_t last_item,
2329 			   uint16_t ether_type,
2330 			   struct rte_flow_error *error)
2331 {
2332 	int ret;
2333 	const struct rte_flow_item_ipv4 *spec = item->spec;
2334 	const struct rte_flow_item_ipv4 *last = item->last;
2335 	const struct rte_flow_item_ipv4 *mask = item->mask;
2336 	rte_be16_t fragment_offset_spec = 0;
2337 	rte_be16_t fragment_offset_last = 0;
2338 	const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2339 		.hdr = {
2340 			.src_addr = RTE_BE32(0xffffffff),
2341 			.dst_addr = RTE_BE32(0xffffffff),
2342 			.type_of_service = 0xff,
2343 			.fragment_offset = RTE_BE16(0xffff),
2344 			.next_proto_id = 0xff,
2345 			.time_to_live = 0xff,
2346 		},
2347 	};
2348 
2349 	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2350 					   ether_type, &nic_ipv4_mask,
2351 					   MLX5_ITEM_RANGE_ACCEPTED, error);
2352 	if (ret < 0)
2353 		return ret;
2354 	if (spec && mask)
2355 		fragment_offset_spec = spec->hdr.fragment_offset &
2356 				       mask->hdr.fragment_offset;
2357 	if (!fragment_offset_spec)
2358 		return 0;
2359 	/*
2360 	 * Both spec and mask are valid; enforce a full mask to make sure
2361 	 * the complete value is matched.
2362 	 */
2363 	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2364 			!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2365 		return rte_flow_error_set(error, EINVAL,
2366 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2367 					  item, "must use full mask for"
2368 					  " fragment_offset");
2369 	/*
2370 	 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2371 	 * indicating this is the 1st fragment of a fragmented packet.
2372 	 * This is not yet supported in MLX5, return an appropriate error message.
2373 	 */
2374 	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2375 		return rte_flow_error_set(error, ENOTSUP,
2376 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2377 					  "match on first fragment not "
2378 					  "supported");
2379 	if (fragment_offset_spec && !last)
2380 		return rte_flow_error_set(error, ENOTSUP,
2381 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2382 					  "specified value not supported");
2383 	/* spec and last are valid, validate the specified range. */
2384 	fragment_offset_last = last->hdr.fragment_offset &
2385 			       mask->hdr.fragment_offset;
2386 	/*
2387 	 * Match on fragment_offset spec 0x2001 and last 0x3fff
2388 	 * means MF is 1 and frag-offset is > 0.
2389 	 * Such a packet is a 2nd or later fragment, but not the last one.
2390 	 * This is not yet supported in MLX5, return appropriate
2391 	 * error message.
2392 	 */
2393 	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2394 	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2395 		return rte_flow_error_set(error, ENOTSUP,
2396 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2397 					  last, "match on following "
2398 					  "fragments not supported");
2399 	/*
2400 	 * Match on fragment_offset spec 0x0001 and last 0x1fff
2401 	 * means MF is 0 and frag-offset is > 0.
2402 	 * Such a packet is the last fragment of a fragmented packet.
2403 	 * This is not yet supported in MLX5, return appropriate
2404 	 * error message.
2405 	 */
2406 	if (fragment_offset_spec == RTE_BE16(1) &&
2407 	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2408 		return rte_flow_error_set(error, ENOTSUP,
2409 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2410 					  last, "match on last "
2411 					  "fragment not supported");
2412 	/*
2413 	 * Match on fragment_offset spec 0x0001 and last 0x3fff
2414 	 * means MF and/or frag-offset is not 0.
2415 	 * This is a fragmented packet.
2416 	 * Other range values are invalid and rejected.
2417 	 */
2418 	if (!(fragment_offset_spec == RTE_BE16(1) &&
2419 	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2420 		return rte_flow_error_set(error, ENOTSUP,
2421 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2422 					  "specified range not supported");
2423 	return 0;
2424 }
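
/*
 * Summary of the fragment_offset checks above (spec/last are the masked
 * big-endian values):
 *   spec 0x2000 (any last)    - first fragment, rejected.
 *   non-zero spec, no last    - rejected.
 *   spec 0x2001, last 0x3fff  - following fragments, rejected.
 *   spec 0x0001, last 0x1fff  - last fragment, rejected.
 *   spec 0x0001, last 0x3fff  - any fragment, accepted.
 *   other ranges              - rejected.
 */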
2425 
2426 /**
2427  * Validate IPV6 fragment extension item.
2428  *
2429  * @param[in] item
2430  *   Item specification.
2431  * @param[in] item_flags
2432  *   Bit-fields that hold the items detected until now.
2433  * @param[out] error
2434  *   Pointer to error structure.
2435  *
2436  * @return
2437  *   0 on success, a negative errno value otherwise and rte_errno is set.
2438  */
2439 static int
2440 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2441 				    uint64_t item_flags,
2442 				    struct rte_flow_error *error)
2443 {
2444 	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2445 	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2446 	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2447 	rte_be16_t frag_data_spec = 0;
2448 	rte_be16_t frag_data_last = 0;
2449 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2450 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2451 				      MLX5_FLOW_LAYER_OUTER_L4;
2452 	int ret = 0;
2453 	struct rte_flow_item_ipv6_frag_ext nic_mask = {
2454 		.hdr = {
2455 			.next_header = 0xff,
2456 			.frag_data = RTE_BE16(0xffff),
2457 		},
2458 	};
2459 
2460 	if (item_flags & l4m)
2461 		return rte_flow_error_set(error, EINVAL,
2462 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2463 					  "ipv6 fragment extension item cannot "
2464 					  "follow L4 item.");
2465 	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2466 	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2467 		return rte_flow_error_set(error, EINVAL,
2468 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2469 					  "ipv6 fragment extension item must "
2470 					  "follow ipv6 item");
2471 	if (spec && mask)
2472 		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2473 	if (!frag_data_spec)
2474 		return 0;
2475 	/*
2476 	 * Both spec and mask are valid; enforce a full mask to make sure
2477 	 * the complete value is matched.
2478 	 */
2479 	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2480 				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2481 		return rte_flow_error_set(error, EINVAL,
2482 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2483 					  item, "must use full mask for"
2484 					  " frag_data");
2485 	/*
2486 	 * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2487 	 * This is the 1st fragment of a fragmented packet.
2488 	 */
2489 	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2490 		return rte_flow_error_set(error, ENOTSUP,
2491 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2492 					  "match on first fragment not "
2493 					  "supported");
2494 	if (frag_data_spec && !last)
2495 		return rte_flow_error_set(error, EINVAL,
2496 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2497 					  "specified value not supported");
2498 	ret = mlx5_flow_item_acceptable
2499 				(item, (const uint8_t *)mask,
2500 				 (const uint8_t *)&nic_mask,
2501 				 sizeof(struct rte_flow_item_ipv6_frag_ext),
2502 				 MLX5_ITEM_RANGE_ACCEPTED, error);
2503 	if (ret)
2504 		return ret;
2505 	/* spec and last are valid, validate the specified range. */
2506 	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2507 	/*
2508 	 * Match on frag_data spec 0x0009 and last 0xfff9
2509 	 * means M is 1 and frag-offset is > 0.
2510 	 * Such a packet is a 2nd or later fragment, but not the last one.
2511 	 * This is not yet supported in MLX5, return appropriate
2512 	 * error message.
2513 	 */
2514 	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2515 				       RTE_IPV6_EHDR_MF_MASK) &&
2516 	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2517 		return rte_flow_error_set(error, ENOTSUP,
2518 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2519 					  last, "match on following "
2520 					  "fragments not supported");
2521 	/*
2522 	 * Match on frag_data spec 0x0008 and last 0xfff8
2523 	 * means M is 0 and frag-offset is > 0.
2524 	 * Such a packet is the last fragment of a fragmented packet.
2525 	 * This is not yet supported in MLX5, return appropriate
2526 	 * error message.
2527 	 */
2528 	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2529 	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2530 		return rte_flow_error_set(error, ENOTSUP,
2531 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2532 					  last, "match on last "
2533 					  "fragment not supported");
2534 	/* Other range values are invalid and rejected. */
2535 	return rte_flow_error_set(error, EINVAL,
2536 				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2537 				  "specified range not supported");
2538 }
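
/*
 * Summary of the frag_data checks above (spec/last are the masked
 * big-endian values):
 *   zero masked spec           - accepted (no fragment match requested).
 *   spec 0x0001 (M bit only)   - first fragment, rejected.
 *   non-zero spec, no last     - rejected.
 *   spec 0x0009, last 0xfff9   - following fragments, rejected.
 *   spec 0x0008, last 0xfff8   - last fragment, rejected.
 *   any other non-zero spec or range falls through to the final reject.
 */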
2539 
2540 /**
2541  * Validate the pop VLAN action.
2542  *
2543  * @param[in] dev
2544  *   Pointer to the rte_eth_dev structure.
2545  * @param[in] action_flags
2546  *   Holds the actions detected until now.
2547  * @param[in] action
2548  *   Pointer to the pop vlan action.
2549  * @param[in] item_flags
2550  *   The items found in this flow rule.
2551  * @param[in] attr
2552  *   Pointer to flow attributes.
2553  * @param[out] error
2554  *   Pointer to error structure.
2555  *
2556  * @return
2557  *   0 on success, a negative errno value otherwise and rte_errno is set.
2558  */
2559 static int
2560 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2561 				 uint64_t action_flags,
2562 				 const struct rte_flow_action *action,
2563 				 uint64_t item_flags,
2564 				 const struct rte_flow_attr *attr,
2565 				 struct rte_flow_error *error)
2566 {
2567 	const struct mlx5_priv *priv = dev->data->dev_private;
2568 
2571 	if (!priv->sh->pop_vlan_action)
2572 		return rte_flow_error_set(error, ENOTSUP,
2573 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2574 					  NULL,
2575 					  "pop vlan action is not supported");
2576 	if (attr->egress)
2577 		return rte_flow_error_set(error, ENOTSUP,
2578 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2579 					  NULL,
2580 					  "pop vlan action not supported for "
2581 					  "egress");
2582 	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2583 		return rte_flow_error_set(error, ENOTSUP,
2584 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2585 					  "no support for multiple VLAN "
2586 					  "actions");
2587 	/* Pop VLAN with preceding Decap requires inner header with VLAN. */
2588 	if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2589 	    !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2590 		return rte_flow_error_set(error, ENOTSUP,
2591 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2592 					  NULL,
2593 					  "cannot pop vlan after decap without "
2594 					  "match on inner vlan in the flow");
2595 	/* Pop VLAN without preceding Decap requires outer header with VLAN. */
2596 	if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2597 	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2598 		return rte_flow_error_set(error, ENOTSUP,
2599 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2600 					  NULL,
2601 					  "cannot pop vlan without a "
2602 					  "match on (outer) vlan in the flow");
2603 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2604 		return rte_flow_error_set(error, EINVAL,
2605 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2606 					  "wrong action order, port_id should "
2607 					  "be after pop VLAN action");
2608 	if (!attr->transfer && priv->representor)
2609 		return rte_flow_error_set(error, ENOTSUP,
2610 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2611 					  "pop vlan action for VF representor "
2612 					  "not supported on NIC table");
2613 	return 0;
2614 }
2615 
2616 /**
2617  * Get VLAN default info from the VLAN match info in the pattern.
2618  *
2619  * @param[in] items
2620  *   The list of item specifications.
2621  * @param[out] vlan
2622  *   Pointer to the VLAN info to fill.
2626  */
2627 static void
2628 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2629 				  struct rte_vlan_hdr *vlan)
2630 {
2631 	const struct rte_flow_item_vlan nic_mask = {
2632 		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2633 				MLX5DV_FLOW_VLAN_VID_MASK),
2634 		.inner_type = RTE_BE16(0xffff),
2635 	};
2636 
2637 	if (items == NULL)
2638 		return;
2639 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2640 		int type = items->type;
2641 
2642 		if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2643 		    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2644 			break;
2645 	}
2646 	if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2647 		const struct rte_flow_item_vlan *vlan_m = items->mask;
2648 		const struct rte_flow_item_vlan *vlan_v = items->spec;
2649 
2650 		/* If VLAN item in pattern doesn't contain data, return here. */
2651 		if (!vlan_v)
2652 			return;
2653 		if (!vlan_m)
2654 			vlan_m = &nic_mask;
2655 		/* Only full match values are accepted. */
2656 		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2657 		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2658 			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2659 			vlan->vlan_tci |=
2660 				rte_be_to_cpu_16(vlan_v->tci &
2661 						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2662 		}
2663 		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2664 		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2665 			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2666 			vlan->vlan_tci |=
2667 				rte_be_to_cpu_16(vlan_v->tci &
2668 						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2669 		}
2670 		if (vlan_m->inner_type == nic_mask.inner_type)
2671 			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2672 							   vlan_m->inner_type);
2673 	}
2674 }
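
/*
 * TCI layout reminder for the helper above (illustrative only): with
 * tci = 0xA005, the PCP field is 0xA005 >> 13 = 5 and the VID field is
 * 0xA005 & 0x0fff = 5, so the extracted rte_vlan_hdr would carry
 * priority 5 and VLAN id 5.
 */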
2675 
2676 /**
2677  * Validate the push VLAN action.
2678  *
2679  * @param[in] dev
2680  *   Pointer to the rte_eth_dev structure.
2681  * @param[in] action_flags
2682  *   Holds the actions detected until now.
2683  * @param[in] vlan_m
2684  *   Pointer to the VLAN item mask from the flow pattern, can be NULL.
2685  * @param[in] action
2686  *   Pointer to the action structure.
2687  * @param[in] attr
2688  *   Pointer to flow attributes
2689  * @param[out] error
2690  *   Pointer to error structure.
2691  *
2692  * @return
2693  *   0 on success, a negative errno value otherwise and rte_errno is set.
2694  */
2695 static int
2696 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2697 				  uint64_t action_flags,
2698 				  const struct rte_flow_item_vlan *vlan_m,
2699 				  const struct rte_flow_action *action,
2700 				  const struct rte_flow_attr *attr,
2701 				  struct rte_flow_error *error)
2702 {
2703 	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2704 	const struct mlx5_priv *priv = dev->data->dev_private;
2705 
2706 	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2707 	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2708 		return rte_flow_error_set(error, EINVAL,
2709 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2710 					  "invalid vlan ethertype");
2711 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2712 		return rte_flow_error_set(error, EINVAL,
2713 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2714 					  "wrong action order, port_id should "
2715 					  "be after push VLAN");
2716 	if (!attr->transfer && priv->representor)
2717 		return rte_flow_error_set(error, ENOTSUP,
2718 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2719 					  "push vlan action for VF representor "
2720 					  "not supported on NIC table");
2721 	if (vlan_m &&
2722 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2723 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2724 		MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2725 	    !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2726 	    !(mlx5_flow_find_action
2727 		(action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2728 		return rte_flow_error_set(error, EINVAL,
2729 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2730 					  "not full match mask on VLAN PCP and "
2731 					  "there is no of_set_vlan_pcp action, "
2732 					  "push VLAN action cannot figure out "
2733 					  "PCP value");
2734 	if (vlan_m &&
2735 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2736 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2737 		MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2738 	    !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2739 	    !(mlx5_flow_find_action
2740 		(action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2741 		return rte_flow_error_set(error, EINVAL,
2742 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2743 					  "not full match mask on VLAN VID and "
2744 					  "there is no of_set_vlan_vid action, "
2745 					  "push VLAN action cannot figure out "
2746 					  "VID value");
2748 	return 0;
2749 }
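
/*
 * A valid transfer-rule action order implied by the checks above and
 * below, as an illustrative sketch: OF_PUSH_VLAN -> OF_SET_VLAN_VID ->
 * OF_SET_VLAN_PCP -> PORT_ID. The PCP/VID values may alternatively be
 * taken from a fully masked VLAN item in the pattern.
 */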
2750 
2751 /**
2752  * Validate the set VLAN PCP.
2753  *
2754  * @param[in] action_flags
2755  *   Holds the actions detected until now.
2756  * @param[in] actions
2757  *   Pointer to the list of actions remaining in the flow rule.
2758  * @param[out] error
2759  *   Pointer to error structure.
2760  *
2761  * @return
2762  *   0 on success, a negative errno value otherwise and rte_errno is set.
2763  */
2764 static int
2765 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2766 				     const struct rte_flow_action actions[],
2767 				     struct rte_flow_error *error)
2768 {
2769 	const struct rte_flow_action *action = actions;
2770 	const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2771 
2772 	if (conf->vlan_pcp > 7)
2773 		return rte_flow_error_set(error, EINVAL,
2774 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2775 					  "VLAN PCP value is too big");
2776 	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2777 		return rte_flow_error_set(error, ENOTSUP,
2778 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2779 					  "set VLAN PCP action must follow "
2780 					  "the push VLAN action");
2781 	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2782 		return rte_flow_error_set(error, ENOTSUP,
2783 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2784 					  "Multiple VLAN PCP modifications are "
2785 					  "not supported");
2786 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2787 		return rte_flow_error_set(error, EINVAL,
2788 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2789 					  "wrong action order, port_id should "
2790 					  "be after set VLAN PCP");
2791 	return 0;
2792 }
2793 
2794 /**
2795  * Validate the set VLAN VID.
2796  *
2797  * @param[in] item_flags
2798  *   Holds the items detected in this rule.
2799  * @param[in] action_flags
2800  *   Holds the actions detected until now.
2801  * @param[in] actions
2802  *   Pointer to the list of actions remaining in the flow rule.
2803  * @param[out] error
2804  *   Pointer to error structure.
2805  *
2806  * @return
2807  *   0 on success, a negative errno value otherwise and rte_errno is set.
2808  */
2809 static int
2810 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2811 				     uint64_t action_flags,
2812 				     const struct rte_flow_action actions[],
2813 				     struct rte_flow_error *error)
2814 {
2815 	const struct rte_flow_action *action = actions;
2816 	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2817 
2818 	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2819 		return rte_flow_error_set(error, EINVAL,
2820 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2821 					  "VLAN VID value is too big");
2822 	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2823 	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2824 		return rte_flow_error_set(error, ENOTSUP,
2825 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2826 					  "set VLAN VID action must follow push"
2827 					  " VLAN action or match on VLAN item");
2828 	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2829 		return rte_flow_error_set(error, ENOTSUP,
2830 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2831 					  "Multiple VLAN VID modifications are "
2832 					  "not supported");
2833 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2834 		return rte_flow_error_set(error, EINVAL,
2835 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2836 					  "wrong action order, port_id should "
2837 					  "be after set VLAN VID");
2838 	return 0;
2839 }
2840 
2841 /**
2842  * Validate the FLAG action.
2843  *
2844  * @param[in] dev
2845  *   Pointer to the rte_eth_dev structure.
2846  * @param[in] action_flags
2847  *   Holds the actions detected until now.
2848  * @param[in] attr
2849  *   Pointer to flow attributes
2850  * @param[out] error
2851  *   Pointer to error structure.
2852  *
2853  * @return
2854  *   0 on success, a negative errno value otherwise and rte_errno is set.
2855  */
2856 static int
2857 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2858 			     uint64_t action_flags,
2859 			     const struct rte_flow_attr *attr,
2860 			     struct rte_flow_error *error)
2861 {
2862 	struct mlx5_priv *priv = dev->data->dev_private;
2863 	struct mlx5_dev_config *config = &priv->config;
2864 	int ret;
2865 
2866 	/* Fall back if no extended metadata register support. */
2867 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2868 		return mlx5_flow_validate_action_flag(action_flags, attr,
2869 						      error);
2870 	/* Extensive metadata mode requires registers. */
2871 	if (!mlx5_flow_ext_mreg_supported(dev))
2872 		return rte_flow_error_set(error, ENOTSUP,
2873 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2874 					  "no metadata registers "
2875 					  "to support flag action");
2876 	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2877 		return rte_flow_error_set(error, ENOTSUP,
2878 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2879 					  "extended metadata register"
2880 					  " isn't available");
2881 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2882 	if (ret < 0)
2883 		return ret;
2884 	MLX5_ASSERT(ret > 0);
2885 	if (action_flags & MLX5_FLOW_ACTION_MARK)
2886 		return rte_flow_error_set(error, EINVAL,
2887 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2888 					  "can't mark and flag in same flow");
2889 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
2890 		return rte_flow_error_set(error, EINVAL,
2891 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2892 					  "can't have 2 flag"
2893 					  " actions in same flow");
2894 	return 0;
2895 }
2896 
2897 /**
2898  * Validate MARK action.
2899  *
2900  * @param[in] dev
2901  *   Pointer to the rte_eth_dev structure.
2902  * @param[in] action
2903  *   Pointer to action.
2904  * @param[in] action_flags
2905  *   Holds the actions detected until now.
2906  * @param[in] attr
2907  *   Pointer to flow attributes
2908  * @param[out] error
2909  *   Pointer to error structure.
2910  *
2911  * @return
2912  *   0 on success, a negative errno value otherwise and rte_errno is set.
2913  */
2914 static int
2915 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2916 			     const struct rte_flow_action *action,
2917 			     uint64_t action_flags,
2918 			     const struct rte_flow_attr *attr,
2919 			     struct rte_flow_error *error)
2920 {
2921 	struct mlx5_priv *priv = dev->data->dev_private;
2922 	struct mlx5_dev_config *config = &priv->config;
2923 	const struct rte_flow_action_mark *mark = action->conf;
2924 	int ret;
2925 
2926 	if (is_tunnel_offload_active(dev))
2927 		return rte_flow_error_set(error, ENOTSUP,
2928 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2929 					  "no mark action "
2930 					  "if tunnel offload active");
2931 	/* Fall back if no extended metadata register support. */
2932 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2933 		return mlx5_flow_validate_action_mark(action, action_flags,
2934 						      attr, error);
2935 	/* Extensive metadata mode requires registers. */
2936 	if (!mlx5_flow_ext_mreg_supported(dev))
2937 		return rte_flow_error_set(error, ENOTSUP,
2938 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2939 					  "no metadata registers "
2940 					  "to support mark action");
2941 	if (!priv->sh->dv_mark_mask)
2942 		return rte_flow_error_set(error, ENOTSUP,
2943 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2944 					  "extended metadata register"
2945 					  " isn't available");
2946 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2947 	if (ret < 0)
2948 		return ret;
2949 	MLX5_ASSERT(ret > 0);
2950 	if (!mark)
2951 		return rte_flow_error_set(error, EINVAL,
2952 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2953 					  "configuration cannot be null");
2954 	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2955 		return rte_flow_error_set(error, EINVAL,
2956 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2957 					  &mark->id,
2958 					  "mark id exceeds the limit");
2959 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
2960 		return rte_flow_error_set(error, EINVAL,
2961 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2962 					  "can't flag and mark in same flow");
2963 	if (action_flags & MLX5_FLOW_ACTION_MARK)
2964 		return rte_flow_error_set(error, EINVAL,
2965 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2966 					  "can't have 2 mark actions in same"
2967 					  " flow");
2968 	return 0;
2969 }
2970 
2971 /**
2972  * Validate SET_META action.
2973  *
2974  * @param[in] dev
2975  *   Pointer to the rte_eth_dev structure.
2976  * @param[in] action
2977  *   Pointer to the action structure.
2978  * @param[in] action_flags
2979  *   Holds the actions detected until now.
2980  * @param[in] attr
2981  *   Pointer to flow attributes
2982  * @param[out] error
2983  *   Pointer to error structure.
2984  *
2985  * @return
2986  *   0 on success, a negative errno value otherwise and rte_errno is set.
2987  */
2988 static int
2989 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2990 				 const struct rte_flow_action *action,
2991 				 uint64_t action_flags __rte_unused,
2992 				 const struct rte_flow_attr *attr,
2993 				 struct rte_flow_error *error)
2994 {
2995 	const struct rte_flow_action_set_meta *conf;
2996 	uint32_t nic_mask = UINT32_MAX;
2997 	int reg;
2998 
2999 	if (!mlx5_flow_ext_mreg_supported(dev))
3000 		return rte_flow_error_set(error, ENOTSUP,
3001 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3002 					  "extended metadata register"
3003 					  " isn't supported");
3004 	reg = flow_dv_get_metadata_reg(dev, attr, error);
3005 	if (reg < 0)
3006 		return reg;
3007 	if (reg == REG_NON)
3008 		return rte_flow_error_set(error, ENOTSUP,
3009 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3010 					  "unavailable extended metadata register");
3011 	if (reg != REG_A && reg != REG_B) {
3012 		struct mlx5_priv *priv = dev->data->dev_private;
3013 
3014 		nic_mask = priv->sh->dv_meta_mask;
3015 	}
3016 	if (!(action->conf))
3017 		return rte_flow_error_set(error, EINVAL,
3018 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3019 					  "configuration cannot be null");
3020 	conf = (const struct rte_flow_action_set_meta *)action->conf;
3021 	if (!conf->mask)
3022 		return rte_flow_error_set(error, EINVAL,
3023 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3024 					  "zero mask doesn't have any effect");
3025 	if (conf->mask & ~nic_mask)
3026 		return rte_flow_error_set(error, EINVAL,
3027 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3028 					  "metadata must be within reg C0");
3029 	return 0;
3030 }
3031 
3032 /**
3033  * Validate SET_TAG action.
3034  *
3035  * @param[in] dev
3036  *   Pointer to the rte_eth_dev structure.
3037  * @param[in] action
3038  *   Pointer to the action structure.
3039  * @param[in] action_flags
3040  *   Holds the actions detected until now.
3041  * @param[in] attr
3042  *   Pointer to flow attributes
3043  * @param[out] error
3044  *   Pointer to error structure.
3045  *
3046  * @return
3047  *   0 on success, a negative errno value otherwise and rte_errno is set.
3048  */
3049 static int
3050 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3051 				const struct rte_flow_action *action,
3052 				uint64_t action_flags,
3053 				const struct rte_flow_attr *attr,
3054 				struct rte_flow_error *error)
3055 {
3056 	const struct rte_flow_action_set_tag *conf;
3057 	const uint64_t terminal_action_flags =
3058 		MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3059 		MLX5_FLOW_ACTION_RSS;
3060 	int ret;
3061 
3062 	if (!mlx5_flow_ext_mreg_supported(dev))
3063 		return rte_flow_error_set(error, ENOTSUP,
3064 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3065 					  "extensive metadata register"
3066 					  " isn't supported");
3067 	if (!(action->conf))
3068 		return rte_flow_error_set(error, EINVAL,
3069 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3070 					  "configuration cannot be null");
3071 	conf = (const struct rte_flow_action_set_tag *)action->conf;
3072 	if (!conf->mask)
3073 		return rte_flow_error_set(error, EINVAL,
3074 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3075 					  "zero mask doesn't have any effect");
3076 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3077 	if (ret < 0)
3078 		return ret;
3079 	if (!attr->transfer && attr->ingress &&
3080 	    (action_flags & terminal_action_flags))
3081 		return rte_flow_error_set(error, EINVAL,
3082 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3083 					  "set_tag has no effect"
3084 					  " with terminal actions");
3085 	return 0;
3086 }
3087 
3088 /**
3089  * Validate count action.
3090  *
3091  * @param[in] dev
3092  *   Pointer to rte_eth_dev structure.
3093  * @param[in] action
3094  *   Pointer to the action structure.
3095  * @param[in] action_flags
3096  *   Holds the actions detected until now.
3097  * @param[out] error
3098  *   Pointer to error structure.
3099  *
3100  * @return
3101  *   0 on success, a negative errno value otherwise and rte_errno is set.
3102  */
3103 static int
3104 flow_dv_validate_action_count(struct rte_eth_dev *dev,
3105 			      const struct rte_flow_action *action,
3106 			      uint64_t action_flags,
3107 			      struct rte_flow_error *error)
3108 {
3109 	struct mlx5_priv *priv = dev->data->dev_private;
3110 	const struct rte_flow_action_count *count;
3111 
3112 	if (!priv->config.devx)
3113 		goto notsup_err;
3114 	if (action_flags & MLX5_FLOW_ACTION_COUNT)
3115 		return rte_flow_error_set(error, EINVAL,
3116 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3117 					  "duplicate count actions set");
3118 	count = (const struct rte_flow_action_count *)action->conf;
3119 	if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3120 	    !priv->sh->flow_hit_aso_en)
3121 		return rte_flow_error_set(error, EINVAL,
3122 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123 					  "old age and shared count combination is not supported");
3124 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3125 	return 0;
3126 #endif
3127 notsup_err:
3128 	return rte_flow_error_set
3129 		      (error, ENOTSUP,
3130 		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3131 		       NULL,
3132 		       "count action not supported");
3133 }
3134 
3135 /**
3136  * Validate the L2 encap action.
3137  *
3138  * @param[in] dev
3139  *   Pointer to the rte_eth_dev structure.
3140  * @param[in] action_flags
3141  *   Holds the actions detected until now.
3142  * @param[in] action
3143  *   Pointer to the action structure.
3144  * @param[in] attr
3145  *   Pointer to flow attributes.
3146  * @param[out] error
3147  *   Pointer to error structure.
3148  *
3149  * @return
3150  *   0 on success, a negative errno value otherwise and rte_errno is set.
3151  */
3152 static int
3153 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3154 				 uint64_t action_flags,
3155 				 const struct rte_flow_action *action,
3156 				 const struct rte_flow_attr *attr,
3157 				 struct rte_flow_error *error)
3158 {
3159 	const struct mlx5_priv *priv = dev->data->dev_private;
3160 
3161 	if (!(action->conf))
3162 		return rte_flow_error_set(error, EINVAL,
3163 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3164 					  "configuration cannot be null");
3165 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3166 		return rte_flow_error_set(error, EINVAL,
3167 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3168 					  "can only have a single encap action "
3169 					  "in a flow");
3170 	if (!attr->transfer && priv->representor)
3171 		return rte_flow_error_set(error, ENOTSUP,
3172 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3173 					  "encap action for VF representor "
3174 					  "not supported on NIC table");
3175 	return 0;
3176 }
3177 
3178 /**
3179  * Validate a decap action.
3180  *
3181  * @param[in] dev
3182  *   Pointer to the rte_eth_dev structure.
3183  * @param[in] action_flags
3184  *   Holds the actions detected until now.
3185  * @param[in] action
3186  *   Pointer to the action structure.
3187  * @param[in] item_flags
3188  *   Holds the items detected.
3189  * @param[in] attr
3190  *   Pointer to flow attributes
3191  * @param[out] error
3192  *   Pointer to error structure.
3193  *
3194  * @return
3195  *   0 on success, a negative errno value otherwise and rte_errno is set.
3196  */
3197 static int
3198 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3199 			      uint64_t action_flags,
3200 			      const struct rte_flow_action *action,
3201 			      const uint64_t item_flags,
3202 			      const struct rte_flow_attr *attr,
3203 			      struct rte_flow_error *error)
3204 {
3205 	const struct mlx5_priv *priv = dev->data->dev_private;
3206 
3207 	if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3208 	    !priv->config.decap_en)
3209 		return rte_flow_error_set(error, ENOTSUP,
3210 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3211 					  "decap is not enabled");
3212 	if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3213 		return rte_flow_error_set(error, ENOTSUP,
3214 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3215 					  action_flags &
3216 					  MLX5_FLOW_ACTION_DECAP ? "can only "
3217 					  "have a single decap action" : "decap "
3218 					  "after encap is not supported");
3219 	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3220 		return rte_flow_error_set(error, EINVAL,
3221 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3222 					  "can't have decap action after"
3223 					  " modify action");
3224 	if (attr->egress)
3225 		return rte_flow_error_set(error, ENOTSUP,
3226 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3227 					  NULL,
3228 					  "decap action not supported for "
3229 					  "egress");
3230 	if (!attr->transfer && priv->representor)
3231 		return rte_flow_error_set(error, ENOTSUP,
3232 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3233 					  "decap action for VF representor "
3234 					  "not supported on NIC table");
3235 	if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3236 	    !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3237 		return rte_flow_error_set(error, ENOTSUP,
3238 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3239 				"VXLAN item should be present for VXLAN decap");
3240 	return 0;
3241 }
3242 
3243 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3244 
3245 /**
3246  * Validate the raw encap and decap actions.
3247  *
3248  * @param[in] dev
3249  *   Pointer to the rte_eth_dev structure.
3250  * @param[in] decap
3251  *   Pointer to the decap action.
3252  * @param[in] encap
3253  *   Pointer to the encap action.
3254  * @param[in] attr
3255  *   Pointer to flow attributes
3256  * @param[in,out] action_flags
3257  *   Holds the actions detected until now.
3258  * @param[in,out] actions_n
3259  *   Pointer to the number of actions counter.
3260  * @param[in] action
3261  *   Pointer to the action structure.
3262  * @param[in] item_flags
3263  *   Holds the items detected.
3264  * @param[out] error
3265  *   Pointer to error structure.
3266  *
3267  * @return
3268  *   0 on success, a negative errno value otherwise and rte_errno is set.
3269  */
3270 static int
3271 flow_dv_validate_action_raw_encap_decap
3272 	(struct rte_eth_dev *dev,
3273 	 const struct rte_flow_action_raw_decap *decap,
3274 	 const struct rte_flow_action_raw_encap *encap,
3275 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
3276 	 int *actions_n, const struct rte_flow_action *action,
3277 	 uint64_t item_flags, struct rte_flow_error *error)
3278 {
3279 	const struct mlx5_priv *priv = dev->data->dev_private;
3280 	int ret;
3281 
3282 	if (encap && (!encap->size || !encap->data))
3283 		return rte_flow_error_set(error, EINVAL,
3284 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3285 					  "raw encap data cannot be empty");
3286 	if (decap && encap) {
3287 		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3288 		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3289 			/* L3 encap. */
3290 			decap = NULL;
3291 		else if (encap->size <=
3292 			   MLX5_ENCAPSULATION_DECISION_SIZE &&
3293 			   decap->size >
3294 			   MLX5_ENCAPSULATION_DECISION_SIZE)
3295 			/* L3 decap. */
3296 			encap = NULL;
3297 		else if (encap->size >
3298 			   MLX5_ENCAPSULATION_DECISION_SIZE &&
3299 			   decap->size >
3300 			   MLX5_ENCAPSULATION_DECISION_SIZE)
3301 			/* 2 L2 actions: encap and decap. */
3302 			;
3303 		else
3304 			return rte_flow_error_set(error,
3305 				ENOTSUP,
3306 				RTE_FLOW_ERROR_TYPE_ACTION,
3307 				NULL, "unsupported combination of too "
3308 				"small raw decap and too small "
3309 				"raw encap");
3310 	}
3311 	if (decap) {
3312 		ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3313 						    item_flags, attr, error);
3314 		if (ret < 0)
3315 			return ret;
3316 		*action_flags |= MLX5_FLOW_ACTION_DECAP;
3317 		++(*actions_n);
3318 	}
3319 	if (encap) {
3320 		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3321 			return rte_flow_error_set(error, ENOTSUP,
3322 						  RTE_FLOW_ERROR_TYPE_ACTION,
3323 						  NULL,
3324 						  "raw encap size too small");
3325 		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3326 			return rte_flow_error_set(error, EINVAL,
3327 						  RTE_FLOW_ERROR_TYPE_ACTION,
3328 						  NULL,
3329 						  "more than one encap action");
3330 		if (!attr->transfer && priv->representor)
3331 			return rte_flow_error_set
3332 					(error, ENOTSUP,
3333 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3334 					 "encap action for VF representor "
3335 					 "not supported on NIC table");
3336 		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
3337 		++(*actions_n);
3338 	}
3339 	return 0;
3340 }
3341 
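/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): an L3 tunnel encapsulation is expressed by applications as a
 * RAW_DECAP of the inner L2 header followed by a RAW_ENCAP of the full
 * outer stack. The size comparison against
 * MLX5_ENCAPSULATION_DECISION_SIZE in the function above is what tells
 * this apart from a plain L2 encap/decap pair.
 */
static __rte_unused void
example_raw_l3_encap_actions(const uint8_t *outer, size_t outer_len,
			     struct rte_flow_action actions[3])
{
	static struct rte_ether_hdr inner_l2; /* L2 header to strip. */
	static struct rte_flow_action_raw_decap decap;
	static struct rte_flow_action_raw_encap encap;

	decap.data = (uint8_t *)&inner_l2;
	decap.size = sizeof(inner_l2); /* <= decision size: L3 encap. */
	encap.data = (uint8_t *)(uintptr_t)outer;
	encap.size = outer_len; /* > decision size: full outer stack. */
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap };
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END };
}
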
3342 /**
3343  * Match encap_decap resource.
3344  *
3345  * @param list
3346  *   Pointer to the hash list.
3347  * @param entry
3348  *   Pointer to the existing resource entry object.
3349  * @param key
3350  *   Key of the new entry.
3351  * @param cb_ctx
3352  *   Pointer to the context with the new encap_decap resource.
3353  *
3354  * @return
3355  *   0 on match, non-zero otherwise.
3356  */
3357 int
3358 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3359 			     struct mlx5_hlist_entry *entry,
3360 			     uint64_t key __rte_unused, void *cb_ctx)
3361 {
3362 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3363 	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3364 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3365 
3366 	cache_resource = container_of(entry,
3367 				      struct mlx5_flow_dv_encap_decap_resource,
3368 				      entry);
3369 	if (resource->reformat_type == cache_resource->reformat_type &&
3370 	    resource->ft_type == cache_resource->ft_type &&
3371 	    resource->flags == cache_resource->flags &&
3372 	    resource->size == cache_resource->size &&
3373 	    !memcmp((const void *)resource->buf,
3374 		    (const void *)cache_resource->buf,
3375 		    resource->size))
3376 		return 0;
3377 	return -1;
3378 }
3379 
3380 /**
3381  * Allocate encap_decap resource.
3382  *
3383  * @param list
3384  *   Pointer to the hash list.
3385  * @param key
3386  *   Key of the new entry.
3387  * @param cb_ctx
3388  *   Pointer to the context with the new encap_decap resource.
3389  *
3390  * @return
3391  *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
3392  */
3393 struct mlx5_hlist_entry *
3394 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3395 			      uint64_t key __rte_unused,
3396 			      void *cb_ctx)
3397 {
3398 	struct mlx5_dev_ctx_shared *sh = list->ctx;
3399 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3400 	struct mlx5dv_dr_domain *domain;
3401 	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3402 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3403 	uint32_t idx;
3404 	int ret;
3405 
3406 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3407 		domain = sh->fdb_domain;
3408 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3409 		domain = sh->rx_domain;
3410 	else
3411 		domain = sh->tx_domain;
3412 	/* Register new encap/decap resource. */
3413 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3414 				       &idx);
3415 	if (!cache_resource) {
3416 		rte_flow_error_set(ctx->error, ENOMEM,
3417 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3418 				   "cannot allocate resource memory");
3419 		return NULL;
3420 	}
3421 	*cache_resource = *resource;
3422 	cache_resource->idx = idx;
3423 	ret = mlx5_flow_os_create_flow_action_packet_reformat
3424 					(sh->ctx, domain, cache_resource,
3425 					 &cache_resource->action);
3426 	if (ret) {
3427 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3428 		rte_flow_error_set(ctx->error, ENOMEM,
3429 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3430 				   NULL, "cannot create action");
3431 		return NULL;
3432 	}
3433 
3434 	return &cache_resource->entry;
3435 }
3436 
3437 /**
3438  * Find existing encap/decap resource or create and register a new one.
3439  *
3440  * @param[in, out] dev
3441  *   Pointer to rte_eth_dev structure.
3442  * @param[in, out] resource
3443  *   Pointer to encap/decap resource.
3444  * @param[in, out] dev_flow
3445  *   Pointer to the dev_flow.
3446  * @param[out] error
3447  *   Pointer to error structure.
3448  *
3449  * @return
3450  *   0 on success, a negative errno value otherwise and rte_errno is set.
3451  */
3452 static int
3453 flow_dv_encap_decap_resource_register
3454 			(struct rte_eth_dev *dev,
3455 			 struct mlx5_flow_dv_encap_decap_resource *resource,
3456 			 struct mlx5_flow *dev_flow,
3457 			 struct rte_flow_error *error)
3458 {
3459 	struct mlx5_priv *priv = dev->data->dev_private;
3460 	struct mlx5_dev_ctx_shared *sh = priv->sh;
3461 	struct mlx5_hlist_entry *entry;
3462 	union {
3463 		struct {
3464 			uint32_t ft_type:8;
3465 			uint32_t refmt_type:8;
3466 			/*
3467 			 * Header reformat actions can be shared between
3468 			 * non-root tables. One bit to indicate whether
3469 			 * the resource is used on a non-root table.
3470 			 */
3471 			uint32_t is_root:1;
3472 			uint32_t reserve:15;
3473 		};
3474 		uint32_t v32;
3475 	} encap_decap_key = {
3476 		{
3477 			.ft_type = resource->ft_type,
3478 			.refmt_type = resource->reformat_type,
3479 			.is_root = !!dev_flow->dv.group,
3480 			.reserve = 0,
3481 		}
3482 	};
3483 	struct mlx5_flow_cb_ctx ctx = {
3484 		.error = error,
3485 		.data = resource,
3486 	};
3487 	uint64_t key64;
3488 
3489 	resource->flags = dev_flow->dv.group ? 0 : 1;
3490 	key64 = __rte_raw_cksum(&encap_decap_key.v32,
3491 				 sizeof(encap_decap_key.v32), 0);
3492 	if (resource->reformat_type !=
3493 	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3494 	    resource->size)
3495 		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3496 	entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3497 	if (!entry)
3498 		return -rte_errno;
3499 	resource = container_of(entry, typeof(*resource), entry);
3500 	dev_flow->dv.encap_decap = resource;
3501 	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3502 	return 0;
3503 }
3504 
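/*
 * Illustrative sketch only, not part of the driver: the 64-bit hash key
 * built in flow_dv_encap_decap_resource_register() above, reproduced
 * standalone. Resources with the same packed type/table attributes and
 * identical reformat buffers produce the same key and therefore share
 * one hardware reformat action. Parameter names are hypothetical.
 */
static __rte_unused uint64_t
example_encap_decap_key(uint32_t packed_attr, const uint8_t *buf,
			size_t len)
{
	uint64_t key = __rte_raw_cksum(&packed_attr, sizeof(packed_attr), 0);

	if (len) /* The buffer is skipped for a plain L2 decap. */
		key = __rte_raw_cksum(buf, len, key);
	return key;
}
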
3505 /**
3506  * Find existing table jump resource or create and register a new one.
3507  *
3508  * @param[in, out] dev
3509  *   Pointer to rte_eth_dev structure.
3510  * @param[in, out] tbl
3511  *   Pointer to flow table resource.
3512  * @param[in, out] dev_flow
3513  *   Pointer to the dev_flow.
3514  * @param[out] error
3515  *   Pointer to error structure.
3516  *
3517  * @return
3518  *   0 on success, a negative errno value otherwise and rte_errno is set.
3519  */
3520 static int
3521 flow_dv_jump_tbl_resource_register
3522 			(struct rte_eth_dev *dev __rte_unused,
3523 			 struct mlx5_flow_tbl_resource *tbl,
3524 			 struct mlx5_flow *dev_flow,
3525 			 struct rte_flow_error *error __rte_unused)
3526 {
3527 	struct mlx5_flow_tbl_data_entry *tbl_data =
3528 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3529 
3530 	MLX5_ASSERT(tbl);
3531 	MLX5_ASSERT(tbl_data->jump.action);
3532 	dev_flow->handle->rix_jump = tbl_data->idx;
3533 	dev_flow->dv.jump = &tbl_data->jump;
3534 	return 0;
3535 }
3536 
3537 int
3538 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3539 			 struct mlx5_cache_entry *entry, void *cb_ctx)
3540 {
3541 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3542 	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3543 	struct mlx5_flow_dv_port_id_action_resource *res =
3544 			container_of(entry, typeof(*res), entry);
3545 
3546 	return ref->port_id != res->port_id;
3547 }
3548 
3549 struct mlx5_cache_entry *
3550 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3551 			  struct mlx5_cache_entry *entry __rte_unused,
3552 			  void *cb_ctx)
3553 {
3554 	struct mlx5_dev_ctx_shared *sh = list->ctx;
3555 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3556 	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3557 	struct mlx5_flow_dv_port_id_action_resource *cache;
3558 	uint32_t idx;
3559 	int ret;
3560 
3561 	/* Register new port id action resource. */
3562 	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3563 	if (!cache) {
3564 		rte_flow_error_set(ctx->error, ENOMEM,
3565 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3566 				   "cannot allocate port_id action cache memory");
3567 		return NULL;
3568 	}
3569 	*cache = *ref;
3570 	ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3571 							ref->port_id,
3572 							&cache->action);
3573 	if (ret) {
3574 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3575 		rte_flow_error_set(ctx->error, ENOMEM,
3576 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3577 				   "cannot create action");
3578 		return NULL;
3579 	}
3580 	cache->idx = idx;
3581 	return &cache->entry;
3582 }
3583 
3584 /**
3585  * Find existing port ID action resource or create and register a new one.
3586  *
3587  * @param[in, out] dev
3588  *   Pointer to rte_eth_dev structure.
3589  * @param[in, out] resource
3590  *   Pointer to port ID action resource.
3591  * @param[in, out] dev_flow
3592  *   Pointer to the dev_flow.
3593  * @param[out] error
3594  *   Pointer to error structure.
3595  *
3596  * @return
3597  *   0 on success, a negative errno value otherwise and rte_errno is set.
3598  */
3599 static int
3600 flow_dv_port_id_action_resource_register
3601 			(struct rte_eth_dev *dev,
3602 			 struct mlx5_flow_dv_port_id_action_resource *resource,
3603 			 struct mlx5_flow *dev_flow,
3604 			 struct rte_flow_error *error)
3605 {
3606 	struct mlx5_priv *priv = dev->data->dev_private;
3607 	struct mlx5_cache_entry *entry;
3608 	struct mlx5_flow_dv_port_id_action_resource *cache;
3609 	struct mlx5_flow_cb_ctx ctx = {
3610 		.error = error,
3611 		.data = resource,
3612 	};
3613 
3614 	entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3615 	if (!entry)
3616 		return -rte_errno;
3617 	cache = container_of(entry, typeof(*cache), entry);
3618 	dev_flow->dv.port_id_action = cache;
3619 	dev_flow->handle->rix_port_id_action = cache->idx;
3620 	return 0;
3621 }
3622 
3623 int
3624 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3625 			 struct mlx5_cache_entry *entry, void *cb_ctx)
3626 {
3627 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3628 	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3629 	struct mlx5_flow_dv_push_vlan_action_resource *res =
3630 			container_of(entry, typeof(*res), entry);
3631 
3632 	return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3633 }
3634 
3635 struct mlx5_cache_entry *
3636 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3637 			  struct mlx5_cache_entry *entry __rte_unused,
3638 			  void *cb_ctx)
3639 {
3640 	struct mlx5_dev_ctx_shared *sh = list->ctx;
3641 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3642 	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3643 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
3644 	struct mlx5dv_dr_domain *domain;
3645 	uint32_t idx;
3646 	int ret;
3647 
3648 	/* Register new push VLAN action resource. */
3649 	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3650 	if (!cache) {
3651 		rte_flow_error_set(ctx->error, ENOMEM,
3652 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3653 				   "cannot allocate push_vlan action cache memory");
3654 		return NULL;
3655 	}
3656 	*cache = *ref;
3657 	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3658 		domain = sh->fdb_domain;
3659 	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3660 		domain = sh->rx_domain;
3661 	else
3662 		domain = sh->tx_domain;
3663 	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3664 							&cache->action);
3665 	if (ret) {
3666 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3667 		rte_flow_error_set(ctx->error, ENOMEM,
3668 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3669 				   "cannot create push vlan action");
3670 		return NULL;
3671 	}
3672 	cache->idx = idx;
3673 	return &cache->entry;
3674 }
3675 
3676 /**
3677  * Find existing push vlan resource or create and register a new one.
3678  *
3679  * @param[in, out] dev
3680  *   Pointer to rte_eth_dev structure.
3681  * @param[in, out] resource
3682  *   Pointer to push VLAN action resource.
3683  * @param[in, out] dev_flow
3684  *   Pointer to the dev_flow.
3685  * @param[out] error
3686  *   Pointer to error structure.
3687  *
3688  * @return
3689  *   0 on success, a negative errno value otherwise and rte_errno is set.
3690  */
3691 static int
3692 flow_dv_push_vlan_action_resource_register
3693 		       (struct rte_eth_dev *dev,
3694 			struct mlx5_flow_dv_push_vlan_action_resource *resource,
3695 			struct mlx5_flow *dev_flow,
3696 			struct rte_flow_error *error)
3697 {
3698 	struct mlx5_priv *priv = dev->data->dev_private;
3699 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
3700 	struct mlx5_cache_entry *entry;
3701 	struct mlx5_flow_cb_ctx ctx = {
3702 		.error = error,
3703 		.data = resource,
3704 	};
3705 
3706 	entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3707 	if (!entry)
3708 		return -rte_errno;
3709 	cache = container_of(entry, typeof(*cache), entry);
3710 
3711 	dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3712 	dev_flow->dv.push_vlan_res = cache;
3713 	return 0;
3714 }
3715 
3716 /**
3717  * Get the header size of a specific rte_flow_item_type.
3718  *
3719  * @param[in] item_type
3720  *   Tested rte_flow_item_type.
3721  *
3722  * @return
3723  *   Size of the item type header, 0 if void or irrelevant.
3724  */
3725 static size_t
3726 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3727 {
3728 	size_t retval;
3729 
3730 	switch (item_type) {
3731 	case RTE_FLOW_ITEM_TYPE_ETH:
3732 		retval = sizeof(struct rte_ether_hdr);
3733 		break;
3734 	case RTE_FLOW_ITEM_TYPE_VLAN:
3735 		retval = sizeof(struct rte_vlan_hdr);
3736 		break;
3737 	case RTE_FLOW_ITEM_TYPE_IPV4:
3738 		retval = sizeof(struct rte_ipv4_hdr);
3739 		break;
3740 	case RTE_FLOW_ITEM_TYPE_IPV6:
3741 		retval = sizeof(struct rte_ipv6_hdr);
3742 		break;
3743 	case RTE_FLOW_ITEM_TYPE_UDP:
3744 		retval = sizeof(struct rte_udp_hdr);
3745 		break;
3746 	case RTE_FLOW_ITEM_TYPE_TCP:
3747 		retval = sizeof(struct rte_tcp_hdr);
3748 		break;
3749 	case RTE_FLOW_ITEM_TYPE_VXLAN:
3750 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3751 		retval = sizeof(struct rte_vxlan_hdr);
3752 		break;
3753 	case RTE_FLOW_ITEM_TYPE_GRE:
3754 	case RTE_FLOW_ITEM_TYPE_NVGRE:
3755 		retval = sizeof(struct rte_gre_hdr);
3756 		break;
3757 	case RTE_FLOW_ITEM_TYPE_MPLS:
3758 		retval = sizeof(struct rte_mpls_hdr);
3759 		break;
3760 	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3761 	default:
3762 		retval = 0;
3763 		break;
3764 	}
3765 	return retval;
3766 }
3767 
3768 #define MLX5_ENCAP_IPV4_VERSION		0x40
3769 #define MLX5_ENCAP_IPV4_IHL_MIN		0x05
3770 #define MLX5_ENCAP_IPV4_TTL_DEF		0x40
3771 #define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
3772 #define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
3773 #define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
3774 #define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
3775 
3776 /**
3777  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3778  *
3779  * @param[in] items
3780  *   Pointer to rte_flow_item objects list.
3781  * @param[out] buf
3782  *   Pointer to the output buffer.
3783  * @param[out] size
3784  *   Pointer to the output buffer size.
3785  * @param[out] error
3786  *   Pointer to the error structure.
3787  *
3788  * @return
3789  *   0 on success, a negative errno value otherwise and rte_errno is set.
3790  */
3791 static int
3792 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3793 			   size_t *size, struct rte_flow_error *error)
3794 {
3795 	struct rte_ether_hdr *eth = NULL;
3796 	struct rte_vlan_hdr *vlan = NULL;
3797 	struct rte_ipv4_hdr *ipv4 = NULL;
3798 	struct rte_ipv6_hdr *ipv6 = NULL;
3799 	struct rte_udp_hdr *udp = NULL;
3800 	struct rte_vxlan_hdr *vxlan = NULL;
3801 	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3802 	struct rte_gre_hdr *gre = NULL;
3803 	size_t len;
3804 	size_t temp_size = 0;
3805 
3806 	if (!items)
3807 		return rte_flow_error_set(error, EINVAL,
3808 					  RTE_FLOW_ERROR_TYPE_ACTION,
3809 					  NULL, "invalid empty data");
3810 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3811 		len = flow_dv_get_item_hdr_len(items->type);
3812 		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3813 			return rte_flow_error_set(error, EINVAL,
3814 						  RTE_FLOW_ERROR_TYPE_ACTION,
3815 						  (void *)items->type,
3816 						  "items total size is too big"
3817 						  " for encap action");
3818 		rte_memcpy((void *)&buf[temp_size], items->spec, len);
3819 		switch (items->type) {
3820 		case RTE_FLOW_ITEM_TYPE_ETH:
3821 			eth = (struct rte_ether_hdr *)&buf[temp_size];
3822 			break;
3823 		case RTE_FLOW_ITEM_TYPE_VLAN:
3824 			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3825 			if (!eth)
3826 				return rte_flow_error_set(error, EINVAL,
3827 						RTE_FLOW_ERROR_TYPE_ACTION,
3828 						(void *)items->type,
3829 						"eth header not found");
3830 			if (!eth->ether_type)
3831 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3832 			break;
3833 		case RTE_FLOW_ITEM_TYPE_IPV4:
3834 			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3835 			if (!vlan && !eth)
3836 				return rte_flow_error_set(error, EINVAL,
3837 						RTE_FLOW_ERROR_TYPE_ACTION,
3838 						(void *)items->type,
3839 						"neither eth nor vlan"
3840 						" header found");
3841 			if (vlan && !vlan->eth_proto)
3842 				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3843 			else if (eth && !eth->ether_type)
3844 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3845 			if (!ipv4->version_ihl)
3846 				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3847 						    MLX5_ENCAP_IPV4_IHL_MIN;
3848 			if (!ipv4->time_to_live)
3849 				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3850 			break;
3851 		case RTE_FLOW_ITEM_TYPE_IPV6:
3852 			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3853 			if (!vlan && !eth)
3854 				return rte_flow_error_set(error, EINVAL,
3855 						RTE_FLOW_ERROR_TYPE_ACTION,
3856 						(void *)items->type,
3857 						"neither eth nor vlan"
3858 						" header found");
3859 			if (vlan && !vlan->eth_proto)
3860 				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3861 			else if (eth && !eth->ether_type)
3862 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3863 			if (!ipv6->vtc_flow)
3864 				ipv6->vtc_flow =
3865 					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3866 			if (!ipv6->hop_limits)
3867 				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3868 			break;
3869 		case RTE_FLOW_ITEM_TYPE_UDP:
3870 			udp = (struct rte_udp_hdr *)&buf[temp_size];
3871 			if (!ipv4 && !ipv6)
3872 				return rte_flow_error_set(error, EINVAL,
3873 						RTE_FLOW_ERROR_TYPE_ACTION,
3874 						(void *)items->type,
3875 						"ip header not found");
3876 			if (ipv4 && !ipv4->next_proto_id)
3877 				ipv4->next_proto_id = IPPROTO_UDP;
3878 			else if (ipv6 && !ipv6->proto)
3879 				ipv6->proto = IPPROTO_UDP;
3880 			break;
3881 		case RTE_FLOW_ITEM_TYPE_VXLAN:
3882 			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3883 			if (!udp)
3884 				return rte_flow_error_set(error, EINVAL,
3885 						RTE_FLOW_ERROR_TYPE_ACTION,
3886 						(void *)items->type,
3887 						"udp header not found");
3888 			if (!udp->dst_port)
3889 				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3890 			if (!vxlan->vx_flags)
3891 				vxlan->vx_flags =
3892 					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3893 			break;
3894 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3895 			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3896 			if (!udp)
3897 				return rte_flow_error_set(error, EINVAL,
3898 						RTE_FLOW_ERROR_TYPE_ACTION,
3899 						(void *)items->type,
3900 						"udp header not found");
3901 			if (!vxlan_gpe->proto)
3902 				return rte_flow_error_set(error, EINVAL,
3903 						RTE_FLOW_ERROR_TYPE_ACTION,
3904 						(void *)items->type,
3905 						"next protocol not found");
3906 			if (!udp->dst_port)
3907 				udp->dst_port =
3908 					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3909 			if (!vxlan_gpe->vx_flags)
3910 				vxlan_gpe->vx_flags =
3911 						MLX5_ENCAP_VXLAN_GPE_FLAGS;
3912 			break;
3913 		case RTE_FLOW_ITEM_TYPE_GRE:
3914 		case RTE_FLOW_ITEM_TYPE_NVGRE:
3915 			gre = (struct rte_gre_hdr *)&buf[temp_size];
3916 			if (!gre->proto)
3917 				return rte_flow_error_set(error, EINVAL,
3918 						RTE_FLOW_ERROR_TYPE_ACTION,
3919 						(void *)items->type,
3920 						"next protocol not found");
3921 			if (!ipv4 && !ipv6)
3922 				return rte_flow_error_set(error, EINVAL,
3923 						RTE_FLOW_ERROR_TYPE_ACTION,
3924 						(void *)items->type,
3925 						"ip header not found");
3926 			if (ipv4 && !ipv4->next_proto_id)
3927 				ipv4->next_proto_id = IPPROTO_GRE;
3928 			else if (ipv6 && !ipv6->proto)
3929 				ipv6->proto = IPPROTO_GRE;
3930 			break;
3931 		case RTE_FLOW_ITEM_TYPE_VOID:
3932 			break;
3933 		default:
3934 			return rte_flow_error_set(error, EINVAL,
3935 						  RTE_FLOW_ERROR_TYPE_ACTION,
3936 						  (void *)items->type,
3937 						  "unsupported item type");
3938 			break;
3939 		}
3940 		temp_size += len;
3941 	}
3942 	*size = temp_size;
3943 	return 0;
3944 }
3945 
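/*
 * Illustrative sketch only, not part of the driver: a VXLAN encap
 * definition as an application might pass it. Header fields left at
 * zero (EtherType, UDP destination port, VXLAN flags) are filled with
 * defaults by flow_dv_convert_encap_data() above. All names and
 * addresses here are hypothetical.
 */
static __rte_unused int
example_build_vxlan_encap(uint8_t buf[MLX5_ENCAP_MAX_LEN], size_t *size,
			  struct rte_flow_error *err)
{
	static const struct rte_flow_item_eth eth; /* Zeroed: defaults. */
	static const struct rte_flow_item_ipv4 ipv4 = {
		.hdr = {
			.src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
			.dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
		},
	};
	static const struct rte_flow_item_udp udp; /* Port 0: VXLAN 4789. */
	static const struct rte_flow_item_vxlan vxlan = {
		.vni = { 0x00, 0x00, 0x2a }, /* VNI 42. */
	};
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return flow_dv_convert_encap_data(items, buf, size, err);
}
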
3946 static int
3947 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3948 {
3949 	struct rte_ether_hdr *eth = NULL;
3950 	struct rte_vlan_hdr *vlan = NULL;
3951 	struct rte_ipv6_hdr *ipv6 = NULL;
3952 	struct rte_udp_hdr *udp = NULL;
3953 	char *next_hdr;
3954 	uint16_t proto;
3955 
3956 	eth = (struct rte_ether_hdr *)data;
3957 	next_hdr = (char *)(eth + 1);
3958 	proto = RTE_BE16(eth->ether_type);
3959 
3960 	/* VLAN skipping */
3961 	while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3962 		vlan = (struct rte_vlan_hdr *)next_hdr;
3963 		proto = RTE_BE16(vlan->eth_proto);
3964 		next_hdr += sizeof(struct rte_vlan_hdr);
3965 	}
3966 
3967 	/* HW calculates the IPv4 checksum, no need to proceed. */
3968 	if (proto == RTE_ETHER_TYPE_IPV4)
3969 		return 0;
3970 
3971 	/* Non IPv4/IPv6 header, not supported. */
3972 	if (proto != RTE_ETHER_TYPE_IPV6) {
3973 		return rte_flow_error_set(error, ENOTSUP,
3974 					  RTE_FLOW_ERROR_TYPE_ACTION,
3975 					  NULL, "Cannot offload non IPv4/IPv6");
3976 	}
3977 
3978 	ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3979 
3980 	/* Ignore non-UDP packets. */
3981 	if (ipv6->proto != IPPROTO_UDP)
3982 		return 0;
3983 
3984 	udp = (struct rte_udp_hdr *)(ipv6 + 1);
3985 	udp->dgram_cksum = 0;
3986 
3987 	return 0;
3988 }
3989 
3990 /**
3991  * Convert L2 encap action to DV specification.
3992  *
3993  * @param[in] dev
3994  *   Pointer to rte_eth_dev structure.
3995  * @param[in] action
3996  *   Pointer to action structure.
3997  * @param[in, out] dev_flow
3998  *   Pointer to the mlx5_flow.
3999  * @param[in] transfer
4000  *   Indicate whether the flow is an E-Switch flow.
4001  * @param[out] error
4002  *   Pointer to the error structure.
4003  *
4004  * @return
4005  *   0 on success, a negative errno value otherwise and rte_errno is set.
4006  */
4007 static int
4008 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4009 			       const struct rte_flow_action *action,
4010 			       struct mlx5_flow *dev_flow,
4011 			       uint8_t transfer,
4012 			       struct rte_flow_error *error)
4013 {
4014 	const struct rte_flow_item *encap_data;
4015 	const struct rte_flow_action_raw_encap *raw_encap_data;
4016 	struct mlx5_flow_dv_encap_decap_resource res = {
4017 		.reformat_type =
4018 			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4019 		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4020 				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4021 	};
4022 
4023 	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4024 		raw_encap_data =
4025 			(const struct rte_flow_action_raw_encap *)action->conf;
4026 		res.size = raw_encap_data->size;
4027 		memcpy(res.buf, raw_encap_data->data, res.size);
4028 	} else {
4029 		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4030 			encap_data =
4031 				((const struct rte_flow_action_vxlan_encap *)
4032 						action->conf)->definition;
4033 		else
4034 			encap_data =
4035 				((const struct rte_flow_action_nvgre_encap *)
4036 						action->conf)->definition;
4037 		if (flow_dv_convert_encap_data(encap_data, res.buf,
4038 					       &res.size, error))
4039 			return -rte_errno;
4040 	}
4041 	if (flow_dv_zero_encap_udp_csum(res.buf, error))
4042 		return -rte_errno;
4043 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4044 		return rte_flow_error_set(error, EINVAL,
4045 					  RTE_FLOW_ERROR_TYPE_ACTION,
4046 					  NULL, "can't create L2 encap action");
4047 	return 0;
4048 }
4049 
4050 /**
4051  * Convert L2 decap action to DV specification.
4052  *
4053  * @param[in] dev
4054  *   Pointer to rte_eth_dev structure.
4055  * @param[in, out] dev_flow
4056  *   Pointer to the mlx5_flow.
4057  * @param[in] transfer
4058  *   Indicate whether the flow is an E-Switch flow.
4059  * @param[out] error
4060  *   Pointer to the error structure.
4061  *
4062  * @return
4063  *   0 on success, a negative errno value otherwise and rte_errno is set.
4064  */
4065 static int
4066 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4067 			       struct mlx5_flow *dev_flow,
4068 			       uint8_t transfer,
4069 			       struct rte_flow_error *error)
4070 {
4071 	struct mlx5_flow_dv_encap_decap_resource res = {
4072 		.size = 0,
4073 		.reformat_type =
4074 			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4075 		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4076 				      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4077 	};
4078 
4079 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4080 		return rte_flow_error_set(error, EINVAL,
4081 					  RTE_FLOW_ERROR_TYPE_ACTION,
4082 					  NULL, "can't create L2 decap action");
4083 	return 0;
4084 }
4085 
4086 /**
4087  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4088  *
4089  * @param[in] dev
4090  *   Pointer to rte_eth_dev structure.
4091  * @param[in] action
4092  *   Pointer to action structure.
4093  * @param[in, out] dev_flow
4094  *   Pointer to the mlx5_flow.
4095  * @param[in] attr
4096  *   Pointer to the flow attributes.
4097  * @param[out] error
4098  *   Pointer to the error structure.
4099  *
4100  * @return
4101  *   0 on success, a negative errno value otherwise and rte_errno is set.
4102  */
4103 static int
4104 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4105 				const struct rte_flow_action *action,
4106 				struct mlx5_flow *dev_flow,
4107 				const struct rte_flow_attr *attr,
4108 				struct rte_flow_error *error)
4109 {
4110 	const struct rte_flow_action_raw_encap *encap_data;
4111 	struct mlx5_flow_dv_encap_decap_resource res;
4112 
4113 	memset(&res, 0, sizeof(res));
4114 	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4115 	res.size = encap_data->size;
4116 	memcpy(res.buf, encap_data->data, res.size);
4117 	res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4118 		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4119 		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4120 	if (attr->transfer)
4121 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4122 	else
4123 		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4124 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4125 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4126 		return rte_flow_error_set(error, EINVAL,
4127 					  RTE_FLOW_ERROR_TYPE_ACTION,
4128 					  NULL, "can't create encap action");
4129 	return 0;
4130 }
4131 
4132 /**
4133  * Create action push VLAN.
4134  *
4135  * @param[in] dev
4136  *   Pointer to rte_eth_dev structure.
4137  * @param[in] attr
4138  *   Pointer to the flow attributes.
4139  * @param[in] vlan
4140  *   Pointer to the vlan to push to the Ethernet header.
4141  * @param[in, out] dev_flow
4142  *   Pointer to the mlx5_flow.
4143  * @param[out] error
4144  *   Pointer to the error structure.
4145  *
4146  * @return
4147  *   0 on success, a negative errno value otherwise and rte_errno is set.
4148  */
4149 static int
4150 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4151 				const struct rte_flow_attr *attr,
4152 				const struct rte_vlan_hdr *vlan,
4153 				struct mlx5_flow *dev_flow,
4154 				struct rte_flow_error *error)
4155 {
4156 	struct mlx5_flow_dv_push_vlan_action_resource res;
4157 
4158 	memset(&res, 0, sizeof(res));
4159 	res.vlan_tag =
4160 		rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4161 				 vlan->vlan_tci);
4162 	if (attr->transfer)
4163 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4164 	else
4165 		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4166 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4167 	return flow_dv_push_vlan_action_resource_register
4168 					    (dev, &res, dev_flow, error);
4169 }
4170 
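/*
 * Illustrative sketch only (hypothetical helper): the 32-bit push VLAN
 * tag combines the TPID and the TCI exactly as done in
 * flow_dv_create_action_push_vlan() above, e.g. TPID 0x8100, PCP 3,
 * VID 100.
 */
static __rte_unused rte_be32_t
example_push_vlan_tag(uint16_t tpid, uint16_t pcp, uint16_t vid)
{
	uint16_t tci = (pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) |
		       (vid & MLX5DV_FLOW_VLAN_VID_MASK);

	return rte_cpu_to_be_32(((uint32_t)tpid << 16) | tci);
}
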
4171 /**
4172  * Validate the modify-header actions.
4173  *
4174  * @param[in] action_flags
4175  *   Holds the actions detected until now.
4176  * @param[in] action
4177  *   Pointer to the modify action.
4178  * @param[out] error
4179  *   Pointer to error structure.
4180  *
4181  * @return
4182  *   0 on success, a negative errno value otherwise and rte_errno is set.
4183  */
4184 static int
4185 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4186 				   const struct rte_flow_action *action,
4187 				   struct rte_flow_error *error)
4188 {
4189 	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4190 		return rte_flow_error_set(error, EINVAL,
4191 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4192 					  NULL, "action configuration not set");
4193 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4194 		return rte_flow_error_set(error, EINVAL,
4195 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4196 					  "can't have encap action before"
4197 					  " modify action");
4198 	return 0;
4199 }
4200 
4201 /**
4202  * Validate the modify-header MAC address actions.
4203  *
4204  * @param[in] action_flags
4205  *   Holds the actions detected until now.
4206  * @param[in] action
4207  *   Pointer to the modify action.
4208  * @param[in] item_flags
4209  *   Holds the items detected.
4210  * @param[out] error
4211  *   Pointer to error structure.
4212  *
4213  * @return
4214  *   0 on success, a negative errno value otherwise and rte_errno is set.
4215  */
4216 static int
4217 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4218 				   const struct rte_flow_action *action,
4219 				   const uint64_t item_flags,
4220 				   struct rte_flow_error *error)
4221 {
4222 	int ret = 0;
4223 
4224 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4225 	if (!ret) {
4226 		if (!(item_flags & MLX5_FLOW_LAYER_L2))
4227 			return rte_flow_error_set(error, EINVAL,
4228 						  RTE_FLOW_ERROR_TYPE_ACTION,
4229 						  NULL,
4230 						  "no L2 item in pattern");
4231 	}
4232 	return ret;
4233 }
4234 
4235 /**
4236  * Validate the modify-header IPv4 address actions.
4237  *
4238  * @param[in] action_flags
4239  *   Holds the actions detected until now.
4240  * @param[in] action
4241  *   Pointer to the modify action.
4242  * @param[in] item_flags
4243  *   Holds the items detected.
4244  * @param[out] error
4245  *   Pointer to error structure.
4246  *
4247  * @return
4248  *   0 on success, a negative errno value otherwise and rte_errno is set.
4249  */
4250 static int
4251 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4252 				    const struct rte_flow_action *action,
4253 				    const uint64_t item_flags,
4254 				    struct rte_flow_error *error)
4255 {
4256 	int ret = 0;
4257 	uint64_t layer;
4258 
4259 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4260 	if (!ret) {
4261 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4262 				 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4263 				 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4264 		if (!(item_flags & layer))
4265 			return rte_flow_error_set(error, EINVAL,
4266 						  RTE_FLOW_ERROR_TYPE_ACTION,
4267 						  NULL,
4268 						  "no ipv4 item in pattern");
4269 	}
4270 	return ret;
4271 }
4272 
4273 /**
4274  * Validate the modify-header IPv6 address actions.
4275  *
4276  * @param[in] action_flags
4277  *   Holds the actions detected until now.
4278  * @param[in] action
4279  *   Pointer to the modify action.
4280  * @param[in] item_flags
4281  *   Holds the items detected.
4282  * @param[out] error
4283  *   Pointer to error structure.
4284  *
4285  * @return
4286  *   0 on success, a negative errno value otherwise and rte_errno is set.
4287  */
4288 static int
4289 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4290 				    const struct rte_flow_action *action,
4291 				    const uint64_t item_flags,
4292 				    struct rte_flow_error *error)
4293 {
4294 	int ret = 0;
4295 	uint64_t layer;
4296 
4297 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4298 	if (!ret) {
4299 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4300 				 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4301 				 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4302 		if (!(item_flags & layer))
4303 			return rte_flow_error_set(error, EINVAL,
4304 						  RTE_FLOW_ERROR_TYPE_ACTION,
4305 						  NULL,
4306 						  "no ipv6 item in pattern");
4307 	}
4308 	return ret;
4309 }
4310 
4311 /**
4312  * Validate the modify-header TP actions.
4313  *
4314  * @param[in] action_flags
4315  *   Holds the actions detected until now.
4316  * @param[in] action
4317  *   Pointer to the modify action.
4318  * @param[in] item_flags
4319  *   Holds the items detected.
4320  * @param[out] error
4321  *   Pointer to error structure.
4322  *
4323  * @return
4324  *   0 on success, a negative errno value otherwise and rte_errno is set.
4325  */
4326 static int
4327 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4328 				  const struct rte_flow_action *action,
4329 				  const uint64_t item_flags,
4330 				  struct rte_flow_error *error)
4331 {
4332 	int ret = 0;
4333 	uint64_t layer;
4334 
4335 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4336 	if (!ret) {
4337 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4338 				 MLX5_FLOW_LAYER_INNER_L4 :
4339 				 MLX5_FLOW_LAYER_OUTER_L4;
4340 		if (!(item_flags & layer))
4341 			return rte_flow_error_set(error, EINVAL,
4342 						  RTE_FLOW_ERROR_TYPE_ACTION,
4343 						  NULL, "no transport layer "
4344 						  "in pattern");
4345 	}
4346 	return ret;
4347 }
4348 
4349 /**
4350  * Validate the modify-header actions of increment/decrement
4351  * TCP Sequence-number.
4352  *
4353  * @param[in] action_flags
4354  *   Holds the actions detected until now.
4355  * @param[in] action
4356  *   Pointer to the modify action.
4357  * @param[in] item_flags
4358  *   Holds the items detected.
4359  * @param[out] error
4360  *   Pointer to error structure.
4361  *
4362  * @return
4363  *   0 on success, a negative errno value otherwise and rte_errno is set.
4364  */
4365 static int
4366 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4367 				       const struct rte_flow_action *action,
4368 				       const uint64_t item_flags,
4369 				       struct rte_flow_error *error)
4370 {
4371 	int ret = 0;
4372 	uint64_t layer;
4373 
4374 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4375 	if (!ret) {
4376 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4377 				 MLX5_FLOW_LAYER_INNER_L4_TCP :
4378 				 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4379 		if (!(item_flags & layer))
4380 			return rte_flow_error_set(error, EINVAL,
4381 						  RTE_FLOW_ERROR_TYPE_ACTION,
4382 						  NULL, "no TCP item in"
4383 						  " pattern");
4384 		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4385 			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4386 		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4387 			(action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4388 			return rte_flow_error_set(error, EINVAL,
4389 						  RTE_FLOW_ERROR_TYPE_ACTION,
4390 						  NULL,
4391 						  "cannot decrease and increase"
4392 						  " TCP sequence number"
4393 						  " at the same time");
4394 	}
4395 	return ret;
4396 }
4397 
4398 /**
4399  * Validate the modify-header actions of increment/decrement
4400  * TCP Acknowledgment number.
4401  *
4402  * @param[in] action_flags
4403  *   Holds the actions detected until now.
4404  * @param[in] action
4405  *   Pointer to the modify action.
4406  * @param[in] item_flags
4407  *   Holds the items detected.
4408  * @param[out] error
4409  *   Pointer to error structure.
4410  *
4411  * @return
4412  *   0 on success, a negative errno value otherwise and rte_errno is set.
4413  */
4414 static int
4415 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4416 				       const struct rte_flow_action *action,
4417 				       const uint64_t item_flags,
4418 				       struct rte_flow_error *error)
4419 {
4420 	int ret = 0;
4421 	uint64_t layer;
4422 
4423 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4424 	if (!ret) {
4425 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4426 				 MLX5_FLOW_LAYER_INNER_L4_TCP :
4427 				 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4428 		if (!(item_flags & layer))
4429 			return rte_flow_error_set(error, EINVAL,
4430 						  RTE_FLOW_ERROR_TYPE_ACTION,
4431 						  NULL, "no TCP item in"
4432 						  " pattern");
4433 		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4434 			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4435 		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4436 			(action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4437 			return rte_flow_error_set(error, EINVAL,
4438 						  RTE_FLOW_ERROR_TYPE_ACTION,
4439 						  NULL,
4440 						  "cannot decrease and increase"
4441 						  " TCP acknowledgment number"
4442 						  " at the same time");
4443 	}
4444 	return ret;
4445 }
4446 
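/*
 * Illustrative sketch only, not part of the driver: per the two checks
 * above, a rule may either increase or decrease the TCP sequence (or
 * acknowledgment) number, never both directions at once, and the
 * pattern must contain a TCP item. The constant name is hypothetical.
 */
static __rte_unused const struct rte_flow_action *
example_dec_tcp_seq_action(void)
{
	static const rte_be32_t dec_by_one = RTE_BE32(1);
	static const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ,
		.conf = &dec_by_one,
	};

	return &action;
}
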
4447 /**
4448  * Validate the modify-header TTL actions.
4449  *
4450  * @param[in] action_flags
4451  *   Holds the actions detected until now.
4452  * @param[in] action
4453  *   Pointer to the modify action.
4454  * @param[in] item_flags
4455  *   Holds the items detected.
4456  * @param[out] error
4457  *   Pointer to error structure.
4458  *
4459  * @return
4460  *   0 on success, a negative errno value otherwise and rte_errno is set.
4461  */
4462 static int
4463 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4464 				   const struct rte_flow_action *action,
4465 				   const uint64_t item_flags,
4466 				   struct rte_flow_error *error)
4467 {
4468 	int ret = 0;
4469 	uint64_t layer;
4470 
4471 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4472 	if (!ret) {
4473 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4474 				 MLX5_FLOW_LAYER_INNER_L3 :
4475 				 MLX5_FLOW_LAYER_OUTER_L3;
4476 		if (!(item_flags & layer))
4477 			return rte_flow_error_set(error, EINVAL,
4478 						  RTE_FLOW_ERROR_TYPE_ACTION,
4479 						  NULL,
4480 						  "no IP protocol in pattern");
4481 	}
4482 	return ret;
4483 }
4484 
4485 static int
4486 mlx5_flow_item_field_width(enum rte_flow_field_id field)
4487 {
4488 	switch (field) {
4489 	case RTE_FLOW_FIELD_START:
4490 		return 32;
4491 	case RTE_FLOW_FIELD_MAC_DST:
4492 	case RTE_FLOW_FIELD_MAC_SRC:
4493 		return 48;
4494 	case RTE_FLOW_FIELD_VLAN_TYPE:
4495 		return 16;
4496 	case RTE_FLOW_FIELD_VLAN_ID:
4497 		return 12;
4498 	case RTE_FLOW_FIELD_MAC_TYPE:
4499 		return 16;
4500 	case RTE_FLOW_FIELD_IPV4_DSCP:
4501 		return 6;
4502 	case RTE_FLOW_FIELD_IPV4_TTL:
4503 		return 8;
4504 	case RTE_FLOW_FIELD_IPV4_SRC:
4505 	case RTE_FLOW_FIELD_IPV4_DST:
4506 		return 32;
4507 	case RTE_FLOW_FIELD_IPV6_DSCP:
4508 		return 6;
4509 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
4510 		return 8;
4511 	case RTE_FLOW_FIELD_IPV6_SRC:
4512 	case RTE_FLOW_FIELD_IPV6_DST:
4513 		return 128;
4514 	case RTE_FLOW_FIELD_TCP_PORT_SRC:
4515 	case RTE_FLOW_FIELD_TCP_PORT_DST:
4516 		return 16;
4517 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
4518 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
4519 		return 32;
4520 	case RTE_FLOW_FIELD_TCP_FLAGS:
4521 		return 6;
4522 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
4523 	case RTE_FLOW_FIELD_UDP_PORT_DST:
4524 		return 16;
4525 	case RTE_FLOW_FIELD_VXLAN_VNI:
4526 	case RTE_FLOW_FIELD_GENEVE_VNI:
4527 		return 24;
4528 	case RTE_FLOW_FIELD_GTP_TEID:
4529 	case RTE_FLOW_FIELD_TAG:
4530 		return 32;
4531 	case RTE_FLOW_FIELD_MARK:
4532 		return 24;
4533 	case RTE_FLOW_FIELD_META:
4534 	case RTE_FLOW_FIELD_POINTER:
4535 	case RTE_FLOW_FIELD_VALUE:
4536 		return 32;
4537 	default:
4538 		MLX5_ASSERT(false);
4539 	}
4540 	return 0;
4541 }
4542 
4543 /**
4544  * Validate the generic modify field actions.
 *
4545  * @param[in] dev
4546  *   Pointer to the rte_eth_dev structure.
4547  * @param[in] action_flags
4548  *   Holds the actions detected until now.
4549  * @param[in] action
4550  *   Pointer to the modify action.
4551  * @param[in] attr
4552  *   Pointer to the flow attributes.
4553  * @param[out] error
4554  *   Pointer to error structure.
4555  *
4556  * @return
4557  *   Number of header fields to modify (0 or more) on success,
4558  *   a negative errno value otherwise and rte_errno is set.
4559  */
4560 static int
4561 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4562 				   const uint64_t action_flags,
4563 				   const struct rte_flow_action *action,
4564 				   const struct rte_flow_attr *attr,
4565 				   struct rte_flow_error *error)
4566 {
4567 	int ret = 0;
4568 	struct mlx5_priv *priv = dev->data->dev_private;
4569 	struct mlx5_dev_config *config = &priv->config;
4570 	const struct rte_flow_action_modify_field *action_modify_field =
4571 		action->conf;
4572 	uint32_t dst_width =
4573 		mlx5_flow_item_field_width(action_modify_field->dst.field);
4574 	uint32_t src_width =
4575 		mlx5_flow_item_field_width(action_modify_field->src.field);
4576 
4577 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4578 	if (ret)
4579 		return ret;
4580 
4581 	if (action_modify_field->width == 0)
4582 		return rte_flow_error_set(error, EINVAL,
4583 					RTE_FLOW_ERROR_TYPE_ACTION,
4584 					NULL,
4585 					"no bits are requested to be modified");
4586 	else if (action_modify_field->width > dst_width ||
4587 		 action_modify_field->width > src_width)
4588 		return rte_flow_error_set(error, EINVAL,
4589 					RTE_FLOW_ERROR_TYPE_ACTION,
4590 					NULL,
4591 					"cannot modify more bits than"
4592 					" the width of a field");
4593 	if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4594 	    action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4595 		if ((action_modify_field->dst.offset +
4596 		     action_modify_field->width > dst_width) ||
4597 		    (action_modify_field->dst.offset % 32))
4598 			return rte_flow_error_set(error, EINVAL,
4599 						RTE_FLOW_ERROR_TYPE_ACTION,
4600 						NULL,
4601 						"destination offset is too big"
4602 						" or not aligned to 4 bytes");
4603 		if (action_modify_field->dst.level &&
4604 		    action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4605 			return rte_flow_error_set(error, EINVAL,
4606 						RTE_FLOW_ERROR_TYPE_ACTION,
4607 						NULL,
4608 						"cannot modify inner headers");
4609 	}
4610 	if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4611 	    action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4612 		if (!attr->transfer && !attr->group)
4613 			return rte_flow_error_set(error, ENOTSUP,
4614 					RTE_FLOW_ERROR_TYPE_ACTION,
4615 					NULL, "modify field action "
4616 					"is not supported for group 0");
4617 		if ((action_modify_field->src.offset +
4618 		     action_modify_field->width > src_width) ||
4619 		    (action_modify_field->src.offset % 32))
4620 			return rte_flow_error_set(error, EINVAL,
4621 						RTE_FLOW_ERROR_TYPE_ACTION,
4622 						NULL,
4623 						"source offset is too big"
4624 						" or not aligned to 4 bytes");
4625 		if (action_modify_field->src.level &&
4626 		    action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4627 			return rte_flow_error_set(error, EINVAL,
4628 						RTE_FLOW_ERROR_TYPE_ACTION,
4629 						NULL,
4630 						"cannot copy from inner headers");
4631 	}
4632 	if (action_modify_field->dst.field ==
4633 	    action_modify_field->src.field)
4634 		return rte_flow_error_set(error, EINVAL,
4635 					RTE_FLOW_ERROR_TYPE_ACTION,
4636 					NULL,
4637 					"source and destination fields"
4638 					" cannot be the same");
4639 	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4640 	    action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4641 		return rte_flow_error_set(error, EINVAL,
4642 					RTE_FLOW_ERROR_TYPE_ACTION,
4643 					NULL,
4644 					"immediate value or a pointer to it"
4645 					" cannot be used as a destination");
4646 	if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4647 	    action_modify_field->src.field == RTE_FLOW_FIELD_START)
4648 		return rte_flow_error_set(error, EINVAL,
4649 				RTE_FLOW_ERROR_TYPE_ACTION,
4650 				NULL,
4651 				"modification of an arbitrary"
4652 				" place in a packet is not supported");
4653 	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4654 	    action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4655 		return rte_flow_error_set(error, EINVAL,
4656 				RTE_FLOW_ERROR_TYPE_ACTION,
4657 				NULL,
4658 				"modification of the 802.1Q Tag"
4659 				" Identifier is not supported");
4660 	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4661 	    action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4662 		return rte_flow_error_set(error, EINVAL,
4663 				RTE_FLOW_ERROR_TYPE_ACTION,
4664 				NULL,
4665 				"modification of the VXLAN Network"
4666 				" Identifier is not supported");
4667 	if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4668 	    action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4669 		return rte_flow_error_set(error, EINVAL,
4670 				RTE_FLOW_ERROR_TYPE_ACTION,
4671 				NULL,
4672 				"modification of the GENEVE Network"
4673 				" Identifier is not supported");
4674 	if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4675 	    action_modify_field->src.field == RTE_FLOW_FIELD_MARK) {
4676 		if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4677 		    !mlx5_flow_ext_mreg_supported(dev))
4678 			return rte_flow_error_set(error, ENOTSUP,
4679 					RTE_FLOW_ERROR_TYPE_ACTION, action,
4680 					"cannot modify mark without extended"
4681 					" metadata register support");
4682 	}
4683 	if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4684 		return rte_flow_error_set(error, EINVAL,
4685 				RTE_FLOW_ERROR_TYPE_ACTION,
4686 				NULL,
4687 				"only the SET operation"
4688 				" is supported");
4689 	return (action_modify_field->width / 32) +
4690 	       !!(action_modify_field->width % 32);
4691 }
4692 
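/*
 * Illustrative sketch only, not part of the driver: a modify_field
 * configuration that passes the checks above - SET operation, an 8-bit
 * copy from an immediate value into the IPv4 TTL, with 32-bit aligned
 * offsets and a width not exceeding either field. Names are
 * hypothetical.
 */
static __rte_unused const struct rte_flow_action *
example_set_ipv4_ttl_action(void)
{
	static const struct rte_flow_action_modify_field conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = { .field = RTE_FLOW_FIELD_IPV4_TTL, .offset = 0 },
		.src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
		.width = 8,
	};
	static const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
		.conf = &conf,
	};

	return &action;
}
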
4693 /**
4694  * Validate jump action.
4695  *
4696  * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL if none.
 * @param[in] action
4697  *   Pointer to the jump action.
4698  * @param[in] action_flags
4699  *   Holds the actions detected until now.
4700  * @param[in] attributes
4701  *   Pointer to flow attributes.
4702  * @param[in] external
4703  *   True if the action belongs to a flow rule created by a request
 *   external to the PMD.
4704  * @param[out] error
4705  *   Pointer to error structure.
4706  *
4707  * @return
4708  *   0 on success, a negative errno value otherwise and rte_errno is set.
4709  */
4710 static int
4711 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4712 			     const struct mlx5_flow_tunnel *tunnel,
4713 			     const struct rte_flow_action *action,
4714 			     uint64_t action_flags,
4715 			     const struct rte_flow_attr *attributes,
4716 			     bool external, struct rte_flow_error *error)
4717 {
4718 	uint32_t target_group, table;
4719 	int ret = 0;
4720 	struct flow_grp_info grp_info = {
4721 		.external = !!external,
4722 		.transfer = !!attributes->transfer,
4723 		.fdb_def_rule = 1,
4724 		.std_tbl_fix = 0
4725 	};
4726 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4727 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4728 		return rte_flow_error_set(error, EINVAL,
4729 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4730 					  "can't have 2 fate actions in"
4731 					  " the same flow");
4732 	if (action_flags & MLX5_FLOW_ACTION_METER)
4733 		return rte_flow_error_set(error, ENOTSUP,
4734 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4735 					  "jump with meter not supported");
4736 	if (!action->conf)
4737 		return rte_flow_error_set(error, EINVAL,
4738 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4739 					  NULL, "action configuration not set");
4740 	target_group =
4741 		((const struct rte_flow_action_jump *)action->conf)->group;
4742 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4743 				       &grp_info, error);
4744 	if (ret)
4745 		return ret;
4746 	if (attributes->group == target_group &&
4747 	    !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4748 			      MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4749 		return rte_flow_error_set(error, EINVAL,
4750 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4751 					  "target group must be other than"
4752 					  " the current flow group");
4753 	return 0;
4754 }
4755 
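/*
 * Illustrative sketch only, not part of the driver: a jump from
 * group 0 to group 1, the common pattern accepted by the validation
 * above since the target group differs from the current one.
 */
static __rte_unused int
example_jump_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1, .group = 0 };
	static const struct rte_flow_action_jump jump = { .group = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
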
4756 /**
4757  * Validate the port_id action.
4758  *
4759  * @param[in] dev
4760  *   Pointer to rte_eth_dev structure.
4761  * @param[in] action_flags
4762  *   Bit-fields that hold the actions detected until now.
4763  * @param[in] action
4764  *   Port_id RTE action structure.
4765  * @param[in] attr
4766  *   Attributes of flow that includes this action.
4767  * @param[out] error
4768  *   Pointer to error structure.
4769  *
4770  * @return
4771  *   0 on success, a negative errno value otherwise and rte_errno is set.
4772  */
4773 static int
4774 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4775 				uint64_t action_flags,
4776 				const struct rte_flow_action *action,
4777 				const struct rte_flow_attr *attr,
4778 				struct rte_flow_error *error)
4779 {
4780 	const struct rte_flow_action_port_id *port_id;
4781 	struct mlx5_priv *act_priv;
4782 	struct mlx5_priv *dev_priv;
4783 	uint16_t port;
4784 
4785 	if (!attr->transfer)
4786 		return rte_flow_error_set(error, ENOTSUP,
4787 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4788 					  NULL,
4789 					  "port id action is valid in transfer"
4790 					  " mode only");
4791 	if (!action || !action->conf)
4792 		return rte_flow_error_set(error, ENOTSUP,
4793 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4794 					  NULL,
4795 					  "port id action parameters must be"
4796 					  " specified");
4797 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4798 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4799 		return rte_flow_error_set(error, EINVAL,
4800 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4801 					  "can have only one fate action in"
4802 					  " a flow");
4803 	dev_priv = mlx5_dev_to_eswitch_info(dev);
4804 	if (!dev_priv)
4805 		return rte_flow_error_set(error, rte_errno,
4806 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4807 					  NULL,
4808 					  "failed to obtain E-Switch info");
4809 	port_id = action->conf;
4810 	port = port_id->original ? dev->data->port_id : port_id->id;
4811 	act_priv = mlx5_port_to_eswitch_info(port, false);
4812 	if (!act_priv)
4813 		return rte_flow_error_set
4814 				(error, rte_errno,
4815 				 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4816 				 "failed to obtain E-Switch port id for port");
4817 	if (act_priv->domain_id != dev_priv->domain_id)
4818 		return rte_flow_error_set
4819 				(error, EINVAL,
4820 				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4821 				 "port does not belong to"
4822 				 " E-Switch being configured");
4823 	return 0;
4824 }
4825 
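/*
 * Illustrative sketch only, not part of the driver: the port_id action
 * requires a transfer rule and both ports on the same E-Switch domain,
 * as validated above. Helper and parameter names are hypothetical.
 */
static __rte_unused int
example_port_id_rule(uint16_t proxy_port, uint16_t dst_port,
		     struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = {
		.transfer = 1, /* Mandatory for the port_id action. */
		.ingress = 1,
	};
	const struct rte_flow_action_port_id pid = { .id = dst_port };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(proxy_port, &attr, pattern, actions, err);
}
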
4826 /**
4827  * Get the maximum number of modify header actions.
4828  *
4829  * @param dev
4830  *   Pointer to rte_eth_dev structure.
4831  * @param flags
4832  *   Flags bits to check if root level.
4833  *
4834  * @return
4835  *   Max number of modify header actions the device can support.
4836  */
4837 static inline unsigned int
4838 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4839 			      uint64_t flags)
4840 {
4841 	/*
4842 	 * There's no way to directly query the max capacity from FW.
4843 	 * The maximal value on root table should be assumed to be supported.
4844 	 */
4845 	if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4846 		return MLX5_MAX_MODIFY_NUM;
4847 	else
4848 		return MLX5_ROOT_TBL_MODIFY_NUM;
4849 }
4850 
4851 /**
4852  * Validate the meter action.
4853  *
4854  * @param[in] dev
4855  *   Pointer to rte_eth_dev structure.
4856  * @param[in] action_flags
4857  *   Bit-fields that hold the actions detected until now.
4858  * @param[in] action
4859  *   Pointer to the meter action.
4860  * @param[in] attr
4861  *   Attributes of flow that includes this action.
4862  * @param[out] error
4863  *   Pointer to error structure.
4864  *
4865  * @return
4866  *   0 on success, a negative errno value otherwise and rte_errno is set.
4867  */
4868 static int
4869 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4870 				uint64_t action_flags,
4871 				const struct rte_flow_action *action,
4872 				const struct rte_flow_attr *attr,
4873 				struct rte_flow_error *error)
4874 {
4875 	struct mlx5_priv *priv = dev->data->dev_private;
4876 	const struct rte_flow_action_meter *am = action->conf;
4877 	struct mlx5_flow_meter *fm;
4878 
4879 	if (!am)
4880 		return rte_flow_error_set(error, EINVAL,
4881 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4882 					  "meter action conf is NULL");
4883 
4884 	if (action_flags & MLX5_FLOW_ACTION_METER)
4885 		return rte_flow_error_set(error, ENOTSUP,
4886 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4887 					  "meter chaining not supported");
4888 	if (action_flags & MLX5_FLOW_ACTION_JUMP)
4889 		return rte_flow_error_set(error, ENOTSUP,
4890 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4891 					  "meter with jump not supported");
4892 	if (!priv->mtr_en)
4893 		return rte_flow_error_set(error, ENOTSUP,
4894 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4895 					  NULL,
4896 					  "meter action not supported");
4897 	fm = mlx5_flow_meter_find(priv, am->mtr_id);
4898 	if (!fm)
4899 		return rte_flow_error_set(error, EINVAL,
4900 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4901 					  "Meter not found");
4902 	if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4903 	      (!fm->ingress && !attr->ingress && attr->egress) ||
4904 	      (!fm->egress && !attr->egress && attr->ingress))))
4905 		return rte_flow_error_set(error, EINVAL,
4906 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4907 					  "Flow attributes are either invalid "
4908 					  "or have a conflict with current "
4909 					  "meter attributes");
4910 	return 0;
4911 }
4912 
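/*
 * Illustrative sketch only, not part of the driver: a meter action
 * referencing a meter that must have been created beforehand through
 * the rte_mtr API with the same mtr_id; the id value here is
 * hypothetical.
 */
static __rte_unused const struct rte_flow_action *
example_meter_action(void)
{
	static const struct rte_flow_action_meter meter = { .mtr_id = 1 };
	static const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_METER,
		.conf = &meter,
	};

	return &action;
}
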
4913 /**
4914  * Validate the age action.
4915  *
4916  * @param[in] action_flags
4917  *   Holds the actions detected until now.
4918  * @param[in] action
4919  *   Pointer to the age action.
4920  * @param[in] dev
4921  *   Pointer to the Ethernet device structure.
4922  * @param[out] error
4923  *   Pointer to error structure.
4924  *
4925  * @return
4926  *   0 on success, a negative errno value otherwise and rte_errno is set.
4927  */
4928 static int
4929 flow_dv_validate_action_age(uint64_t action_flags,
4930 			    const struct rte_flow_action *action,
4931 			    struct rte_eth_dev *dev,
4932 			    struct rte_flow_error *error)
4933 {
4934 	struct mlx5_priv *priv = dev->data->dev_private;
4935 	const struct rte_flow_action_age *age = action->conf;
4936 
4937 	if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4938 	    !priv->sh->aso_age_mng))
4939 		return rte_flow_error_set(error, ENOTSUP,
4940 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4941 					  NULL,
4942 					  "age action not supported");
4943 	if (!(action->conf))
4944 		return rte_flow_error_set(error, EINVAL,
4945 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4946 					  "configuration cannot be null");
4947 	if (!(age->timeout))
4948 		return rte_flow_error_set(error, EINVAL,
4949 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4950 					  "invalid timeout value 0");
4951 	if (action_flags & MLX5_FLOW_ACTION_AGE)
4952 		return rte_flow_error_set(error, EINVAL,
4953 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4954 					  "duplicate age actions set");
4955 	return 0;
4956 }
4957 
4958 /**
4959  * Validate the modify-header IPv4 DSCP actions.
4960  *
4961  * @param[in] action_flags
4962  *   Holds the actions detected until now.
4963  * @param[in] action
4964  *   Pointer to the modify action.
4965  * @param[in] item_flags
4966  *   Holds the items detected.
4967  * @param[out] error
4968  *   Pointer to error structure.
4969  *
4970  * @return
4971  *   0 on success, a negative errno value otherwise and rte_errno is set.
4972  */
4973 static int
4974 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4975 					 const struct rte_flow_action *action,
4976 					 const uint64_t item_flags,
4977 					 struct rte_flow_error *error)
4978 {
4979 	int ret = 0;
4980 
4981 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4982 	if (!ret) {
4983 		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4984 			return rte_flow_error_set(error, EINVAL,
4985 						  RTE_FLOW_ERROR_TYPE_ACTION,
4986 						  NULL,
4987 						  "no ipv4 item in pattern");
4988 	}
4989 	return ret;
4990 }
4991 
4992 /**
4993  * Validate the modify-header IPv6 DSCP actions.
4994  *
4995  * @param[in] action_flags
4996  *   Holds the actions detected until now.
4997  * @param[in] action
4998  *   Pointer to the modify action.
4999  * @param[in] item_flags
5000  *   Holds the items detected.
5001  * @param[out] error
5002  *   Pointer to error structure.
5003  *
5004  * @return
5005  *   0 on success, a negative errno value otherwise and rte_errno is set.
5006  */
5007 static int
5008 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5009 					 const struct rte_flow_action *action,
5010 					 const uint64_t item_flags,
5011 					 struct rte_flow_error *error)
5012 {
5013 	int ret = 0;
5014 
5015 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5016 	if (!ret) {
5017 		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5018 			return rte_flow_error_set(error, EINVAL,
5019 						  RTE_FLOW_ERROR_TYPE_ACTION,
5020 						  NULL,
5021 						  "no ipv6 item in pattern");
5022 	}
5023 	return ret;
5024 }
5025 
5026 /**
5027  * Match modify-header resource.
5028  *
5029  * @param list
5030  *   Pointer to the hash list.
5031  * @param entry
5032  *   Pointer to the existing resource entry object.
5033  * @param key
5034  *   Key of the new entry.
5035  * @param cb_ctx
5036  *   Pointer to the context holding the new modify-header resource.
5037  *
5038  * @return
5039  *   0 on matching, non-zero otherwise.
5040  */
5041 int
5042 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5043 			struct mlx5_hlist_entry *entry,
5044 			uint64_t key __rte_unused, void *cb_ctx)
5045 {
5046 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5047 	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5048 	struct mlx5_flow_dv_modify_hdr_resource *resource =
5049 			container_of(entry, typeof(*resource), entry);
5050 	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5051 
5052 	key_len += ref->actions_num * sizeof(ref->actions[0]);
5053 	return ref->actions_num != resource->actions_num ||
5054 	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
5055 }
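
/*
 * Illustrative note (not part of the driver): the comparison key is the
 * tail of struct mlx5_flow_dv_modify_hdr_resource starting at the ft_type
 * member, extended by the variable-length actions array. For a resource
 * with, e.g., two actions:
 *
 *	key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type)
 *		  + 2 * sizeof(ref->actions[0]);
 *
 * Two resources match only when both actions_num and this entire key
 * region are byte-wise identical.
 */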
5056 
5057 struct mlx5_hlist_entry *
5058 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5059 			 void *cb_ctx)
5060 {
5061 	struct mlx5_dev_ctx_shared *sh = list->ctx;
5062 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5063 	struct mlx5dv_dr_domain *ns;
5064 	struct mlx5_flow_dv_modify_hdr_resource *entry;
5065 	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5066 	int ret;
5067 	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5068 	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5069 
5070 	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5071 			    SOCKET_ID_ANY);
5072 	if (!entry) {
5073 		rte_flow_error_set(ctx->error, ENOMEM,
5074 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5075 				   "cannot allocate resource memory");
5076 		return NULL;
5077 	}
5078 	rte_memcpy(&entry->ft_type,
5079 		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5080 		   key_len + data_len);
5081 	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5082 		ns = sh->fdb_domain;
5083 	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5084 		ns = sh->tx_domain;
5085 	else
5086 		ns = sh->rx_domain;
5087 	ret = mlx5_flow_os_create_flow_action_modify_header
5088 					(sh->ctx, ns, entry,
5089 					 data_len, &entry->action);
5090 	if (ret) {
5091 		mlx5_free(entry);
5092 		rte_flow_error_set(ctx->error, ENOMEM,
5093 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5094 				   NULL, "cannot create modification action");
5095 		return NULL;
5096 	}
5097 	return &entry->entry;
5098 }
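
/*
 * Illustrative sketch (not part of the driver): both callbacks above plug
 * into the shared modify-header hash list, so registration reduces to a
 * single lookup-or-create call keyed by a checksum of the resource tail,
 * as flow_dv_modify_hdr_resource_register() does below (resource, key_len,
 * sh and error are the caller's locals):
 *
 *	struct mlx5_flow_cb_ctx ctx = { .error = error, .data = resource };
 *	uint64_t key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 *	struct mlx5_hlist_entry *e =
 *		mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
 */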
5099 
5100 /**
5101  * Validate the sample action.
5102  *
5103  * @param[in, out] action_flags
5104  *   Holds the actions detected until now.
5105  * @param[in] action
5106  *   Pointer to the sample action.
5107  * @param[in] dev
5108  *   Pointer to the Ethernet device structure.
5109  * @param[in] attr
5110  *   Attributes of flow that includes this action.
5111  * @param[in] item_flags
5112  *   Holds the items detected.
5113  * @param[in] rss
5114  *   Pointer to the RSS action.
5115  * @param[out] sample_rss
5116  *   Pointer to the RSS action in sample action list.
5117  * @param[out] count
5118  *   Pointer to the COUNT action in sample action list.
5119  * @param[out] fdb_mirror_limit
5120  *   Pointer to the FDB mirror limitation flag.
5121  * @param[out] error
5122  *   Pointer to error structure.
5123  *
5124  * @return
5125  *   0 on success, a negative errno value otherwise and rte_errno is set.
5126  */
5127 static int
5128 flow_dv_validate_action_sample(uint64_t *action_flags,
5129 			       const struct rte_flow_action *action,
5130 			       struct rte_eth_dev *dev,
5131 			       const struct rte_flow_attr *attr,
5132 			       uint64_t item_flags,
5133 			       const struct rte_flow_action_rss *rss,
5134 			       const struct rte_flow_action_rss **sample_rss,
5135 			       const struct rte_flow_action_count **count,
5136 			       int *fdb_mirror_limit,
5137 			       struct rte_flow_error *error)
5138 {
5139 	struct mlx5_priv *priv = dev->data->dev_private;
5140 	struct mlx5_dev_config *dev_conf = &priv->config;
5141 	const struct rte_flow_action_sample *sample = action->conf;
5142 	const struct rte_flow_action *act;
5143 	uint64_t sub_action_flags = 0;
5144 	uint16_t queue_index = 0xFFFF;
5145 	int actions_n = 0;
5146 	int ret;
5147 
5148 	if (!sample)
5149 		return rte_flow_error_set(error, EINVAL,
5150 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5151 					  "configuration cannot be NULL");
5152 	if (sample->ratio == 0)
5153 		return rte_flow_error_set(error, EINVAL,
5154 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5155 					  "ratio value must be at least 1");
5156 	if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5157 		return rte_flow_error_set(error, ENOTSUP,
5158 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5159 					  NULL,
5160 					  "sample action not supported");
5161 	if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5162 		return rte_flow_error_set(error, EINVAL,
5163 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5164 					  "Multiple sample actions not "
5165 					  "supported");
5166 	if (*action_flags & MLX5_FLOW_ACTION_METER)
5167 		return rte_flow_error_set(error, EINVAL,
5168 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5169 					  "wrong action order, meter should "
5170 					  "be after sample action");
5171 	if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5172 		return rte_flow_error_set(error, EINVAL,
5173 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5174 					  "wrong action order, jump should "
5175 					  "be after sample action");
5176 	act = sample->actions;
5177 	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5178 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5179 			return rte_flow_error_set(error, ENOTSUP,
5180 						  RTE_FLOW_ERROR_TYPE_ACTION,
5181 						  act, "too many actions");
5182 		switch (act->type) {
5183 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5184 			ret = mlx5_flow_validate_action_queue(act,
5185 							      sub_action_flags,
5186 							      dev,
5187 							      attr, error);
5188 			if (ret < 0)
5189 				return ret;
5190 			queue_index = ((const struct rte_flow_action_queue *)
5191 							(act->conf))->index;
5192 			sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5193 			++actions_n;
5194 			break;
5195 		case RTE_FLOW_ACTION_TYPE_RSS:
5196 			*sample_rss = act->conf;
5197 			ret = mlx5_flow_validate_action_rss(act,
5198 							    sub_action_flags,
5199 							    dev, attr,
5200 							    item_flags,
5201 							    error);
5202 			if (ret < 0)
5203 				return ret;
5204 			if (rss && *sample_rss &&
5205 			    ((*sample_rss)->level != rss->level ||
5206 			    (*sample_rss)->types != rss->types))
5207 				return rte_flow_error_set(error, ENOTSUP,
5208 					RTE_FLOW_ERROR_TYPE_ACTION,
5209 					NULL,
5210 					"Can't use different RSS types "
5211 					"or levels in the same flow");
5212 			if (*sample_rss != NULL && (*sample_rss)->queue_num)
5213 				queue_index = (*sample_rss)->queue[0];
5214 			sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5215 			++actions_n;
5216 			break;
5217 		case RTE_FLOW_ACTION_TYPE_MARK:
5218 			ret = flow_dv_validate_action_mark(dev, act,
5219 							   sub_action_flags,
5220 							   attr, error);
5221 			if (ret < 0)
5222 				return ret;
5223 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5224 				sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5225 						MLX5_FLOW_ACTION_MARK_EXT;
5226 			else
5227 				sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5228 			++actions_n;
5229 			break;
5230 		case RTE_FLOW_ACTION_TYPE_COUNT:
5231 			ret = flow_dv_validate_action_count
5232 				(dev, act,
5233 				 *action_flags | sub_action_flags,
5234 				 error);
5235 			if (ret < 0)
5236 				return ret;
5237 			*count = act->conf;
5238 			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5239 			*action_flags |= MLX5_FLOW_ACTION_COUNT;
5240 			++actions_n;
5241 			break;
5242 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
5243 			ret = flow_dv_validate_action_port_id(dev,
5244 							      sub_action_flags,
5245 							      act,
5246 							      attr,
5247 							      error);
5248 			if (ret)
5249 				return ret;
5250 			sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5251 			++actions_n;
5252 			break;
5253 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5254 			ret = flow_dv_validate_action_raw_encap_decap
5255 				(dev, NULL, act->conf, attr, &sub_action_flags,
5256 				 &actions_n, action, item_flags, error);
5257 			if (ret < 0)
5258 				return ret;
5259 			++actions_n;
5260 			break;
5261 		default:
5262 			return rte_flow_error_set(error, ENOTSUP,
5263 						  RTE_FLOW_ERROR_TYPE_ACTION,
5264 						  NULL,
5265 						  "optional action not "
5266 						  "supported");
5267 		}
5268 	}
5269 	if (attr->ingress && !attr->transfer) {
5270 		if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5271 					  MLX5_FLOW_ACTION_RSS)))
5272 			return rte_flow_error_set(error, EINVAL,
5273 						  RTE_FLOW_ERROR_TYPE_ACTION,
5274 						  NULL,
5275 						  "Ingress must have a dest "
5276 						  "QUEUE for Sample");
5277 	} else if (attr->egress && !attr->transfer) {
5278 		return rte_flow_error_set(error, ENOTSUP,
5279 					  RTE_FLOW_ERROR_TYPE_ACTION,
5280 					  NULL,
5281 					  "Sample only supports ingress "
5282 					  "or E-Switch");
5283 	} else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5284 		MLX5_ASSERT(attr->transfer);
5285 		if (sample->ratio > 1)
5286 			return rte_flow_error_set(error, ENOTSUP,
5287 						  RTE_FLOW_ERROR_TYPE_ACTION,
5288 						  NULL,
5289 						  "E-Switch doesn't support "
5290 						  "any optional action "
5291 						  "for sampling");
5292 		if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5293 			return rte_flow_error_set(error, ENOTSUP,
5294 						  RTE_FLOW_ERROR_TYPE_ACTION,
5295 						  NULL,
5296 						  "unsupported action QUEUE");
5297 		if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5298 			return rte_flow_error_set(error, ENOTSUP,
5299 						  RTE_FLOW_ERROR_TYPE_ACTION,
5300 						  NULL,
5301 						  "unsupported action RSS");
5302 		if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5303 			return rte_flow_error_set(error, EINVAL,
5304 						  RTE_FLOW_ERROR_TYPE_ACTION,
5305 						  NULL,
5306 						  "E-Switch must have a dest "
5307 						  "port for mirroring");
5308 		if (!priv->config.hca_attr.reg_c_preserve &&
5309 		     priv->representor_id != -1)
5310 			*fdb_mirror_limit = 1;
5311 	}
5312 	/* Continue validation for Xcap actions. */
5313 	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5314 	    (queue_index == 0xFFFF ||
5315 	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5316 		if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5317 		     MLX5_FLOW_XCAP_ACTIONS)
5318 			return rte_flow_error_set(error, ENOTSUP,
5319 						  RTE_FLOW_ERROR_TYPE_ACTION,
5320 						  NULL, "encap and decap "
5321 						  "combination is not "
5322 						  "supported");
5323 		if (!attr->transfer && attr->ingress && (sub_action_flags &
5324 							MLX5_FLOW_ACTION_ENCAP))
5325 			return rte_flow_error_set(error, ENOTSUP,
5326 						  RTE_FLOW_ERROR_TYPE_ACTION,
5327 						  NULL, "encap is not supported"
5328 						  " for ingress traffic");
5329 	}
5330 	return 0;
5331 }
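
/*
 * Editor's summary (not part of the driver) of the sample-action rules
 * enforced above: NIC ingress requires a QUEUE or RSS fate inside the
 * sample list, NIC egress is rejected outright, and E-Switch (transfer)
 * mirroring requires a PORT_ID fate, ratio 1 when sub-actions are present,
 * and no QUEUE/RSS sub-actions.
 */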
5332 
5333 /**
5334  * Find existing modify-header resource or create and register a new one.
5335  *
5336  * @param[in, out] dev
5337  *   Pointer to rte_eth_dev structure.
5338  * @param[in, out] resource
5339  *   Pointer to modify-header resource.
5340  * @param[in, out] dev_flow
5341  *   Pointer to the dev_flow.
5342  * @param[out] error
5343  *   Pointer to error structure.
5344  *
5345  * @return
5346  *   0 on success, otherwise a negative errno value and rte_errno is set.
5347  */
5348 static int
5349 flow_dv_modify_hdr_resource_register
5350 			(struct rte_eth_dev *dev,
5351 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
5352 			 struct mlx5_flow *dev_flow,
5353 			 struct rte_flow_error *error)
5354 {
5355 	struct mlx5_priv *priv = dev->data->dev_private;
5356 	struct mlx5_dev_ctx_shared *sh = priv->sh;
5357 	uint32_t key_len = sizeof(*resource) -
5358 			   offsetof(typeof(*resource), ft_type) +
5359 			   resource->actions_num * sizeof(resource->actions[0]);
5360 	struct mlx5_hlist_entry *entry;
5361 	struct mlx5_flow_cb_ctx ctx = {
5362 		.error = error,
5363 		.data = resource,
5364 	};
5365 	uint64_t key64;
5366 
5367 	resource->flags = dev_flow->dv.group ? 0 :
5368 			  MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5369 	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5370 				    resource->flags))
5371 		return rte_flow_error_set(error, EOVERFLOW,
5372 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5373 					  "too many modify header items");
5374 	key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5375 	entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5376 	if (!entry)
5377 		return -rte_errno;
5378 	resource = container_of(entry, typeof(*resource), entry);
5379 	dev_flow->handle->dvh.modify_hdr = resource;
5380 	return 0;
5381 }
5382 
5383 /**
5384  * Get DV flow counter by index.
5385  *
5386  * @param[in] dev
5387  *   Pointer to the Ethernet device structure.
5388  * @param[in] idx
5389  *   mlx5 flow counter index in the container.
5390  * @param[out] ppool
5391  *   mlx5 flow counter pool in the container,
5392  *   mlx5 flow counter pool in the container.
5393  * @return
5394  *   Pointer to the counter, NULL otherwise.
5395  */
5396 static struct mlx5_flow_counter *
5397 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5398 			   uint32_t idx,
5399 			   struct mlx5_flow_counter_pool **ppool)
5400 {
5401 	struct mlx5_priv *priv = dev->data->dev_private;
5402 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5403 	struct mlx5_flow_counter_pool *pool;
5404 
5405 	/* Decrease to original index and clear shared bit. */
5406 	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5407 	MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5408 	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5409 	MLX5_ASSERT(pool);
5410 	if (ppool)
5411 		*ppool = pool;
5412 	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5413 }
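
/*
 * Illustrative note (assumed pool geometry): counter indices are 1-based
 * and may carry the shared bit. Assuming MLX5_COUNTERS_PER_POOL is 512, a
 * raw index decodes as:
 *
 *	uint32_t raw = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
 *	uint32_t pool_slot = raw / 512;    pool position in cmng->pools
 *	uint32_t cnt_slot = raw % 512;     counter position inside the pool
 */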
5414 
5415 /**
5416  * Check the devx counter belongs to the pool.
5417  *
5418  * @param[in] pool
5419  *   Pointer to the counter pool.
5420  * @param[in] id
5421  *   The counter devx ID.
5422  *
5423  * @return
5424  *   True if counter belongs to the pool, false otherwise.
5425  */
5426 static bool
5427 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5428 {
5429 	int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5430 		   MLX5_COUNTERS_PER_POOL;
5431 
5432 	if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5433 		return true;
5434 	return false;
5435 }
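
/*
 * Illustrative example (assumed values): with MLX5_COUNTERS_PER_POOL of
 * 512 and pool->min_dcs->id == 1030, base = (1030 / 512) * 512 = 1024, so
 * the pool covers devx counter IDs [1024, 1536) and, e.g., id 1100 belongs
 * to it while id 1600 does not.
 */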
5436 
5437 /**
5438  * Get a pool by devx counter ID.
5439  *
5440  * @param[in] cmng
5441  *   Pointer to the counter management.
5442  * @param[in] id
5443  *   The counter devx ID.
5444  *
5445  * @return
5446  *   The counter pool pointer if it exists, NULL otherwise.
5447  */
5448 static struct mlx5_flow_counter_pool *
5449 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5450 {
5451 	uint32_t i;
5452 	struct mlx5_flow_counter_pool *pool = NULL;
5453 
5454 	rte_spinlock_lock(&cmng->pool_update_sl);
5455 	/* Check last used pool. */
5456 	if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5457 	    flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5458 		pool = cmng->pools[cmng->last_pool_idx];
5459 		goto out;
5460 	}
5461 	/* ID out of range means no suitable pool in the container. */
5462 	if (id > cmng->max_id || id < cmng->min_id)
5463 		goto out;
5464 	/*
5465 	 * Search the pools from the end of the container, since counter IDs
5466 	 * are mostly sequentially increasing, so the last pool is likely the
5467 	 * needed one.
5468 	 */
5469 	i = cmng->n_valid;
5470 	while (i--) {
5471 		struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5472 
5473 		if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5474 			pool = pool_tmp;
5475 			break;
5476 		}
5477 	}
5478 out:
5479 	rte_spinlock_unlock(&cmng->pool_update_sl);
5480 	return pool;
5481 }
5482 
5483 /**
5484  * Resize a counter container.
5485  *
5486  * @param[in] dev
5487  *   Pointer to the Ethernet device structure.
5488  *
5489  * @return
5490  *   0 on success, otherwise negative errno value and rte_errno is set.
5491  */
5492 static int
5493 flow_dv_container_resize(struct rte_eth_dev *dev)
5494 {
5495 	struct mlx5_priv *priv = dev->data->dev_private;
5496 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5497 	void *old_pools = cmng->pools;
5498 	uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5499 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5500 	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5501 
5502 	if (!pools) {
5503 		rte_errno = ENOMEM;
5504 		return -ENOMEM;
5505 	}
5506 	if (old_pools)
5507 		memcpy(pools, old_pools, cmng->n *
5508 				       sizeof(struct mlx5_flow_counter_pool *));
5509 	cmng->n = resize;
5510 	cmng->pools = pools;
5511 	if (old_pools)
5512 		mlx5_free(old_pools);
5513 	return 0;
5514 }
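
/*
 * Illustrative sketch (not part of the driver): the container is a flat
 * array of pool pointers that only grows, by MLX5_CNT_CONTAINER_RESIZE
 * slots at a time. The caller is expected to hold cmng->pool_update_sl,
 * as flow_dv_pool_create() does below:
 *
 *	rte_spinlock_lock(&cmng->pool_update_sl);
 *	if (pool->index == cmng->n && flow_dv_container_resize(dev))
 *		goto error;
 *	cmng->pools[pool->index] = pool;
 *	rte_spinlock_unlock(&cmng->pool_update_sl);
 */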
5515 
5516 /**
5517  * Query a devx flow counter.
5518  *
5519  * @param[in] dev
5520  *   Pointer to the Ethernet device structure.
5521  * @param[in] cnt
5522  *   Index to the flow counter.
5523  * @param[out] pkts
5524  *   The statistics value of packets.
5525  * @param[out] bytes
5526  *   The statistics value of bytes.
5527  *
5528  * @return
5529  *   0 on success, otherwise a negative errno value and rte_errno is set.
5530  */
5531 static inline int
5532 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5533 		     uint64_t *bytes)
5534 {
5535 	struct mlx5_priv *priv = dev->data->dev_private;
5536 	struct mlx5_flow_counter_pool *pool = NULL;
5537 	struct mlx5_flow_counter *cnt;
5538 	int offset;
5539 
5540 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5541 	MLX5_ASSERT(pool);
5542 	if (priv->sh->cmng.counter_fallback)
5543 		return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5544 					0, pkts, bytes, 0, NULL, NULL, 0);
5545 	rte_spinlock_lock(&pool->sl);
5546 	if (!pool->raw) {
5547 		*pkts = 0;
5548 		*bytes = 0;
5549 	} else {
5550 		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5551 		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5552 		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5553 	}
5554 	rte_spinlock_unlock(&pool->sl);
5555 	return 0;
5556 }
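
/*
 * Illustrative note (not part of the driver): in non-fallback mode the
 * statistics come from the pool-wide raw buffer filled by the asynchronous
 * batch query, so a read is just an array access under the pool spinlock:
 *
 *	offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
 *	*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
 *
 * In fallback mode each counter owns its devx object and is queried
 * synchronously via mlx5_devx_cmd_flow_counter_query().
 */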
5557 
5558 /**
5559  * Create and initialize a new counter pool.
5560  *
5561  * @param[in] dev
5562  *   Pointer to the Ethernet device structure.
5563  * @param[out] dcs
5564  *   The devX counter handle.
5565  * @param[in] age
5566  *   Whether the pool is for counters that were allocated for aging.
5567  *
5568  * @return
5569  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5572  */
5573 static struct mlx5_flow_counter_pool *
5574 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5575 		    uint32_t age)
5576 {
5577 	struct mlx5_priv *priv = dev->data->dev_private;
5578 	struct mlx5_flow_counter_pool *pool;
5579 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5580 	bool fallback = priv->sh->cmng.counter_fallback;
5581 	uint32_t size = sizeof(*pool);
5582 
5583 	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5584 	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5585 	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5586 	if (!pool) {
5587 		rte_errno = ENOMEM;
5588 		return NULL;
5589 	}
5590 	pool->raw = NULL;
5591 	pool->is_aged = !!age;
5592 	pool->query_gen = 0;
5593 	pool->min_dcs = dcs;
5594 	rte_spinlock_init(&pool->sl);
5595 	rte_spinlock_init(&pool->csl);
5596 	TAILQ_INIT(&pool->counters[0]);
5597 	TAILQ_INIT(&pool->counters[1]);
5598 	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5599 	rte_spinlock_lock(&cmng->pool_update_sl);
5600 	pool->index = cmng->n_valid;
5601 	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5602 		mlx5_free(pool);
5603 		rte_spinlock_unlock(&cmng->pool_update_sl);
5604 		return NULL;
5605 	}
5606 	cmng->pools[pool->index] = pool;
5607 	cmng->n_valid++;
5608 	if (unlikely(fallback)) {
5609 		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5610 
5611 		if (base < cmng->min_id)
5612 			cmng->min_id = base;
5613 		if (base > cmng->max_id)
5614 			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5615 		cmng->last_pool_idx = pool->index;
5616 	}
5617 	rte_spinlock_unlock(&cmng->pool_update_sl);
5618 	return pool;
5619 }
5620 
5621 /**
5622  * Prepare a new counter and/or a new counter pool.
5623  *
5624  * @param[in] dev
5625  *   Pointer to the Ethernet device structure.
5626  * @param[out] cnt_free
5627  *   Where to put the pointer of a new counter.
5628  * @param[in] age
5629  *   Whether the pool is for counters that were allocated for aging.
5630  *
5631  * @return
5632  *   The counter pool pointer and @p cnt_free is set on success,
5633  *   NULL otherwise and rte_errno is set.
5634  */
5635 static struct mlx5_flow_counter_pool *
5636 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5637 			     struct mlx5_flow_counter **cnt_free,
5638 			     uint32_t age)
5639 {
5640 	struct mlx5_priv *priv = dev->data->dev_private;
5641 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5642 	struct mlx5_flow_counter_pool *pool;
5643 	struct mlx5_counters tmp_tq;
5644 	struct mlx5_devx_obj *dcs = NULL;
5645 	struct mlx5_flow_counter *cnt;
5646 	enum mlx5_counter_type cnt_type =
5647 			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5648 	bool fallback = priv->sh->cmng.counter_fallback;
5649 	uint32_t i;
5650 
5651 	if (fallback) {
5652 		/* bulk_bitmap must be 0 for single counter allocation. */
5653 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5654 		if (!dcs)
5655 			return NULL;
5656 		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5657 		if (!pool) {
5658 			pool = flow_dv_pool_create(dev, dcs, age);
5659 			if (!pool) {
5660 				mlx5_devx_cmd_destroy(dcs);
5661 				return NULL;
5662 			}
5663 		}
5664 		i = dcs->id % MLX5_COUNTERS_PER_POOL;
5665 		cnt = MLX5_POOL_GET_CNT(pool, i);
5666 		cnt->pool = pool;
5667 		cnt->dcs_when_free = dcs;
5668 		*cnt_free = cnt;
5669 		return pool;
5670 	}
5671 	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5672 	if (!dcs) {
5673 		rte_errno = ENODATA;
5674 		return NULL;
5675 	}
5676 	pool = flow_dv_pool_create(dev, dcs, age);
5677 	if (!pool) {
5678 		mlx5_devx_cmd_destroy(dcs);
5679 		return NULL;
5680 	}
5681 	TAILQ_INIT(&tmp_tq);
5682 	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5683 		cnt = MLX5_POOL_GET_CNT(pool, i);
5684 		cnt->pool = pool;
5685 		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5686 	}
5687 	rte_spinlock_lock(&cmng->csl[cnt_type]);
5688 	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5689 	rte_spinlock_unlock(&cmng->csl[cnt_type]);
5690 	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5691 	(*cnt_free)->pool = pool;
5692 	return pool;
5693 }
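
/*
 * Editor's note (assumed bulk semantics): the bulk_bitmap value 0x4 passed
 * to mlx5_devx_cmd_flow_counter_alloc() requests a bulk of 4 * 128 = 512
 * devx counters, matching MLX5_COUNTERS_PER_POOL. Counter 0 of the new
 * pool is handed back through cnt_free; counters 1..511 are linked into
 * the global free list of the matching counter type.
 */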
5694 
5695 /**
5696  * Allocate a flow counter.
5697  *
5698  * @param[in] dev
5699  *   Pointer to the Ethernet device structure.
5700  * @param[in] age
5701  *   Whether the counter was allocated for aging.
5702  *
5703  * @return
5704  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5705  */
5706 static uint32_t
5707 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5708 {
5709 	struct mlx5_priv *priv = dev->data->dev_private;
5710 	struct mlx5_flow_counter_pool *pool = NULL;
5711 	struct mlx5_flow_counter *cnt_free = NULL;
5712 	bool fallback = priv->sh->cmng.counter_fallback;
5713 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5714 	enum mlx5_counter_type cnt_type =
5715 			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5716 	uint32_t cnt_idx;
5717 
5718 	if (!priv->config.devx) {
5719 		rte_errno = ENOTSUP;
5720 		return 0;
5721 	}
5722 	/* Get a free counter from the container. */
5723 	rte_spinlock_lock(&cmng->csl[cnt_type]);
5724 	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5725 	if (cnt_free)
5726 		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5727 	rte_spinlock_unlock(&cmng->csl[cnt_type]);
5728 	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5729 		goto err;
5730 	pool = cnt_free->pool;
5731 	if (fallback)
5732 		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5733 	/* Create a DV counter action only on first use. */
5734 	if (!cnt_free->action) {
5735 		uint16_t offset;
5736 		struct mlx5_devx_obj *dcs;
5737 		int ret;
5738 
5739 		if (!fallback) {
5740 			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5741 			dcs = pool->min_dcs;
5742 		} else {
5743 			offset = 0;
5744 			dcs = cnt_free->dcs_when_free;
5745 		}
5746 		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5747 							    &cnt_free->action);
5748 		if (ret) {
5749 			rte_errno = errno;
5750 			goto err;
5751 		}
5752 	}
5753 	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5754 				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5755 	/* Update the counter reset values. */
5756 	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5757 				 &cnt_free->bytes))
5758 		goto err;
5759 	if (!fallback && !priv->sh->cmng.query_thread_on)
5760 		/* Start the asynchronous batch query by the host thread. */
5761 		mlx5_set_query_alarm(priv->sh);
5762 	return cnt_idx;
5763 err:
5764 	if (cnt_free) {
5765 		cnt_free->pool = pool;
5766 		if (fallback)
5767 			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5768 		rte_spinlock_lock(&cmng->csl[cnt_type]);
5769 		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5770 		rte_spinlock_unlock(&cmng->csl[cnt_type]);
5771 	}
5772 	return 0;
5773 }
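
/*
 * Illustrative sketch (not part of the driver): the returned index packs
 * the pool slot and the in-pool offset, e.g. for pool->index == 3 and
 * in-pool offset 10:
 *
 *	cnt_idx = MLX5_MAKE_CNT_IDX(3, 10);
 *
 * The hits/bytes snapshot taken here at allocation time serves as the
 * reset baseline, so later queries can report deltas against these saved
 * values.
 */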
5774 
5775 /**
5776  * Allocate a shared flow counter.
5777  *
5778  * @param[in] ctx
5779  *   Pointer to the shared counter configuration.
5780  * @param[in] data
5781  *   Pointer to save the allocated counter index.
5782  *
5783  * @return
5784  *   0 on success; the allocated counter index is returned in @p data.
5785  */
5787 static int32_t
5788 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5789 {
5790 	struct mlx5_shared_counter_conf *conf = ctx;
5791 	struct rte_eth_dev *dev = conf->dev;
5792 	struct mlx5_flow_counter *cnt;
5793 
5794 	data->dword = flow_dv_counter_alloc(dev, 0);
5795 	data->dword |= MLX5_CNT_SHARED_OFFSET;
5796 	cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5797 	cnt->shared_info.id = conf->id;
5798 	return 0;
5799 }
5800 
5801 /**
5802  * Get a shared flow counter.
5803  *
5804  * @param[in] dev
5805  *   Pointer to the Ethernet device structure.
5806  * @param[in] id
5807  *   Counter identifier.
5808  *
5809  * @return
5810  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5811  */
5812 static uint32_t
5813 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5814 {
5815 	struct mlx5_priv *priv = dev->data->dev_private;
5816 	struct mlx5_shared_counter_conf conf = {
5817 		.dev = dev,
5818 		.id = id,
5819 	};
5820 	union mlx5_l3t_data data = {
5821 		.dword = 0,
5822 	};
5823 
5824 	mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5825 			       flow_dv_counter_alloc_shared_cb, &conf);
5826 	return data.dword;
5827 }
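
/*
 * Illustrative usage (not part of the driver): the three-level table keyed
 * by the shared counter id deduplicates allocations. The first caller for
 * a given id triggers flow_dv_counter_alloc_shared_cb(); later callers get
 * the cached index:
 *
 *	uint32_t idx1 = flow_dv_counter_get_shared(dev, 42);
 *	uint32_t idx2 = flow_dv_counter_get_shared(dev, 42);
 *
 * Both calls return the same index with MLX5_CNT_SHARED_OFFSET set.
 */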
5828 
5829 /**
5830  * Get age param from counter index.
5831  *
5832  * @param[in] dev
5833  *   Pointer to the Ethernet device structure.
5834  * @param[in] counter
5835  *   Index to the counter handler.
5836  *
5837  * @return
5838  *   The aging parameter specified for the counter index.
5839  */
5840 static struct mlx5_age_param*
5841 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5842 				uint32_t counter)
5843 {
5844 	struct mlx5_flow_counter *cnt;
5845 	struct mlx5_flow_counter_pool *pool = NULL;
5846 
5847 	flow_dv_counter_get_by_idx(dev, counter, &pool);
5848 	counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5849 	cnt = MLX5_POOL_GET_CNT(pool, counter);
5850 	return MLX5_CNT_TO_AGE(cnt);
5851 }
5852 
5853 /**
5854  * Remove a flow counter from aged counter list.
5855  *
5856  * @param[in] dev
5857  *   Pointer to the Ethernet device structure.
5858  * @param[in] counter
5859  *   Index to the counter handler.
5860  * @param[in] cnt
5861  *   Pointer to the counter handler.
5862  */
5863 static void
5864 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5865 				uint32_t counter, struct mlx5_flow_counter *cnt)
5866 {
5867 	struct mlx5_age_info *age_info;
5868 	struct mlx5_age_param *age_param;
5869 	struct mlx5_priv *priv = dev->data->dev_private;
5870 	uint16_t expected = AGE_CANDIDATE;
5871 
5872 	age_info = GET_PORT_AGE_INFO(priv);
5873 	age_param = flow_dv_counter_idx_get_age(dev, counter);
5874 	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5875 					 AGE_FREE, false, __ATOMIC_RELAXED,
5876 					 __ATOMIC_RELAXED)) {
5877 		/*
5878 		 * We need the lock even if it is an age timeout, since
5879 		 * the counter may still be in process.
5880 		 */
5881 		rte_spinlock_lock(&age_info->aged_sl);
5882 		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5883 		rte_spinlock_unlock(&age_info->aged_sl);
5884 		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5885 	}
5886 }
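
/*
 * Editor's note (not part of the driver): the compare-exchange above races
 * against the aging thread. If the state is still AGE_CANDIDATE, the CAS
 * moves it straight to AGE_FREE and no list manipulation is needed;
 * otherwise the counter has already been moved to the aged list and must
 * be unlinked from aged_counters under the spinlock before being freed.
 */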
5887 
5888 /**
5889  * Release a flow counter.
5890  *
5891  * @param[in] dev
5892  *   Pointer to the Ethernet device structure.
5893  * @param[in] counter
5894  *   Index to the counter handler.
5895  */
5896 static void
5897 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5898 {
5899 	struct mlx5_priv *priv = dev->data->dev_private;
5900 	struct mlx5_flow_counter_pool *pool = NULL;
5901 	struct mlx5_flow_counter *cnt;
5902 	enum mlx5_counter_type cnt_type;
5903 
5904 	if (!counter)
5905 		return;
5906 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5907 	MLX5_ASSERT(pool);
5908 	if (IS_SHARED_CNT(counter) &&
5909 	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5910 		return;
5911 	if (pool->is_aged)
5912 		flow_dv_counter_remove_from_age(dev, counter, cnt);
5913 	cnt->pool = pool;
5914 	/*
5915 	 * Put the counter back to the list to be updated in non-fallback
5916 	 * mode. Currently, two lists are used alternately: while one is
5917 	 * being queried, the freed counter is added to the other list,
5918 	 * selected by the pool query_gen value. After the query finishes,
5919 	 * the counters on that list are moved to the global container
5920 	 * counter list. The lists switch when a query starts, so no lock is
5921 	 * needed here: the query callback and this release function operate
5922 	 * on different lists.
5923 	 */
5924 	if (!priv->sh->cmng.counter_fallback) {
5925 		rte_spinlock_lock(&pool->csl);
5926 		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5927 		rte_spinlock_unlock(&pool->csl);
5928 	} else {
5929 		cnt->dcs_when_free = cnt->dcs_when_active;
5930 		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5931 					   MLX5_COUNTER_TYPE_ORIGIN;
5932 		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5933 		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5934 				  cnt, next);
5935 		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5936 	}
5937 }
5938 
5939 /**
5940  * Verify the @p attributes will be correctly understood by the NIC and store
5941  * them in the @p flow if everything is correct.
5942  *
5943  * @param[in] dev
5944  *   Pointer to dev struct.
5945  * @param[in] attributes
5946  *   Pointer to flow attributes.
5947  * @param[in] grp_info
5948  *   Pointer to the flow group information, including the external flag.
5949  * @param[out] error
5950  *   Pointer to error structure.
5951  *
5952  * @return
5953  *   - 0 on success and non-root table.
5954  *   - 1 on success and root table.
5955  *   - a negative errno value otherwise and rte_errno is set.
5956  */
5957 static int
5958 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5959 			    const struct mlx5_flow_tunnel *tunnel,
5960 			    const struct rte_flow_attr *attributes,
5961 			    const struct flow_grp_info *grp_info,
5962 			    struct rte_flow_error *error)
5963 {
5964 	struct mlx5_priv *priv = dev->data->dev_private;
5965 	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
5966 	int ret = 0;
5967 
5968 #ifndef HAVE_MLX5DV_DR
5969 	RTE_SET_USED(tunnel);
5970 	RTE_SET_USED(grp_info);
5971 	if (attributes->group)
5972 		return rte_flow_error_set(error, ENOTSUP,
5973 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5974 					  NULL,
5975 					  "groups are not supported");
5976 #else
5977 	uint32_t table = 0;
5978 
5979 	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5980 				       grp_info, error);
5981 	if (ret)
5982 		return ret;
5983 	if (!table)
5984 		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5985 #endif
5986 	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
5987 	    attributes->priority > lowest_priority)
5988 		return rte_flow_error_set(error, ENOTSUP,
5989 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5990 					  NULL,
5991 					  "priority out of range");
5992 	if (attributes->transfer) {
5993 		if (!priv->config.dv_esw_en)
5994 			return rte_flow_error_set
5995 				(error, ENOTSUP,
5996 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5997 				 "E-Switch dr is not supported");
5998 		if (!(priv->representor || priv->master))
5999 			return rte_flow_error_set
6000 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6001 				 NULL, "E-Switch configuration can only be"
6002 				 " done by a master or a representor device");
6003 		if (attributes->egress)
6004 			return rte_flow_error_set
6005 				(error, ENOTSUP,
6006 				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6007 				 "egress is not supported");
6008 	}
6009 	if (!(attributes->egress ^ attributes->ingress))
6010 		return rte_flow_error_set(error, ENOTSUP,
6011 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6012 					  "must specify exactly one of "
6013 					  "ingress or egress");
6014 	return ret;
6015 }
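
/*
 * Illustrative usage (not part of the driver): the positive return value
 * doubles as the root-table indicator, so callers can use it directly, as
 * flow_dv_validate() does below:
 *
 *	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info,
 *					  error);
 *	if (ret < 0)
 *		return ret;
 *	is_root = (uint64_t)ret;
 */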
6016 
6017 /**
6018  * Internal validation function. For validating both actions and items.
6019  *
6020  * @param[in] dev
6021  *   Pointer to the rte_eth_dev structure.
6022  * @param[in] attr
6023  *   Pointer to the flow attributes.
6024  * @param[in] items
6025  *   Pointer to the list of items.
6026  * @param[in] actions
6027  *   Pointer to the list of actions.
6028  * @param[in] external
6029  *   This flow rule is created by request external to PMD.
6030  *   This flow rule is created by a request external to the PMD.
6031  *   Number of hairpin TX actions, 0 means classic flow.
6032  * @param[out] error
6033  *   Pointer to the error structure.
6034  *
6035  * @return
6036  *   0 on success, a negative errno value otherwise and rte_errno is set.
6037  */
6038 static int
6039 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6040 		 const struct rte_flow_item items[],
6041 		 const struct rte_flow_action actions[],
6042 		 bool external, int hairpin, struct rte_flow_error *error)
6043 {
6044 	int ret;
6045 	uint64_t action_flags = 0;
6046 	uint64_t item_flags = 0;
6047 	uint64_t last_item = 0;
6048 	uint8_t next_protocol = 0xff;
6049 	uint16_t ether_type = 0;
6050 	int actions_n = 0;
6051 	uint8_t item_ipv6_proto = 0;
6052 	int fdb_mirror_limit = 0;
6053 	int modify_after_mirror = 0;
6054 	const struct rte_flow_item *geneve_item = NULL;
6055 	const struct rte_flow_item *gre_item = NULL;
6056 	const struct rte_flow_item *gtp_item = NULL;
6057 	const struct rte_flow_action_raw_decap *decap;
6058 	const struct rte_flow_action_raw_encap *encap;
6059 	const struct rte_flow_action_rss *rss = NULL;
6060 	const struct rte_flow_action_rss *sample_rss = NULL;
6061 	const struct rte_flow_action_count *count = NULL;
6062 	const struct rte_flow_action_count *sample_count = NULL;
6063 	const struct rte_flow_item_tcp nic_tcp_mask = {
6064 		.hdr = {
6065 			.tcp_flags = 0xFF,
6066 			.src_port = RTE_BE16(UINT16_MAX),
6067 			.dst_port = RTE_BE16(UINT16_MAX),
6068 		}
6069 	};
6070 	const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6071 		.hdr = {
6072 			.src_addr =
6073 			"\xff\xff\xff\xff\xff\xff\xff\xff"
6074 			"\xff\xff\xff\xff\xff\xff\xff\xff",
6075 			.dst_addr =
6076 			"\xff\xff\xff\xff\xff\xff\xff\xff"
6077 			"\xff\xff\xff\xff\xff\xff\xff\xff",
6078 			.vtc_flow = RTE_BE32(0xffffffff),
6079 			.proto = 0xff,
6080 			.hop_limits = 0xff,
6081 		},
6082 		.has_frag_ext = 1,
6083 	};
6084 	const struct rte_flow_item_ecpri nic_ecpri_mask = {
6085 		.hdr = {
6086 			.common = {
6087 				.u32 =
6088 				RTE_BE32(((const struct rte_ecpri_common_hdr) {
6089 					.type = 0xFF,
6090 					}).u32),
6091 			},
6092 			.dummy[0] = 0xffffffff,
6093 		},
6094 	};
6095 	struct mlx5_priv *priv = dev->data->dev_private;
6096 	struct mlx5_dev_config *dev_conf = &priv->config;
6097 	uint16_t queue_index = 0xFFFF;
6098 	const struct rte_flow_item_vlan *vlan_m = NULL;
6099 	uint32_t rw_act_num = 0;
6100 	uint64_t is_root;
6101 	const struct mlx5_flow_tunnel *tunnel;
6102 	struct flow_grp_info grp_info = {
6103 		.external = !!external,
6104 		.transfer = !!attr->transfer,
6105 		.fdb_def_rule = !!priv->fdb_def_rule,
6106 	};
6107 	const struct rte_eth_hairpin_conf *conf;
6108 
6109 	if (items == NULL)
6110 		return -1;
6111 	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6112 		tunnel = flow_items_to_tunnel(items);
6113 		action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6114 				MLX5_FLOW_ACTION_DECAP;
6115 	} else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6116 		tunnel = flow_actions_to_tunnel(actions);
6117 		action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6118 	} else {
6119 		tunnel = NULL;
6120 	}
6121 	if (tunnel && priv->representor)
6122 		return rte_flow_error_set(error, ENOTSUP,
6123 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6124 					  "decap not supported "
6125 					  "for VF representor");
6126 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6127 				(dev, tunnel, attr, items, actions);
6128 	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6129 	if (ret < 0)
6130 		return ret;
6131 	is_root = (uint64_t)ret;
6132 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6133 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6134 		int type = items->type;
6135 
6136 		if (!mlx5_flow_os_item_supported(type))
6137 			return rte_flow_error_set(error, ENOTSUP,
6138 						  RTE_FLOW_ERROR_TYPE_ITEM,
6139 						  NULL, "item not supported");
6140 		switch (type) {
6141 		case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6142 			if (items[0].type != (typeof(items[0].type))
6143 						MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6144 				return rte_flow_error_set
6145 						(error, EINVAL,
6146 						RTE_FLOW_ERROR_TYPE_ITEM,
6147 						NULL, "MLX5 private items "
6148 						"must come first");
6149 			break;
6150 		case RTE_FLOW_ITEM_TYPE_VOID:
6151 			break;
6152 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
6153 			ret = flow_dv_validate_item_port_id
6154 					(dev, items, attr, item_flags, error);
6155 			if (ret < 0)
6156 				return ret;
6157 			last_item = MLX5_FLOW_ITEM_PORT_ID;
6158 			break;
6159 		case RTE_FLOW_ITEM_TYPE_ETH:
6160 			ret = mlx5_flow_validate_item_eth(items, item_flags,
6161 							  true, error);
6162 			if (ret < 0)
6163 				return ret;
6164 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6165 					     MLX5_FLOW_LAYER_OUTER_L2;
6166 			if (items->mask != NULL && items->spec != NULL) {
6167 				ether_type =
6168 					((const struct rte_flow_item_eth *)
6169 					 items->spec)->type;
6170 				ether_type &=
6171 					((const struct rte_flow_item_eth *)
6172 					 items->mask)->type;
6173 				ether_type = rte_be_to_cpu_16(ether_type);
6174 			} else {
6175 				ether_type = 0;
6176 			}
6177 			break;
6178 		case RTE_FLOW_ITEM_TYPE_VLAN:
6179 			ret = flow_dv_validate_item_vlan(items, item_flags,
6180 							 dev, error);
6181 			if (ret < 0)
6182 				return ret;
6183 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6184 					     MLX5_FLOW_LAYER_OUTER_VLAN;
6185 			if (items->mask != NULL && items->spec != NULL) {
6186 				ether_type =
6187 					((const struct rte_flow_item_vlan *)
6188 					 items->spec)->inner_type;
6189 				ether_type &=
6190 					((const struct rte_flow_item_vlan *)
6191 					 items->mask)->inner_type;
6192 				ether_type = rte_be_to_cpu_16(ether_type);
6193 			} else {
6194 				ether_type = 0;
6195 			}
6196 			/* Store outer VLAN mask for of_push_vlan action. */
6197 			if (!tunnel)
6198 				vlan_m = items->mask;
6199 			break;
6200 		case RTE_FLOW_ITEM_TYPE_IPV4:
6201 			mlx5_flow_tunnel_ip_check(items, next_protocol,
6202 						  &item_flags, &tunnel);
6203 			ret = flow_dv_validate_item_ipv4(items, item_flags,
6204 							 last_item, ether_type,
6205 							 error);
6206 			if (ret < 0)
6207 				return ret;
6208 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6209 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6210 			if (items->mask != NULL &&
6211 			    ((const struct rte_flow_item_ipv4 *)
6212 			     items->mask)->hdr.next_proto_id) {
6213 				next_protocol =
6214 					((const struct rte_flow_item_ipv4 *)
6215 					 (items->spec))->hdr.next_proto_id;
6216 				next_protocol &=
6217 					((const struct rte_flow_item_ipv4 *)
6218 					 (items->mask))->hdr.next_proto_id;
6219 			} else {
6220 				/* Reset for inner layer. */
6221 				next_protocol = 0xff;
6222 			}
6223 			break;
6224 		case RTE_FLOW_ITEM_TYPE_IPV6:
6225 			mlx5_flow_tunnel_ip_check(items, next_protocol,
6226 						  &item_flags, &tunnel);
6227 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6228 							   last_item,
6229 							   ether_type,
6230 							   &nic_ipv6_mask,
6231 							   error);
6232 			if (ret < 0)
6233 				return ret;
6234 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6235 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6236 			if (items->mask != NULL &&
6237 			    ((const struct rte_flow_item_ipv6 *)
6238 			     items->mask)->hdr.proto) {
6239 				item_ipv6_proto =
6240 					((const struct rte_flow_item_ipv6 *)
6241 					 items->spec)->hdr.proto;
6242 				next_protocol =
6243 					((const struct rte_flow_item_ipv6 *)
6244 					 items->spec)->hdr.proto;
6245 				next_protocol &=
6246 					((const struct rte_flow_item_ipv6 *)
6247 					 items->mask)->hdr.proto;
6248 			} else {
6249 				/* Reset for inner layer. */
6250 				next_protocol = 0xff;
6251 			}
6252 			break;
6253 		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6254 			ret = flow_dv_validate_item_ipv6_frag_ext(items,
6255 								  item_flags,
6256 								  error);
6257 			if (ret < 0)
6258 				return ret;
6259 			last_item = tunnel ?
6260 					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6261 					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6262 			if (items->mask != NULL &&
6263 			    ((const struct rte_flow_item_ipv6_frag_ext *)
6264 			     items->mask)->hdr.next_header) {
6265 				next_protocol =
6266 				((const struct rte_flow_item_ipv6_frag_ext *)
6267 				 items->spec)->hdr.next_header;
6268 				next_protocol &=
6269 				((const struct rte_flow_item_ipv6_frag_ext *)
6270 				 items->mask)->hdr.next_header;
6271 			} else {
6272 				/* Reset for inner layer. */
6273 				next_protocol = 0xff;
6274 			}
6275 			break;
6276 		case RTE_FLOW_ITEM_TYPE_TCP:
6277 			ret = mlx5_flow_validate_item_tcp
6278 						(items, item_flags,
6279 						 next_protocol,
6280 						 &nic_tcp_mask,
6281 						 error);
6282 			if (ret < 0)
6283 				return ret;
6284 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6285 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
6286 			break;
6287 		case RTE_FLOW_ITEM_TYPE_UDP:
6288 			ret = mlx5_flow_validate_item_udp(items, item_flags,
6289 							  next_protocol,
6290 							  error);
6291 			if (ret < 0)
6292 				return ret;
6293 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6294 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
6295 			break;
6296 		case RTE_FLOW_ITEM_TYPE_GRE:
6297 			ret = mlx5_flow_validate_item_gre(items, item_flags,
6298 							  next_protocol, error);
6299 			if (ret < 0)
6300 				return ret;
6301 			gre_item = items;
6302 			last_item = MLX5_FLOW_LAYER_GRE;
6303 			break;
6304 		case RTE_FLOW_ITEM_TYPE_NVGRE:
6305 			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6306 							    next_protocol,
6307 							    error);
6308 			if (ret < 0)
6309 				return ret;
6310 			last_item = MLX5_FLOW_LAYER_NVGRE;
6311 			break;
6312 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6313 			ret = mlx5_flow_validate_item_gre_key
6314 				(items, item_flags, gre_item, error);
6315 			if (ret < 0)
6316 				return ret;
6317 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
6318 			break;
6319 		case RTE_FLOW_ITEM_TYPE_VXLAN:
6320 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6321 							    error);
6322 			if (ret < 0)
6323 				return ret;
6324 			last_item = MLX5_FLOW_LAYER_VXLAN;
6325 			break;
6326 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6327 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
6328 								item_flags, dev,
6329 								error);
6330 			if (ret < 0)
6331 				return ret;
6332 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6333 			break;
6334 		case RTE_FLOW_ITEM_TYPE_GENEVE:
6335 			ret = mlx5_flow_validate_item_geneve(items,
6336 							     item_flags, dev,
6337 							     error);
6338 			if (ret < 0)
6339 				return ret;
6340 			geneve_item = items;
6341 			last_item = MLX5_FLOW_LAYER_GENEVE;
6342 			break;
6343 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6344 			ret = mlx5_flow_validate_item_geneve_opt(items,
6345 								 last_item,
6346 								 geneve_item,
6347 								 dev,
6348 								 error);
6349 			if (ret < 0)
6350 				return ret;
6351 			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6352 			break;
6353 		case RTE_FLOW_ITEM_TYPE_MPLS:
6354 			ret = mlx5_flow_validate_item_mpls(dev, items,
6355 							   item_flags,
6356 							   last_item, error);
6357 			if (ret < 0)
6358 				return ret;
6359 			last_item = MLX5_FLOW_LAYER_MPLS;
6360 			break;
6361 
6362 		case RTE_FLOW_ITEM_TYPE_MARK:
6363 			ret = flow_dv_validate_item_mark(dev, items, attr,
6364 							 error);
6365 			if (ret < 0)
6366 				return ret;
6367 			last_item = MLX5_FLOW_ITEM_MARK;
6368 			break;
6369 		case RTE_FLOW_ITEM_TYPE_META:
6370 			ret = flow_dv_validate_item_meta(dev, items, attr,
6371 							 error);
6372 			if (ret < 0)
6373 				return ret;
6374 			last_item = MLX5_FLOW_ITEM_METADATA;
6375 			break;
6376 		case RTE_FLOW_ITEM_TYPE_ICMP:
6377 			ret = mlx5_flow_validate_item_icmp(items, item_flags,
6378 							   next_protocol,
6379 							   error);
6380 			if (ret < 0)
6381 				return ret;
6382 			last_item = MLX5_FLOW_LAYER_ICMP;
6383 			break;
6384 		case RTE_FLOW_ITEM_TYPE_ICMP6:
6385 			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6386 							    next_protocol,
6387 							    error);
6388 			if (ret < 0)
6389 				return ret;
6390 			item_ipv6_proto = IPPROTO_ICMPV6;
6391 			last_item = MLX5_FLOW_LAYER_ICMP6;
6392 			break;
6393 		case RTE_FLOW_ITEM_TYPE_TAG:
6394 			ret = flow_dv_validate_item_tag(dev, items,
6395 							attr, error);
6396 			if (ret < 0)
6397 				return ret;
6398 			last_item = MLX5_FLOW_ITEM_TAG;
6399 			break;
6400 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6401 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6402 			break;
6403 		case RTE_FLOW_ITEM_TYPE_GTP:
6404 			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6405 							error);
6406 			if (ret < 0)
6407 				return ret;
6408 			gtp_item = items;
6409 			last_item = MLX5_FLOW_LAYER_GTP;
6410 			break;
6411 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6412 			ret = flow_dv_validate_item_gtp_psc(items, last_item,
6413 							    gtp_item, attr,
6414 							    error);
6415 			if (ret < 0)
6416 				return ret;
6417 			last_item = MLX5_FLOW_LAYER_GTP_PSC;
6418 			break;
6419 		case RTE_FLOW_ITEM_TYPE_ECPRI:
6420 			/* Capacity will be checked in the translate stage. */
6421 			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6422 							    last_item,
6423 							    ether_type,
6424 							    &nic_ecpri_mask,
6425 							    error);
6426 			if (ret < 0)
6427 				return ret;
6428 			last_item = MLX5_FLOW_LAYER_ECPRI;
6429 			break;
6430 		default:
6431 			return rte_flow_error_set(error, ENOTSUP,
6432 						  RTE_FLOW_ERROR_TYPE_ITEM,
6433 						  NULL, "item not supported");
6434 		}
6435 		item_flags |= last_item;
6436 	}
6437 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6438 		int type = actions->type;
6439 
6440 		if (!mlx5_flow_os_action_supported(type))
6441 			return rte_flow_error_set(error, ENOTSUP,
6442 						  RTE_FLOW_ERROR_TYPE_ACTION,
6443 						  actions,
6444 						  "action not supported");
6445 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6446 			return rte_flow_error_set(error, ENOTSUP,
6447 						  RTE_FLOW_ERROR_TYPE_ACTION,
6448 						  actions, "too many actions");
6449 		switch (type) {
6450 		case RTE_FLOW_ACTION_TYPE_VOID:
6451 			break;
6452 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
6453 			ret = flow_dv_validate_action_port_id(dev,
6454 							      action_flags,
6455 							      actions,
6456 							      attr,
6457 							      error);
6458 			if (ret)
6459 				return ret;
6460 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6461 			++actions_n;
6462 			break;
6463 		case RTE_FLOW_ACTION_TYPE_FLAG:
6464 			ret = flow_dv_validate_action_flag(dev, action_flags,
6465 							   attr, error);
6466 			if (ret < 0)
6467 				return ret;
6468 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6469 				/* Count all modify-header actions as one. */
6470 				if (!(action_flags &
6471 				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
6472 					++actions_n;
6473 				action_flags |= MLX5_FLOW_ACTION_FLAG |
6474 						MLX5_FLOW_ACTION_MARK_EXT;
6475 				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6476 					modify_after_mirror = 1;
6477 
6478 			} else {
6479 				action_flags |= MLX5_FLOW_ACTION_FLAG;
6480 				++actions_n;
6481 			}
6482 			rw_act_num += MLX5_ACT_NUM_SET_MARK;
6483 			break;
6484 		case RTE_FLOW_ACTION_TYPE_MARK:
6485 			ret = flow_dv_validate_action_mark(dev, actions,
6486 							   action_flags,
6487 							   attr, error);
6488 			if (ret < 0)
6489 				return ret;
6490 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6491 				/* Count all modify-header actions as one. */
6492 				if (!(action_flags &
6493 				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
6494 					++actions_n;
6495 				action_flags |= MLX5_FLOW_ACTION_MARK |
6496 						MLX5_FLOW_ACTION_MARK_EXT;
6497 				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6498 					modify_after_mirror = 1;
6499 			} else {
6500 				action_flags |= MLX5_FLOW_ACTION_MARK;
6501 				++actions_n;
6502 			}
6503 			rw_act_num += MLX5_ACT_NUM_SET_MARK;
6504 			break;
6505 		case RTE_FLOW_ACTION_TYPE_SET_META:
6506 			ret = flow_dv_validate_action_set_meta(dev, actions,
6507 							       action_flags,
6508 							       attr, error);
6509 			if (ret < 0)
6510 				return ret;
6511 			/* Count all modify-header actions as one action. */
6512 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6513 				++actions_n;
6514 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6515 				modify_after_mirror = 1;
6516 			action_flags |= MLX5_FLOW_ACTION_SET_META;
6517 			rw_act_num += MLX5_ACT_NUM_SET_META;
6518 			break;
6519 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
6520 			ret = flow_dv_validate_action_set_tag(dev, actions,
6521 							      action_flags,
6522 							      attr, error);
6523 			if (ret < 0)
6524 				return ret;
6525 			/* Count all modify-header actions as one action. */
6526 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6527 				++actions_n;
6528 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6529 				modify_after_mirror = 1;
6530 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6531 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
6532 			break;
6533 		case RTE_FLOW_ACTION_TYPE_DROP:
6534 			ret = mlx5_flow_validate_action_drop(action_flags,
6535 							     attr, error);
6536 			if (ret < 0)
6537 				return ret;
6538 			action_flags |= MLX5_FLOW_ACTION_DROP;
6539 			++actions_n;
6540 			break;
6541 		case RTE_FLOW_ACTION_TYPE_QUEUE:
6542 			ret = mlx5_flow_validate_action_queue(actions,
6543 							      action_flags, dev,
6544 							      attr, error);
6545 			if (ret < 0)
6546 				return ret;
6547 			queue_index = ((const struct rte_flow_action_queue *)
6548 							(actions->conf))->index;
6549 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
6550 			++actions_n;
6551 			break;
6552 		case RTE_FLOW_ACTION_TYPE_RSS:
6553 			rss = actions->conf;
6554 			ret = mlx5_flow_validate_action_rss(actions,
6555 							    action_flags, dev,
6556 							    attr, item_flags,
6557 							    error);
6558 			if (ret < 0)
6559 				return ret;
6560 			if (rss && sample_rss &&
6561 			    (sample_rss->level != rss->level ||
6562 			    sample_rss->types != rss->types))
6563 				return rte_flow_error_set(error, ENOTSUP,
6564 					RTE_FLOW_ERROR_TYPE_ACTION,
6565 					NULL,
6566 					"Can't use different RSS types "
6567 					"or levels in the same flow");
6568 			if (rss != NULL && rss->queue_num)
6569 				queue_index = rss->queue[0];
6570 			action_flags |= MLX5_FLOW_ACTION_RSS;
6571 			++actions_n;
6572 			break;
6573 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6574 			ret =
6575 			mlx5_flow_validate_action_default_miss(action_flags,
6576 					attr, error);
6577 			if (ret < 0)
6578 				return ret;
6579 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6580 			++actions_n;
6581 			break;
6582 		case RTE_FLOW_ACTION_TYPE_COUNT:
6583 			ret = flow_dv_validate_action_count(dev, actions,
6584 							    action_flags,
6585 							    error);
6586 			if (ret < 0)
6587 				return ret;
6588 			count = actions->conf;
6589 			action_flags |= MLX5_FLOW_ACTION_COUNT;
6590 			++actions_n;
6591 			break;
6592 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6593 			if (flow_dv_validate_action_pop_vlan(dev,
6594 							     action_flags,
6595 							     actions,
6596 							     item_flags, attr,
6597 							     error))
6598 				return -rte_errno;
6599 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6600 			++actions_n;
6601 			break;
6602 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6603 			ret = flow_dv_validate_action_push_vlan(dev,
6604 								action_flags,
6605 								vlan_m,
6606 								actions, attr,
6607 								error);
6608 			if (ret < 0)
6609 				return ret;
6610 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6611 			++actions_n;
6612 			break;
6613 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6614 			ret = flow_dv_validate_action_set_vlan_pcp
6615 						(action_flags, actions, error);
6616 			if (ret < 0)
6617 				return ret;
6618 			/* Count PCP with push_vlan command. */
6619 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6620 			break;
6621 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6622 			ret = flow_dv_validate_action_set_vlan_vid
6623 						(item_flags, action_flags,
6624 						 actions, error);
6625 			if (ret < 0)
6626 				return ret;
6627 			/* Count VID with push_vlan command. */
6628 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6629 			rw_act_num += MLX5_ACT_NUM_MDF_VID;
6630 			break;
6631 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6632 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6633 			ret = flow_dv_validate_action_l2_encap(dev,
6634 							       action_flags,
6635 							       actions, attr,
6636 							       error);
6637 			if (ret < 0)
6638 				return ret;
6639 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
6640 			++actions_n;
6641 			break;
6642 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6643 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6644 			ret = flow_dv_validate_action_decap(dev, action_flags,
6645 							    actions, item_flags,
6646 							    attr, error);
6647 			if (ret < 0)
6648 				return ret;
6649 			action_flags |= MLX5_FLOW_ACTION_DECAP;
6650 			++actions_n;
6651 			break;
6652 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6653 			ret = flow_dv_validate_action_raw_encap_decap
6654 				(dev, NULL, actions->conf, attr, &action_flags,
6655 				 &actions_n, actions, item_flags, error);
6656 			if (ret < 0)
6657 				return ret;
6658 			break;
6659 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6660 			decap = actions->conf;
6661 			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6662 				;
6663 			if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6664 				encap = NULL;
6665 				actions--;
6666 			} else {
6667 				encap = actions->conf;
6668 			}
6669 			ret = flow_dv_validate_action_raw_encap_decap
6670 					   (dev,
6671 					    decap ? decap : &empty_decap, encap,
6672 					    attr, &action_flags, &actions_n,
6673 					    actions, item_flags, error);
6674 			if (ret < 0)
6675 				return ret;
6676 			break;
6677 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6678 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6679 			ret = flow_dv_validate_action_modify_mac(action_flags,
6680 								 actions,
6681 								 item_flags,
6682 								 error);
6683 			if (ret < 0)
6684 				return ret;
6685 			/* Count all modify-header actions as one action. */
6686 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6687 				++actions_n;
6688 			action_flags |= actions->type ==
6689 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6690 						MLX5_FLOW_ACTION_SET_MAC_SRC :
6691 						MLX5_FLOW_ACTION_SET_MAC_DST;
6692 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6693 				modify_after_mirror = 1;
6694 			/*
6695 			 * Even though the source and destination MAC addresses
6696 			 * overlap within the 4B-aligned header, the convert
6697 			 * function handles them separately, so 4 SW actions
6698 			 * are created, and 2 actions are added each time,
6699 			 * no matter how many address bytes are actually set.
6700 			 */
6701 			rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6702 			break;
6703 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6704 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6705 			ret = flow_dv_validate_action_modify_ipv4(action_flags,
6706 								  actions,
6707 								  item_flags,
6708 								  error);
6709 			if (ret < 0)
6710 				return ret;
6711 			/* Count all modify-header actions as one action. */
6712 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6713 				++actions_n;
6714 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6715 				modify_after_mirror = 1;
6716 			action_flags |= actions->type ==
6717 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6718 						MLX5_FLOW_ACTION_SET_IPV4_SRC :
6719 						MLX5_FLOW_ACTION_SET_IPV4_DST;
6720 			rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6721 			break;
6722 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6723 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6724 			ret = flow_dv_validate_action_modify_ipv6(action_flags,
6725 								  actions,
6726 								  item_flags,
6727 								  error);
6728 			if (ret < 0)
6729 				return ret;
6730 			if (item_ipv6_proto == IPPROTO_ICMPV6)
6731 				return rte_flow_error_set(error, ENOTSUP,
6732 					RTE_FLOW_ERROR_TYPE_ACTION,
6733 					actions,
6734 					"Can't change header "
6735 					"with ICMPv6 proto");
6736 			/* Count all modify-header actions as one action. */
6737 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6738 				++actions_n;
6739 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6740 				modify_after_mirror = 1;
6741 			action_flags |= actions->type ==
6742 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6743 						MLX5_FLOW_ACTION_SET_IPV6_SRC :
6744 						MLX5_FLOW_ACTION_SET_IPV6_DST;
6745 			rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6746 			break;
6747 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6748 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6749 			ret = flow_dv_validate_action_modify_tp(action_flags,
6750 								actions,
6751 								item_flags,
6752 								error);
6753 			if (ret < 0)
6754 				return ret;
6755 			/* Count all modify-header actions as one action. */
6756 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6757 				++actions_n;
6758 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6759 				modify_after_mirror = 1;
6760 			action_flags |= actions->type ==
6761 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6762 						MLX5_FLOW_ACTION_SET_TP_SRC :
6763 						MLX5_FLOW_ACTION_SET_TP_DST;
6764 			rw_act_num += MLX5_ACT_NUM_MDF_PORT;
6765 			break;
6766 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6767 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
6768 			ret = flow_dv_validate_action_modify_ttl(action_flags,
6769 								 actions,
6770 								 item_flags,
6771 								 error);
6772 			if (ret < 0)
6773 				return ret;
6774 			/* Count all modify-header actions as one action. */
6775 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6776 				++actions_n;
6777 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6778 				modify_after_mirror = 1;
6779 			action_flags |= actions->type ==
6780 					RTE_FLOW_ACTION_TYPE_SET_TTL ?
6781 						MLX5_FLOW_ACTION_SET_TTL :
6782 						MLX5_FLOW_ACTION_DEC_TTL;
6783 			rw_act_num += MLX5_ACT_NUM_MDF_TTL;
6784 			break;
6785 		case RTE_FLOW_ACTION_TYPE_JUMP:
6786 			ret = flow_dv_validate_action_jump(dev, tunnel, actions,
6787 							   action_flags,
6788 							   attr, external,
6789 							   error);
6790 			if (ret)
6791 				return ret;
6792 			if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
6793 			    fdb_mirror_limit)
6794 				return rte_flow_error_set(error, EINVAL,
6795 						  RTE_FLOW_ERROR_TYPE_ACTION,
6796 						  NULL,
6797 						  "sample and jump action combination is not supported");
6798 			++actions_n;
6799 			action_flags |= MLX5_FLOW_ACTION_JUMP;
6800 			break;
6801 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6802 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6803 			ret = flow_dv_validate_action_modify_tcp_seq
6804 								(action_flags,
6805 								 actions,
6806 								 item_flags,
6807 								 error);
6808 			if (ret < 0)
6809 				return ret;
6810 			/* Count all modify-header actions as one action. */
6811 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6812 				++actions_n;
6813 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6814 				modify_after_mirror = 1;
6815 			action_flags |= actions->type ==
6816 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
6817 						MLX5_FLOW_ACTION_INC_TCP_SEQ :
6818 						MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6819 			rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
6820 			break;
6821 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6822 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6823 			ret = flow_dv_validate_action_modify_tcp_ack
6824 								(action_flags,
6825 								 actions,
6826 								 item_flags,
6827 								 error);
6828 			if (ret < 0)
6829 				return ret;
6830 			/* Count all modify-header actions as one action. */
6831 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6832 				++actions_n;
6833 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6834 				modify_after_mirror = 1;
6835 			action_flags |= actions->type ==
6836 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6837 						MLX5_FLOW_ACTION_INC_TCP_ACK :
6838 						MLX5_FLOW_ACTION_DEC_TCP_ACK;
6839 			rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
6840 			break;
6841 		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
6842 			break;
6843 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6844 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6845 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
6846 			break;
6847 		case RTE_FLOW_ACTION_TYPE_METER:
6848 			ret = mlx5_flow_validate_action_meter(dev,
6849 							      action_flags,
6850 							      actions, attr,
6851 							      error);
6852 			if (ret < 0)
6853 				return ret;
6854 			action_flags |= MLX5_FLOW_ACTION_METER;
6855 			++actions_n;
6856 			/* Meter action will add one more TAG action. */
6857 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
6858 			break;
6859 		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
6860 			if (!attr->transfer && !attr->group)
6861 				return rte_flow_error_set(error, ENOTSUP,
6862 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6863 						NULL,
6864 						"Shared ASO age action is not supported for group 0");
6865 			action_flags |= MLX5_FLOW_ACTION_AGE;
6866 			++actions_n;
6867 			break;
6868 		case RTE_FLOW_ACTION_TYPE_AGE:
6869 			ret = flow_dv_validate_action_age(action_flags,
6870 							  actions, dev,
6871 							  error);
6872 			if (ret < 0)
6873 				return ret;
6874 			/*
6875 			 * Validate mutual exclusion of the regular AGE action
6876 			 * (using a counter) with shared counter actions.
6877 			 */
6878 			if (!priv->sh->flow_hit_aso_en) {
6879 				if (count && count->shared)
6880 					return rte_flow_error_set
6881 						(error, EINVAL,
6882 						RTE_FLOW_ERROR_TYPE_ACTION,
6883 						NULL,
6884 						"old age and shared count combination is not supported");
6885 				if (sample_count)
6886 					return rte_flow_error_set
6887 						(error, EINVAL,
6888 						RTE_FLOW_ERROR_TYPE_ACTION,
6889 						NULL,
6890 						"old age action and count must be in the same sub flow");
6891 			}
6892 			action_flags |= MLX5_FLOW_ACTION_AGE;
6893 			++actions_n;
6894 			break;
6895 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6896 			ret = flow_dv_validate_action_modify_ipv4_dscp
6897 							 (action_flags,
6898 							  actions,
6899 							  item_flags,
6900 							  error);
6901 			if (ret < 0)
6902 				return ret;
6903 			/* Count all modify-header actions as one action. */
6904 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6905 				++actions_n;
6906 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6907 				modify_after_mirror = 1;
6908 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6909 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6910 			break;
6911 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6912 			ret = flow_dv_validate_action_modify_ipv6_dscp
6913 								(action_flags,
6914 								 actions,
6915 								 item_flags,
6916 								 error);
6917 			if (ret < 0)
6918 				return ret;
6919 			/* Count all modify-header actions as one action. */
6920 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6921 				++actions_n;
6922 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6923 				modify_after_mirror = 1;
6924 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6925 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6926 			break;
6927 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
6928 			ret = flow_dv_validate_action_sample(&action_flags,
6929 							     actions, dev,
6930 							     attr, item_flags,
6931 							     rss, &sample_rss,
6932 							     &sample_count,
6933 							     &fdb_mirror_limit,
6934 							     error);
6935 			if (ret < 0)
6936 				return ret;
6937 			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6938 			++actions_n;
6939 			break;
6940 		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6941 			if (actions[0].type != (typeof(actions[0].type))
6942 				MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6943 				return rte_flow_error_set
6944 						(error, EINVAL,
6945 						RTE_FLOW_ERROR_TYPE_ACTION,
6946 						NULL, "MLX5 private action "
6947 						"must be the first");
6948 
6949 			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6950 			break;
6951 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6952 			ret = flow_dv_validate_action_modify_field(dev,
6953 								   action_flags,
6954 								   actions,
6955 								   attr,
6956 								   error);
6957 			if (ret < 0)
6958 				return ret;
6959 			/* Count all modify-header actions as one action. */
6960 			if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
6961 				++actions_n;
6962 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6963 			rw_act_num += ret;
6964 			break;
6965 		default:
6966 			return rte_flow_error_set(error, ENOTSUP,
6967 						  RTE_FLOW_ERROR_TYPE_ACTION,
6968 						  actions,
6969 						  "action not supported");
6970 		}
6971 	}
6972 	/*
6973 	 * Validate actions in flow rules
6974 	 * - Explicit decap action is prohibited by the tunnel offload API.
6975 	 * - Drop action in tunnel steer rule is prohibited by the API.
6976 	 * - Application cannot use MARK action because its value can mask
6977 	 *   the tunnel default miss notification.
6978 	 * - JUMP in tunnel match rule is not supported by the current PMD
6979 	 *   implementation.
6980 	 * - TAG & META are reserved for future uses.
6981 	 */
6982 	if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6983 		uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6984 					    MLX5_FLOW_ACTION_MARK     |
6985 					    MLX5_FLOW_ACTION_SET_TAG  |
6986 					    MLX5_FLOW_ACTION_SET_META |
6987 					    MLX5_FLOW_ACTION_DROP;
6988 
6989 		if (action_flags & bad_actions_mask)
6990 			return rte_flow_error_set
6991 					(error, EINVAL,
6992 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6993 					"Invalid RTE action in tunnel "
6994 					"set decap rule");
6995 		if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6996 			return rte_flow_error_set
6997 					(error, EINVAL,
6998 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6999 					"tunnel set decap rule must terminate "
7000 					"with JUMP");
7001 		if (!attr->ingress)
7002 			return rte_flow_error_set
7003 					(error, EINVAL,
7004 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7005 					"tunnel flows for ingress traffic only");
7006 	}
7007 	if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7008 		uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7009 					    MLX5_FLOW_ACTION_MARK    |
7010 					    MLX5_FLOW_ACTION_SET_TAG |
7011 					    MLX5_FLOW_ACTION_SET_META;
7012 
7013 		if (action_flags & bad_actions_mask)
7014 			return rte_flow_error_set
7015 					(error, EINVAL,
7016 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7017 					"Invalid RTE action in tunnel "
7018 					"set match rule");
7019 	}
7020 	/*
7021 	 * Validate the drop action mutual exclusion with other actions.
7022 	 * Drop action is mutually-exclusive with any other action, except for
7023 	 * Count action.
7024 	 * Drop action compatibility with tunnel offload was already validated.
7025 	 */
7026 	if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7027 			    MLX5_FLOW_ACTION_TUNNEL_SET));
7028 	else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7029 	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7030 		return rte_flow_error_set(error, EINVAL,
7031 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7032 					  "Drop action is mutually-exclusive "
7033 					  "with any other action, except for "
7034 					  "Count action");
7035 	/* E-Switch has a few restrictions on using items and actions. */
7036 	if (attr->transfer) {
7037 		if (!mlx5_flow_ext_mreg_supported(dev) &&
7038 		    action_flags & MLX5_FLOW_ACTION_FLAG)
7039 			return rte_flow_error_set(error, ENOTSUP,
7040 						  RTE_FLOW_ERROR_TYPE_ACTION,
7041 						  NULL,
7042 						  "unsupported action FLAG");
7043 		if (!mlx5_flow_ext_mreg_supported(dev) &&
7044 		    action_flags & MLX5_FLOW_ACTION_MARK)
7045 			return rte_flow_error_set(error, ENOTSUP,
7046 						  RTE_FLOW_ERROR_TYPE_ACTION,
7047 						  NULL,
7048 						  "unsupported action MARK");
7049 		if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7050 			return rte_flow_error_set(error, ENOTSUP,
7051 						  RTE_FLOW_ERROR_TYPE_ACTION,
7052 						  NULL,
7053 						  "unsupported action QUEUE");
7054 		if (action_flags & MLX5_FLOW_ACTION_RSS)
7055 			return rte_flow_error_set(error, ENOTSUP,
7056 						  RTE_FLOW_ERROR_TYPE_ACTION,
7057 						  NULL,
7058 						  "unsupported action RSS");
7059 		if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7060 			return rte_flow_error_set(error, EINVAL,
7061 						  RTE_FLOW_ERROR_TYPE_ACTION,
7062 						  actions,
7063 						  "no fate action is found");
7064 	} else {
7065 		if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7066 			return rte_flow_error_set(error, EINVAL,
7067 						  RTE_FLOW_ERROR_TYPE_ACTION,
7068 						  actions,
7069 						  "no fate action is found");
7070 	}
7071 	/*
7072 	 * Continue validation for Xcap and VLAN actions.
7073 	 * If hairpin works in explicit TX rule mode, there is no action
7074 	 * splitting and the validation of a hairpin ingress flow should be
7075 	 * the same as for other standard flows.
7076 	 */
7077 	if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7078 			     MLX5_FLOW_VLAN_ACTIONS)) &&
7079 	    (queue_index == 0xFFFF ||
7080 	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7081 	     ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7082 	     conf->tx_explicit != 0))) {
7083 		if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7084 		    MLX5_FLOW_XCAP_ACTIONS)
7085 			return rte_flow_error_set(error, ENOTSUP,
7086 						  RTE_FLOW_ERROR_TYPE_ACTION,
7087 						  NULL, "encap and decap "
7088 						  "combination is not supported");
7089 		if (!attr->transfer && attr->ingress) {
7090 			if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7091 				return rte_flow_error_set
7092 						(error, ENOTSUP,
7093 						 RTE_FLOW_ERROR_TYPE_ACTION,
7094 						 NULL, "encap is not supported"
7095 						 " for ingress traffic");
7096 			else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7097 				return rte_flow_error_set
7098 						(error, ENOTSUP,
7099 						 RTE_FLOW_ERROR_TYPE_ACTION,
7100 						 NULL, "push VLAN action not "
7101 						 "supported for ingress");
7102 			else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7103 					MLX5_FLOW_VLAN_ACTIONS)
7104 				return rte_flow_error_set
7105 						(error, ENOTSUP,
7106 						 RTE_FLOW_ERROR_TYPE_ACTION,
7107 						 NULL, "no support for "
7108 						 "multiple VLAN actions");
7109 		}
7110 	}
7111 	/*
7112 	 * Hairpin flow will add one more TAG action in TX implicit mode.
7113 	 * In TX explicit mode, there will be no hairpin flow ID.
7114 	 */
7115 	if (hairpin > 0)
7116 		rw_act_num += MLX5_ACT_NUM_SET_TAG;
7117 	/* Extra metadata enabled: one more TAG action will be added. */
7118 	if (dev_conf->dv_flow_en &&
7119 	    dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7120 	    mlx5_flow_ext_mreg_supported(dev))
7121 		rw_act_num += MLX5_ACT_NUM_SET_TAG;
7122 	if (rw_act_num >
7123 			flow_dv_modify_hdr_action_max(dev, is_root)) {
7124 		return rte_flow_error_set(error, ENOTSUP,
7125 					  RTE_FLOW_ERROR_TYPE_ACTION,
7126 					  NULL, "too many header modify"
7127 					  " actions to support");
7128 	}
7129 	/* Eswitch egress mirror and modify flow has limitation on CX5 */
7130 	if (fdb_mirror_limit && modify_after_mirror)
7131 		return rte_flow_error_set(error, EINVAL,
7132 				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7133 				"sample before modify action is not supported");
7134 	return 0;
7135 }
7136 
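/*
 * Illustrative sketch (not part of the driver): the validation loop above
 * counts all modify-header actions as a single device action (actions_n)
 * while accumulating the per-field read/write cost in rw_act_num. A
 * hypothetical rule such as:
 *
 *	const struct rte_flow_action example[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC, .conf = &mac },
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_TTL, .conf = &ttl },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * increments actions_n only once for SET_MAC_SRC and SET_TTL together, but
 * adds MLX5_ACT_NUM_MDF_MAC + MLX5_ACT_NUM_MDF_TTL to rw_act_num; validation
 * fails with ENOTSUP once rw_act_num exceeds
 * flow_dv_modify_hdr_action_max(dev, is_root).
 */
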
7137 /**
7138  * Internal preparation function. Allocates the DV flow structure;
7139  * its size is constant.
7140  *
7141  * @param[in] dev
7142  *   Pointer to the rte_eth_dev structure.
7143  * @param[in] attr
7144  *   Pointer to the flow attributes.
7145  * @param[in] items
7146  *   Pointer to the list of items.
7147  * @param[in] actions
7148  *   Pointer to the list of actions.
7149  * @param[out] error
7150  *   Pointer to the error structure.
7151  *
7152  * @return
7153  *   Pointer to mlx5_flow object on success,
7154  *   otherwise NULL and rte_errno is set.
7155  */
7156 static struct mlx5_flow *
7157 flow_dv_prepare(struct rte_eth_dev *dev,
7158 		const struct rte_flow_attr *attr __rte_unused,
7159 		const struct rte_flow_item items[] __rte_unused,
7160 		const struct rte_flow_action actions[] __rte_unused,
7161 		struct rte_flow_error *error)
7162 {
7163 	uint32_t handle_idx = 0;
7164 	struct mlx5_flow *dev_flow;
7165 	struct mlx5_flow_handle *dev_handle;
7166 	struct mlx5_priv *priv = dev->data->dev_private;
7167 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7168 
7169 	MLX5_ASSERT(wks);
7170 	/* Sanity check to avoid corrupting the flow workspace memory. */
7171 	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7172 		rte_flow_error_set(error, ENOSPC,
7173 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7174 				   "no free temporary device flow");
7175 		return NULL;
7176 	}
7177 	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7178 				   &handle_idx);
7179 	if (!dev_handle) {
7180 		rte_flow_error_set(error, ENOMEM,
7181 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7182 				   "not enough memory to create flow handle");
7183 		return NULL;
7184 	}
7185 	MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7186 	dev_flow = &wks->flows[wks->flow_idx++];
7187 	memset(dev_flow, 0, sizeof(*dev_flow));
7188 	dev_flow->handle = dev_handle;
7189 	dev_flow->handle_idx = handle_idx;
7190 	/*
7191 	 * Some old rdma-core releases check the length of the matching
7192 	 * parameter before continuing, so start with the length that
7193 	 * excludes the misc4 param. If the flow needs misc4 support, the
7194 	 * length is adjusted accordingly later. Each param member is
7195 	 * naturally aligned on a 64B boundary.
7196 	 */
7197 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7198 				  MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7199 	dev_flow->ingress = attr->ingress;
7200 	dev_flow->dv.transfer = attr->transfer;
7201 	return dev_flow;
7202 }
7203 
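/*
 * Illustrative note (not part of the driver): with the PRM layout used
 * here, the initial match value size excludes the trailing misc4 block,
 * conceptually:
 *
 *	size_t full = MLX5_ST_SZ_BYTES(fte_match_param);
 *	size_t no_misc4 = full - MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 *	// dev_flow->dv.value.size starts at no_misc4 and is expected to be
 *	// enlarged later only if the flow actually matches on misc4 fields.
 */
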
7204 #ifdef RTE_LIBRTE_MLX5_DEBUG
7205 /**
7206  * Sanity check for match mask and value. Similar to check_valid_spec() in
7207  * kernel driver. If an unmasked bit is set in the value, it returns failure.
7208  *
7209  * @param match_mask
7210  *   pointer to match mask buffer.
7211  * @param match_value
7212  *   pointer to match value buffer.
7213  *
7214  * @return
7215  *   0 if valid, -EINVAL otherwise.
7216  */
7217 static int
7218 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7219 {
7220 	uint8_t *m = match_mask;
7221 	uint8_t *v = match_value;
7222 	unsigned int i;
7223 
7224 	for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7225 		if (v[i] & ~m[i]) {
7226 			DRV_LOG(ERR,
7227 				"match_value differs from match_criteria"
7228 				" %p[%u] != %p[%u]",
7229 				match_value, i, match_mask, i);
7230 			return -EINVAL;
7231 		}
7232 	}
7233 	return 0;
7234 }
7235 #endif
7236 
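/*
 * Illustrative example (not part of the driver): the debug check above
 * rejects any value bit that is not covered by the mask, e.g. for one byte:
 *
 *	uint8_t m = 0xF0, v = 0x5A;
 *	// v & ~m == 0x0A -> flow_dv_check_valid_spec() returns -EINVAL,
 *	// since the low nibble of the value could never be matched.
 */
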
7237 /**
7238  * Add match of ip_version.
7239  *
7240  * @param[in] group
7241  *   Flow group.
7242  * @param[in] headers_v
7243  *   Values header pointer.
7244  * @param[in] headers_m
7245  *   Masks header pointer.
7246  * @param[in] ip_version
7247  *   The IP version to set.
7248  */
7249 static inline void
7250 flow_dv_set_match_ip_version(uint32_t group,
7251 			     void *headers_v,
7252 			     void *headers_m,
7253 			     uint8_t ip_version)
7254 {
7255 	if (group == 0)
7256 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7257 	else
7258 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7259 			 ip_version);
7260 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7261 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7262 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7263 }
7264 
7265 /**
7266  * Add Ethernet item to matcher and to the value.
7267  *
7268  * @param[in, out] matcher
7269  *   Flow matcher.
7270  * @param[in, out] key
7271  *   Flow matcher value.
7272  * @param[in] item
7273  *   Flow pattern to translate.
7274  * @param[in] inner
7275  *   Item is inner pattern.
7276  */
7277 static void
7278 flow_dv_translate_item_eth(void *matcher, void *key,
7279 			   const struct rte_flow_item *item, int inner,
7280 			   uint32_t group)
7281 {
7282 	const struct rte_flow_item_eth *eth_m = item->mask;
7283 	const struct rte_flow_item_eth *eth_v = item->spec;
7284 	const struct rte_flow_item_eth nic_mask = {
7285 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7286 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7287 		.type = RTE_BE16(0xffff),
7288 		.has_vlan = 0,
7289 	};
7290 	void *hdrs_m;
7291 	void *hdrs_v;
7292 	char *l24_v;
7293 	unsigned int i;
7294 
7295 	if (!eth_v)
7296 		return;
7297 	if (!eth_m)
7298 		eth_m = &nic_mask;
7299 	if (inner) {
7300 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7301 					 inner_headers);
7302 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7303 	} else {
7304 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7305 					 outer_headers);
7306 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7307 	}
7308 	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7309 	       &eth_m->dst, sizeof(eth_m->dst));
7310 	/* The value must be in the range of the mask. */
7311 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7312 	for (i = 0; i < sizeof(eth_m->dst); ++i)
7313 		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7314 	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7315 	       &eth_m->src, sizeof(eth_m->src));
7316 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7317 	/* The value must be in the range of the mask. */
7318 	for (i = 0; i < sizeof(eth_m->src); ++i)
7319 		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7320 	/*
7321 	 * HW supports match on one Ethertype, the Ethertype following the last
7322 	 * VLAN tag of the packet (see PRM).
7323 	 * Set match on ethertype only if ETH header is not followed by VLAN.
7324 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7325 	 * ethertype, and use ip_version field instead.
7326 	 * eCPRI over Ether layer will use type value 0xAEFE.
7327 	 */
7328 	if (eth_m->type == 0xFFFF) {
7329 		/* Set cvlan_tag mask for any single/multi/un-tagged case. */
7330 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7331 		switch (eth_v->type) {
7332 		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7333 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7334 			return;
7335 		case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7336 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7337 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7338 			return;
7339 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7340 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7341 			return;
7342 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7343 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7344 			return;
7345 		default:
7346 			break;
7347 		}
7348 	}
7349 	if (eth_m->has_vlan) {
7350 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7351 		if (eth_v->has_vlan) {
7352 			/*
7353 			 * Here, when also has_more_vlan field in VLAN item is
7354 			 * not set, only single-tagged packets will be matched.
7355 			 */
7356 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7357 			return;
7358 		}
7359 	}
7360 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7361 		 rte_be_to_cpu_16(eth_m->type));
7362 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7363 	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7364 }
7365 
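/*
 * Illustrative example (not part of the driver): a fully-masked Ethertype
 * of IPv4 is translated into an ip_version match instead of an ethertype
 * match, which is the HW-preferred form:
 *
 *	struct rte_flow_item_eth spec = { .type = RTE_BE16(0x0800) };
 *	struct rte_flow_item_eth mask = { .type = RTE_BE16(0xFFFF) };
 *	// flow_dv_translate_item_eth() calls
 *	// flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4)
 *	// and leaves the ethertype fields zeroed.
 */
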
7366 /**
7367  * Add VLAN item to matcher and to the value.
7368  *
7369  * @param[in, out] dev_flow
7370  *   Flow descriptor.
7371  * @param[in, out] matcher
7372  *   Flow matcher.
7373  * @param[in, out] key
7374  *   Flow matcher value.
7375  * @param[in] item
7376  *   Flow pattern to translate.
7377  * @param[in] inner
7378  *   Item is inner pattern.
7379  */
7380 static void
7381 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
7382 			    void *matcher, void *key,
7383 			    const struct rte_flow_item *item,
7384 			    int inner, uint32_t group)
7385 {
7386 	const struct rte_flow_item_vlan *vlan_m = item->mask;
7387 	const struct rte_flow_item_vlan *vlan_v = item->spec;
7388 	void *hdrs_m;
7389 	void *hdrs_v;
7390 	uint16_t tci_m;
7391 	uint16_t tci_v;
7392 
7393 	if (inner) {
7394 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7395 					 inner_headers);
7396 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7397 	} else {
7398 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7399 					 outer_headers);
7400 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7401 		/*
7402 		 * This is a workaround: masks are not supported here
7403 		 * and have been pre-validated.
7404 		 */
7405 		if (vlan_v)
7406 			dev_flow->handle->vf_vlan.tag =
7407 					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
7408 	}
7409 	/*
7410 	 * When VLAN item exists in flow, mark packet as tagged,
7411 	 * even if TCI is not specified.
7412 	 */
7413 	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
7414 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7415 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7416 	}
7417 	if (!vlan_v)
7418 		return;
7419 	if (!vlan_m)
7420 		vlan_m = &rte_flow_item_vlan_mask;
7421 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
7422 	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
7423 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
7424 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
7425 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
7426 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
7427 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
7428 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
7429 	/*
7430 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7431 	 * ethertype, and use ip_version field instead.
7432 	 */
7433 	if (vlan_m->inner_type == 0xFFFF) {
7434 		switch (vlan_v->inner_type) {
7435 		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7436 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7437 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7438 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7439 			return;
7440 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7441 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7442 			return;
7443 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7444 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7445 			return;
7446 		default:
7447 			break;
7448 		}
7449 	}
7450 	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
7451 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7452 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7453 		/* Only one vlan_tag bit can be set. */
7454 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7455 		return;
7456 	}
7457 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7458 		 rte_be_to_cpu_16(vlan_m->inner_type));
7459 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
7460 		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
7461 }
7462 
7463 /**
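/*
 * Illustrative example (not part of the driver): the TCI is decomposed by
 * plain shifts; MLX5_SET() truncates each result to its field width, so for
 * TCI 0xE00A:
 *
 *	uint16_t tci = 0xE00A;
 *	// first_vid  = tci & 0x0fff = 0x00A   (VLAN ID 10)
 *	// first_cfi  = (tci >> 12) & 0x1 = 0  (DEI/CFI clear)
 *	// first_prio = tci >> 13 = 7          (highest PCP)
 */
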
7464  * Add IPV4 item to matcher and to the value.
7465  *
7466  * @param[in, out] matcher
7467  *   Flow matcher.
7468  * @param[in, out] key
7469  *   Flow matcher value.
7470  * @param[in] item
7471  *   Flow pattern to translate.
7472  * @param[in] inner
7473  *   Item is inner pattern.
7474  * @param[in] group
7475  *   The group to insert the rule.
7476  */
7477 static void
7478 flow_dv_translate_item_ipv4(void *matcher, void *key,
7479 			    const struct rte_flow_item *item,
7480 			    int inner, uint32_t group)
7481 {
7482 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7483 	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7484 	const struct rte_flow_item_ipv4 nic_mask = {
7485 		.hdr = {
7486 			.src_addr = RTE_BE32(0xffffffff),
7487 			.dst_addr = RTE_BE32(0xffffffff),
7488 			.type_of_service = 0xff,
7489 			.next_proto_id = 0xff,
7490 			.time_to_live = 0xff,
7491 		},
7492 	};
7493 	void *headers_m;
7494 	void *headers_v;
7495 	char *l24_m;
7496 	char *l24_v;
7497 	uint8_t tos;
7498 
7499 	if (inner) {
7500 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7501 					 inner_headers);
7502 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7503 	} else {
7504 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7505 					 outer_headers);
7506 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7507 	}
7508 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7509 	if (!ipv4_v)
7510 		return;
7511 	if (!ipv4_m)
7512 		ipv4_m = &nic_mask;
7513 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7514 			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7515 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7516 			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7517 	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7518 	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7519 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7520 			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
7521 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7522 			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
7523 	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7524 	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7525 	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7526 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7527 		 ipv4_m->hdr.type_of_service);
7528 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7529 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7530 		 ipv4_m->hdr.type_of_service >> 2);
7531 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7532 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7533 		 ipv4_m->hdr.next_proto_id);
7534 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7535 		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7536 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7537 		 ipv4_m->hdr.time_to_live);
7538 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7539 		 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7540 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7541 		 !!(ipv4_m->hdr.fragment_offset));
7542 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7543 		 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7544 }
7545 
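/*
 * Illustrative example (not part of the driver): the 8-bit TOS byte is
 * split into its DSCP and ECN components, again relying on MLX5_SET()
 * field-width truncation:
 *
 *	uint8_t tos = 0xBA;
 *	// ip_dscp = tos >> 2 = 0x2E (DSCP EF)
 *	// ip_ecn  = tos & 0x3 = 0x2 (written as MLX5_SET(..., ip_ecn, tos))
 */
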
7546 /**
7547  * Add IPV6 item to matcher and to the value.
7548  *
7549  * @param[in, out] matcher
7550  *   Flow matcher.
7551  * @param[in, out] key
7552  *   Flow matcher value.
7553  * @param[in] item
7554  *   Flow pattern to translate.
7555  * @param[in] inner
7556  *   Item is inner pattern.
7557  * @param[in] group
7558  *   The group to insert the rule.
7559  */
7560 static void
7561 flow_dv_translate_item_ipv6(void *matcher, void *key,
7562 			    const struct rte_flow_item *item,
7563 			    int inner, uint32_t group)
7564 {
7565 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
7566 	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
7567 	const struct rte_flow_item_ipv6 nic_mask = {
7568 		.hdr = {
7569 			.src_addr =
7570 				"\xff\xff\xff\xff\xff\xff\xff\xff"
7571 				"\xff\xff\xff\xff\xff\xff\xff\xff",
7572 			.dst_addr =
7573 				"\xff\xff\xff\xff\xff\xff\xff\xff"
7574 				"\xff\xff\xff\xff\xff\xff\xff\xff",
7575 			.vtc_flow = RTE_BE32(0xffffffff),
7576 			.proto = 0xff,
7577 			.hop_limits = 0xff,
7578 		},
7579 	};
7580 	void *headers_m;
7581 	void *headers_v;
7582 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7583 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7584 	char *l24_m;
7585 	char *l24_v;
7586 	uint32_t vtc_m;
7587 	uint32_t vtc_v;
7588 	int i;
7589 	int size;
7590 
7591 	if (inner) {
7592 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7593 					 inner_headers);
7594 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7595 	} else {
7596 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7597 					 outer_headers);
7598 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7599 	}
7600 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
7601 	if (!ipv6_v)
7602 		return;
7603 	if (!ipv6_m)
7604 		ipv6_m = &nic_mask;
7605 	size = sizeof(ipv6_m->hdr.dst_addr);
7606 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7607 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7608 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7609 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7610 	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
7611 	for (i = 0; i < size; ++i)
7612 		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
7613 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7614 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
7615 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7616 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
7617 	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
7618 	for (i = 0; i < size; ++i)
7619 		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
7620 	/* TOS. */
7621 	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
7622 	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
7623 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
7624 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
7625 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
7626 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
7627 	/* Label. */
7628 	if (inner) {
7629 		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
7630 			 vtc_m);
7631 		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
7632 			 vtc_v);
7633 	} else {
7634 		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
7635 			 vtc_m);
7636 		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
7637 			 vtc_v);
7638 	}
7639 	/* Protocol. */
7640 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7641 		 ipv6_m->hdr.proto);
7642 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7643 		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
7644 	/* Hop limit. */
7645 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7646 		 ipv6_m->hdr.hop_limits);
7647 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7648 		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
7649 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7650 		 !!(ipv6_m->has_frag_ext));
7651 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7652 		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
7653 }
7654 
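/*
 * Illustrative example (not part of the driver): vtc_flow packs
 * version(4) | traffic class(8) | flow label(20), so for a host-order
 * value of 0x6BA00123 (after rte_be_to_cpu_32()):
 *
 *	uint32_t vtc = 0x6BA00123;
 *	// traffic class = (vtc >> 20) & 0xff = 0xBA
 *	// ip_dscp = vtc >> 22 (truncated to 6 bits) = 0x2E
 *	// ip_ecn  = vtc >> 20 (truncated to 2 bits) = 0x2
 *	// flow label = vtc & 0xfffff = 0x00123, written to the misc
 *	// *_ipv6_flow_label fields (MLX5_SET() truncates to 20 bits).
 */
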
7655 /**
7656  * Add IPV6 fragment extension item to matcher and to the value.
7657  *
7658  * @param[in, out] matcher
7659  *   Flow matcher.
7660  * @param[in, out] key
7661  *   Flow matcher value.
7662  * @param[in] item
7663  *   Flow pattern to translate.
7664  * @param[in] inner
7665  *   Item is inner pattern.
7666  */
7667 static void
7668 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7669 				     const struct rte_flow_item *item,
7670 				     int inner)
7671 {
7672 	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7673 	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7674 	const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7675 		.hdr = {
7676 			.next_header = 0xff,
7677 			.frag_data = RTE_BE16(0xffff),
7678 		},
7679 	};
7680 	void *headers_m;
7681 	void *headers_v;
7682 
7683 	if (inner) {
7684 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7685 					 inner_headers);
7686 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7687 	} else {
7688 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7689 					 outer_headers);
7690 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7691 	}
7692 	/* IPv6 fragment extension item exists, so packet is IP fragment. */
7693 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7694 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
7695 	if (!ipv6_frag_ext_v)
7696 		return;
7697 	if (!ipv6_frag_ext_m)
7698 		ipv6_frag_ext_m = &nic_mask;
7699 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7700 		 ipv6_frag_ext_m->hdr.next_header);
7701 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7702 		 ipv6_frag_ext_v->hdr.next_header &
7703 		 ipv6_frag_ext_m->hdr.next_header);
7704 }
7705 
7706 /**
7707  * Add TCP item to matcher and to the value.
7708  *
7709  * @param[in, out] matcher
7710  *   Flow matcher.
7711  * @param[in, out] key
7712  *   Flow matcher value.
7713  * @param[in] item
7714  *   Flow pattern to translate.
7715  * @param[in] inner
7716  *   Item is inner pattern.
7717  */
7718 static void
7719 flow_dv_translate_item_tcp(void *matcher, void *key,
7720 			   const struct rte_flow_item *item,
7721 			   int inner)
7722 {
7723 	const struct rte_flow_item_tcp *tcp_m = item->mask;
7724 	const struct rte_flow_item_tcp *tcp_v = item->spec;
7725 	void *headers_m;
7726 	void *headers_v;
7727 
7728 	if (inner) {
7729 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7730 					 inner_headers);
7731 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7732 	} else {
7733 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7734 					 outer_headers);
7735 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7736 	}
7737 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7738 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
7739 	if (!tcp_v)
7740 		return;
7741 	if (!tcp_m)
7742 		tcp_m = &rte_flow_item_tcp_mask;
7743 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
7744 		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
7745 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
7746 		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
7747 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
7748 		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
7749 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
7750 		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
7751 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
7752 		 tcp_m->hdr.tcp_flags);
7753 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
7754 		 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
7755 }
7756 
7757 /**
7758  * Add UDP item to matcher and to the value.
7759  *
7760  * @param[in, out] matcher
7761  *   Flow matcher.
7762  * @param[in, out] key
7763  *   Flow matcher value.
7764  * @param[in] item
7765  *   Flow pattern to translate.
7766  * @param[in] inner
7767  *   Item is inner pattern.
7768  */
7769 static void
7770 flow_dv_translate_item_udp(void *matcher, void *key,
7771 			   const struct rte_flow_item *item,
7772 			   int inner)
7773 {
7774 	const struct rte_flow_item_udp *udp_m = item->mask;
7775 	const struct rte_flow_item_udp *udp_v = item->spec;
7776 	void *headers_m;
7777 	void *headers_v;
7778 
7779 	if (inner) {
7780 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7781 					 inner_headers);
7782 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7783 	} else {
7784 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7785 					 outer_headers);
7786 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7787 	}
7788 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7789 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
7790 	if (!udp_v)
7791 		return;
7792 	if (!udp_m)
7793 		udp_m = &rte_flow_item_udp_mask;
7794 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
7795 		 rte_be_to_cpu_16(udp_m->hdr.src_port));
7796 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
7797 		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
7798 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
7799 		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
7800 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7801 		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
7802 }
7803 
7804 /**
7805  * Add GRE optional Key item to matcher and to the value.
7806  *
7807  * @param[in, out] matcher
7808  *   Flow matcher.
7809  * @param[in, out] key
7810  *   Flow matcher value.
7811  * @param[in] item
7812  *   Flow pattern to translate.
7813  * @param[in] inner
7814  *   Item is inner pattern.
7815  */
7816 static void
7817 flow_dv_translate_item_gre_key(void *matcher, void *key,
7818 				   const struct rte_flow_item *item)
7819 {
7820 	const rte_be32_t *key_m = item->mask;
7821 	const rte_be32_t *key_v = item->spec;
7822 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7823 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7824 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7825 
7826 	/* GRE K bit must be on and should already be validated */
7827 	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
7828 	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
7829 	if (!key_v)
7830 		return;
7831 	if (!key_m)
7832 		key_m = &gre_key_default_mask;
7833 	MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
7834 		 rte_be_to_cpu_32(*key_m) >> 8);
7835 	MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
7836 		 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
7837 	MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
7838 		 rte_be_to_cpu_32(*key_m) & 0xFF);
7839 	MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
7840 		 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
7841 }
7842 
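/*
 * Illustrative example (not part of the driver): the 32-bit GRE key is
 * split between two PRM fields, the upper 24 bits and the lower 8 bits:
 *
 *	rte_be32_t key = RTE_BE32(0x12345678);
 *	// gre_key_h = 0x12345678 >> 8   = 0x123456
 *	// gre_key_l = 0x12345678 & 0xFF = 0x78
 */
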
7843 /**
7844  * Add GRE item to matcher and to the value.
7845  *
7846  * @param[in, out] matcher
7847  *   Flow matcher.
7848  * @param[in, out] key
7849  *   Flow matcher value.
7850  * @param[in] item
7851  *   Flow pattern to translate.
7852  * @param[in] inner
7853  *   Item is inner pattern.
7854  */
7855 static void
7856 flow_dv_translate_item_gre(void *matcher, void *key,
7857 			   const struct rte_flow_item *item,
7858 			   int inner)
7859 {
7860 	const struct rte_flow_item_gre *gre_m = item->mask;
7861 	const struct rte_flow_item_gre *gre_v = item->spec;
7862 	void *headers_m;
7863 	void *headers_v;
7864 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7865 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7866 	struct {
7867 		union {
7868 			__extension__
7869 			struct {
7870 				uint16_t version:3;
7871 				uint16_t rsvd0:9;
7872 				uint16_t s_present:1;
7873 				uint16_t k_present:1;
7874 				uint16_t rsvd_bit1:1;
7875 				uint16_t c_present:1;
7876 			};
7877 			uint16_t value;
7878 		};
7879 	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
7880 
7881 	if (inner) {
7882 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7883 					 inner_headers);
7884 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7885 	} else {
7886 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7887 					 outer_headers);
7888 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7889 	}
7890 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7891 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
7892 	if (!gre_v)
7893 		return;
7894 	if (!gre_m)
7895 		gre_m = &rte_flow_item_gre_mask;
7896 	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
7897 		 rte_be_to_cpu_16(gre_m->protocol));
7898 	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7899 		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
7900 	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
7901 	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
7902 	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
7903 		 gre_crks_rsvd0_ver_m.c_present);
7904 	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
7905 		 gre_crks_rsvd0_ver_v.c_present &
7906 		 gre_crks_rsvd0_ver_m.c_present);
7907 	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
7908 		 gre_crks_rsvd0_ver_m.k_present);
7909 	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
7910 		 gre_crks_rsvd0_ver_v.k_present &
7911 		 gre_crks_rsvd0_ver_m.k_present);
7912 	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
7913 		 gre_crks_rsvd0_ver_m.s_present);
7914 	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
7915 		 gre_crks_rsvd0_ver_v.s_present &
7916 		 gre_crks_rsvd0_ver_m.s_present);
7917 }
7918 
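/*
 * Illustrative note (not part of the driver): on little-endian hosts the
 * byte-swapped c_rsvd0_ver maps onto the bit-field union above as
 * C(bit 15) | rsvd(bit 14) | K(bit 13) | S(bit 12) | rsvd0(11..3) |
 * ver(2..0). For instance, the NVGRE constants used below decode as:
 *
 *	// spec 0x2000 -> k_present = 1 (GRE key field present)
 *	// mask 0xB000 -> match on the C, K and S bits only
 */
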
7919 /**
7920  * Add NVGRE item to matcher and to the value.
7921  *
7922  * @param[in, out] matcher
7923  *   Flow matcher.
7924  * @param[in, out] key
7925  *   Flow matcher value.
7926  * @param[in] item
7927  *   Flow pattern to translate.
7928  * @param[in] inner
7929  *   Item is inner pattern.
7930  */
7931 static void
7932 flow_dv_translate_item_nvgre(void *matcher, void *key,
7933 			     const struct rte_flow_item *item,
7934 			     int inner)
7935 {
7936 	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
7937 	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
7938 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7939 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7940 	const char *tni_flow_id_m;
7941 	const char *tni_flow_id_v;
7942 	char *gre_key_m;
7943 	char *gre_key_v;
7944 	int size;
7945 	int i;
7946 
7947 	/* For NVGRE, GRE header fields must be set with defined values. */
7948 	const struct rte_flow_item_gre gre_spec = {
7949 		.c_rsvd0_ver = RTE_BE16(0x2000),
7950 		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7951 	};
7952 	const struct rte_flow_item_gre gre_mask = {
7953 		.c_rsvd0_ver = RTE_BE16(0xB000),
7954 		.protocol = RTE_BE16(UINT16_MAX),
7955 	};
7956 	const struct rte_flow_item gre_item = {
7957 		.spec = &gre_spec,
7958 		.mask = &gre_mask,
7959 		.last = NULL,
7960 	};
7961 	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7962 	if (!nvgre_v)
7963 		return;
7964 	if (!nvgre_m)
7965 		nvgre_m = &rte_flow_item_nvgre_mask;
7966 	tni_flow_id_m = (const char *)nvgre_m->tni;
7967 	tni_flow_id_v = (const char *)nvgre_v->tni;
7968 	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7969 	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7970 	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7971 	memcpy(gre_key_m, tni_flow_id_m, size);
7972 	for (i = 0; i < size; ++i)
7973 		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7974 }
7975 
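/*
 * Illustrative example (not part of the driver): NVGRE reuses the GRE key
 * field, with the TNI in the upper 24 bits and flow_id in the lower 8, so
 * both members are copied byte-wise starting at gre_key_h:
 *
 *	uint8_t tni[3] = { 0x12, 0x34, 0x56 };
 *	uint8_t flow_id = 0x78;
 *	// equivalent GRE key: 0x12345678
 */
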
7976 /**
7977  * Add VXLAN item to matcher and to the value.
7978  *
7979  * @param[in, out] matcher
7980  *   Flow matcher.
7981  * @param[in, out] key
7982  *   Flow matcher value.
7983  * @param[in] item
7984  *   Flow pattern to translate.
7985  * @param[in] inner
7986  *   Item is inner pattern.
7987  */
7988 static void
7989 flow_dv_translate_item_vxlan(void *matcher, void *key,
7990 			     const struct rte_flow_item *item,
7991 			     int inner)
7992 {
7993 	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7994 	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7995 	void *headers_m;
7996 	void *headers_v;
7997 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7998 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7999 	char *vni_m;
8000 	char *vni_v;
8001 	uint16_t dport;
8002 	int size;
8003 	int i;
8004 
8005 	if (inner) {
8006 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8007 					 inner_headers);
8008 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8009 	} else {
8010 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8011 					 outer_headers);
8012 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8013 	}
8014 	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8015 		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8016 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8017 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8018 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8019 	}
8020 	if (!vxlan_v)
8021 		return;
8022 	if (!vxlan_m)
8023 		vxlan_m = &rte_flow_item_vxlan_mask;
8024 	size = sizeof(vxlan_m->vni);
8025 	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8026 	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8027 	memcpy(vni_m, vxlan_m->vni, size);
8028 	for (i = 0; i < size; ++i)
8029 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8030 }
8031 
8032 /**
8033  * Add VXLAN-GPE item to matcher and to the value.
8034  *
8035  * @param[in, out] matcher
8036  *   Flow matcher.
8037  * @param[in, out] key
8038  *   Flow matcher value.
8039  * @param[in] item
8040  *   Flow pattern to translate.
8041  * @param[in] inner
8042  *   Item is inner pattern.
8043  */
8044 
8045 static void
8046 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8047 				 const struct rte_flow_item *item, int inner)
8048 {
8049 	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8050 	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8051 	void *headers_m;
8052 	void *headers_v;
8053 	void *misc_m =
8054 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8055 	void *misc_v =
8056 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8057 	char *vni_m;
8058 	char *vni_v;
8059 	uint16_t dport;
8060 	int size;
8061 	int i;
8062 	uint8_t flags_m = 0xff;
8063 	uint8_t flags_v = 0xc;
8064 
8065 	if (inner) {
8066 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8067 					 inner_headers);
8068 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8069 	} else {
8070 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8071 					 outer_headers);
8072 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8073 	}
8074 	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8075 		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8076 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8077 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8078 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8079 	}
8080 	if (!vxlan_v)
8081 		return;
8082 	if (!vxlan_m)
8083 		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8084 	size = sizeof(vxlan_m->vni);
8085 	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8086 	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8087 	memcpy(vni_m, vxlan_m->vni, size);
8088 	for (i = 0; i < size; ++i)
8089 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8090 	if (vxlan_m->flags) {
8091 		flags_m = vxlan_m->flags;
8092 		flags_v = vxlan_v->flags;
8093 	}
8094 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8095 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8096 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8097 		 vxlan_m->protocol);
8098 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8099 		 vxlan_v->protocol);
8100 }
8101 
8102 /**
8103  * Add Geneve item to matcher and to the value.
8104  *
8105  * @param[in, out] matcher
8106  *   Flow matcher.
8107  * @param[in, out] key
8108  *   Flow matcher value.
8109  * @param[in] item
8110  *   Flow pattern to translate.
8111  * @param[in] inner
8112  *   Item is inner pattern.
8113  */
8114 
8115 static void
8116 flow_dv_translate_item_geneve(void *matcher, void *key,
8117 			      const struct rte_flow_item *item, int inner)
8118 {
8119 	const struct rte_flow_item_geneve *geneve_m = item->mask;
8120 	const struct rte_flow_item_geneve *geneve_v = item->spec;
8121 	void *headers_m;
8122 	void *headers_v;
8123 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8124 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8125 	uint16_t dport;
8126 	uint16_t gbhdr_m;
8127 	uint16_t gbhdr_v;
8128 	char *vni_m;
8129 	char *vni_v;
8130 	size_t size, i;
8131 
8132 	if (inner) {
8133 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8134 					 inner_headers);
8135 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8136 	} else {
8137 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8138 					 outer_headers);
8139 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8140 	}
8141 	dport = MLX5_UDP_PORT_GENEVE;
8142 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8143 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8144 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8145 	}
8146 	if (!geneve_v)
8147 		return;
8148 	if (!geneve_m)
8149 		geneve_m = &rte_flow_item_geneve_mask;
8150 	size = sizeof(geneve_m->vni);
8151 	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8152 	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8153 	memcpy(vni_m, geneve_m->vni, size);
8154 	for (i = 0; i < size; ++i)
8155 		vni_v[i] = vni_m[i] & geneve_v->vni[i];
8156 	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8157 		 rte_be_to_cpu_16(geneve_m->protocol));
8158 	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8159 		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
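	/*
	 * The OAM flag and option length reside in the first 16 bits of the
	 * GENEVE header (ver_opt_len_o_c_rsvd0), match the sub-fields
	 * separately.
	 */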
8160 	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8161 	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8162 	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8163 		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8164 	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8165 		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8166 	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8167 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8168 	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8169 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8170 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8171 }
8172 
8173 /**
8174  * Create Geneve TLV option resource.
8175  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   Flow pattern holding the GENEVE TLV option to register.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
8189 int
8190 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8191 					     const struct rte_flow_item *item,
8192 					     struct rte_flow_error *error)
8193 {
8194 	struct mlx5_priv *priv = dev->data->dev_private;
8195 	struct mlx5_dev_ctx_shared *sh = priv->sh;
8196 	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8197 			sh->geneve_tlv_option_resource;
8198 	struct mlx5_devx_obj *obj;
8199 	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8200 	int ret = 0;
8201 
8202 	if (!geneve_opt_v)
8203 		return -1;
8204 	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8205 	if (geneve_opt_resource != NULL) {
8206 		if (geneve_opt_resource->option_class ==
8207 			geneve_opt_v->option_class &&
8208 			geneve_opt_resource->option_type ==
8209 			geneve_opt_v->option_type &&
8210 			geneve_opt_resource->length ==
8211 			geneve_opt_v->option_len) {
			/* We already have GENEVE TLV option obj allocated. */
8213 			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8214 					   __ATOMIC_RELAXED);
8215 		} else {
8216 			ret = rte_flow_error_set(error, ENOMEM,
8217 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8218 				"Only one GENEVE TLV option supported");
8219 			goto exit;
8220 		}
8221 	} else {
8222 		/* Create a GENEVE TLV object and resource. */
8223 		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8224 				geneve_opt_v->option_class,
8225 				geneve_opt_v->option_type,
8226 				geneve_opt_v->option_len);
8227 		if (!obj) {
8228 			ret = rte_flow_error_set(error, ENODATA,
8229 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8230 				"Failed to create GENEVE TLV Devx object");
8231 			goto exit;
8232 		}
8233 		sh->geneve_tlv_option_resource =
8234 				mlx5_malloc(MLX5_MEM_ZERO,
8235 						sizeof(*geneve_opt_resource),
8236 						0, SOCKET_ID_ANY);
8237 		if (!sh->geneve_tlv_option_resource) {
8238 			claim_zero(mlx5_devx_cmd_destroy(obj));
8239 			ret = rte_flow_error_set(error, ENOMEM,
8240 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8241 				"GENEVE TLV object memory allocation failed");
8242 			goto exit;
8243 		}
8244 		geneve_opt_resource = sh->geneve_tlv_option_resource;
8245 		geneve_opt_resource->obj = obj;
8246 		geneve_opt_resource->option_class = geneve_opt_v->option_class;
8247 		geneve_opt_resource->option_type = geneve_opt_v->option_type;
8248 		geneve_opt_resource->length = geneve_opt_v->option_len;
8249 		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
8250 				__ATOMIC_RELAXED);
8251 	}
8252 exit:
8253 	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8254 	return ret;
8255 }
8256 
8257 /**
8258  * Add Geneve TLV option item to matcher.
8259  *
8260  * @param[in, out] dev
8261  *   Pointer to rte_eth_dev structure.
8262  * @param[in, out] matcher
8263  *   Flow matcher.
8264  * @param[in, out] key
8265  *   Flow matcher value.
8266  * @param[in] item
8267  *   Flow pattern to translate.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
8271 static int
8272 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8273 				  void *key, const struct rte_flow_item *item,
8274 				  struct rte_flow_error *error)
8275 {
8276 	const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8277 	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8278 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8279 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8280 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8281 			misc_parameters_3);
8282 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8283 	rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8284 	int ret = 0;
8285 
8286 	if (!geneve_opt_v)
8287 		return -1;
8288 	if (!geneve_opt_m)
8289 		geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8290 	ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8291 							   error);
8292 	if (ret) {
8293 		DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8294 		return ret;
8295 	}
	/*
	 * Set the option length in the GENEVE header if not requested.
	 * The GENEVE TLV option length is expressed by the option length field
	 * in the GENEVE header, in 4-byte units including the option header,
	 * hence the "option_len + 1" value below.
	 * If the option length was not requested but the GENEVE TLV option
	 * item is present, set the option length field implicitly.
	 */
8303 	if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8304 		MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8305 			 MLX5_GENEVE_OPTLEN_MASK);
8306 		MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8307 			 geneve_opt_v->option_len + 1);
8308 	}
8309 	/* Set the data. */
8310 	if (geneve_opt_v->data) {
8311 		memcpy(&opt_data_key, geneve_opt_v->data,
8312 			RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8313 				sizeof(opt_data_key)));
8314 		MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8315 				sizeof(opt_data_key));
8316 		memcpy(&opt_data_mask, geneve_opt_m->data,
8317 			RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8318 				sizeof(opt_data_mask)));
8319 		MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8320 				sizeof(opt_data_mask));
8321 		MLX5_SET(fte_match_set_misc3, misc3_m,
8322 				geneve_tlv_option_0_data,
8323 				rte_be_to_cpu_32(opt_data_mask));
8324 		MLX5_SET(fte_match_set_misc3, misc3_v,
8325 				geneve_tlv_option_0_data,
8326 			rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8327 	}
8328 	return ret;
8329 }
8330 
8331 /**
8332  * Add MPLS item to matcher and to the value.
8333  *
8334  * @param[in, out] matcher
8335  *   Flow matcher.
8336  * @param[in, out] key
8337  *   Flow matcher value.
8338  * @param[in] item
8339  *   Flow pattern to translate.
8340  * @param[in] prev_layer
8341  *   The protocol layer indicated in previous item.
8342  * @param[in] inner
8343  *   Item is inner pattern.
8344  */
8345 static void
8346 flow_dv_translate_item_mpls(void *matcher, void *key,
8347 			    const struct rte_flow_item *item,
8348 			    uint64_t prev_layer,
8349 			    int inner)
8350 {
8351 	const uint32_t *in_mpls_m = item->mask;
8352 	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = NULL;
	uint32_t *out_mpls_v = NULL;
8355 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8356 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8357 	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8358 				     misc_parameters_2);
8359 	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8360 	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8361 	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8362 
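	/*
	 * Match the encapsulating protocol implicitly: UDP destination port
	 * for MPLS-over-UDP, GRE protocol for MPLS-over-GRE, or the IP
	 * protocol field otherwise.
	 */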
8363 	switch (prev_layer) {
8364 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8365 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8366 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8367 			 MLX5_UDP_PORT_MPLS);
8368 		break;
8369 	case MLX5_FLOW_LAYER_GRE:
8370 		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8371 		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8372 			 RTE_ETHER_TYPE_MPLS);
8373 		break;
8374 	default:
8375 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8376 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8377 			 IPPROTO_MPLS);
8378 		break;
8379 	}
8380 	if (!in_mpls_v)
8381 		return;
8382 	if (!in_mpls_m)
8383 		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8384 	switch (prev_layer) {
8385 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8386 		out_mpls_m =
8387 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8388 						 outer_first_mpls_over_udp);
8389 		out_mpls_v =
8390 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8391 						 outer_first_mpls_over_udp);
8392 		break;
8393 	case MLX5_FLOW_LAYER_GRE:
8394 		out_mpls_m =
8395 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8396 						 outer_first_mpls_over_gre);
8397 		out_mpls_v =
8398 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8399 						 outer_first_mpls_over_gre);
8400 		break;
8401 	default:
8402 		/* Inner MPLS not over GRE is not supported. */
8403 		if (!inner) {
8404 			out_mpls_m =
8405 				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8406 							 misc2_m,
8407 							 outer_first_mpls);
8408 			out_mpls_v =
8409 				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8410 							 misc2_v,
8411 							 outer_first_mpls);
8412 		}
8413 		break;
8414 	}
8415 	if (out_mpls_m && out_mpls_v) {
8416 		*out_mpls_m = *in_mpls_m;
8417 		*out_mpls_v = *in_mpls_v & *in_mpls_m;
8418 	}
8419 }
8420 
8421 /**
8422  * Add metadata register item to matcher
8423  *
8424  * @param[in, out] matcher
8425  *   Flow matcher.
8426  * @param[in, out] key
8427  *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register.
 * @param[in] data
 *   Register value.
 * @param[in] mask
 *   Register mask.
8434  */
8435 static void
8436 flow_dv_match_meta_reg(void *matcher, void *key,
8437 		       enum modify_reg reg_type,
8438 		       uint32_t data, uint32_t mask)
8439 {
8440 	void *misc2_m =
8441 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8442 	void *misc2_v =
8443 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8444 	uint32_t temp;
8445 
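	/* Bits of the value outside the mask are meaningless, clear them. */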
8446 	data &= mask;
8447 	switch (reg_type) {
8448 	case REG_A:
8449 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8450 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8451 		break;
8452 	case REG_B:
8453 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8454 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8455 		break;
8456 	case REG_C_0:
8457 		/*
8458 		 * The metadata register C0 field might be divided into
8459 		 * source vport index and META item value, we should set
8460 		 * this field according to specified mask, not as whole one.
8461 		 */
8462 		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8463 		temp |= mask;
8464 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8465 		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8466 		temp &= ~mask;
8467 		temp |= data;
8468 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8469 		break;
8470 	case REG_C_1:
8471 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8472 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8473 		break;
8474 	case REG_C_2:
8475 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8476 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8477 		break;
8478 	case REG_C_3:
8479 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8480 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8481 		break;
8482 	case REG_C_4:
8483 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8484 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8485 		break;
8486 	case REG_C_5:
8487 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8488 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8489 		break;
8490 	case REG_C_6:
8491 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8492 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8493 		break;
8494 	case REG_C_7:
8495 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8496 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8497 		break;
8498 	default:
8499 		MLX5_ASSERT(false);
8500 		break;
8501 	}
8502 }
8503 
8504 /**
8505  * Add MARK item to matcher
8506  *
8507  * @param[in] dev
8508  *   The device to configure through.
8509  * @param[in, out] matcher
8510  *   Flow matcher.
8511  * @param[in, out] key
8512  *   Flow matcher value.
8513  * @param[in] item
8514  *   Flow pattern to translate.
8515  */
8516 static void
8517 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8518 			    void *matcher, void *key,
8519 			    const struct rte_flow_item *item)
8520 {
8521 	struct mlx5_priv *priv = dev->data->dev_private;
8522 	const struct rte_flow_item_mark *mark;
8523 	uint32_t value;
8524 	uint32_t mask;
8525 
8526 	mark = item->mask ? (const void *)item->mask :
8527 			    &rte_flow_item_mark_mask;
8528 	mask = mark->id & priv->sh->dv_mark_mask;
8529 	mark = (const void *)item->spec;
8530 	MLX5_ASSERT(mark);
8531 	value = mark->id & priv->sh->dv_mark_mask & mask;
8532 	if (mask) {
8533 		enum modify_reg reg;
8534 
8535 		/* Get the metadata register index for the mark. */
8536 		reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8537 		MLX5_ASSERT(reg > 0);
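		/*
		 * REG_C_0 may be shared with vport metadata, shift the mark
		 * value and mask into the sub-field reserved for it.
		 */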
8538 		if (reg == REG_C_0) {
8539 			struct mlx5_priv *priv = dev->data->dev_private;
8540 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8541 			uint32_t shl_c0 = rte_bsf32(msk_c0);
8542 
8543 			mask &= msk_c0;
8544 			mask <<= shl_c0;
8545 			value <<= shl_c0;
8546 		}
8547 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8548 	}
8549 }
8550 
8551 /**
8552  * Add META item to matcher
8553  *
8554  * @param[in] dev
 *   The device to configure through.
8556  * @param[in, out] matcher
8557  *   Flow matcher.
8558  * @param[in, out] key
8559  *   Flow matcher value.
8560  * @param[in] attr
8561  *   Attributes of flow that includes this item.
8562  * @param[in] item
8563  *   Flow pattern to translate.
8564  */
8565 static void
8566 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8567 			    void *matcher, void *key,
8568 			    const struct rte_flow_attr *attr,
8569 			    const struct rte_flow_item *item)
8570 {
8571 	const struct rte_flow_item_meta *meta_m;
8572 	const struct rte_flow_item_meta *meta_v;
8573 
8574 	meta_m = (const void *)item->mask;
8575 	if (!meta_m)
8576 		meta_m = &rte_flow_item_meta_mask;
8577 	meta_v = (const void *)item->spec;
8578 	if (meta_v) {
8579 		int reg;
8580 		uint32_t value = meta_v->data;
8581 		uint32_t mask = meta_m->data;
8582 
8583 		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8584 		if (reg < 0)
8585 			return;
8586 		MLX5_ASSERT(reg != REG_NON);
		/*
		 * In datapath code there are no endianness
		 * conversions for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
8592 		value = rte_cpu_to_be_32(value);
8593 		mask = rte_cpu_to_be_32(mask);
8594 		if (reg == REG_C_0) {
8595 			struct mlx5_priv *priv = dev->data->dev_private;
8596 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8597 			uint32_t shl_c0 = rte_bsf32(msk_c0);
8598 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8599 			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8600 
8601 			value >>= shr_c0;
8602 			mask >>= shr_c0;
8603 #endif
8604 			value <<= shl_c0;
8605 			mask <<= shl_c0;
8606 			MLX5_ASSERT(msk_c0);
8607 			MLX5_ASSERT(!(~msk_c0 & mask));
8608 		}
8609 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8610 	}
8611 }
8612 
8613 /**
8614  * Add vport metadata Reg C0 item to matcher
8615  *
8616  * @param[in, out] matcher
8617  *   Flow matcher.
8618  * @param[in, out] key
8619  *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Register mask.
8622  */
8623 static void
8624 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8625 				  uint32_t value, uint32_t mask)
8626 {
8627 	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8628 }
8629 
8630 /**
8631  * Add tag item to matcher
8632  *
8633  * @param[in] dev
 *   The device to configure through.
8635  * @param[in, out] matcher
8636  *   Flow matcher.
8637  * @param[in, out] key
8638  *   Flow matcher value.
8639  * @param[in] item
8640  *   Flow pattern to translate.
8641  */
8642 static void
8643 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8644 				void *matcher, void *key,
8645 				const struct rte_flow_item *item)
8646 {
8647 	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8648 	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8649 	uint32_t mask, value;
8650 
8651 	MLX5_ASSERT(tag_v);
8652 	value = tag_v->data;
8653 	mask = tag_m ? tag_m->data : UINT32_MAX;
8654 	if (tag_v->id == REG_C_0) {
8655 		struct mlx5_priv *priv = dev->data->dev_private;
8656 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8657 		uint32_t shl_c0 = rte_bsf32(msk_c0);
8658 
8659 		mask &= msk_c0;
8660 		mask <<= shl_c0;
8661 		value <<= shl_c0;
8662 	}
8663 	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8664 }
8665 
8666 /**
8667  * Add TAG item to matcher
8668  *
8669  * @param[in] dev
 *   The device to configure through.
8671  * @param[in, out] matcher
8672  *   Flow matcher.
8673  * @param[in, out] key
8674  *   Flow matcher value.
8675  * @param[in] item
8676  *   Flow pattern to translate.
8677  */
8678 static void
8679 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
8680 			   void *matcher, void *key,
8681 			   const struct rte_flow_item *item)
8682 {
8683 	const struct rte_flow_item_tag *tag_v = item->spec;
8684 	const struct rte_flow_item_tag *tag_m = item->mask;
8685 	enum modify_reg reg;
8686 
8687 	MLX5_ASSERT(tag_v);
8688 	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
8689 	/* Get the metadata register index for the tag. */
8690 	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
8691 	MLX5_ASSERT(reg > 0);
8692 	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
8693 }
8694 
8695 /**
8696  * Add source vport match to the specified matcher.
8697  *
8698  * @param[in, out] matcher
8699  *   Flow matcher.
8700  * @param[in, out] key
8701  *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask.
8706  */
8707 static void
8708 flow_dv_translate_item_source_vport(void *matcher, void *key,
8709 				    int16_t port, uint16_t mask)
8710 {
8711 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8712 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8713 
8714 	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
8715 	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
8716 }
8717 
8718 /**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
8731  *
8732  * @return
8733  *   0 on success, a negative errno value otherwise.
8734  */
8735 static int
8736 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
8737 			       void *key, const struct rte_flow_item *item,
8738 			       const struct rte_flow_attr *attr)
8739 {
8740 	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
8741 	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
8742 	struct mlx5_priv *priv;
8743 	uint16_t mask, id;
8744 
8745 	mask = pid_m ? pid_m->id : 0xffff;
8746 	id = pid_v ? pid_v->id : dev->data->port_id;
8747 	priv = mlx5_port_to_eswitch_info(id, item == NULL);
8748 	if (!priv)
8749 		return -rte_errno;
8750 	/*
8751 	 * Translate to vport field or to metadata, depending on mode.
8752 	 * Kernel can use either misc.source_port or half of C0 metadata
8753 	 * register.
8754 	 */
8755 	if (priv->vport_meta_mask) {
8756 		/*
8757 		 * Provide the hint for SW steering library
8758 		 * to insert the flow into ingress domain and
8759 		 * save the extra vport match.
8760 		 */
8761 		if (mask == 0xffff && priv->vport_id == 0xffff &&
8762 		    priv->pf_bond < 0 && attr->transfer)
8763 			flow_dv_translate_item_source_vport
8764 				(matcher, key, priv->vport_id, mask);
8765 		/*
8766 		 * We should always set the vport metadata register,
8767 		 * otherwise the SW steering library can drop
8768 		 * the rule if wire vport metadata value is not zero,
8769 		 * it depends on kernel configuration.
8770 		 */
8771 		flow_dv_translate_item_meta_vport(matcher, key,
8772 						  priv->vport_meta_tag,
8773 						  priv->vport_meta_mask);
8774 	} else {
8775 		flow_dv_translate_item_source_vport(matcher, key,
8776 						    priv->vport_id, mask);
8777 	}
8778 	return 0;
8779 }
8780 
8781 /**
8782  * Add ICMP6 item to matcher and to the value.
8783  *
8784  * @param[in, out] matcher
8785  *   Flow matcher.
8786  * @param[in, out] key
8787  *   Flow matcher value.
8788  * @param[in] item
8789  *   Flow pattern to translate.
8790  * @param[in] inner
8791  *   Item is inner pattern.
8792  */
8793 static void
8794 flow_dv_translate_item_icmp6(void *matcher, void *key,
8795 			      const struct rte_flow_item *item,
8796 			      int inner)
8797 {
8798 	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
8799 	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
8800 	void *headers_m;
8801 	void *headers_v;
8802 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8803 				     misc_parameters_3);
8804 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8805 	if (inner) {
8806 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8807 					 inner_headers);
8808 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8809 	} else {
8810 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8811 					 outer_headers);
8812 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8813 	}
8814 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8815 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
8816 	if (!icmp6_v)
8817 		return;
8818 	if (!icmp6_m)
8819 		icmp6_m = &rte_flow_item_icmp6_mask;
8820 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
8821 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
8822 		 icmp6_v->type & icmp6_m->type);
8823 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
8824 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
8825 		 icmp6_v->code & icmp6_m->code);
8826 }
8827 
8828 /**
8829  * Add ICMP item to matcher and to the value.
8830  *
8831  * @param[in, out] matcher
8832  *   Flow matcher.
8833  * @param[in, out] key
8834  *   Flow matcher value.
8835  * @param[in] item
8836  *   Flow pattern to translate.
8837  * @param[in] inner
8838  *   Item is inner pattern.
8839  */
8840 static void
8841 flow_dv_translate_item_icmp(void *matcher, void *key,
8842 			    const struct rte_flow_item *item,
8843 			    int inner)
8844 {
8845 	const struct rte_flow_item_icmp *icmp_m = item->mask;
8846 	const struct rte_flow_item_icmp *icmp_v = item->spec;
8847 	uint32_t icmp_header_data_m = 0;
8848 	uint32_t icmp_header_data_v = 0;
8849 	void *headers_m;
8850 	void *headers_v;
8851 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8852 				     misc_parameters_3);
8853 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8854 	if (inner) {
8855 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8856 					 inner_headers);
8857 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8858 	} else {
8859 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8860 					 outer_headers);
8861 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8862 	}
8863 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8864 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
8865 	if (!icmp_v)
8866 		return;
8867 	if (!icmp_m)
8868 		icmp_m = &rte_flow_item_icmp_mask;
8869 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
8870 		 icmp_m->hdr.icmp_type);
8871 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
8872 		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
8873 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
8874 		 icmp_m->hdr.icmp_code);
8875 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
8876 		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
8877 	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
8878 	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
8879 	if (icmp_header_data_m) {
8880 		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
8881 		icmp_header_data_v |=
8882 			 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
8883 		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
8884 			 icmp_header_data_m);
8885 		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
8886 			 icmp_header_data_v & icmp_header_data_m);
8887 	}
8888 }
8889 
8890 /**
8891  * Add GTP item to matcher and to the value.
8892  *
8893  * @param[in, out] matcher
8894  *   Flow matcher.
8895  * @param[in, out] key
8896  *   Flow matcher value.
8897  * @param[in] item
8898  *   Flow pattern to translate.
8899  * @param[in] inner
8900  *   Item is inner pattern.
8901  */
8902 static void
8903 flow_dv_translate_item_gtp(void *matcher, void *key,
8904 			   const struct rte_flow_item *item, int inner)
8905 {
8906 	const struct rte_flow_item_gtp *gtp_m = item->mask;
8907 	const struct rte_flow_item_gtp *gtp_v = item->spec;
8908 	void *headers_m;
8909 	void *headers_v;
8910 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8911 				     misc_parameters_3);
8912 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8913 	uint16_t dport = RTE_GTPU_UDP_PORT;
8914 
8915 	if (inner) {
8916 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8917 					 inner_headers);
8918 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8919 	} else {
8920 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8921 					 outer_headers);
8922 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8923 	}
8924 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8925 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8926 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8927 	}
8928 	if (!gtp_v)
8929 		return;
8930 	if (!gtp_m)
8931 		gtp_m = &rte_flow_item_gtp_mask;
8932 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
8933 		 gtp_m->v_pt_rsv_flags);
8934 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
8935 		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
8936 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
8937 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
8938 		 gtp_v->msg_type & gtp_m->msg_type);
8939 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
8940 		 rte_be_to_cpu_32(gtp_m->teid));
8941 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
8942 		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
8943 }
8944 
8945 /**
8946  * Add GTP PSC item to matcher.
8947  *
8948  * @param[in, out] matcher
8949  *   Flow matcher.
8950  * @param[in, out] key
8951  *   Flow matcher value.
8952  * @param[in] item
8953  *   Flow pattern to translate.
 *
 * @return
 *   0 on success.
 */
8955 static int
8956 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
8957 			       const struct rte_flow_item *item)
8958 {
8959 	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
8960 	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
8961 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8962 			misc_parameters_3);
8963 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8964 	union {
8965 		uint32_t w32;
8966 		struct {
8967 			uint16_t seq_num;
8968 			uint8_t npdu_num;
8969 			uint8_t next_ext_header_type;
8970 		};
8971 	} dw_2;
8972 	uint8_t gtp_flags;
8973 
8974 	/* Always set E-flag match on one, regardless of GTP item settings. */
8975 	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
8976 	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8977 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
8978 	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
8979 	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8980 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Set next extension header type to 0x85 (PDU session container). */
8982 	dw_2.seq_num = 0;
8983 	dw_2.npdu_num = 0;
8984 	dw_2.next_ext_header_type = 0xff;
8985 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
8986 		 rte_cpu_to_be_32(dw_2.w32));
8987 	dw_2.seq_num = 0;
8988 	dw_2.npdu_num = 0;
8989 	dw_2.next_ext_header_type = 0x85;
8990 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
8991 		 rte_cpu_to_be_32(dw_2.w32));
8992 	if (gtp_psc_v) {
8993 		union {
8994 			uint32_t w32;
8995 			struct {
8996 				uint8_t len;
8997 				uint8_t type_flags;
8998 				uint8_t qfi;
8999 				uint8_t reserved;
9000 			};
9001 		} dw_0;
9002 
		/* Set the extension header PDU type and QoS. */
9004 		if (!gtp_psc_m)
9005 			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9006 		dw_0.w32 = 0;
9007 		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9008 		dw_0.qfi = gtp_psc_m->qfi;
9009 		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9010 			 rte_cpu_to_be_32(dw_0.w32));
9011 		dw_0.w32 = 0;
9012 		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9013 							gtp_psc_m->pdu_type);
9014 		dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9015 		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9016 			 rte_cpu_to_be_32(dw_0.w32));
9017 	}
9018 	return 0;
9019 }
9020 
9021 /**
9022  * Add eCPRI item to matcher and to the value.
9023  *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
9034  */
9035 static void
9036 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9037 			     void *key, const struct rte_flow_item *item)
9038 {
9039 	struct mlx5_priv *priv = dev->data->dev_private;
9040 	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9041 	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9042 	struct rte_ecpri_common_hdr common;
9043 	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9044 				     misc_parameters_4);
9045 	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9046 	uint32_t *samples;
9047 	void *dw_m;
9048 	void *dw_v;
9049 
9050 	if (!ecpri_v)
9051 		return;
9052 	if (!ecpri_m)
9053 		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * At most four DW samples are supported in a single matching now.
	 * Two are used for eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order.
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
9061 	if (!ecpri_m->hdr.common.u32)
9062 		return;
9063 	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9064 	/* Need to take the whole DW as the mask to fill the entry. */
9065 	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9066 			    prog_sample_field_value_0);
9067 	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9068 			    prog_sample_field_value_0);
9069 	/* Already big endian (network order) in the header. */
9070 	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9071 	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9072 	/* Sample#0, used for matching type, offset 0. */
9073 	MLX5_SET(fte_match_set_misc4, misc4_m,
9074 		 prog_sample_field_id_0, samples[0]);
9075 	/* It makes no sense to set the sample ID in the mask field. */
9076 	MLX5_SET(fte_match_set_misc4, misc4_v,
9077 		 prog_sample_field_id_0, samples[0]);
9078 	/*
9079 	 * Checking if message body part needs to be matched.
9080 	 * Some wildcard rules only matching type field should be supported.
9081 	 */
9082 	if (ecpri_m->hdr.dummy[0]) {
9083 		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9084 		switch (common.type) {
9085 		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9086 		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9087 		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9088 			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9089 					    prog_sample_field_value_1);
9090 			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9091 					    prog_sample_field_value_1);
9092 			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9093 			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9094 					    ecpri_m->hdr.dummy[0];
9095 			/* Sample#1, to match message body, offset 4. */
9096 			MLX5_SET(fte_match_set_misc4, misc4_m,
9097 				 prog_sample_field_id_1, samples[1]);
9098 			MLX5_SET(fte_match_set_misc4, misc4_v,
9099 				 prog_sample_field_id_1, samples[1]);
9100 			break;
9101 		default:
9102 			/* Others, do not match any sample ID. */
9103 			break;
9104 		}
9105 	}
9106 }
9107 
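/* All-zero reference buffer to detect unused headers in match criteria. */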
9108 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9109 
9110 #define HEADER_IS_ZERO(match_criteria, headers)				     \
9111 	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9112 		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9113 
9114 /**
9115  * Calculate flow matcher enable bitmap.
9116  *
9117  * @param match_criteria
9118  *   Pointer to flow matcher criteria.
9119  *
9120  * @return
9121  *   Bitmap of enabled fields.
9122  */
9123 static uint8_t
9124 flow_dv_matcher_enable(uint32_t *match_criteria)
9125 {
9126 	uint8_t match_criteria_enable;
9127 
9128 	match_criteria_enable =
9129 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9130 		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9131 	match_criteria_enable |=
9132 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9133 		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9134 	match_criteria_enable |=
9135 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9136 		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9137 	match_criteria_enable |=
9138 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9139 		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9140 	match_criteria_enable |=
9141 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9142 		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9143 	match_criteria_enable |=
9144 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9145 		MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9146 	return match_criteria_enable;
9147 }
9148 
9149 struct mlx5_hlist_entry *
9150 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9151 {
9152 	struct mlx5_dev_ctx_shared *sh = list->ctx;
9153 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9154 	struct rte_eth_dev *dev = ctx->dev;
9155 	struct mlx5_flow_tbl_data_entry *tbl_data;
9156 	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9157 	struct rte_flow_error *error = ctx->error;
9158 	union mlx5_flow_tbl_key key = { .v64 = key64 };
9159 	struct mlx5_flow_tbl_resource *tbl;
9160 	void *domain;
9161 	uint32_t idx = 0;
9162 	int ret;
9163 
9164 	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9165 	if (!tbl_data) {
9166 		rte_flow_error_set(error, ENOMEM,
9167 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9168 				   NULL,
9169 				   "cannot allocate flow table data entry");
9170 		return NULL;
9171 	}
9172 	tbl_data->idx = idx;
9173 	tbl_data->tunnel = tt_prm->tunnel;
9174 	tbl_data->group_id = tt_prm->group_id;
9175 	tbl_data->external = !!tt_prm->external;
9176 	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9177 	tbl_data->is_egress = !!key.direction;
9178 	tbl_data->is_transfer = !!key.domain;
9179 	tbl_data->dummy = !!key.dummy;
9180 	tbl_data->table_id = key.table_id;
9181 	tbl = &tbl_data->tbl;
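	/* A dummy table holds bookkeeping data only, no HW objects needed. */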
9182 	if (key.dummy)
9183 		return &tbl_data->entry;
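	/* Select the steering domain: FDB for transfer, else NIC Tx or Rx. */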
9184 	if (key.domain)
9185 		domain = sh->fdb_domain;
9186 	else if (key.direction)
9187 		domain = sh->tx_domain;
9188 	else
9189 		domain = sh->rx_domain;
9190 	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
9191 	if (ret) {
9192 		rte_flow_error_set(error, ENOMEM,
9193 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9194 				   NULL, "cannot create flow table object");
9195 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9196 		return NULL;
9197 	}
9198 	if (key.table_id) {
9199 		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9200 					(tbl->obj, &tbl_data->jump.action);
9201 		if (ret) {
9202 			rte_flow_error_set(error, ENOMEM,
9203 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9204 					   NULL,
9205 					   "cannot create flow jump action");
9206 			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9207 			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9208 			return NULL;
9209 		}
9210 	}
9211 	MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
9212 	      key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
9213 	      key.table_id);
9214 	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9215 			     flow_dv_matcher_create_cb,
9216 			     flow_dv_matcher_match_cb,
9217 			     flow_dv_matcher_remove_cb);
9218 	return &tbl_data->entry;
9219 }
9220 
9221 int
9222 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9223 		     struct mlx5_hlist_entry *entry, uint64_t key64,
9224 		     void *cb_ctx __rte_unused)
9225 {
9226 	struct mlx5_flow_tbl_data_entry *tbl_data =
9227 		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9228 	union mlx5_flow_tbl_key key = { .v64 = key64 };
9229 
9230 	return tbl_data->table_id != key.table_id ||
9231 	       tbl_data->dummy != key.dummy ||
9232 	       tbl_data->is_transfer != key.domain ||
9233 	       tbl_data->is_egress != key.direction;
9234 }
9235 
9236 /**
9237  * Get a flow table.
9238  *
9239  * @param[in, out] dev
9240  *   Pointer to rte_eth_dev structure.
9241  * @param[in] table_id
9242  *   Table id to use.
9243  * @param[in] egress
9244  *   Direction of the table.
9245  * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   Indicates whether the table is external, i.e. created by the application.
 * @param[in] tunnel
 *   Tunnel offload data, NULL if the table is not used for tunnel offload.
 * @param[in] group_id
 *   Group ID the table is associated with (tunnel offload).
 * @param[in] dummy
 *   Dummy entry for dv API.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Returns the table resource based on the index, NULL in case of failure.
9254  */
9255 struct mlx5_flow_tbl_resource *
9256 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9257 			 uint32_t table_id, uint8_t egress,
9258 			 uint8_t transfer,
9259 			 bool external,
9260 			 const struct mlx5_flow_tunnel *tunnel,
9261 			 uint32_t group_id, uint8_t dummy,
9262 			 struct rte_flow_error *error)
9263 {
9264 	struct mlx5_priv *priv = dev->data->dev_private;
9265 	union mlx5_flow_tbl_key table_key = {
9266 		{
9267 			.table_id = table_id,
9268 			.dummy = dummy,
9269 			.domain = !!transfer,
9270 			.direction = !!egress,
9271 		}
9272 	};
9273 	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9274 		.tunnel = tunnel,
9275 		.group_id = group_id,
9276 		.external = external,
9277 	};
9278 	struct mlx5_flow_cb_ctx ctx = {
9279 		.dev = dev,
9280 		.error = error,
9281 		.data = &tt_prm,
9282 	};
9283 	struct mlx5_hlist_entry *entry;
9284 	struct mlx5_flow_tbl_data_entry *tbl_data;
9285 
9286 	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9287 	if (!entry) {
9288 		rte_flow_error_set(error, ENOMEM,
9289 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9290 				   "cannot get table");
9291 		return NULL;
9292 	}
9293 	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
9294 		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
9295 	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9296 	return &tbl_data->tbl;
9297 }
9298 
9299 void
9300 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
9301 		      struct mlx5_hlist_entry *entry)
9302 {
9303 	struct mlx5_dev_ctx_shared *sh = list->ctx;
9304 	struct mlx5_flow_tbl_data_entry *tbl_data =
9305 		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9306 
9307 	MLX5_ASSERT(entry && sh);
9308 	if (tbl_data->jump.action)
9309 		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
9310 	if (tbl_data->tbl.obj)
9311 		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
9312 	if (tbl_data->tunnel_offload && tbl_data->external) {
9313 		struct mlx5_hlist_entry *he;
9314 		struct mlx5_hlist *tunnel_grp_hash;
9315 		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
9316 		union tunnel_tbl_key tunnel_key = {
9317 			.tunnel_id = tbl_data->tunnel ?
9318 					tbl_data->tunnel->tunnel_id : 0,
9319 			.group = tbl_data->group_id
9320 		};
9321 		uint32_t table_id = tbl_data->table_id;
9322 
9323 		tunnel_grp_hash = tbl_data->tunnel ?
9324 					tbl_data->tunnel->groups :
9325 					thub->groups;
9326 		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
9327 		if (he)
9328 			mlx5_hlist_unregister(tunnel_grp_hash, he);
9329 		DRV_LOG(DEBUG,
9330 			"Table_id %u tunnel %u group %u released.",
9331 			table_id,
9332 			tbl_data->tunnel ?
9333 			tbl_data->tunnel->tunnel_id : 0,
9334 			tbl_data->group_id);
9335 	}
9336 	mlx5_cache_list_destroy(&tbl_data->matchers);
9337 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
9338 }
9339 
9340 /**
9341  * Release a flow table.
9342  *
9343  * @param[in] sh
9344  *   Pointer to device shared structure.
9345  * @param[in] tbl
9346  *   Table resource to be released.
9347  *
9348  * @return
 *   Returns 0 if the table was released, else returns 1.
9350  */
9351 static int
9352 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9353 			     struct mlx5_flow_tbl_resource *tbl)
9354 {
9355 	struct mlx5_flow_tbl_data_entry *tbl_data =
9356 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9357 
9358 	if (!tbl)
9359 		return 0;
9360 	return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9361 }
9362 
9363 int
9364 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9365 			 struct mlx5_cache_entry *entry, void *cb_ctx)
9366 {
9367 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9368 	struct mlx5_flow_dv_matcher *ref = ctx->data;
9369 	struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9370 							entry);
9371 
9372 	return cur->crc != ref->crc ||
9373 	       cur->priority != ref->priority ||
9374 	       memcmp((const void *)cur->mask.buf,
9375 		      (const void *)ref->mask.buf, ref->mask.size);
9376 }
9377 
9378 struct mlx5_cache_entry *
9379 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
9380 			  struct mlx5_cache_entry *entry __rte_unused,
9381 			  void *cb_ctx)
9382 {
9383 	struct mlx5_dev_ctx_shared *sh = list->ctx;
9384 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9385 	struct mlx5_flow_dv_matcher *ref = ctx->data;
9386 	struct mlx5_flow_dv_matcher *cache;
9387 	struct mlx5dv_flow_matcher_attr dv_attr = {
9388 		.type = IBV_FLOW_ATTR_NORMAL,
9389 		.match_mask = (void *)&ref->mask,
9390 	};
9391 	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
9392 							    typeof(*tbl), tbl);
9393 	int ret;
9394 
9395 	cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
9396 	if (!cache) {
9397 		rte_flow_error_set(ctx->error, ENOMEM,
9398 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9399 				   "cannot create matcher");
9400 		return NULL;
9401 	}
9402 	*cache = *ref;
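	/* Derive the criteria enable bits from the non-zero mask headers. */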
9403 	dv_attr.match_criteria_enable =
9404 		flow_dv_matcher_enable(cache->mask.buf);
9405 	dv_attr.priority = ref->priority;
9406 	if (tbl->is_egress)
9407 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
9408 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
9409 					       &cache->matcher_object);
9410 	if (ret) {
9411 		mlx5_free(cache);
9412 		rte_flow_error_set(ctx->error, ENOMEM,
9413 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9414 				   "cannot create matcher");
9415 		return NULL;
9416 	}
9417 	return &cache->entry;
9418 }
9419 
9420 /**
9421  * Register the flow matcher.
9422  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the flow matcher reference.
 * @param[in, out] key
 *   Pointer to flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload data, NULL for a non-tunnel flow.
 * @param[in] group_id
 *   Original (tunnel offload) group ID of the flow.
 * @param[out] error
 *   Pointer to error structure.
9433  *
9434  * @return
9435  *   0 on success otherwise -errno and errno is set.
9436  */
9437 static int
9438 flow_dv_matcher_register(struct rte_eth_dev *dev,
9439 			 struct mlx5_flow_dv_matcher *ref,
9440 			 union mlx5_flow_tbl_key *key,
9441 			 struct mlx5_flow *dev_flow,
9442 			 const struct mlx5_flow_tunnel *tunnel,
9443 			 uint32_t group_id,
9444 			 struct rte_flow_error *error)
9445 {
9446 	struct mlx5_cache_entry *entry;
9447 	struct mlx5_flow_dv_matcher *cache;
9448 	struct mlx5_flow_tbl_resource *tbl;
9449 	struct mlx5_flow_tbl_data_entry *tbl_data;
9450 	struct mlx5_flow_cb_ctx ctx = {
9451 		.error = error,
9452 		.data = ref,
9453 	};
9454 
	/*
	 * Tunnel offload API requires this registration for cases when
	 * a tunnel match rule was inserted before the tunnel set rule.
	 */
9459 	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
9460 				       key->direction, key->domain,
9461 				       dev_flow->external, tunnel,
9462 				       group_id, 0, error);
9463 	if (!tbl)
9464 		return -rte_errno;	/* No need to refill the error info */
9465 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9466 	ref->tbl = tbl;
9467 	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9468 	if (!entry) {
9469 		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9470 		return rte_flow_error_set(error, ENOMEM,
9471 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9472 					  "cannot allocate ref memory");
9473 	}
9474 	cache = container_of(entry, typeof(*cache), entry);
9475 	dev_flow->handle->dvh.matcher = cache;
9476 	return 0;
9477 }
9478 
9479 struct mlx5_hlist_entry *
9480 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9481 {
9482 	struct mlx5_dev_ctx_shared *sh = list->ctx;
9483 	struct rte_flow_error *error = ctx;
9484 	struct mlx5_flow_dv_tag_resource *entry;
9485 	uint32_t idx = 0;
9486 	int ret;
9487 
9488 	entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9489 	if (!entry) {
9490 		rte_flow_error_set(error, ENOMEM,
9491 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9492 				   "cannot allocate resource memory");
9493 		return NULL;
9494 	}
9495 	entry->idx = idx;
9496 	entry->tag_id = key;
9497 	ret = mlx5_flow_os_create_flow_action_tag(key,
9498 						  &entry->action);
9499 	if (ret) {
9500 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9501 		rte_flow_error_set(error, ENOMEM,
9502 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9503 				   NULL, "cannot create action");
9504 		return NULL;
9505 	}
9506 	return &entry->entry;
9507 }
9508 
9509 int
9510 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9511 		     struct mlx5_hlist_entry *entry, uint64_t key,
9512 		     void *cb_ctx __rte_unused)
9513 {
9514 	struct mlx5_flow_dv_tag_resource *tag =
9515 		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9516 
9517 	return key != tag->tag_id;
9518 }
9519 
9520 /**
9521  * Find existing tag resource or create and register a new one.
9522  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tag_be24
 *   Tag value in big endian, right-shifted by 8 bits.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
9531  *
9532  * @return
9533  *   0 on success otherwise -errno and errno is set.
9534  */
9535 static int
9536 flow_dv_tag_resource_register
9537 			(struct rte_eth_dev *dev,
9538 			 uint32_t tag_be24,
9539 			 struct mlx5_flow *dev_flow,
9540 			 struct rte_flow_error *error)
9541 {
9542 	struct mlx5_priv *priv = dev->data->dev_private;
9543 	struct mlx5_flow_dv_tag_resource *cache_resource;
9544 	struct mlx5_hlist_entry *entry;
9545 
9546 	entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9547 	if (entry) {
9548 		cache_resource = container_of
9549 			(entry, struct mlx5_flow_dv_tag_resource, entry);
9550 		dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9551 		dev_flow->dv.tag_resource = cache_resource;
9552 		return 0;
9553 	}
9554 	return -rte_errno;
9555 }
9556 
9557 void
9558 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9559 		      struct mlx5_hlist_entry *entry)
9560 {
9561 	struct mlx5_dev_ctx_shared *sh = list->ctx;
9562 	struct mlx5_flow_dv_tag_resource *tag =
9563 		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9564 
9565 	MLX5_ASSERT(tag && sh && tag->action);
9566 	claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9567 	DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9568 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9569 }
9570 
9571 /**
9572  * Release the tag.
9573  *
9574  * @param dev
9575  *   Pointer to Ethernet device.
9576  * @param tag_idx
9577  *   Tag index.
9578  *
9579  * @return
9580  *   1 while a reference on it exists, 0 when freed.
9581  */
9582 static int
9583 flow_dv_tag_release(struct rte_eth_dev *dev,
9584 		    uint32_t tag_idx)
9585 {
9586 	struct mlx5_priv *priv = dev->data->dev_private;
9587 	struct mlx5_flow_dv_tag_resource *tag;
9588 
9589 	tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9590 	if (!tag)
9591 		return 0;
9592 	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9593 		dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9594 	return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9595 }
9596 
9597 /**
9598  * Translate port ID action to vport.
9599  *
9600  * @param[in] dev
9601  *   Pointer to rte_eth_dev structure.
9602  * @param[in] action
9603  *   Pointer to the port ID action.
9604  * @param[out] dst_port_id
9605  *   The target port ID.
9606  * @param[out] error
9607  *   Pointer to the error structure.
9608  *
9609  * @return
9610  *   0 on success, a negative errno value otherwise and rte_errno is set.
9611  */
9612 static int
9613 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9614 				 const struct rte_flow_action *action,
9615 				 uint32_t *dst_port_id,
9616 				 struct rte_flow_error *error)
9617 {
9618 	uint32_t port;
9619 	struct mlx5_priv *priv;
9620 	const struct rte_flow_action_port_id *conf =
9621 			(const struct rte_flow_action_port_id *)action->conf;
9622 
9623 	port = conf->original ? dev->data->port_id : conf->id;
9624 	priv = mlx5_port_to_eswitch_info(port, false);
9625 	if (!priv)
9626 		return rte_flow_error_set(error, -rte_errno,
9627 					  RTE_FLOW_ERROR_TYPE_ACTION,
9628 					  NULL,
9629 					  "No eswitch info was found for port");
9630 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9631 	/*
9632 	 * This parameter is transferred to
9633 	 * mlx5dv_dr_action_create_dest_ib_port().
9634 	 */
9635 	*dst_port_id = priv->dev_port;
9636 #else
9637 	/*
	 * Legacy mode, no LAG configuration is supported.
9639 	 * This parameter is transferred to
9640 	 * mlx5dv_dr_action_create_dest_vport().
9641 	 */
9642 	*dst_port_id = priv->vport_id;
9643 #endif
9644 	return 0;
9645 }
9646 
9647 /**
9648  * Create a counter with aging configuration.
9649  *
9650  * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] count
 *   Pointer to the counter action configuration.
9654  * @param[in] age
9655  *   Pointer to the aging action configuration.
9656  *
9657  * @return
9658  *   Index to flow counter on success, 0 otherwise.
9659  */
9660 static uint32_t
9661 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9662 				struct mlx5_flow *dev_flow,
9663 				const struct rte_flow_action_count *count,
9664 				const struct rte_flow_action_age *age)
9665 {
9666 	uint32_t counter;
9667 	struct mlx5_age_param *age_param;
9668 
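	/* Reuse a shared counter by ID when requested, else allocate one. */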
9669 	if (count && count->shared)
9670 		counter = flow_dv_counter_get_shared(dev, count->id);
9671 	else
9672 		counter = flow_dv_counter_alloc(dev, !!age);
9673 	if (!counter || age == NULL)
9674 		return counter;
9675 	age_param  = flow_dv_counter_idx_get_age(dev, counter);
9676 	age_param->context = age->context ? age->context :
9677 		(void *)(uintptr_t)(dev_flow->flow_idx);
9678 	age_param->timeout = age->timeout;
9679 	age_param->port_id = dev->data->port_id;
9680 	__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
9681 	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
9682 	return counter;
9683 }
9684 
9685 /**
9686  * Add Tx queue matcher
9687  *
9688  * @param[in] dev
9689  *   Pointer to the dev struct.
9690  * @param[in, out] matcher
9691  *   Flow matcher.
9692  * @param[in, out] key
9693  *   Flow matcher value.
9694  * @param[in] item
9695  *   Flow pattern to translate.
9698  */
9699 static void
9700 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
9701 				void *matcher, void *key,
9702 				const struct rte_flow_item *item)
9703 {
9704 	const struct mlx5_rte_flow_item_tx_queue *queue_m;
9705 	const struct mlx5_rte_flow_item_tx_queue *queue_v;
9706 	void *misc_m =
9707 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9708 	void *misc_v =
9709 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9710 	struct mlx5_txq_ctrl *txq;
	uint32_t queue;

9714 	queue_m = (const void *)item->mask;
9715 	if (!queue_m)
9716 		return;
9717 	queue_v = (const void *)item->spec;
9718 	if (!queue_v)
9719 		return;
9720 	txq = mlx5_txq_get(dev, queue_v->queue);
9721 	if (!txq)
9722 		return;
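	/* The Tx queue is identified in HW by its send queue (SQ) number. */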
9723 	queue = txq->obj->sq->id;
9724 	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
9725 	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
9726 		 queue & queue_m->queue);
9727 	mlx5_txq_release(dev, queue_v->queue);
9728 }
9729 
9730 /**
9731  * Set the hash fields according to the @p flow information.
9732  *
9733  * @param[in] dev_flow
9734  *   Pointer to the mlx5_flow.
9735  * @param[in] rss_desc
9736  *   Pointer to the mlx5_flow_rss_desc.
9737  */
9738 static void
9739 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
9740 		       struct mlx5_flow_rss_desc *rss_desc)
9741 {
9742 	uint64_t items = dev_flow->handle->layers;
9743 	int rss_inner = 0;
9744 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
9745 
9746 	dev_flow->hash_fields = 0;
9747 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
9748 	if (rss_desc->level >= 2) {
9749 		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
9750 		rss_inner = 1;
9751 	}
9752 #endif
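	/*
	 * Select the hash fields from the outermost layers, or from the
	 * innermost ones when inner RSS was requested above.
	 */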
9753 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
9754 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
9755 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
9756 			if (rss_types & ETH_RSS_L3_SRC_ONLY)
9757 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
9758 			else if (rss_types & ETH_RSS_L3_DST_ONLY)
9759 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
9760 			else
9761 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
9762 		}
9763 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
9764 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
9765 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
9766 			if (rss_types & ETH_RSS_L3_SRC_ONLY)
9767 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
9768 			else if (rss_types & ETH_RSS_L3_DST_ONLY)
9769 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
9770 			else
9771 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
9772 		}
9773 	}
9774 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
9775 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
9776 		if (rss_types & ETH_RSS_UDP) {
9777 			if (rss_types & ETH_RSS_L4_SRC_ONLY)
9778 				dev_flow->hash_fields |=
9779 						IBV_RX_HASH_SRC_PORT_UDP;
9780 			else if (rss_types & ETH_RSS_L4_DST_ONLY)
9781 				dev_flow->hash_fields |=
9782 						IBV_RX_HASH_DST_PORT_UDP;
9783 			else
9784 				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
9785 		}
9786 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
9787 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
9788 		if (rss_types & ETH_RSS_TCP) {
9789 			if (rss_types & ETH_RSS_L4_SRC_ONLY)
9790 				dev_flow->hash_fields |=
9791 						IBV_RX_HASH_SRC_PORT_TCP;
9792 			else if (rss_types & ETH_RSS_L4_DST_ONLY)
9793 				dev_flow->hash_fields |=
9794 						IBV_RX_HASH_DST_PORT_TCP;
9795 			else
9796 				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
9797 		}
9798 	}
9799 }
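
/*
 * Illustrative sketch: for an outer IPv4/UDP flow whose RSS types
 * cover both layers (hypothetical values), the function above selects
 * the combined L3 + L4 hash fields.
 *
 *	// dev_flow->handle->layers holds MLX5_FLOW_LAYER_OUTER_L3_IPV4
 *	// and MLX5_FLOW_LAYER_OUTER_L4_UDP; rss_desc->level < 2.
 *	rss_desc->types = ETH_RSS_IPV4 | ETH_RSS_UDP;
 *	flow_dv_hashfields_set(dev_flow, rss_desc);
 *	// dev_flow->hash_fields == MLX5_IPV4_IBV_RX_HASH |
 *	//			    MLX5_UDP_IBV_RX_HASH
 */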
9800 
9801 /**
9802  * Prepare an Rx Hash queue.
9803  *
9804  * @param dev
9805  *   Pointer to Ethernet device.
9806  * @param[in] dev_flow
9807  *   Pointer to the mlx5_flow.
9808  * @param[in] rss_desc
9809  *   Pointer to the mlx5_flow_rss_desc.
9810  * @param[out] hrxq_idx
9811  *   Hash Rx queue index.
9812  *
9813  * @return
9814  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
9815  */
9816 static struct mlx5_hrxq *
9817 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
9818 		     struct mlx5_flow *dev_flow,
9819 		     struct mlx5_flow_rss_desc *rss_desc,
9820 		     uint32_t *hrxq_idx)
9821 {
9822 	struct mlx5_priv *priv = dev->data->dev_private;
9823 	struct mlx5_flow_handle *dh = dev_flow->handle;
9824 	struct mlx5_hrxq *hrxq;
9825 
9826 	MLX5_ASSERT(rss_desc->queue_num);
9827 	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
9828 	rss_desc->hash_fields = dev_flow->hash_fields;
9829 	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
9830 	rss_desc->shared_rss = 0;
9831 	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
9832 	if (!*hrxq_idx)
9833 		return NULL;
9834 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9835 			      *hrxq_idx);
9836 	return hrxq;
9837 }
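
/*
 * Ownership note with a small sketch (hypothetical call site): the
 * hrxq returned above carries a reference taken by mlx5_hrxq_get();
 * the caller keeps *hrxq_idx and drops the reference with
 * mlx5_hrxq_release() when the owning action goes away.
 *
 *	uint32_t hrxq_idx;
 *	struct mlx5_hrxq *hrxq =
 *		flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *	if (!hrxq)
 *		return NULL; // rte_errno is already set
 *	...
 *	mlx5_hrxq_release(dev, hrxq_idx); // on teardown
 */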
9838 
9839 /**
9840  * Release sample sub action resource.
9841  *
9842  * @param[in, out] dev
9843  *   Pointer to rte_eth_dev structure.
9844  * @param[in] act_res
9845  *   Pointer to sample sub action resource.
9846  */
9847 static void
9848 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
9849 				   struct mlx5_flow_sub_actions_idx *act_res)
9850 {
9851 	if (act_res->rix_hrxq) {
9852 		mlx5_hrxq_release(dev, act_res->rix_hrxq);
9853 		act_res->rix_hrxq = 0;
9854 	}
9855 	if (act_res->rix_encap_decap) {
9856 		flow_dv_encap_decap_resource_release(dev,
9857 						     act_res->rix_encap_decap);
9858 		act_res->rix_encap_decap = 0;
9859 	}
9860 	if (act_res->rix_port_id_action) {
9861 		flow_dv_port_id_action_resource_release(dev,
9862 						act_res->rix_port_id_action);
9863 		act_res->rix_port_id_action = 0;
9864 	}
9865 	if (act_res->rix_tag) {
9866 		flow_dv_tag_release(dev, act_res->rix_tag);
9867 		act_res->rix_tag = 0;
9868 	}
9869 	if (act_res->rix_jump) {
9870 		flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
9871 		act_res->rix_jump = 0;
9872 	}
9873 }
9874 
9875 int
9876 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
9877 			struct mlx5_cache_entry *entry, void *cb_ctx)
9878 {
9879 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9880 	struct rte_eth_dev *dev = ctx->dev;
9881 	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9882 	struct mlx5_flow_dv_sample_resource *cache_resource =
9883 			container_of(entry, typeof(*cache_resource), entry);
9884 
9885 	if (resource->ratio == cache_resource->ratio &&
9886 	    resource->ft_type == cache_resource->ft_type &&
9887 	    resource->ft_id == cache_resource->ft_id &&
9888 	    resource->set_action == cache_resource->set_action &&
9889 	    !memcmp((void *)&resource->sample_act,
9890 		    (void *)&cache_resource->sample_act,
9891 		    sizeof(struct mlx5_flow_sub_actions_list))) {
9892 		/*
9893 		 * The existing sample action should release the prepared
9894 		 * sub-actions' reference counters.
9895 		 */
9896 		flow_dv_sample_sub_actions_release(dev,
9897 						&resource->sample_idx);
9898 		return 0;
9899 	}
9900 	return 1;
9901 }
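
/*
 * Contract note (assumed cache-list semantics): mlx5_cache_register()
 * invokes this callback per cached entry and treats a zero return as
 * "reuse this entry". On a hit, the sub-action references prepared by
 * the caller are released here because the cached entry already owns
 * references of its own.
 */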
9902 
9903 struct mlx5_cache_entry *
9904 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
9905 			 struct mlx5_cache_entry *entry __rte_unused,
9906 			 void *cb_ctx)
9907 {
9908 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9909 	struct rte_eth_dev *dev = ctx->dev;
9910 	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9911 	void **sample_dv_actions = resource->sub_actions;
9912 	struct mlx5_flow_dv_sample_resource *cache_resource;
9913 	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
9914 	struct mlx5_priv *priv = dev->data->dev_private;
9915 	struct mlx5_dev_ctx_shared *sh = priv->sh;
9916 	struct mlx5_flow_tbl_resource *tbl;
9917 	uint32_t idx = 0;
9918 	const uint32_t next_ft_step = 1;
9919 	uint32_t next_ft_id = resource->ft_id + next_ft_step;
9920 	uint8_t is_egress = 0;
9921 	uint8_t is_transfer = 0;
9922 	struct rte_flow_error *error = ctx->error;
9923 
9924 	/* Register new sample resource. */
9925 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
9926 	if (!cache_resource) {
9927 		rte_flow_error_set(error, ENOMEM,
9928 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9929 					  NULL,
9930 					  "cannot allocate resource memory");
9931 		return NULL;
9932 	}
9933 	*cache_resource = *resource;
9934 	/* Create normal path table level */
9935 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9936 		is_transfer = 1;
9937 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
9938 		is_egress = 1;
9939 	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
9940 					is_egress, is_transfer,
9941 					true, NULL, 0, 0, error);
9942 	if (!tbl) {
9943 		rte_flow_error_set(error, ENOMEM,
9944 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9945 					  NULL,
9946 					  "failed to create normal path table "
9947 					  "for sample");
9948 		goto error;
9949 	}
9950 	cache_resource->normal_path_tbl = tbl;
9951 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
9952 		if (!sh->default_miss_action) {
9953 			rte_flow_error_set(error, ENOMEM,
9954 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9955 						NULL,
9956 						"default miss action was not "
9957 						"created");
9958 			goto error;
9959 		}
9960 		sample_dv_actions[resource->sample_act.actions_num++] =
9961 						sh->default_miss_action;
9962 	}
9963 	/* Create a DR sample action */
9964 	sampler_attr.sample_ratio = cache_resource->ratio;
9965 	sampler_attr.default_next_table = tbl->obj;
9966 	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
9967 	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
9968 							&sample_dv_actions[0];
9969 	sampler_attr.action = cache_resource->set_action;
9970 	if (mlx5_os_flow_dr_create_flow_action_sampler
9971 			(&sampler_attr, &cache_resource->verbs_action)) {
9972 		rte_flow_error_set(error, ENOMEM,
9973 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9974 					NULL, "cannot create sample action");
9975 		goto error;
9976 	}
9977 	cache_resource->idx = idx;
9978 	cache_resource->dev = dev;
9979 	return &cache_resource->entry;
9980 error:
9981 	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
9982 		flow_dv_sample_sub_actions_release(dev,
9983 						   &cache_resource->sample_idx);
9984 	if (cache_resource->normal_path_tbl)
9985 		flow_dv_tbl_resource_release(MLX5_SH(dev),
9986 				cache_resource->normal_path_tbl);
9987 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
9988 	return NULL;
9990 }
9991 
9992 /**
9993  * Find existing sample resource or create and register a new one.
9994  *
9995  * @param[in, out] dev
9996  *   Pointer to rte_eth_dev structure.
9997  * @param[in] resource
9998  *   Pointer to sample resource.
9999  * @param[in, out] dev_flow
10000  *   Pointer to the dev_flow.
10001  * @param[out] error
10002  *   Pointer to the error structure.
10003  *
10004  * @return
10005  *   0 on success, otherwise a negative errno value and rte_errno is set.
10006  */
10007 static int
10008 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10009 			 struct mlx5_flow_dv_sample_resource *resource,
10010 			 struct mlx5_flow *dev_flow,
10011 			 struct rte_flow_error *error)
10012 {
10013 	struct mlx5_flow_dv_sample_resource *cache_resource;
10014 	struct mlx5_cache_entry *entry;
10015 	struct mlx5_priv *priv = dev->data->dev_private;
10016 	struct mlx5_flow_cb_ctx ctx = {
10017 		.dev = dev,
10018 		.error = error,
10019 		.data = resource,
10020 	};
10021 
10022 	entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10023 	if (!entry)
10024 		return -rte_errno;
10025 	cache_resource = container_of(entry, typeof(*cache_resource), entry);
10026 	dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10027 	dev_flow->dv.sample_res = cache_resource;
10028 	return 0;
10029 }
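
/*
 * A minimal usage sketch (hypothetical resource values): the register
 * helpers pack the lookup data into a struct mlx5_flow_cb_ctx and let
 * mlx5_cache_register() either reuse a matching entry (the match
 * callback returned 0) or build a new one through the create callback.
 *
 *	struct mlx5_flow_dv_sample_resource res = {
 *		.ratio = 2,
 *		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
 *	};
 *	if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
 *		return -rte_errno;
 *	// dev_flow->dv.sample_res now points to the cached resource.
 */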
10030 
10031 int
10032 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10033 			    struct mlx5_cache_entry *entry, void *cb_ctx)
10034 {
10035 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10036 	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10037 	struct rte_eth_dev *dev = ctx->dev;
10038 	struct mlx5_flow_dv_dest_array_resource *cache_resource =
10039 			container_of(entry, typeof(*cache_resource), entry);
10040 	uint32_t idx = 0;
10041 
10042 	if (resource->num_of_dest == cache_resource->num_of_dest &&
10043 	    resource->ft_type == cache_resource->ft_type &&
10044 	    !memcmp((void *)cache_resource->sample_act,
10045 		    (void *)resource->sample_act,
10046 		   (resource->num_of_dest *
10047 		   sizeof(struct mlx5_flow_sub_actions_list)))) {
10048 		/*
10049 		 * The existing dest array action should release the prepared
10050 		 * sub-actions' reference counters.
10051 		 */
10052 		for (idx = 0; idx < resource->num_of_dest; idx++)
10053 			flow_dv_sample_sub_actions_release(dev,
10054 					&resource->sample_idx[idx]);
10055 		return 0;
10056 	}
10057 	return 1;
10058 }
10059 
10060 struct mlx5_cache_entry *
10061 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10062 			 struct mlx5_cache_entry *entry __rte_unused,
10063 			 void *cb_ctx)
10064 {
10065 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10066 	struct rte_eth_dev *dev = ctx->dev;
10067 	struct mlx5_flow_dv_dest_array_resource *cache_resource;
10068 	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10069 	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10070 	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10071 	struct mlx5_priv *priv = dev->data->dev_private;
10072 	struct mlx5_dev_ctx_shared *sh = priv->sh;
10073 	struct mlx5_flow_sub_actions_list *sample_act;
10074 	struct mlx5dv_dr_domain *domain;
10075 	uint32_t idx = 0, res_idx = 0;
10076 	struct rte_flow_error *error = ctx->error;
10077 	uint64_t action_flags;
10078 	int ret;
10079 
10080 	/* Register new destination array resource. */
10081 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10082 					    &res_idx);
10083 	if (!cache_resource) {
10084 		rte_flow_error_set(error, ENOMEM,
10085 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10086 					  NULL,
10087 					  "cannot allocate resource memory");
10088 		return NULL;
10089 	}
10090 	*cache_resource = *resource;
10091 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10092 		domain = sh->fdb_domain;
10093 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10094 		domain = sh->rx_domain;
10095 	else
10096 		domain = sh->tx_domain;
10097 	for (idx = 0; idx < resource->num_of_dest; idx++) {
10098 		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10099 				 mlx5_malloc(MLX5_MEM_ZERO,
10100 				 sizeof(struct mlx5dv_dr_action_dest_attr),
10101 				 0, SOCKET_ID_ANY);
10102 		if (!dest_attr[idx]) {
10103 			rte_flow_error_set(error, ENOMEM,
10104 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10105 					   NULL,
10106 					   "cannot allocate resource memory");
10107 			goto error;
10108 		}
10109 		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10110 		sample_act = &resource->sample_act[idx];
10111 		action_flags = sample_act->action_flags;
10112 		switch (action_flags) {
10113 		case MLX5_FLOW_ACTION_QUEUE:
10114 			dest_attr[idx]->dest = sample_act->dr_queue_action;
10115 			break;
10116 		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10117 			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10118 			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10119 			dest_attr[idx]->dest_reformat->reformat =
10120 					sample_act->dr_encap_action;
10121 			dest_attr[idx]->dest_reformat->dest =
10122 					sample_act->dr_port_id_action;
10123 			break;
10124 		case MLX5_FLOW_ACTION_PORT_ID:
10125 			dest_attr[idx]->dest = sample_act->dr_port_id_action;
10126 			break;
10127 		case MLX5_FLOW_ACTION_JUMP:
10128 			dest_attr[idx]->dest = sample_act->dr_jump_action;
10129 			break;
10130 		default:
10131 			rte_flow_error_set(error, EINVAL,
10132 					   RTE_FLOW_ERROR_TYPE_ACTION,
10133 					   NULL,
10134 					   "unsupported action type");
10135 			goto error;
10136 		}
10137 	}
10138 	/* Create a dest array action. */
10139 	ret = mlx5_os_flow_dr_create_flow_action_dest_array
10140 						(domain,
10141 						 cache_resource->num_of_dest,
10142 						 dest_attr,
10143 						 &cache_resource->action);
10144 	if (ret) {
10145 		rte_flow_error_set(error, ENOMEM,
10146 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10147 				   NULL,
10148 				   "cannot create destination array action");
10149 		goto error;
10150 	}
10151 	cache_resource->idx = res_idx;
10152 	cache_resource->dev = dev;
10153 	for (idx = 0; idx < resource->num_of_dest; idx++)
10154 		mlx5_free(dest_attr[idx]);
10155 	return &cache_resource->entry;
10156 error:
10157 	for (idx = 0; idx < resource->num_of_dest; idx++) {
10158 		struct mlx5_flow_sub_actions_idx *act_res =
10159 					&cache_resource->sample_idx[idx];
10160 		if (act_res->rix_hrxq &&
10161 		    !mlx5_hrxq_release(dev,
10162 				act_res->rix_hrxq))
10163 			act_res->rix_hrxq = 0;
10164 		if (act_res->rix_encap_decap &&
10165 			!flow_dv_encap_decap_resource_release(dev,
10166 				act_res->rix_encap_decap))
10167 			act_res->rix_encap_decap = 0;
10168 		if (act_res->rix_port_id_action &&
10169 			!flow_dv_port_id_action_resource_release(dev,
10170 				act_res->rix_port_id_action))
10171 			act_res->rix_port_id_action = 0;
10172 		if (act_res->rix_jump &&
10173 			!flow_dv_jump_tbl_resource_release(dev,
10174 				act_res->rix_jump))
10175 			act_res->rix_jump = 0;
10176 		if (dest_attr[idx])
10177 			mlx5_free(dest_attr[idx]);
10178 	}
10179 
10180 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10181 	return NULL;
10182 }
10183 
10184 /**
10185  * Find existing destination array resource or create and register a new one.
10186  *
10187  * @param[in, out] dev
10188  *   Pointer to rte_eth_dev structure.
10189  * @param[in] resource
10190  *   Pointer to destination array resource.
10191  * @param[in, out] dev_flow
10192  *   Pointer to the dev_flow.
10193  * @param[out] error
10194  *   Pointer to the error structure.
10195  *
10196  * @return
10197  *   0 on success, otherwise a negative errno value and rte_errno is set.
10198  */
10199 static int
10200 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10201 			 struct mlx5_flow_dv_dest_array_resource *resource,
10202 			 struct mlx5_flow *dev_flow,
10203 			 struct rte_flow_error *error)
10204 {
10205 	struct mlx5_flow_dv_dest_array_resource *cache_resource;
10206 	struct mlx5_priv *priv = dev->data->dev_private;
10207 	struct mlx5_cache_entry *entry;
10208 	struct mlx5_flow_cb_ctx ctx = {
10209 		.dev = dev,
10210 		.error = error,
10211 		.data = resource,
10212 	};
10213 
10214 	entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10215 	if (!entry)
10216 		return -rte_errno;
10217 	cache_resource = container_of(entry, typeof(*cache_resource), entry);
10218 	dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10219 	dev_flow->dv.dest_array_res = cache_resource;
10220 	return 0;
10221 }
10222 
10223 /**
10224  * Convert Sample action to DV specification.
10225  *
10226  * @param[in] dev
10227  *   Pointer to rte_eth_dev structure.
10228  * @param[in] action
10229  *   Pointer to sample action structure.
10230  * @param[in, out] dev_flow
10231  *   Pointer to the mlx5_flow.
10232  * @param[in] attr
10233  *   Pointer to the flow attributes.
10234  * @param[in, out] num_of_dest
10235  *   Pointer to the num of destination.
10236  * @param[in, out] sample_actions
10237  *   Pointer to sample actions list.
10238  * @param[in, out] res
10239  *   Pointer to sample resource.
10240  * @param[out] error
10241  *   Pointer to the error structure.
10242  *
10243  * @return
10244  *   0 on success, a negative errno value otherwise and rte_errno is set.
10245  */
10246 static int
10247 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10248 				const struct rte_flow_action_sample *action,
10249 				struct mlx5_flow *dev_flow,
10250 				const struct rte_flow_attr *attr,
10251 				uint32_t *num_of_dest,
10252 				void **sample_actions,
10253 				struct mlx5_flow_dv_sample_resource *res,
10254 				struct rte_flow_error *error)
10255 {
10256 	struct mlx5_priv *priv = dev->data->dev_private;
10257 	const struct rte_flow_action *sub_actions;
10258 	struct mlx5_flow_sub_actions_list *sample_act;
10259 	struct mlx5_flow_sub_actions_idx *sample_idx;
10260 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10261 	struct rte_flow *flow = dev_flow->flow;
10262 	struct mlx5_flow_rss_desc *rss_desc;
10263 	uint64_t action_flags = 0;
10264 
10265 	MLX5_ASSERT(wks);
10266 	rss_desc = &wks->rss_desc;
10267 	sample_act = &res->sample_act;
10268 	sample_idx = &res->sample_idx;
10269 	res->ratio = action->ratio;
10270 	sub_actions = action->actions;
10271 	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10272 		int type = sub_actions->type;
10273 		uint32_t pre_rix = 0;
10274 		void *pre_r;
10275 		switch (type) {
10276 		case RTE_FLOW_ACTION_TYPE_QUEUE:
10277 		{
10278 			const struct rte_flow_action_queue *queue;
10279 			struct mlx5_hrxq *hrxq;
10280 			uint32_t hrxq_idx;
10281 
10282 			queue = sub_actions->conf;
10283 			rss_desc->queue_num = 1;
10284 			rss_desc->queue[0] = queue->index;
10285 			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10286 						    rss_desc, &hrxq_idx);
10287 			if (!hrxq)
10288 				return rte_flow_error_set
10289 					(error, rte_errno,
10290 					 RTE_FLOW_ERROR_TYPE_ACTION,
10291 					 NULL,
10292 					 "cannot create fate queue");
10293 			sample_act->dr_queue_action = hrxq->action;
10294 			sample_idx->rix_hrxq = hrxq_idx;
10295 			sample_actions[sample_act->actions_num++] =
10296 						hrxq->action;
10297 			(*num_of_dest)++;
10298 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
10299 			if (action_flags & MLX5_FLOW_ACTION_MARK)
10300 				dev_flow->handle->rix_hrxq = hrxq_idx;
10301 			dev_flow->handle->fate_action =
10302 					MLX5_FLOW_FATE_QUEUE;
10303 			break;
10304 		}
10305 		case RTE_FLOW_ACTION_TYPE_RSS:
10306 		{
10307 			struct mlx5_hrxq *hrxq;
10308 			uint32_t hrxq_idx;
10309 			const struct rte_flow_action_rss *rss;
10310 			const uint8_t *rss_key;
10311 
10312 			rss = sub_actions->conf;
10313 			memcpy(rss_desc->queue, rss->queue,
10314 			       rss->queue_num * sizeof(uint16_t));
10315 			rss_desc->queue_num = rss->queue_num;
10316 			/* NULL RSS key indicates default RSS key. */
10317 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
10318 			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10319 			/*
10320 			 * rss->level and rss->types should be set in advance
10321 			 * when expanding items for RSS.
10322 			 */
10323 			flow_dv_hashfields_set(dev_flow, rss_desc);
10324 			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10325 						    rss_desc, &hrxq_idx);
10326 			if (!hrxq)
10327 				return rte_flow_error_set
10328 					(error, rte_errno,
10329 					 RTE_FLOW_ERROR_TYPE_ACTION,
10330 					 NULL,
10331 					 "cannot create fate queue");
10332 			sample_act->dr_queue_action = hrxq->action;
10333 			sample_idx->rix_hrxq = hrxq_idx;
10334 			sample_actions[sample_act->actions_num++] =
10335 						hrxq->action;
10336 			(*num_of_dest)++;
10337 			action_flags |= MLX5_FLOW_ACTION_RSS;
10338 			if (action_flags & MLX5_FLOW_ACTION_MARK)
10339 				dev_flow->handle->rix_hrxq = hrxq_idx;
10340 			dev_flow->handle->fate_action =
10341 					MLX5_FLOW_FATE_QUEUE;
10342 			break;
10343 		}
10344 		case RTE_FLOW_ACTION_TYPE_MARK:
10345 		{
10346 			uint32_t tag_be = mlx5_flow_mark_set
10347 				(((const struct rte_flow_action_mark *)
10348 				(sub_actions->conf))->id);
10349 
10350 			dev_flow->handle->mark = 1;
10351 			pre_rix = dev_flow->handle->dvh.rix_tag;
10352 			/* Save the mark resource before sample */
10353 			pre_r = dev_flow->dv.tag_resource;
10354 			if (flow_dv_tag_resource_register(dev, tag_be,
10355 						  dev_flow, error))
10356 				return -rte_errno;
10357 			MLX5_ASSERT(dev_flow->dv.tag_resource);
10358 			sample_act->dr_tag_action =
10359 				dev_flow->dv.tag_resource->action;
10360 			sample_idx->rix_tag =
10361 				dev_flow->handle->dvh.rix_tag;
10362 			sample_actions[sample_act->actions_num++] =
10363 						sample_act->dr_tag_action;
10364 			/* Recover the mark resource after sample */
10365 			dev_flow->dv.tag_resource = pre_r;
10366 			dev_flow->handle->dvh.rix_tag = pre_rix;
10367 			action_flags |= MLX5_FLOW_ACTION_MARK;
10368 			break;
10369 		}
10370 		case RTE_FLOW_ACTION_TYPE_COUNT:
10371 		{
10372 			if (!flow->counter) {
10373 				flow->counter =
10374 					flow_dv_translate_create_counter(dev,
10375 						dev_flow, sub_actions->conf,
10376 						0);
10377 				if (!flow->counter)
10378 					return rte_flow_error_set
10379 						(error, rte_errno,
10380 						RTE_FLOW_ERROR_TYPE_ACTION,
10381 						NULL,
10382 						"cannot create counter"
10383 						" object.");
10384 			}
10385 			sample_act->dr_cnt_action =
10386 				  (flow_dv_counter_get_by_idx(dev,
10387 				  flow->counter, NULL))->action;
10388 			sample_actions[sample_act->actions_num++] =
10389 						sample_act->dr_cnt_action;
10390 			action_flags |= MLX5_FLOW_ACTION_COUNT;
10391 			break;
10392 		}
10393 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
10394 		{
10395 			struct mlx5_flow_dv_port_id_action_resource
10396 					port_id_resource;
10397 			uint32_t port_id = 0;
10398 
10399 			memset(&port_id_resource, 0, sizeof(port_id_resource));
10400 			/* Save the port id resource before sample */
10401 			pre_rix = dev_flow->handle->rix_port_id_action;
10402 			pre_r = dev_flow->dv.port_id_action;
10403 			if (flow_dv_translate_action_port_id(dev, sub_actions,
10404 							     &port_id, error))
10405 				return -rte_errno;
10406 			port_id_resource.port_id = port_id;
10407 			if (flow_dv_port_id_action_resource_register
10408 			    (dev, &port_id_resource, dev_flow, error))
10409 				return -rte_errno;
10410 			sample_act->dr_port_id_action =
10411 				dev_flow->dv.port_id_action->action;
10412 			sample_idx->rix_port_id_action =
10413 				dev_flow->handle->rix_port_id_action;
10414 			sample_actions[sample_act->actions_num++] =
10415 						sample_act->dr_port_id_action;
10416 			/* Recover the port id resource after sample */
10417 			dev_flow->dv.port_id_action = pre_r;
10418 			dev_flow->handle->rix_port_id_action = pre_rix;
10419 			(*num_of_dest)++;
10420 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10421 			break;
10422 		}
10423 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10424 			/* Save the encap resource before sample */
10425 			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
10426 			pre_r = dev_flow->dv.encap_decap;
10427 			if (flow_dv_create_action_l2_encap(dev, sub_actions,
10428 							   dev_flow,
10429 							   attr->transfer,
10430 							   error))
10431 				return -rte_errno;
10432 			sample_act->dr_encap_action =
10433 				dev_flow->dv.encap_decap->action;
10434 			sample_idx->rix_encap_decap =
10435 				dev_flow->handle->dvh.rix_encap_decap;
10436 			sample_actions[sample_act->actions_num++] =
10437 						sample_act->dr_encap_action;
10438 			/* Recover the encap resource after sample */
10439 			dev_flow->dv.encap_decap = pre_r;
10440 			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
10441 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
10442 			break;
10443 		default:
10444 			return rte_flow_error_set(error, EINVAL,
10445 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10446 				NULL,
10447 				"unsupported action for sampler");
10448 		}
10449 	}
10450 	sample_act->action_flags = action_flags;
10451 	res->ft_id = dev_flow->dv.group;
10452 	if (attr->transfer) {
10453 		union {
10454 			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
10455 			uint64_t set_action;
10456 		} action_ctx = { .set_action = 0 };
10457 
10458 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10459 		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
10460 			 MLX5_MODIFICATION_TYPE_SET);
10461 		MLX5_SET(set_action_in, action_ctx.action_in, field,
10462 			 MLX5_MODI_META_REG_C_0);
10463 		MLX5_SET(set_action_in, action_ctx.action_in, data,
10464 			 priv->vport_meta_tag);
10465 		res->set_action = action_ctx.set_action;
10466 	} else if (attr->ingress) {
10467 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10468 	} else {
10469 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
10470 	}
10471 	return 0;
10472 }
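
/*
 * Illustrative rte_flow-level example (hypothetical values) of the
 * action this function translates: sample 1 of every 2 packets to
 * queue 0 while the original packet continues on its path.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action sub_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample = {
 *		.ratio = 2, // 1/ratio of the packets are sampled
 *		.actions = sub_acts,
 *	};
 */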
10473 
10474 /**
10475  * Create the sample action and register its resources.
10476  *
10477  * @param[in] dev
10478  *   Pointer to rte_eth_dev structure.
10479  * @param[in, out] dev_flow
10480  *   Pointer to the mlx5_flow.
10481  * @param[in] num_of_dest
10482  *   The num of destination.
10483  * @param[in, out] res
10484  *   Pointer to sample resource.
10485  * @param[in, out] mdest_res
10486  *   Pointer to destination array resource.
10487  * @param[in] sample_actions
10488  *   Pointer to sample path actions list.
10489  * @param[in] action_flags
10490  *   Holds the actions detected until now.
10491  * @param[out] error
10492  *   Pointer to the error structure.
10493  *
10494  * @return
10495  *   0 on success, a negative errno value otherwise and rte_errno is set.
10496  */
10497 static int
10498 flow_dv_create_action_sample(struct rte_eth_dev *dev,
10499 			     struct mlx5_flow *dev_flow,
10500 			     uint32_t num_of_dest,
10501 			     struct mlx5_flow_dv_sample_resource *res,
10502 			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
10503 			     void **sample_actions,
10504 			     uint64_t action_flags,
10505 			     struct rte_flow_error *error)
10506 {
10507 	/* update normal path action resource into last index of array */
10508 	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
10509 	struct mlx5_flow_sub_actions_list *sample_act =
10510 					&mdest_res->sample_act[dest_index];
10511 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10512 	struct mlx5_flow_rss_desc *rss_desc;
10513 	uint32_t normal_idx = 0;
10514 	struct mlx5_hrxq *hrxq;
10515 	uint32_t hrxq_idx;
10516 
10517 	MLX5_ASSERT(wks);
10518 	rss_desc = &wks->rss_desc;
10519 	if (num_of_dest > 1) {
10520 		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
10521 			/* Handle QP action for mirroring */
10522 			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10523 						    rss_desc, &hrxq_idx);
10524 			if (!hrxq)
10525 				return rte_flow_error_set
10526 				     (error, rte_errno,
10527 				      RTE_FLOW_ERROR_TYPE_ACTION,
10528 				      NULL,
10529 				      "cannot create rx queue");
10530 			normal_idx++;
10531 			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
10532 			sample_act->dr_queue_action = hrxq->action;
10533 			if (action_flags & MLX5_FLOW_ACTION_MARK)
10534 				dev_flow->handle->rix_hrxq = hrxq_idx;
10535 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10536 		}
10537 		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
10538 			normal_idx++;
10539 			mdest_res->sample_idx[dest_index].rix_encap_decap =
10540 				dev_flow->handle->dvh.rix_encap_decap;
10541 			sample_act->dr_encap_action =
10542 				dev_flow->dv.encap_decap->action;
10543 		}
10544 		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
10545 			normal_idx++;
10546 			mdest_res->sample_idx[dest_index].rix_port_id_action =
10547 				dev_flow->handle->rix_port_id_action;
10548 			sample_act->dr_port_id_action =
10549 				dev_flow->dv.port_id_action->action;
10550 		}
10551 		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
10552 			normal_idx++;
10553 			mdest_res->sample_idx[dest_index].rix_jump =
10554 				dev_flow->handle->rix_jump;
10555 			sample_act->dr_jump_action =
10556 				dev_flow->dv.jump->action;
10557 			dev_flow->handle->rix_jump = 0;
10558 		}
10559 		sample_act->actions_num = normal_idx;
10560 		/* update sample action resource into first index of array */
10561 		mdest_res->ft_type = res->ft_type;
10562 		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
10563 				sizeof(struct mlx5_flow_sub_actions_idx));
10564 		memcpy(&mdest_res->sample_act[0], &res->sample_act,
10565 				sizeof(struct mlx5_flow_sub_actions_list));
10566 		mdest_res->num_of_dest = num_of_dest;
10567 		if (flow_dv_dest_array_resource_register(dev, mdest_res,
10568 							 dev_flow, error))
10569 			return rte_flow_error_set(error, EINVAL,
10570 						  RTE_FLOW_ERROR_TYPE_ACTION,
10571 						  NULL, "can't create sample "
10572 						  "action");
10573 	} else {
10574 		res->sub_actions = sample_actions;
10575 		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
10576 			return rte_flow_error_set(error, EINVAL,
10577 						  RTE_FLOW_ERROR_TYPE_ACTION,
10578 						  NULL,
10579 						  "can't create sample action");
10580 	}
10581 	return 0;
10582 }
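
/*
 * Decision summary (a sketch of the logic above): one destination
 * means plain sampling through a single DR sampler object; more than
 * one means mirroring, where the sample path and the normal path are
 * merged into a destination array with the normal path in the last
 * slot.
 *
 *	if (num_of_dest > 1)
 *		flow_dv_dest_array_resource_register(...); // mirroring
 *	else
 *		flow_dv_sample_resource_register(...); // sampling
 */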
10583 
10584 /**
10585  * Remove an ASO age action from age actions list.
10586  *
10587  * @param[in] dev
10588  *   Pointer to the Ethernet device structure.
10589  * @param[in] age
10590  *   Pointer to the aso age action handler.
10591  */
10592 static void
10593 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10594 				struct mlx5_aso_age_action *age)
10595 {
10596 	struct mlx5_age_info *age_info;
10597 	struct mlx5_age_param *age_param = &age->age_params;
10598 	struct mlx5_priv *priv = dev->data->dev_private;
10599 	uint16_t expected = AGE_CANDIDATE;
10600 
10601 	age_info = GET_PORT_AGE_INFO(priv);
10602 	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10603 					 AGE_FREE, false, __ATOMIC_RELAXED,
10604 					 __ATOMIC_RELAXED)) {
10605 		/*
10606 		 * We need the lock even if the age timed out,
10607 		 * since the age action may still be in process.
10608 		 */
10609 		rte_spinlock_lock(&age_info->aged_sl);
10610 		LIST_REMOVE(age, next);
10611 		rte_spinlock_unlock(&age_info->aged_sl);
10612 		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10613 	}
10614 }
10615 
10616 /**
10617  * Release an ASO age action.
10618  *
10619  * @param[in] dev
10620  *   Pointer to the Ethernet device structure.
10621  * @param[in] age_idx
10622  *   Index of ASO age action to release.
10626  *
10627  * @return
10628  *   0 when age action was removed, otherwise the number of references.
10629  */
10630 static int
10631 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10632 {
10633 	struct mlx5_priv *priv = dev->data->dev_private;
10634 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10635 	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10636 	uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10637 
10638 	if (!ret) {
10639 		flow_dv_aso_age_remove_from_age(dev, age);
10640 		rte_spinlock_lock(&mng->free_sl);
10641 		LIST_INSERT_HEAD(&mng->free, age, next);
10642 		rte_spinlock_unlock(&mng->free_sl);
10643 	}
10644 	return ret;
10645 }
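
/*
 * Reference-count sketch: every user of the ASO age action holds one
 * reference; the last release above recycles the action to the free
 * list for reuse by flow_dv_aso_age_alloc().
 *
 *	if (flow_dv_aso_age_release(dev, age_idx) == 0)
 *		; // action returned to priv->sh->aso_age_mng->free
 */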
10646 
10647 /**
10648  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10649  *
10650  * @param[in] dev
10651  *   Pointer to the Ethernet device structure.
10652  *
10653  * @return
10654  *   0 on success, otherwise negative errno value and rte_errno is set.
10655  */
10656 static int
10657 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10658 {
10659 	struct mlx5_priv *priv = dev->data->dev_private;
10660 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10661 	void *old_pools = mng->pools;
10662 	uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10663 	uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10664 	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10665 
10666 	if (!pools) {
10667 		rte_errno = ENOMEM;
10668 		return -ENOMEM;
10669 	}
10670 	if (old_pools) {
10671 		memcpy(pools, old_pools,
10672 		       mng->n * sizeof(struct mlx5_aso_age_pool *));
10673 		mlx5_free(old_pools);
10674 	} else {
10675 		/* First ASO flow hit allocation - starting ASO data-path. */
10676 		int ret = mlx5_aso_queue_start(priv->sh);
10677 
10678 		if (ret) {
10679 			mlx5_free(pools);
10680 			return ret;
10681 		}
10682 	}
10683 	mng->n = resize;
10684 	mng->pools = pools;
10685 	return 0;
10686 }
10687 
10688 /**
10689  * Create and initialize a new ASO aging pool.
10690  *
10691  * @param[in] dev
10692  *   Pointer to the Ethernet device structure.
10693  * @param[out] age_free
10694  *   Where to put the pointer of a new age action.
10695  *
10696  * @return
10697  *   The age actions pool pointer and @p age_free is set on success,
10698  *   NULL otherwise and rte_errno is set.
10699  */
10700 static struct mlx5_aso_age_pool *
10701 flow_dv_age_pool_create(struct rte_eth_dev *dev,
10702 			struct mlx5_aso_age_action **age_free)
10703 {
10704 	struct mlx5_priv *priv = dev->data->dev_private;
10705 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10706 	struct mlx5_aso_age_pool *pool = NULL;
10707 	struct mlx5_devx_obj *obj = NULL;
10708 	uint32_t i;
10709 
10710 	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
10711 						    priv->sh->pdn);
10712 	if (!obj) {
10713 		rte_errno = ENODATA;
10714 		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
10715 		return NULL;
10716 	}
10717 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10718 	if (!pool) {
10719 		claim_zero(mlx5_devx_cmd_destroy(obj));
10720 		rte_errno = ENOMEM;
10721 		return NULL;
10722 	}
10723 	pool->flow_hit_aso_obj = obj;
10724 	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
10725 	rte_spinlock_lock(&mng->resize_sl);
10726 	pool->index = mng->next;
10727 	/* Resize pools array if there is no room for the new pool in it. */
10728 	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
10729 		claim_zero(mlx5_devx_cmd_destroy(obj));
10730 		mlx5_free(pool);
10731 		rte_spinlock_unlock(&mng->resize_sl);
10732 		return NULL;
10733 	}
10734 	mng->pools[pool->index] = pool;
10735 	mng->next++;
10736 	rte_spinlock_unlock(&mng->resize_sl);
10737 	/* Assign the first action in the new pool, the rest go to free list. */
10738 	*age_free = &pool->actions[0];
10739 	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
10740 		pool->actions[i].offset = i;
10741 		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
10742 	}
10743 	return pool;
10744 }
10745 
10746 /**
10747  * Allocate an ASO aging bit.
10748  *
10749  * @param[in] dev
10750  *   Pointer to the Ethernet device structure.
10751  * @param[out] error
10752  *   Pointer to the error structure.
10753  *
10754  * @return
10755  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
10756  */
10757 static uint32_t
10758 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
10759 {
10760 	struct mlx5_priv *priv = dev->data->dev_private;
10761 	const struct mlx5_aso_age_pool *pool;
10762 	struct mlx5_aso_age_action *age_free = NULL;
10763 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10764 
10765 	MLX5_ASSERT(mng);
10766 	/* Try to get the next free age action bit. */
10767 	rte_spinlock_lock(&mng->free_sl);
10768 	age_free = LIST_FIRST(&mng->free);
10769 	if (age_free) {
10770 		LIST_REMOVE(age_free, next);
10771 	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
10772 		rte_spinlock_unlock(&mng->free_sl);
10773 		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
10774 				   NULL, "failed to create ASO age pool");
10775 		return 0; /* 0 is an error. */
10776 	}
10777 	rte_spinlock_unlock(&mng->free_sl);
10778 	pool = container_of
10779 	  ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
10780 		  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
10781 								       actions);
10782 	if (!age_free->dr_action) {
10783 		int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
10784 						 error);
10785 
10786 		if (reg_c < 0) {
10787 			rte_flow_error_set(error, rte_errno,
10788 					   RTE_FLOW_ERROR_TYPE_ACTION,
10789 					   NULL, "failed to get reg_c "
10790 					   "for ASO flow hit");
10791 			return 0; /* 0 is an error. */
10792 		}
10793 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
10794 		age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
10795 				(priv->sh->rx_domain,
10796 				 pool->flow_hit_aso_obj->obj, age_free->offset,
10797 				 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
10798 				 (reg_c - REG_C_0));
10799 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
10800 		if (!age_free->dr_action) {
10801 			rte_errno = errno;
10802 			rte_spinlock_lock(&mng->free_sl);
10803 			LIST_INSERT_HEAD(&mng->free, age_free, next);
10804 			rte_spinlock_unlock(&mng->free_sl);
10805 			rte_flow_error_set(error, rte_errno,
10806 					   RTE_FLOW_ERROR_TYPE_ACTION,
10807 					   NULL, "failed to create ASO "
10808 					   "flow hit action");
10809 			return 0; /* 0 is an error. */
10810 		}
10811 	}
10812 	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
10813 	return pool->index | ((age_free->offset + 1) << 16);
10814 }
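
/*
 * Index-encoding note with a decode sketch: the value returned above
 * packs the pool index into the low 16 bits and (offset + 1) into the
 * upper bits, keeping 0 reserved as the error value.
 * flow_aso_age_get_by_idx() implements the real decode.
 *
 *	uint16_t pool_idx = age_idx & UINT16_MAX;
 *	uint16_t offset = (age_idx >> 16) - 1;
 */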
10815 
10816 /**
10817  * Create an age action using the ASO mechanism.
10818  *
10819  * @param[in] dev
10820  *   Pointer to rte_eth_dev structure.
10821  * @param[in] age
10822  *   Pointer to the aging action configuration.
10823  * @param[out] error
10824  *   Pointer to the error structure.
10825  *
10826  * @return
10827  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
10828  */
10829 static uint32_t
10830 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
10831 				 const struct rte_flow_action_age *age,
10832 				 struct rte_flow_error *error)
10833 {
10834 	uint32_t age_idx = 0;
10835 	struct mlx5_aso_age_action *aso_age;
10836 
10837 	age_idx = flow_dv_aso_age_alloc(dev, error);
10838 	if (!age_idx)
10839 		return 0;
10840 	aso_age = flow_aso_age_get_by_idx(dev, age_idx);
10841 	aso_age->age_params.context = age->context;
10842 	aso_age->age_params.timeout = age->timeout;
10843 	aso_age->age_params.port_id = dev->data->port_id;
10844 	__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
10845 			 __ATOMIC_RELAXED);
10846 	__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
10847 			 __ATOMIC_RELAXED);
10848 	return age_idx;
10849 }
10850 
10851 /**
10852  * Fill the flow with DV spec, lock free
10853  * (mutex should be acquired by caller).
10854  *
10855  * @param[in] dev
10856  *   Pointer to rte_eth_dev structure.
10857  * @param[in, out] dev_flow
10858  *   Pointer to the sub flow.
10859  * @param[in] attr
10860  *   Pointer to the flow attributes.
10861  * @param[in] items
10862  *   Pointer to the list of items.
10863  * @param[in] actions
10864  *   Pointer to the list of actions.
10865  * @param[out] error
10866  *   Pointer to the error structure.
10867  *
10868  * @return
10869  *   0 on success, a negative errno value otherwise and rte_errno is set.
10870  */
10871 static int
10872 flow_dv_translate(struct rte_eth_dev *dev,
10873 		  struct mlx5_flow *dev_flow,
10874 		  const struct rte_flow_attr *attr,
10875 		  const struct rte_flow_item items[],
10876 		  const struct rte_flow_action actions[],
10877 		  struct rte_flow_error *error)
10878 {
10879 	struct mlx5_priv *priv = dev->data->dev_private;
10880 	struct mlx5_dev_config *dev_conf = &priv->config;
10881 	struct rte_flow *flow = dev_flow->flow;
10882 	struct mlx5_flow_handle *handle = dev_flow->handle;
10883 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10884 	struct mlx5_flow_rss_desc *rss_desc;
10885 	uint64_t item_flags = 0;
10886 	uint64_t last_item = 0;
10887 	uint64_t action_flags = 0;
10888 	struct mlx5_flow_dv_matcher matcher = {
10889 		.mask = {
10890 			.size = sizeof(matcher.mask.buf) -
10891 				MLX5_ST_SZ_BYTES(fte_match_set_misc4),
10892 		},
10893 	};
10894 	int actions_n = 0;
10895 	bool actions_end = false;
10896 	union {
10897 		struct mlx5_flow_dv_modify_hdr_resource res;
10898 		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
10899 			    sizeof(struct mlx5_modification_cmd) *
10900 			    (MLX5_MAX_MODIFY_NUM + 1)];
10901 	} mhdr_dummy;
10902 	struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
10903 	const struct rte_flow_action_count *count = NULL;
10904 	const struct rte_flow_action_age *age = NULL;
10905 	union flow_dv_attr flow_attr = { .attr = 0 };
10906 	uint32_t tag_be;
10907 	union mlx5_flow_tbl_key tbl_key;
10908 	uint32_t modify_action_position = UINT32_MAX;
10909 	void *match_mask = matcher.mask.buf;
10910 	void *match_value = dev_flow->dv.value.buf;
10911 	uint8_t next_protocol = 0xff;
10912 	struct rte_vlan_hdr vlan = { 0 };
10913 	struct mlx5_flow_dv_dest_array_resource mdest_res;
10914 	struct mlx5_flow_dv_sample_resource sample_res;
10915 	void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10916 	const struct rte_flow_action_sample *sample = NULL;
10917 	struct mlx5_flow_sub_actions_list *sample_act;
10918 	uint32_t sample_act_pos = UINT32_MAX;
10919 	uint32_t num_of_dest = 0;
10920 	int tmp_actions_n = 0;
10921 	uint32_t table;
10922 	int ret = 0;
10923 	const struct mlx5_flow_tunnel *tunnel;
10924 	struct flow_grp_info grp_info = {
10925 		.external = !!dev_flow->external,
10926 		.transfer = !!attr->transfer,
10927 		.fdb_def_rule = !!priv->fdb_def_rule,
10928 		.skip_scale = dev_flow->skip_scale &
10929 			(1 << MLX5_SCALE_FLOW_GROUP_BIT),
10930 	};
10931 
10932 	if (!wks)
10933 		return rte_flow_error_set(error, ENOMEM,
10934 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10935 					  NULL,
10936 					  "failed to push flow workspace");
10937 	rss_desc = &wks->rss_desc;
10938 	memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
10939 	memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
10940 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10941 					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10942 	/* update normal path action resource into last index of array */
10943 	sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
10944 	tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
10945 		 flow_items_to_tunnel(items) :
10946 		 is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
10947 		 flow_actions_to_tunnel(actions) :
10948 		 dev_flow->tunnel ? dev_flow->tunnel : NULL;
10951 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
10952 				(dev, tunnel, attr, items, actions);
10953 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
10954 				       &grp_info, error);
10955 	if (ret)
10956 		return ret;
10957 	dev_flow->dv.group = table;
10958 	if (attr->transfer)
10959 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10960 	/* number of actions must be set to 0 in case of dirty stack. */
10961 	mhdr_res->actions_num = 0;
10962 	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
10963 		/*
10964 		 * Do not add a decap action if the match rule drops the
10965 		 * packet: HW rejects rules combining decap & drop.
10966 		 *
10967 		 * If a tunnel match rule was inserted before the matching
10968 		 * tunnel set rule, the flow table used in the match rule
10969 		 * must be registered. The current implementation handles
10970 		 * that in flow_dv_match_register() at the function end.
10971 		 */
10972 		bool add_decap = true;
10973 		const struct rte_flow_action *ptr = actions;
10974 
10975 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
10976 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
10977 				add_decap = false;
10978 				break;
10979 			}
10980 		}
10981 		if (add_decap) {
10982 			if (flow_dv_create_action_l2_decap(dev, dev_flow,
10983 							   attr->transfer,
10984 							   error))
10985 				return -rte_errno;
10986 			dev_flow->dv.actions[actions_n++] =
10987 					dev_flow->dv.encap_decap->action;
10988 			action_flags |= MLX5_FLOW_ACTION_DECAP;
10989 		}
10990 	}
10991 	for (; !actions_end ; actions++) {
10992 		const struct rte_flow_action_queue *queue;
10993 		const struct rte_flow_action_rss *rss;
10994 		const struct rte_flow_action *action = actions;
10995 		const uint8_t *rss_key;
10996 		const struct rte_flow_action_meter *mtr;
10997 		struct mlx5_flow_tbl_resource *tbl;
10998 		struct mlx5_aso_age_action *age_act;
10999 		uint32_t port_id = 0;
11000 		struct mlx5_flow_dv_port_id_action_resource port_id_resource;
11001 		int action_type = actions->type;
11002 		const struct rte_flow_action *found_action = NULL;
11003 		struct mlx5_flow_meter *fm = NULL;
11004 		uint32_t jump_group = 0;
11005 
11006 		if (!mlx5_flow_os_action_supported(action_type))
11007 			return rte_flow_error_set(error, ENOTSUP,
11008 						  RTE_FLOW_ERROR_TYPE_ACTION,
11009 						  actions,
11010 						  "action not supported");
11011 		switch (action_type) {
11012 		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
11013 			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
11014 			break;
11015 		case RTE_FLOW_ACTION_TYPE_VOID:
11016 			break;
11017 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
11018 			if (flow_dv_translate_action_port_id(dev, action,
11019 							     &port_id, error))
11020 				return -rte_errno;
11021 			port_id_resource.port_id = port_id;
11022 			MLX5_ASSERT(!handle->rix_port_id_action);
11023 			if (flow_dv_port_id_action_resource_register
11024 			    (dev, &port_id_resource, dev_flow, error))
11025 				return -rte_errno;
11026 			dev_flow->dv.actions[actions_n++] =
11027 					dev_flow->dv.port_id_action->action;
11028 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11029 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
11030 			sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11031 			num_of_dest++;
11032 			break;
11033 		case RTE_FLOW_ACTION_TYPE_FLAG:
11034 			action_flags |= MLX5_FLOW_ACTION_FLAG;
11035 			dev_flow->handle->mark = 1;
11036 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11037 				struct rte_flow_action_mark mark = {
11038 					.id = MLX5_FLOW_MARK_DEFAULT,
11039 				};
11040 
11041 				if (flow_dv_convert_action_mark(dev, &mark,
11042 								mhdr_res,
11043 								error))
11044 					return -rte_errno;
11045 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11046 				break;
11047 			}
11048 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
11049 			/*
11050 			 * Only one FLAG or MARK is supported per device flow
11051 			 * right now. So the pointer to the tag resource must be
11052 			 * zero before the register process.
11053 			 */
11054 			MLX5_ASSERT(!handle->dvh.rix_tag);
11055 			if (flow_dv_tag_resource_register(dev, tag_be,
11056 							  dev_flow, error))
11057 				return -rte_errno;
11058 			MLX5_ASSERT(dev_flow->dv.tag_resource);
11059 			dev_flow->dv.actions[actions_n++] =
11060 					dev_flow->dv.tag_resource->action;
11061 			break;
11062 		case RTE_FLOW_ACTION_TYPE_MARK:
11063 			action_flags |= MLX5_FLOW_ACTION_MARK;
11064 			dev_flow->handle->mark = 1;
11065 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11066 				const struct rte_flow_action_mark *mark =
11067 					(const struct rte_flow_action_mark *)
11068 						actions->conf;
11069 
11070 				if (flow_dv_convert_action_mark(dev, mark,
11071 								mhdr_res,
11072 								error))
11073 					return -rte_errno;
11074 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11075 				break;
11076 			}
11077 			/* Fall-through */
11078 		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11079 			/* Legacy (non-extensive) MARK action. */
11080 			tag_be = mlx5_flow_mark_set
11081 			      (((const struct rte_flow_action_mark *)
11082 			       (actions->conf))->id);
11083 			MLX5_ASSERT(!handle->dvh.rix_tag);
11084 			if (flow_dv_tag_resource_register(dev, tag_be,
11085 							  dev_flow, error))
11086 				return -rte_errno;
11087 			MLX5_ASSERT(dev_flow->dv.tag_resource);
11088 			dev_flow->dv.actions[actions_n++] =
11089 					dev_flow->dv.tag_resource->action;
11090 			break;
11091 		case RTE_FLOW_ACTION_TYPE_SET_META:
11092 			if (flow_dv_convert_action_set_meta
11093 				(dev, mhdr_res, attr,
11094 				 (const struct rte_flow_action_set_meta *)
11095 				  actions->conf, error))
11096 				return -rte_errno;
11097 			action_flags |= MLX5_FLOW_ACTION_SET_META;
11098 			break;
11099 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
11100 			if (flow_dv_convert_action_set_tag
11101 				(dev, mhdr_res,
11102 				 (const struct rte_flow_action_set_tag *)
11103 				  actions->conf, error))
11104 				return -rte_errno;
11105 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11106 			break;
11107 		case RTE_FLOW_ACTION_TYPE_DROP:
11108 			action_flags |= MLX5_FLOW_ACTION_DROP;
11109 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11110 			break;
11111 		case RTE_FLOW_ACTION_TYPE_QUEUE:
11112 			queue = actions->conf;
11113 			rss_desc->queue_num = 1;
11114 			rss_desc->queue[0] = queue->index;
11115 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
11116 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11117 			sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11118 			num_of_dest++;
11119 			break;
11120 		case RTE_FLOW_ACTION_TYPE_RSS:
11121 			rss = actions->conf;
11122 			memcpy(rss_desc->queue, rss->queue,
11123 			       rss->queue_num * sizeof(uint16_t));
11124 			rss_desc->queue_num = rss->queue_num;
11125 			/* NULL RSS key indicates default RSS key. */
11126 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
11127 			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11128 			/*
11129 			 * rss->level and rss->types should be set in advance
11130 			 * when expanding items for RSS.
11131 			 */
11132 			action_flags |= MLX5_FLOW_ACTION_RSS;
11133 			dev_flow->handle->fate_action = rss_desc->shared_rss ?
11134 				MLX5_FLOW_FATE_SHARED_RSS :
11135 				MLX5_FLOW_FATE_QUEUE;
11136 			break;
11137 		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11138 			flow->age = (uint32_t)(uintptr_t)(action->conf);
11139 			age_act = flow_aso_age_get_by_idx(dev, flow->age);
11140 			__atomic_fetch_add(&age_act->refcnt, 1,
11141 					   __ATOMIC_RELAXED);
11142 			dev_flow->dv.actions[actions_n++] = age_act->dr_action;
11143 			action_flags |= MLX5_FLOW_ACTION_AGE;
11144 			break;
11145 		case RTE_FLOW_ACTION_TYPE_AGE:
11146 			if (priv->sh->flow_hit_aso_en && attr->group) {
11147 				/*
11148 				 * Create one shared age action, to be used
11149 				 * by all sub-flows.
11150 				 */
11151 				if (!flow->age) {
11152 					flow->age =
11153 						flow_dv_translate_create_aso_age
11154 							(dev, action->conf,
11155 							 error);
11156 					if (!flow->age)
11157 						return rte_flow_error_set
11158 						(error, rte_errno,
11159 						 RTE_FLOW_ERROR_TYPE_ACTION,
11160 						 NULL,
11161 						 "can't create ASO age action");
11162 				}
11163 				dev_flow->dv.actions[actions_n++] =
11164 					  (flow_aso_age_get_by_idx
11165 						(dev, flow->age))->dr_action;
11166 				action_flags |= MLX5_FLOW_ACTION_AGE;
11167 				break;
11168 			}
11169 			/* Fall-through */
11170 		case RTE_FLOW_ACTION_TYPE_COUNT:
11171 			if (!dev_conf->devx) {
11172 				return rte_flow_error_set
11173 					      (error, ENOTSUP,
11174 					       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11175 					       NULL,
11176 					       "count action not supported");
11177 			}
11178 			/* Save information first, will apply later. */
11179 			if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
11180 				count = action->conf;
11181 			else
11182 				age = action->conf;
11183 			action_flags |= MLX5_FLOW_ACTION_COUNT;
11184 			break;
11185 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11186 			dev_flow->dv.actions[actions_n++] =
11187 						priv->sh->pop_vlan_action;
11188 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11189 			break;
11190 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11191 			if (!(action_flags &
11192 			      MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11193 				flow_dev_get_vlan_info_from_items(items, &vlan);
11194 			vlan.eth_proto = rte_be_to_cpu_16
11195 			     ((((const struct rte_flow_action_of_push_vlan *)
11196 						   actions->conf)->ethertype));
11197 			found_action = mlx5_flow_find_action
11198 					(actions + 1,
11199 					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11200 			if (found_action)
11201 				mlx5_update_vlan_vid_pcp(found_action, &vlan);
11202 			found_action = mlx5_flow_find_action
11203 					(actions + 1,
11204 					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11205 			if (found_action)
11206 				mlx5_update_vlan_vid_pcp(found_action, &vlan);
11207 			if (flow_dv_create_action_push_vlan
11208 					    (dev, attr, &vlan, dev_flow, error))
11209 				return -rte_errno;
11210 			dev_flow->dv.actions[actions_n++] =
11211 					dev_flow->dv.push_vlan_res->action;
11212 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11213 			break;
11214 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11215 			/* The of_push_vlan action already handled this action. */
11216 			MLX5_ASSERT(action_flags &
11217 				    MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11218 			break;
11219 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11220 			if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11221 				break;
11222 			flow_dev_get_vlan_info_from_items(items, &vlan);
11223 			mlx5_update_vlan_vid_pcp(actions, &vlan);
11224 			/* If no VLAN push - this is a modify header action */
11225 			if (flow_dv_convert_action_modify_vlan_vid
11226 						(mhdr_res, actions, error))
11227 				return -rte_errno;
11228 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11229 			break;
11230 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11231 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11232 			if (flow_dv_create_action_l2_encap(dev, actions,
11233 							   dev_flow,
11234 							   attr->transfer,
11235 							   error))
11236 				return -rte_errno;
11237 			dev_flow->dv.actions[actions_n++] =
11238 					dev_flow->dv.encap_decap->action;
11239 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
11240 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11241 				sample_act->action_flags |=
11242 							MLX5_FLOW_ACTION_ENCAP;
11243 			break;
11244 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11245 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11246 			if (flow_dv_create_action_l2_decap(dev, dev_flow,
11247 							   attr->transfer,
11248 							   error))
11249 				return -rte_errno;
11250 			dev_flow->dv.actions[actions_n++] =
11251 					dev_flow->dv.encap_decap->action;
11252 			action_flags |= MLX5_FLOW_ACTION_DECAP;
11253 			break;
11254 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11255 			/* Handle encap with preceding decap. */
11256 			if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11257 				if (flow_dv_create_action_raw_encap
11258 					(dev, actions, dev_flow, attr, error))
11259 					return -rte_errno;
11260 				dev_flow->dv.actions[actions_n++] =
11261 					dev_flow->dv.encap_decap->action;
11262 			} else {
11263 				/* Handle encap without preceding decap. */
11264 				if (flow_dv_create_action_l2_encap
11265 				    (dev, actions, dev_flow, attr->transfer,
11266 				     error))
11267 					return -rte_errno;
11268 				dev_flow->dv.actions[actions_n++] =
11269 					dev_flow->dv.encap_decap->action;
11270 			}
11271 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
11272 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11273 				sample_act->action_flags |=
11274 							MLX5_FLOW_ACTION_ENCAP;
11275 			break;
11276 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
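			/* Peek past any VOID actions to see whether an encap follows the decap. */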
11277 			while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11278 				;
11279 			if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11280 				if (flow_dv_create_action_l2_decap
11281 				    (dev, dev_flow, attr->transfer, error))
11282 					return -rte_errno;
11283 				dev_flow->dv.actions[actions_n++] =
11284 					dev_flow->dv.encap_decap->action;
11285 			}
11286 			/* If decap is followed by encap, handle it at encap. */
11287 			action_flags |= MLX5_FLOW_ACTION_DECAP;
11288 			break;
11289 		case RTE_FLOW_ACTION_TYPE_JUMP:
11290 			jump_group = ((const struct rte_flow_action_jump *)
11291 							action->conf)->group;
11292 			grp_info.std_tbl_fix = 0;
11293 			if (dev_flow->skip_scale &
11294 				(1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11295 				grp_info.skip_scale = 1;
11296 			else
11297 				grp_info.skip_scale = 0;
11298 			ret = mlx5_flow_group_to_table(dev, tunnel,
11299 						       jump_group,
11300 						       &table,
11301 						       &grp_info, error);
11302 			if (ret)
11303 				return ret;
11304 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11305 						       attr->transfer,
11306 						       !!dev_flow->external,
11307 						       tunnel, jump_group, 0,
11308 						       error);
11309 			if (!tbl)
11310 				return rte_flow_error_set
11311 						(error, errno,
11312 						 RTE_FLOW_ERROR_TYPE_ACTION,
11313 						 NULL,
11314 						 "cannot create jump action.");
11315 			if (flow_dv_jump_tbl_resource_register
11316 			    (dev, tbl, dev_flow, error)) {
11317 				flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11318 				return rte_flow_error_set
11319 						(error, errno,
11320 						 RTE_FLOW_ERROR_TYPE_ACTION,
11321 						 NULL,
11322 						 "cannot create jump action.");
11323 			}
11324 			dev_flow->dv.actions[actions_n++] =
11325 					dev_flow->dv.jump->action;
11326 			action_flags |= MLX5_FLOW_ACTION_JUMP;
11327 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11328 			sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11329 			num_of_dest++;
11330 			break;
11331 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11332 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11333 			if (flow_dv_convert_action_modify_mac
11334 					(mhdr_res, actions, error))
11335 				return -rte_errno;
11336 			action_flags |= actions->type ==
11337 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11338 					MLX5_FLOW_ACTION_SET_MAC_SRC :
11339 					MLX5_FLOW_ACTION_SET_MAC_DST;
11340 			break;
11341 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11342 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11343 			if (flow_dv_convert_action_modify_ipv4
11344 					(mhdr_res, actions, error))
11345 				return -rte_errno;
11346 			action_flags |= actions->type ==
11347 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11348 					MLX5_FLOW_ACTION_SET_IPV4_SRC :
11349 					MLX5_FLOW_ACTION_SET_IPV4_DST;
11350 			break;
11351 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11352 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11353 			if (flow_dv_convert_action_modify_ipv6
11354 					(mhdr_res, actions, error))
11355 				return -rte_errno;
11356 			action_flags |= actions->type ==
11357 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11358 					MLX5_FLOW_ACTION_SET_IPV6_SRC :
11359 					MLX5_FLOW_ACTION_SET_IPV6_DST;
11360 			break;
11361 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11362 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11363 			if (flow_dv_convert_action_modify_tp
11364 					(mhdr_res, actions, items,
11365 					 &flow_attr, dev_flow, !!(action_flags &
11366 					 MLX5_FLOW_ACTION_DECAP), error))
11367 				return -rte_errno;
11368 			action_flags |= actions->type ==
11369 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11370 					MLX5_FLOW_ACTION_SET_TP_SRC :
11371 					MLX5_FLOW_ACTION_SET_TP_DST;
11372 			break;
11373 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11374 			if (flow_dv_convert_action_modify_dec_ttl
11375 					(mhdr_res, items, &flow_attr, dev_flow,
11376 					 !!(action_flags &
11377 					 MLX5_FLOW_ACTION_DECAP), error))
11378 				return -rte_errno;
11379 			action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11380 			break;
11381 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
11382 			if (flow_dv_convert_action_modify_ttl
11383 					(mhdr_res, actions, items, &flow_attr,
11384 					 dev_flow, !!(action_flags &
11385 					 MLX5_FLOW_ACTION_DECAP), error))
11386 				return -rte_errno;
11387 			action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11388 			break;
11389 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11390 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11391 			if (flow_dv_convert_action_modify_tcp_seq
11392 					(mhdr_res, actions, error))
11393 				return -rte_errno;
11394 			action_flags |= actions->type ==
11395 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11396 					MLX5_FLOW_ACTION_INC_TCP_SEQ :
11397 					MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11398 			break;
11399 
11400 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11401 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11402 			if (flow_dv_convert_action_modify_tcp_ack
11403 					(mhdr_res, actions, error))
11404 				return -rte_errno;
11405 			action_flags |= actions->type ==
11406 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11407 					MLX5_FLOW_ACTION_INC_TCP_ACK :
11408 					MLX5_FLOW_ACTION_DEC_TCP_ACK;
11409 			break;
11410 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11411 			if (flow_dv_convert_action_set_reg
11412 					(mhdr_res, actions, error))
11413 				return -rte_errno;
11414 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11415 			break;
11416 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11417 			if (flow_dv_convert_action_copy_mreg
11418 					(dev, mhdr_res, actions, error))
11419 				return -rte_errno;
11420 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11421 			break;
11422 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11423 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11424 			dev_flow->handle->fate_action =
11425 					MLX5_FLOW_FATE_DEFAULT_MISS;
11426 			break;
11427 		case RTE_FLOW_ACTION_TYPE_METER:
11428 			mtr = actions->conf;
11429 			if (!flow->meter) {
11430 				fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
11431 							    attr, error);
11432 				if (!fm)
11433 					return rte_flow_error_set(error,
11434 						rte_errno,
11435 						RTE_FLOW_ERROR_TYPE_ACTION,
11436 						NULL,
11437 						"meter not found "
11438 						"or invalid parameters");
11439 				flow->meter = fm->idx;
11440 			}
11441 			/* Set the meter action. */
11442 			if (!fm) {
11443 				fm = mlx5_ipool_get(priv->sh->ipool
11444 						[MLX5_IPOOL_MTR], flow->meter);
11445 				if (!fm)
11446 					return rte_flow_error_set(error,
11447 						rte_errno,
11448 						RTE_FLOW_ERROR_TYPE_ACTION,
11449 						NULL,
11450 						"meter not found "
11451 						"or invalid parameters");
11452 			}
11453 			dev_flow->dv.actions[actions_n++] =
11454 				fm->mfts->meter_action;
11455 			action_flags |= MLX5_FLOW_ACTION_METER;
11456 			break;
11457 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11458 			if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11459 							      actions, error))
11460 				return -rte_errno;
11461 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11462 			break;
11463 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11464 			if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11465 							      actions, error))
11466 				return -rte_errno;
11467 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11468 			break;
11469 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
11470 			sample_act_pos = actions_n;
11471 			sample = (const struct rte_flow_action_sample *)
11472 				 action->conf;
11473 			actions_n++;
11474 			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
11475 			/* Put the encap action into the group if used with port ID. */
11476 			if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11477 			    (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11478 				sample_act->action_flags |=
11479 							MLX5_FLOW_ACTION_ENCAP;
11480 			break;
11481 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11482 			if (flow_dv_convert_action_modify_field
11483 					(dev, mhdr_res, actions, attr, error))
11484 				return -rte_errno;
11485 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11486 			break;
11487 		case RTE_FLOW_ACTION_TYPE_END:
11488 			actions_end = true;
11489 			if (mhdr_res->actions_num) {
11490 				/* create modify action if needed. */
11491 				if (flow_dv_modify_hdr_resource_register
11492 					(dev, mhdr_res, dev_flow, error))
11493 					return -rte_errno;
11494 				dev_flow->dv.actions[modify_action_position] =
11495 					handle->dvh.modify_hdr->action;
11496 			}
11497 			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11498 				/*
11499 				 * Create one count action, to be used
11500 				 * by all sub-flows.
11501 				 */
11502 				if (!flow->counter) {
11503 					flow->counter =
11504 						flow_dv_translate_create_counter
11505 							(dev, dev_flow, count,
11506 							 age);
11507 					if (!flow->counter)
11508 						return rte_flow_error_set
11509 						(error, rte_errno,
11510 						 RTE_FLOW_ERROR_TYPE_ACTION,
11511 						 NULL, "cannot create counter"
11512 						 " object.");
11513 				}
11514 				dev_flow->dv.actions[actions_n] =
11515 					  (flow_dv_counter_get_by_idx(dev,
11516 					  flow->counter, NULL))->action;
11517 				actions_n++;
11518 			}
11519 		default:
11520 			break;
11521 		}
11522 		if (mhdr_res->actions_num &&
11523 		    modify_action_position == UINT32_MAX)
11524 			modify_action_position = actions_n++;
11525 	}
11526 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11527 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11528 		int item_type = items->type;
11529 
11530 		if (!mlx5_flow_os_item_supported(item_type))
11531 			return rte_flow_error_set(error, ENOTSUP,
11532 						  RTE_FLOW_ERROR_TYPE_ITEM,
11533 						  NULL, "item not supported");
11534 		switch (item_type) {
11535 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
11536 			flow_dv_translate_item_port_id
11537 				(dev, match_mask, match_value, items, attr);
11538 			last_item = MLX5_FLOW_ITEM_PORT_ID;
11539 			break;
11540 		case RTE_FLOW_ITEM_TYPE_ETH:
11541 			flow_dv_translate_item_eth(match_mask, match_value,
11542 						   items, tunnel,
11543 						   dev_flow->dv.group);
11544 			matcher.priority = action_flags &
11545 					MLX5_FLOW_ACTION_DEFAULT_MISS &&
11546 					!dev_flow->external ?
11547 					MLX5_PRIORITY_MAP_L3 :
11548 					MLX5_PRIORITY_MAP_L2;
11549 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11550 					     MLX5_FLOW_LAYER_OUTER_L2;
11551 			break;
11552 		case RTE_FLOW_ITEM_TYPE_VLAN:
11553 			flow_dv_translate_item_vlan(dev_flow,
11554 						    match_mask, match_value,
11555 						    items, tunnel,
11556 						    dev_flow->dv.group);
11557 			matcher.priority = MLX5_PRIORITY_MAP_L2;
11558 			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11559 					      MLX5_FLOW_LAYER_INNER_VLAN) :
11560 					     (MLX5_FLOW_LAYER_OUTER_L2 |
11561 					      MLX5_FLOW_LAYER_OUTER_VLAN);
11562 			break;
11563 		case RTE_FLOW_ITEM_TYPE_IPV4:
11564 			mlx5_flow_tunnel_ip_check(items, next_protocol,
11565 						  &item_flags, &tunnel);
11566 			flow_dv_translate_item_ipv4(match_mask, match_value,
11567 						    items, tunnel,
11568 						    dev_flow->dv.group);
11569 			matcher.priority = MLX5_PRIORITY_MAP_L3;
11570 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11571 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
11572 			if (items->mask != NULL &&
11573 			    ((const struct rte_flow_item_ipv4 *)
11574 			     items->mask)->hdr.next_proto_id) {
11575 				next_protocol =
11576 					((const struct rte_flow_item_ipv4 *)
11577 					 (items->spec))->hdr.next_proto_id;
11578 				next_protocol &=
11579 					((const struct rte_flow_item_ipv4 *)
11580 					 (items->mask))->hdr.next_proto_id;
11581 			} else {
11582 				/* Reset for inner layer. */
11583 				next_protocol = 0xff;
11584 			}
11585 			break;
11586 		case RTE_FLOW_ITEM_TYPE_IPV6:
11587 			mlx5_flow_tunnel_ip_check(items, next_protocol,
11588 						  &item_flags, &tunnel);
11589 			flow_dv_translate_item_ipv6(match_mask, match_value,
11590 						    items, tunnel,
11591 						    dev_flow->dv.group);
11592 			matcher.priority = MLX5_PRIORITY_MAP_L3;
11593 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11594 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11595 			if (items->mask != NULL &&
11596 			    ((const struct rte_flow_item_ipv6 *)
11597 			     items->mask)->hdr.proto) {
11598 				next_protocol =
11599 					((const struct rte_flow_item_ipv6 *)
11600 					 items->spec)->hdr.proto;
11601 				next_protocol &=
11602 					((const struct rte_flow_item_ipv6 *)
11603 					 items->mask)->hdr.proto;
11604 			} else {
11605 				/* Reset for inner layer. */
11606 				next_protocol = 0xff;
11607 			}
11608 			break;
11609 		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11610 			flow_dv_translate_item_ipv6_frag_ext(match_mask,
11611 							     match_value,
11612 							     items, tunnel);
11613 			last_item = tunnel ?
11614 					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11615 					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11616 			if (items->mask != NULL &&
11617 			    ((const struct rte_flow_item_ipv6_frag_ext *)
11618 			     items->mask)->hdr.next_header) {
11619 				next_protocol =
11620 				((const struct rte_flow_item_ipv6_frag_ext *)
11621 				 items->spec)->hdr.next_header;
11622 				next_protocol &=
11623 				((const struct rte_flow_item_ipv6_frag_ext *)
11624 				 items->mask)->hdr.next_header;
11625 			} else {
11626 				/* Reset for inner layer. */
11627 				next_protocol = 0xff;
11628 			}
11629 			break;
11630 		case RTE_FLOW_ITEM_TYPE_TCP:
11631 			flow_dv_translate_item_tcp(match_mask, match_value,
11632 						   items, tunnel);
11633 			matcher.priority = MLX5_PRIORITY_MAP_L4;
11634 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11635 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
11636 			break;
11637 		case RTE_FLOW_ITEM_TYPE_UDP:
11638 			flow_dv_translate_item_udp(match_mask, match_value,
11639 						   items, tunnel);
11640 			matcher.priority = MLX5_PRIORITY_MAP_L4;
11641 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
11642 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
11643 			break;
11644 		case RTE_FLOW_ITEM_TYPE_GRE:
11645 			flow_dv_translate_item_gre(match_mask, match_value,
11646 						   items, tunnel);
11647 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11648 			last_item = MLX5_FLOW_LAYER_GRE;
11649 			break;
11650 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
11651 			flow_dv_translate_item_gre_key(match_mask,
11652 						       match_value, items);
11653 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
11654 			break;
11655 		case RTE_FLOW_ITEM_TYPE_NVGRE:
11656 			flow_dv_translate_item_nvgre(match_mask, match_value,
11657 						     items, tunnel);
11658 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11659 			last_item = MLX5_FLOW_LAYER_GRE;
11660 			break;
11661 		case RTE_FLOW_ITEM_TYPE_VXLAN:
11662 			flow_dv_translate_item_vxlan(match_mask, match_value,
11663 						     items, tunnel);
11664 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11665 			last_item = MLX5_FLOW_LAYER_VXLAN;
11666 			break;
11667 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
11668 			flow_dv_translate_item_vxlan_gpe(match_mask,
11669 							 match_value, items,
11670 							 tunnel);
11671 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11672 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
11673 			break;
11674 		case RTE_FLOW_ITEM_TYPE_GENEVE:
11675 			flow_dv_translate_item_geneve(match_mask, match_value,
11676 						      items, tunnel);
11677 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11678 			last_item = MLX5_FLOW_LAYER_GENEVE;
11679 			break;
11680 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
11681 			ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
11682 							  match_value,
11683 							  items, error);
11684 			if (ret)
11685 				return rte_flow_error_set(error, -ret,
11686 					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11687 					"cannot create GENEVE TLV option");
11688 			flow->geneve_tlv_option = 1;
11689 			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
11690 			break;
11691 		case RTE_FLOW_ITEM_TYPE_MPLS:
11692 			flow_dv_translate_item_mpls(match_mask, match_value,
11693 						    items, last_item, tunnel);
11694 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11695 			last_item = MLX5_FLOW_LAYER_MPLS;
11696 			break;
11697 		case RTE_FLOW_ITEM_TYPE_MARK:
11698 			flow_dv_translate_item_mark(dev, match_mask,
11699 						    match_value, items);
11700 			last_item = MLX5_FLOW_ITEM_MARK;
11701 			break;
11702 		case RTE_FLOW_ITEM_TYPE_META:
11703 			flow_dv_translate_item_meta(dev, match_mask,
11704 						    match_value, attr, items);
11705 			last_item = MLX5_FLOW_ITEM_METADATA;
11706 			break;
11707 		case RTE_FLOW_ITEM_TYPE_ICMP:
11708 			flow_dv_translate_item_icmp(match_mask, match_value,
11709 						    items, tunnel);
11710 			last_item = MLX5_FLOW_LAYER_ICMP;
11711 			break;
11712 		case RTE_FLOW_ITEM_TYPE_ICMP6:
11713 			flow_dv_translate_item_icmp6(match_mask, match_value,
11714 						      items, tunnel);
11715 			last_item = MLX5_FLOW_LAYER_ICMP6;
11716 			break;
11717 		case RTE_FLOW_ITEM_TYPE_TAG:
11718 			flow_dv_translate_item_tag(dev, match_mask,
11719 						   match_value, items);
11720 			last_item = MLX5_FLOW_ITEM_TAG;
11721 			break;
11722 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
11723 			flow_dv_translate_mlx5_item_tag(dev, match_mask,
11724 							match_value, items);
11725 			last_item = MLX5_FLOW_ITEM_TAG;
11726 			break;
11727 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
11728 			flow_dv_translate_item_tx_queue(dev, match_mask,
11729 							match_value,
11730 							items);
11731 			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
11732 			break;
11733 		case RTE_FLOW_ITEM_TYPE_GTP:
11734 			flow_dv_translate_item_gtp(match_mask, match_value,
11735 						   items, tunnel);
11736 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11737 			last_item = MLX5_FLOW_LAYER_GTP;
11738 			break;
11739 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
11740 			ret = flow_dv_translate_item_gtp_psc(match_mask,
11741 							  match_value,
11742 							  items);
11743 			if (ret)
11744 				return rte_flow_error_set(error, -ret,
11745 					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11746 					"cannot create GTP PSC item");
11747 			last_item = MLX5_FLOW_LAYER_GTP_PSC;
11748 			break;
11749 		case RTE_FLOW_ITEM_TYPE_ECPRI:
11750 			if (!mlx5_flex_parser_ecpri_exist(dev)) {
11751 				/* Create it only the first time it is used. */
11752 				ret = mlx5_flex_parser_ecpri_alloc(dev);
11753 				if (ret)
11754 					return rte_flow_error_set
11755 						(error, -ret,
11756 						RTE_FLOW_ERROR_TYPE_ITEM,
11757 						NULL,
11758 						"cannot create eCPRI parser");
11759 			}
11760 			/* Adjust the matcher mask size and the device flow value size. */
11761 			matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
11762 			dev_flow->dv.value.size =
11763 					MLX5_ST_SZ_BYTES(fte_match_param);
11764 			flow_dv_translate_item_ecpri(dev, match_mask,
11765 						     match_value, items);
11766 			/* No other protocol should follow eCPRI layer. */
11767 			last_item = MLX5_FLOW_LAYER_ECPRI;
11768 			break;
11769 		default:
11770 			break;
11771 		}
11772 		item_flags |= last_item;
11773 	}
11774 	/*
11775 	 * When E-Switch mode is enabled, there are two cases where the
11776 	 * source port must be set manually.
11777 	 * The first is a NIC steering rule, and the second is an E-Switch
11778 	 * rule where no port_id item was found. In both cases the source
11779 	 * port is set according to the current port in use.
11780 	 */
11781 	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
11782 	    (priv->representor || priv->master)) {
11783 		if (flow_dv_translate_item_port_id(dev, match_mask,
11784 						   match_value, NULL, attr))
11785 			return -rte_errno;
11786 	}
11787 #ifdef RTE_LIBRTE_MLX5_DEBUG
11788 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
11789 					      dev_flow->dv.value.buf));
11790 #endif
11791 	/*
11792 	 * Layers may be already initialized from prefix flow if this dev_flow
11793 	 * is the suffix flow.
11794 	 */
11795 	handle->layers |= item_flags;
11796 	if (action_flags & MLX5_FLOW_ACTION_RSS)
11797 		flow_dv_hashfields_set(dev_flow, rss_desc);
11798 	/* If there is an RSS action in the sample action, the Sample/Mirror
11799 	 * resource should be registered after the hash fields are updated.
11800 	 */
11801 	if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
11802 		ret = flow_dv_translate_action_sample(dev,
11803 						      sample,
11804 						      dev_flow, attr,
11805 						      &num_of_dest,
11806 						      sample_actions,
11807 						      &sample_res,
11808 						      error);
11809 		if (ret < 0)
11810 			return ret;
11811 		ret = flow_dv_create_action_sample(dev,
11812 						   dev_flow,
11813 						   num_of_dest,
11814 						   &sample_res,
11815 						   &mdest_res,
11816 						   sample_actions,
11817 						   action_flags,
11818 						   error);
11819 		if (ret < 0)
11820 			return rte_flow_error_set
11821 						(error, rte_errno,
11822 						RTE_FLOW_ERROR_TYPE_ACTION,
11823 						NULL,
11824 						"cannot create sample action");
11825 		if (num_of_dest > 1) {
11826 			dev_flow->dv.actions[sample_act_pos] =
11827 			dev_flow->dv.dest_array_res->action;
11828 		} else {
11829 			dev_flow->dv.actions[sample_act_pos] =
11830 			dev_flow->dv.sample_res->verbs_action;
11831 		}
11832 	}
11833 	/*
11834 	 * For multiple destinations (sample action with ratio=1), the encap
11835 	 * action and the port ID action will be combined into a group action,
11836 	 * so the original actions must be removed from the flow and only
11837 	 * the sample action used instead.
11838 	 */
11839 	if (num_of_dest > 1 &&
11840 	    (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
11841 		int i;
11842 		void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11843 
11844 		for (i = 0; i < actions_n; i++) {
11845 			if ((sample_act->dr_encap_action &&
11846 				sample_act->dr_encap_action ==
11847 				dev_flow->dv.actions[i]) ||
11848 				(sample_act->dr_port_id_action &&
11849 				sample_act->dr_port_id_action ==
11850 				dev_flow->dv.actions[i]) ||
11851 				(sample_act->dr_jump_action &&
11852 				sample_act->dr_jump_action ==
11853 				dev_flow->dv.actions[i]))
11854 				continue;
11855 			temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
11856 		}
11857 		memcpy((void *)dev_flow->dv.actions,
11858 				(void *)temp_actions,
11859 				tmp_actions_n * sizeof(void *));
11860 		actions_n = tmp_actions_n;
11861 	}
11862 	dev_flow->dv.actions_n = actions_n;
11863 	dev_flow->act_flags = action_flags;
11864 	/* Register matcher. */
11865 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
11866 				    matcher.mask.size);
11867 	matcher.priority = mlx5_get_matcher_priority(dev, attr,
11868 					matcher.priority);
11869 	/* The reserved field does not need to be set to 0 here. */
11870 	tbl_key.domain = attr->transfer;
11871 	tbl_key.direction = attr->egress;
11872 	tbl_key.table_id = dev_flow->dv.group;
11873 	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
11874 				     tunnel, attr->group, error))
11875 		return -rte_errno;
11876 	return 0;
11877 }
11878 
11879 /**
11880  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11881  * and tunnel.
11882  *
11883  * @param[in, out] action
11884  *   Shared RSS action holding hash RX queue objects.
11885  * @param[in] hash_fields
11886  *   Defines combination of packet fields to participate in RX hash.
11887  * @param[in] tunnel
11888  *   Tunnel type.
11889  * @param[in] hrxq_idx
11890  *   Hash RX queue index to set.
11891  *
11892  * @return
11893  *   0 on success, otherwise negative errno value.
11894  */
11895 static int
11896 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
11897 			      const uint64_t hash_fields,
11898 			      const int tunnel,
11899 			      uint32_t hrxq_idx)
11900 {
11901 	uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
11902 
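	/*
	 * The slot assignment below must stay in sync with
	 * __flow_dv_action_rss_hrxq_lookup(), which reads the same
	 * fixed position for each hash-fields combination.
	 */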
11903 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
11904 	case MLX5_RSS_HASH_IPV4:
11905 		hrxqs[0] = hrxq_idx;
11906 		return 0;
11907 	case MLX5_RSS_HASH_IPV4_TCP:
11908 		hrxqs[1] = hrxq_idx;
11909 		return 0;
11910 	case MLX5_RSS_HASH_IPV4_UDP:
11911 		hrxqs[2] = hrxq_idx;
11912 		return 0;
11913 	case MLX5_RSS_HASH_IPV6:
11914 		hrxqs[3] = hrxq_idx;
11915 		return 0;
11916 	case MLX5_RSS_HASH_IPV6_TCP:
11917 		hrxqs[4] = hrxq_idx;
11918 		return 0;
11919 	case MLX5_RSS_HASH_IPV6_UDP:
11920 		hrxqs[5] = hrxq_idx;
11921 		return 0;
11922 	case MLX5_RSS_HASH_NONE:
11923 		hrxqs[6] = hrxq_idx;
11924 		return 0;
11925 	default:
11926 		return -1;
11927 	}
11928 }
11929 
11930 /**
11931  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11932  * and tunnel.
11933  *
11934  * @param[in] dev
11935  *   Pointer to the Ethernet device structure.
11936  * @param[in] idx
11937  *   Shared RSS action ID holding hash RX queue objects.
11938  * @param[in] hash_fields
11939  *   Defines combination of packet fields to participate in RX hash.
11940  * @param[in] tunnel
11941  *   Tunnel type.
11942  *
11943  * @return
11944  *   Valid hash RX queue index, otherwise 0.
11945  */
11946 static uint32_t
11947 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
11948 				 const uint64_t hash_fields,
11949 				 const int tunnel)
11950 {
11951 	struct mlx5_priv *priv = dev->data->dev_private;
11952 	struct mlx5_shared_action_rss *shared_rss =
11953 	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11954 	const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
11955 							shared_rss->hrxq_tunnel;
11956 
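	/* The slot order here mirrors __flow_dv_action_rss_hrxq_set(). */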
11957 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
11958 	case MLX5_RSS_HASH_IPV4:
11959 		return hrxqs[0];
11960 	case MLX5_RSS_HASH_IPV4_TCP:
11961 		return hrxqs[1];
11962 	case MLX5_RSS_HASH_IPV4_UDP:
11963 		return hrxqs[2];
11964 	case MLX5_RSS_HASH_IPV6:
11965 		return hrxqs[3];
11966 	case MLX5_RSS_HASH_IPV6_TCP:
11967 		return hrxqs[4];
11968 	case MLX5_RSS_HASH_IPV6_UDP:
11969 		return hrxqs[5];
11970 	case MLX5_RSS_HASH_NONE:
11971 		return hrxqs[6];
11972 	default:
11973 		return 0;
11974 	}
11975 }
11976 
11977 /**
11978  * Apply the flow to the NIC, lock free,
11979  * (mutex should be acquired by caller).
11980  *
11981  * @param[in] dev
11982  *   Pointer to the Ethernet device structure.
11983  * @param[in, out] flow
11984  *   Pointer to flow structure.
11985  * @param[out] error
11986  *   Pointer to error structure.
11987  *
11988  * @return
11989  *   0 on success, a negative errno value otherwise and rte_errno is set.
11990  */
11991 static int
11992 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
11993 	      struct rte_flow_error *error)
11994 {
11995 	struct mlx5_flow_dv_workspace *dv;
11996 	struct mlx5_flow_handle *dh;
11997 	struct mlx5_flow_handle_dv *dv_h;
11998 	struct mlx5_flow *dev_flow;
11999 	struct mlx5_priv *priv = dev->data->dev_private;
12000 	uint32_t handle_idx;
12001 	int n;
12002 	int err;
12003 	int idx;
12004 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12005 	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
12006 
12007 	MLX5_ASSERT(wks);
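	/* Walk the sub-flows of this workspace in reverse creation order. */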
12008 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
12009 		dev_flow = &wks->flows[idx];
12010 		dv = &dev_flow->dv;
12011 		dh = dev_flow->handle;
12012 		dv_h = &dh->dvh;
12013 		n = dv->actions_n;
12014 		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
12015 			if (dv->transfer) {
12016 				dv->actions[n++] = priv->sh->esw_drop_action;
12017 			} else {
12018 				MLX5_ASSERT(priv->drop_queue.hrxq);
12019 				dv->actions[n++] =
12020 						priv->drop_queue.hrxq->action;
12021 			}
12022 		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
12023 			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
12024 			struct mlx5_hrxq *hrxq;
12025 			uint32_t hrxq_idx;
12026 
12027 			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
12028 						    &hrxq_idx);
12029 			if (!hrxq) {
12030 				rte_flow_error_set
12031 					(error, rte_errno,
12032 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12033 					 "cannot get hash queue");
12034 				goto error;
12035 			}
12036 			dh->rix_hrxq = hrxq_idx;
12037 			dv->actions[n++] = hrxq->action;
12038 		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12039 			struct mlx5_hrxq *hrxq = NULL;
12040 			uint32_t hrxq_idx;
12041 
12042 			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
12043 						rss_desc->shared_rss,
12044 						dev_flow->hash_fields,
12045 						!!(dh->layers &
12046 						MLX5_FLOW_LAYER_TUNNEL));
12047 			if (hrxq_idx)
12048 				hrxq = mlx5_ipool_get
12049 					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
12050 					 hrxq_idx);
12051 			if (!hrxq) {
12052 				rte_flow_error_set
12053 					(error, rte_errno,
12054 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12055 					 "cannot get hash queue");
12056 				goto error;
12057 			}
12058 			dh->rix_srss = rss_desc->shared_rss;
12059 			dv->actions[n++] = hrxq->action;
12060 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
12061 			if (!priv->sh->default_miss_action) {
12062 				rte_flow_error_set
12063 					(error, rte_errno,
12064 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12065 					 "default miss action not created.");
12066 				goto error;
12067 			}
12068 			dv->actions[n++] = priv->sh->default_miss_action;
12069 		}
12070 		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
12071 					       (void *)&dv->value, n,
12072 					       dv->actions, &dh->drv_flow);
12073 		if (err) {
12074 			rte_flow_error_set(error, errno,
12075 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12076 					   NULL,
12077 					   "hardware refuses to create flow");
12078 			goto error;
12079 		}
12080 		if (priv->vmwa_context &&
12081 		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
12082 			/*
12083 			 * The rule contains the VLAN pattern.
12084 			 * For a VF we are going to create a VLAN
12085 			 * interface to make the hypervisor set the
12086 			 * correct e-Switch vport context.
12087 			 */
12088 			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
12089 		}
12090 	}
12091 	return 0;
12092 error:
12093 	err = rte_errno; /* Save rte_errno before cleanup. */
12094 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
12095 		       handle_idx, dh, next) {
12096 		/* hrxq is a union; don't clear it if the flag is not set. */
12097 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
12098 			mlx5_hrxq_release(dev, dh->rix_hrxq);
12099 			dh->rix_hrxq = 0;
12100 		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12101 			dh->rix_srss = 0;
12102 		}
12103 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
12104 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12105 	}
12106 	rte_errno = err; /* Restore rte_errno. */
12107 	return -rte_errno;
12108 }
12109 
12110 void
12111 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12112 			  struct mlx5_cache_entry *entry)
12113 {
12114 	struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12115 							  entry);
12116 
12117 	claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12118 	mlx5_free(cache);
12119 }
12120 
12121 /**
12122  * Release the flow matcher.
12123  *
12124  * @param dev
12125  *   Pointer to Ethernet device.
12126  * @param handle
12127  *   Pointer to mlx5_flow_handle.
12128  *
12129  * @return
12130  *   1 while a reference on it exists, 0 when freed.
12131  */
12132 static int
12133 flow_dv_matcher_release(struct rte_eth_dev *dev,
12134 			struct mlx5_flow_handle *handle)
12135 {
12136 	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12137 	struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12138 							    typeof(*tbl), tbl);
12139 	int ret;
12140 
12141 	MLX5_ASSERT(matcher->matcher_object);
12142 	ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12143 	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12144 	return ret;
12145 }
12146 
12147 /**
12148  * Release encap_decap resource.
12149  *
12150  * @param list
12151  *   Pointer to the hash list.
12152  * @param entry
12153  *   Pointer to the existing resource entry object.
12154  */
12155 void
12156 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12157 			      struct mlx5_hlist_entry *entry)
12158 {
12159 	struct mlx5_dev_ctx_shared *sh = list->ctx;
12160 	struct mlx5_flow_dv_encap_decap_resource *res =
12161 		container_of(entry, typeof(*res), entry);
12162 
12163 	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12164 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12165 }
12166 
12167 /**
12168  * Release an encap/decap resource.
12169  *
12170  * @param dev
12171  *   Pointer to Ethernet device.
12172  * @param encap_decap_idx
12173  *   Index of encap decap resource.
12174  *
12175  * @return
12176  *   1 while a reference on it exists, 0 when freed.
12177  */
12178 static int
12179 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12180 				     uint32_t encap_decap_idx)
12181 {
12182 	struct mlx5_priv *priv = dev->data->dev_private;
12183 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12184 
12185 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12186 					encap_decap_idx);
12187 	if (!cache_resource)
12188 		return 0;
12189 	MLX5_ASSERT(cache_resource->action);
12190 	return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12191 				     &cache_resource->entry);
12192 }
12193 
12194 /**
12195  * Release a jump-to-table action resource.
12196  *
12197  * @param dev
12198  *   Pointer to Ethernet device.
12199  * @param rix_jump
12200  *   Index to the jump action resource.
12201  *
12202  * @return
12203  *   1 while a reference on it exists, 0 when freed.
12204  */
12205 static int
12206 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12207 				  uint32_t rix_jump)
12208 {
12209 	struct mlx5_priv *priv = dev->data->dev_private;
12210 	struct mlx5_flow_tbl_data_entry *tbl_data;
12211 
12212 	tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12213 				  rix_jump);
12214 	if (!tbl_data)
12215 		return 0;
12216 	return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12217 }
12218 
12219 void
12220 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12221 			 struct mlx5_hlist_entry *entry)
12222 {
12223 	struct mlx5_flow_dv_modify_hdr_resource *res =
12224 		container_of(entry, typeof(*res), entry);
12225 
12226 	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12227 	mlx5_free(entry);
12228 }
12229 
12230 /**
12231  * Release a modify-header resource.
12232  *
12233  * @param dev
12234  *   Pointer to Ethernet device.
12235  * @param handle
12236  *   Pointer to mlx5_flow_handle.
12237  *
12238  * @return
12239  *   1 while a reference on it exists, 0 when freed.
12240  */
12241 static int
12242 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12243 				    struct mlx5_flow_handle *handle)
12244 {
12245 	struct mlx5_priv *priv = dev->data->dev_private;
12246 	struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12247 
12248 	MLX5_ASSERT(entry->action);
12249 	return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12250 }
12251 
12252 void
12253 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12254 			  struct mlx5_cache_entry *entry)
12255 {
12256 	struct mlx5_dev_ctx_shared *sh = list->ctx;
12257 	struct mlx5_flow_dv_port_id_action_resource *cache =
12258 			container_of(entry, typeof(*cache), entry);
12259 
12260 	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12261 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12262 }
12263 
12264 /**
12265  * Release port ID action resource.
12266  *
12267  * @param dev
12268  *   Pointer to Ethernet device.
12269  * @param port_id
12270  *   Index to port ID action resource.
12271  *
12272  * @return
12273  *   1 while a reference on it exists, 0 when freed.
12274  */
12275 static int
12276 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12277 					uint32_t port_id)
12278 {
12279 	struct mlx5_priv *priv = dev->data->dev_private;
12280 	struct mlx5_flow_dv_port_id_action_resource *cache;
12281 
12282 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12283 	if (!cache)
12284 		return 0;
12285 	MLX5_ASSERT(cache->action);
12286 	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12287 				     &cache->entry);
12288 }
12289 
12290 /**
12291  * Release shared RSS action resource.
12292  *
12293  * @param dev
12294  *   Pointer to Ethernet device.
12295  * @param srss
12296  *   Shared RSS action index.
12297  */
12298 static void
12299 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12300 {
12301 	struct mlx5_priv *priv = dev->data->dev_private;
12302 	struct mlx5_shared_action_rss *shared_rss;
12303 
12304 	shared_rss = mlx5_ipool_get
12305 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12306 	__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12307 }
12308 
12309 void
12310 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12311 			    struct mlx5_cache_entry *entry)
12312 {
12313 	struct mlx5_dev_ctx_shared *sh = list->ctx;
12314 	struct mlx5_flow_dv_push_vlan_action_resource *cache =
12315 			container_of(entry, typeof(*cache), entry);
12316 
12317 	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12318 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12319 }
12320 
12321 /**
12322  * Release push vlan action resource.
12323  *
12324  * @param dev
12325  *   Pointer to Ethernet device.
12326  * @param handle
12327  *   Pointer to mlx5_flow_handle.
12328  *
12329  * @return
12330  *   1 while a reference on it exists, 0 when freed.
12331  */
12332 static int
12333 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12334 					  struct mlx5_flow_handle *handle)
12335 {
12336 	struct mlx5_priv *priv = dev->data->dev_private;
12337 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
12338 	uint32_t idx = handle->dvh.rix_push_vlan;
12339 
12340 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12341 	if (!cache)
12342 		return 0;
12343 	MLX5_ASSERT(cache->action);
12344 	return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12345 				     &cache->entry);
12346 }
12347 
12348 /**
12349  * Release the fate resource.
12350  *
12351  * @param dev
12352  *   Pointer to Ethernet device.
12353  * @param handle
12354  *   Pointer to mlx5_flow_handle.
12355  */
12356 static void
12357 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12358 			       struct mlx5_flow_handle *handle)
12359 {
12360 	if (!handle->rix_fate)
12361 		return;
12362 	switch (handle->fate_action) {
12363 	case MLX5_FLOW_FATE_QUEUE:
12364 		mlx5_hrxq_release(dev, handle->rix_hrxq);
12365 		break;
12366 	case MLX5_FLOW_FATE_JUMP:
12367 		flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12368 		break;
12369 	case MLX5_FLOW_FATE_PORT_ID:
12370 		flow_dv_port_id_action_resource_release(dev,
12371 				handle->rix_port_id_action);
12372 		break;
12373 	default:
12374 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12375 		break;
12376 	}
12377 	handle->rix_fate = 0;
12378 }
12379 
12380 void
12381 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12382 			 struct mlx5_cache_entry *entry)
12383 {
12384 	struct mlx5_flow_dv_sample_resource *cache_resource =
12385 			container_of(entry, typeof(*cache_resource), entry);
12386 	struct rte_eth_dev *dev = cache_resource->dev;
12387 	struct mlx5_priv *priv = dev->data->dev_private;
12388 
12389 	if (cache_resource->verbs_action)
12390 		claim_zero(mlx5_flow_os_destroy_flow_action
12391 				(cache_resource->verbs_action));
12392 	if (cache_resource->normal_path_tbl)
12393 		flow_dv_tbl_resource_release(MLX5_SH(dev),
12394 			cache_resource->normal_path_tbl);
12395 	flow_dv_sample_sub_actions_release(dev,
12396 				&cache_resource->sample_idx);
12397 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12398 			cache_resource->idx);
12399 	DRV_LOG(DEBUG, "sample resource %p: removed",
12400 		(void *)cache_resource);
12401 }
12402 
12403 /**
12404  * Release a sample resource.
12405  *
12406  * @param dev
12407  *   Pointer to Ethernet device.
12408  * @param handle
12409  *   Pointer to mlx5_flow_handle.
12410  *
12411  * @return
12412  *   1 while a reference on it exists, 0 when freed.
12413  */
12414 static int
12415 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12416 				     struct mlx5_flow_handle *handle)
12417 {
12418 	struct mlx5_priv *priv = dev->data->dev_private;
12419 	struct mlx5_flow_dv_sample_resource *cache_resource;
12420 
12421 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12422 			 handle->dvh.rix_sample);
12423 	if (!cache_resource)
12424 		return 0;
12425 	MLX5_ASSERT(cache_resource->verbs_action);
12426 	return mlx5_cache_unregister(&priv->sh->sample_action_list,
12427 				     &cache_resource->entry);
12428 }
12429 
12430 void
12431 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12432 			     struct mlx5_cache_entry *entry)
12433 {
12434 	struct mlx5_flow_dv_dest_array_resource *cache_resource =
12435 			container_of(entry, typeof(*cache_resource), entry);
12436 	struct rte_eth_dev *dev = cache_resource->dev;
12437 	struct mlx5_priv *priv = dev->data->dev_private;
12438 	uint32_t i = 0;
12439 
12440 	MLX5_ASSERT(cache_resource->action);
12441 	if (cache_resource->action)
12442 		claim_zero(mlx5_flow_os_destroy_flow_action
12443 					(cache_resource->action));
12444 	for (; i < cache_resource->num_of_dest; i++)
12445 		flow_dv_sample_sub_actions_release(dev,
12446 				&cache_resource->sample_idx[i]);
12447 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12448 			cache_resource->idx);
12449 	DRV_LOG(DEBUG, "destination array resource %p: removed",
12450 		(void *)cache_resource);
12451 }
12452 
12453 /**
12454  * Release a destination array resource.
12455  *
12456  * @param dev
12457  *   Pointer to Ethernet device.
12458  * @param handle
12459  *   Pointer to mlx5_flow_handle.
12460  *
12461  * @return
12462  *   1 while a reference on it exists, 0 when freed.
12463  */
12464 static int
12465 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12466 				    struct mlx5_flow_handle *handle)
12467 {
12468 	struct mlx5_priv *priv = dev->data->dev_private;
12469 	struct mlx5_flow_dv_dest_array_resource *cache;
12470 
12471 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12472 			       handle->dvh.rix_dest_array);
12473 	if (!cache)
12474 		return 0;
12475 	MLX5_ASSERT(cache->action);
12476 	return mlx5_cache_unregister(&priv->sh->dest_array_list,
12477 				     &cache->entry);
12478 }
12479 
12480 static void
12481 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12482 {
12483 	struct mlx5_priv *priv = dev->data->dev_private;
12484 	struct mlx5_dev_ctx_shared *sh = priv->sh;
12485 	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12486 				sh->geneve_tlv_option_resource;
12487 	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
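	/* Destroy the DevX object only when the last reference is dropped. */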
12488 	if (geneve_opt_resource) {
12489 		if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12490 					 __ATOMIC_RELAXED))) {
12491 			claim_zero(mlx5_devx_cmd_destroy
12492 					(geneve_opt_resource->obj));
12493 			mlx5_free(sh->geneve_tlv_option_resource);
12494 			sh->geneve_tlv_option_resource = NULL;
12495 		}
12496 	}
12497 	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12498 }
12499 
12500 /**
12501  * Remove the flow from the NIC but keep it in memory.
12502  * Lock free, (mutex should be acquired by caller).
12503  *
12504  * @param[in] dev
12505  *   Pointer to Ethernet device.
12506  * @param[in, out] flow
12507  *   Pointer to flow structure.
12508  */
12509 static void
12510 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12511 {
12512 	struct mlx5_flow_handle *dh;
12513 	uint32_t handle_idx;
12514 	struct mlx5_priv *priv = dev->data->dev_private;
12515 
12516 	if (!flow)
12517 		return;
12518 	handle_idx = flow->dev_handles;
12519 	while (handle_idx) {
12520 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12521 				    handle_idx);
12522 		if (!dh)
12523 			return;
12524 		if (dh->drv_flow) {
12525 			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12526 			dh->drv_flow = NULL;
12527 		}
12528 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12529 			flow_dv_fate_resource_release(dev, dh);
12530 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
12531 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12532 		handle_idx = dh->next.next;
12533 	}
12534 }
12535 
12536 /**
12537  * Remove the flow from the NIC and the memory.
12538  * Lock free, (mutex should be acquired by caller).
12539  *
12540  * @param[in] dev
12541  *   Pointer to the Ethernet device structure.
12542  * @param[in, out] flow
12543  *   Pointer to flow structure.
12544  */
12545 static void
12546 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12547 {
12548 	struct mlx5_flow_handle *dev_handle;
12549 	struct mlx5_priv *priv = dev->data->dev_private;
12550 	uint32_t srss = 0;
12551 
12552 	if (!flow)
12553 		return;
12554 	flow_dv_remove(dev, flow);
12555 	if (flow->counter) {
12556 		flow_dv_counter_free(dev, flow->counter);
12557 		flow->counter = 0;
12558 	}
12559 	if (flow->meter) {
12560 		struct mlx5_flow_meter *fm;
12561 
12562 		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
12563 				    flow->meter);
12564 		if (fm)
12565 			mlx5_flow_meter_detach(fm);
12566 		flow->meter = 0;
12567 	}
12568 	if (flow->age)
12569 		flow_dv_aso_age_release(dev, flow->age);
12570 	if (flow->geneve_tlv_option) {
12571 		flow_dv_geneve_tlv_option_resource_release(dev);
12572 		flow->geneve_tlv_option = 0;
12573 	}
12574 	while (flow->dev_handles) {
12575 		uint32_t tmp_idx = flow->dev_handles;
12576 
12577 		dev_handle = mlx5_ipool_get(priv->sh->ipool
12578 					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
12579 		if (!dev_handle)
12580 			return;
12581 		flow->dev_handles = dev_handle->next.next;
12582 		if (dev_handle->dvh.matcher)
12583 			flow_dv_matcher_release(dev, dev_handle);
12584 		if (dev_handle->dvh.rix_sample)
12585 			flow_dv_sample_resource_release(dev, dev_handle);
12586 		if (dev_handle->dvh.rix_dest_array)
12587 			flow_dv_dest_array_resource_release(dev, dev_handle);
12588 		if (dev_handle->dvh.rix_encap_decap)
12589 			flow_dv_encap_decap_resource_release(dev,
12590 				dev_handle->dvh.rix_encap_decap);
12591 		if (dev_handle->dvh.modify_hdr)
12592 			flow_dv_modify_hdr_resource_release(dev, dev_handle);
12593 		if (dev_handle->dvh.rix_push_vlan)
12594 			flow_dv_push_vlan_action_resource_release(dev,
12595 								  dev_handle);
12596 		if (dev_handle->dvh.rix_tag)
12597 			flow_dv_tag_release(dev,
12598 					    dev_handle->dvh.rix_tag);
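		/*
		 * Defer the shared RSS release: remember its index here and
		 * drop the reference only once, after all handles are freed.
		 */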
12599 		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
12600 			flow_dv_fate_resource_release(dev, dev_handle);
12601 		else if (!srss)
12602 			srss = dev_handle->rix_srss;
12603 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12604 			   tmp_idx);
12605 	}
12606 	if (srss)
12607 		flow_dv_shared_rss_action_release(dev, srss);
12608 }
12609 
12610 /**
12611  * Release array of hash RX queue objects.
12612  * Helper function.
12613  *
12614  * @param[in] dev
12615  *   Pointer to the Ethernet device structure.
12616  * @param[in, out] hrxqs
12617  *   Array of hash RX queue objects.
12618  *
12619  * @return
12620  *   Total number of references to hash RX queue objects in *hrxqs* array
12621  *   after this operation.
12622  */
12623 static int
12624 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
12625 			uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
12626 {
12627 	size_t i;
12628 	int remaining = 0;
12629 
12630 	for (i = 0; i < RTE_DIM(*hrxqs); i++) {
12631 		int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
12632 
12633 		if (!ret)
12634 			(*hrxqs)[i] = 0;
12635 		remaining += ret;
12636 	}
12637 	return remaining;
12638 }
12639 
12640 /**
12641  * Release all hash RX queue objects representing shared RSS action.
12642  *
12643  * @param[in] dev
12644  *   Pointer to the Ethernet device structure.
12645  * @param[in, out] action
12646  *   Shared RSS action to remove hash RX queue objects from.
12647  *
12648  * @return
12649  *   Total number of references to hash RX queue objects stored in *action*
12650  *   after this operation.
12651  *   Expected to be 0 if no external references are held.
12652  */
12653 static int
12654 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
12655 				 struct mlx5_shared_action_rss *shared_rss)
12656 {
12657 	return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
12658 		__flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
12659 }
12660 
12661 /**
12662  * Setup shared RSS action.
12663  * Prepare set of hash RX queue objects sufficient to handle all valid
12664  * hash_fields combinations (see enum ibv_rx_hash_fields).
12665  *
12666  * @param[in] dev
12667  *   Pointer to the Ethernet device structure.
12668  * @param[in] action_idx
12669  *   Shared RSS action ipool index.
12670  * @param[in, out] action
12671  *   Partially initialized shared RSS action.
12672  * @param[out] error
12673  *   Perform verbose error reporting if not NULL. Initialized in case of
12674  *   error only.
12675  *
12676  * @return
12677  *   0 on success, otherwise negative errno value.
12678  */
12679 static int
12680 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
12681 			   uint32_t action_idx,
12682 			   struct mlx5_shared_action_rss *shared_rss,
12683 			   struct rte_flow_error *error)
12684 {
12685 	struct mlx5_flow_rss_desc rss_desc = { 0 };
12686 	size_t i;
12687 	int err;
12688 
12689 	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
12690 		return rte_flow_error_set(error, rte_errno,
12691 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12692 					  "cannot setup indirection table");
12693 	}
12694 	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
12695 	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
12696 	rss_desc.const_q = shared_rss->origin.queue;
12697 	rss_desc.queue_num = shared_rss->origin.queue_num;
12698 	/* Set non-zero value to indicate a shared RSS. */
12699 	rss_desc.shared_rss = action_idx;
12700 	rss_desc.ind_tbl = shared_rss->ind_tbl;
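	/* Create a hash Rx queue for each hash-fields and tunnel combination. */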
12701 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
12702 		uint32_t hrxq_idx;
12703 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
12704 		int tunnel;
12705 
12706 		for (tunnel = 0; tunnel < 2; tunnel++) {
12707 			rss_desc.tunnel = tunnel;
12708 			rss_desc.hash_fields = hash_fields;
12709 			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
12710 			if (!hrxq_idx) {
12711 				rte_flow_error_set
12712 					(error, rte_errno,
12713 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12714 					 "cannot get hash queue");
12715 				goto error_hrxq_new;
12716 			}
12717 			err = __flow_dv_action_rss_hrxq_set
12718 				(shared_rss, hash_fields, tunnel, hrxq_idx);
12719 			MLX5_ASSERT(!err);
12720 		}
12721 	}
12722 	return 0;
12723 error_hrxq_new:
12724 	err = rte_errno;
12725 	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12726 	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
12727 		shared_rss->ind_tbl = NULL;
12728 	rte_errno = err;
12729 	return -rte_errno;
12730 }
12731 
12732 /**
12733  * Create shared RSS action.
12734  *
12735  * @param[in] dev
12736  *   Pointer to the Ethernet device structure.
12737  * @param[in] conf
12738  *   Shared action configuration.
12739  * @param[in] rss
12740  *   RSS action specification used to create shared action.
12741  * @param[out] error
12742  *   Perform verbose error reporting if not NULL. Initialized in case of
12743  *   error only.
12744  *
12745  * @return
12746  *   A valid shared action ID in case of success, 0 otherwise and
12747  *   rte_errno is set.
12748  */
12749 static uint32_t
12750 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
12751 			    const struct rte_flow_shared_action_conf *conf,
12752 			    const struct rte_flow_action_rss *rss,
12753 			    struct rte_flow_error *error)
12754 {
12755 	struct mlx5_priv *priv = dev->data->dev_private;
12756 	struct mlx5_shared_action_rss *shared_rss = NULL;
12757 	void *queue = NULL;
12758 	struct rte_flow_action_rss *origin;
12759 	const uint8_t *rss_key;
12760 	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
12761 	uint32_t idx;
12762 
12763 	RTE_SET_USED(conf);
12764 	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
12765 			    0, SOCKET_ID_ANY);
12766 	shared_rss = mlx5_ipool_zmalloc
12767 			 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
12768 	if (!shared_rss || !queue) {
12769 		rte_flow_error_set(error, ENOMEM,
12770 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12771 				   "cannot allocate resource memory");
12772 		goto error_rss_init;
12773 	}
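	/*
	 * The ipool index must fit below the type-offset bit so it can be
	 * combined with the shared action type in the returned handle.
	 */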
12774 	if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
12775 		rte_flow_error_set(error, E2BIG,
12776 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12777 				   "rss action number out of range");
12778 		goto error_rss_init;
12779 	}
12780 	shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
12781 					  sizeof(*shared_rss->ind_tbl),
12782 					  0, SOCKET_ID_ANY);
12783 	if (!shared_rss->ind_tbl) {
12784 		rte_flow_error_set(error, ENOMEM,
12785 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12786 				   "cannot allocate resource memory");
12787 		goto error_rss_init;
12788 	}
12789 	memcpy(queue, rss->queue, queue_size);
12790 	shared_rss->ind_tbl->queues = queue;
12791 	shared_rss->ind_tbl->queues_n = rss->queue_num;
12792 	origin = &shared_rss->origin;
12793 	origin->func = rss->func;
12794 	origin->level = rss->level;
12795 	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
12796 	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
12797 	/* NULL RSS key indicates default RSS key. */
12798 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
12799 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12800 	origin->key = &shared_rss->key[0];
12801 	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
12802 	origin->queue = queue;
12803 	origin->queue_num = rss->queue_num;
12804 	if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
12805 		goto error_rss_init;
12806 	rte_spinlock_init(&shared_rss->action_rss_sl);
12807 	__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12808 	rte_spinlock_lock(&priv->shared_act_sl);
12809 	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12810 		     &priv->rss_shared_actions, idx, shared_rss, next);
12811 	rte_spinlock_unlock(&priv->shared_act_sl);
12812 	return idx;
12813 error_rss_init:
12814 	if (shared_rss) {
12815 		if (shared_rss->ind_tbl)
12816 			mlx5_free(shared_rss->ind_tbl);
12817 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12818 				idx);
12819 	}
12820 	if (queue)
12821 		mlx5_free(queue);
12822 	return 0;
12823 }
12824 
12825 /**
12826  * Destroy the shared RSS action.
12827  * Release related hash RX queue objects.
12828  *
12829  * @param[in] dev
12830  *   Pointer to the Ethernet device structure.
12831  * @param[in] idx
12832  *   The shared RSS action object ID to be removed.
12833  * @param[out] error
12834  *   Perform verbose error reporting if not NULL. Initialized in case of
12835  *   error only.
12836  *
12837  * @return
12838  *   0 on success, otherwise negative errno value.
12839  */
12840 static int
12841 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
12842 			     struct rte_flow_error *error)
12843 {
12844 	struct mlx5_priv *priv = dev->data->dev_private;
12845 	struct mlx5_shared_action_rss *shared_rss =
12846 	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12847 	uint32_t old_refcnt = 1;
12848 	int remaining;
12849 	uint16_t *queue = NULL;
12850 
12851 	if (!shared_rss)
12852 		return rte_flow_error_set(error, EINVAL,
12853 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12854 					  "invalid shared action");
12855 	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12856 	if (remaining)
12857 		return rte_flow_error_set(error, EBUSY,
12858 					  RTE_FLOW_ERROR_TYPE_ACTION,
12859 					  NULL,
12860 					  "shared rss hrxq has references");
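	/*
	 * Atomically flip the reference count from 1 to 0; fail if any
	 * other reference is still held.
	 */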
12861 	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
12862 					 0, 0, __ATOMIC_ACQUIRE,
12863 					 __ATOMIC_RELAXED))
12864 		return rte_flow_error_set(error, EBUSY,
12865 					  RTE_FLOW_ERROR_TYPE_ACTION,
12866 					  NULL,
12867 					  "shared rss has references");
12868 	queue = shared_rss->ind_tbl->queues;
12869 	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
12870 	if (remaining)
12871 		return rte_flow_error_set(error, EBUSY,
12872 					  RTE_FLOW_ERROR_TYPE_ACTION,
12873 					  NULL,
12874 					  "shared rss indirection table has"
12875 					  " references");
12876 	mlx5_free(queue);
12877 	rte_spinlock_lock(&priv->shared_act_sl);
12878 	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12879 		     &priv->rss_shared_actions, idx, shared_rss, next);
12880 	rte_spinlock_unlock(&priv->shared_act_sl);
12881 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12882 			idx);
12883 	return 0;
12884 }
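
/*
 * A minimal sketch of the reference-release pattern used above (illustrative
 * only, not part of the driver; the helper name is hypothetical): the
 * compare-and-swap succeeds only when the caller holds the very last
 * reference, so concurrent holders observe EBUSY instead of a use-after-free.
 *
 *	static int
 *	refcnt_try_release(uint32_t *refcnt)
 *	{
 *		uint32_t expected = 1;
 *
 *		if (!__atomic_compare_exchange_n(refcnt, &expected, 0, 0,
 *						 __ATOMIC_ACQUIRE,
 *						 __ATOMIC_RELAXED))
 *			return -EBUSY;
 *		return 0;
 *	}
 */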
12885 
12886 /**
12887  * Create a shared action, lock free
12888  * (the mutex should be acquired by the caller).
12889  * Dispatcher for the action type specific call.
12890  *
12891  * @param[in] dev
12892  *   Pointer to the Ethernet device structure.
12893  * @param[in] conf
12894  *   Shared action configuration.
12895  * @param[in] action
12896  *   Action specification used to create shared action.
12897  * @param[out] error
12898  *   Perform verbose error reporting if not NULL. Initialized in case of
12899  *   error only.
12900  *
12901  * @return
12902  *   A valid shared action handle in case of success, NULL otherwise and
12903  *   rte_errno is set.
12904  */
12905 static struct rte_flow_shared_action *
12906 flow_dv_action_create(struct rte_eth_dev *dev,
12907 		      const struct rte_flow_shared_action_conf *conf,
12908 		      const struct rte_flow_action *action,
12909 		      struct rte_flow_error *err)
12910 {
12911 	uint32_t idx = 0;
12912 	uint32_t ret = 0;
12913 
12914 	switch (action->type) {
12915 	case RTE_FLOW_ACTION_TYPE_RSS:
12916 		ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
12917 		idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
12918 		       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12919 		break;
12920 	case RTE_FLOW_ACTION_TYPE_AGE:
12921 		ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
12922 		idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
12923 		       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12924 		if (ret) {
12925 			struct mlx5_aso_age_action *aso_age =
12926 					      flow_aso_age_get_by_idx(dev, ret);
12927 
12928 			if (!aso_age->age_params.context)
12929 				aso_age->age_params.context =
12930 							 (void *)(uintptr_t)idx;
12931 		}
12932 		break;
12933 	default:
12934 		rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12935 				   NULL, "action type not supported");
12936 		break;
12937 	}
12938 	return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
12939 }
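
/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach this dispatcher through the generic rte_flow shared-action API.
 * The returned handle encodes the action type in the bits above
 * MLX5_SHARED_ACTION_TYPE_OFFSET and the pool index below it, exactly as
 * built above. The port id (0) and the queue set are assumptions made for
 * the example.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.types = ETH_RSS_IP,
 *		.queue = queues,
 *		.queue_num = RTE_DIM(queues),
 *	};
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	const struct rte_flow_shared_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_shared_action *handle =
 *		rte_flow_shared_action_create(0, &conf, &action, &error);
 */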
12940 
12941 /**
12942  * Destroy the shared action.
12943  * Release the action-related resources on the NIC and in memory.
12944  * Lock free (the mutex should be acquired by the caller).
12945  * Dispatcher for action type specific call.
12946  *
12947  * @param[in] dev
12948  *   Pointer to the Ethernet device structure.
12949  * @param[in] action
12950  *   The shared action object to be removed.
12951  * @param[out] error
12952  *   Perform verbose error reporting if not NULL. Initialized in case of
12953  *   error only.
12954  *
12955  * @return
12956  *   0 on success, otherwise negative errno value.
12957  */
12958 static int
12959 flow_dv_action_destroy(struct rte_eth_dev *dev,
12960 		       struct rte_flow_shared_action *action,
12961 		       struct rte_flow_error *error)
12962 {
12963 	uint32_t act_idx = (uint32_t)(uintptr_t)action;
12964 	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12965 	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12966 	int ret;
12967 
12968 	switch (type) {
12969 	case MLX5_SHARED_ACTION_TYPE_RSS:
12970 		return __flow_dv_action_rss_release(dev, idx, error);
12971 	case MLX5_SHARED_ACTION_TYPE_AGE:
12972 		ret = flow_dv_aso_age_release(dev, idx);
12973 		if (ret)
12974 			/*
12975 			 * In this case, the last flow holding a reference
12976 			 * will actually release the age action.
12977 			 */
12978 			DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
12979 				" released with references %d.", idx, ret);
12980 		return 0;
12981 	default:
12982 		return rte_flow_error_set(error, ENOTSUP,
12983 					  RTE_FLOW_ERROR_TYPE_ACTION,
12984 					  NULL,
12985 					  "action type not supported");
12986 	}
12987 }
12988 
12989 /**
12990  * Updates a shared RSS action configuration in place.
12991  *
12992  * @param[in] dev
12993  *   Pointer to the Ethernet device structure.
12994  * @param[in] idx
12995  *   The shared RSS action object ID to be updated.
12996  * @param[in] action_conf
12997  *   RSS action specification used to modify *shared_rss*.
12998  * @param[out] error
12999  *   Perform verbose error reporting if not NULL. Initialized in case of
13000  *   error only.
13001  *
13002  * @return
13003  *   0 on success, otherwise negative errno value.
13004  * @note: currently only updating of the RSS queues is supported.
13005  */
13006 static int
13007 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
13008 			    const struct rte_flow_action_rss *action_conf,
13009 			    struct rte_flow_error *error)
13010 {
13011 	struct mlx5_priv *priv = dev->data->dev_private;
13012 	struct mlx5_shared_action_rss *shared_rss =
13013 	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13014 	int ret = 0;
13015 	void *queue = NULL;
13016 	uint16_t *queue_old = NULL;
13017 	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
13018 
13019 	if (!shared_rss)
13020 		return rte_flow_error_set(error, EINVAL,
13021 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13022 					  "invalid shared action to update");
13023 	if (priv->obj_ops.ind_table_modify == NULL)
13024 		return rte_flow_error_set(error, ENOTSUP,
13025 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13026 					  "cannot modify indirection table");
13027 	queue = mlx5_malloc(MLX5_MEM_ZERO,
13028 			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13029 			    0, SOCKET_ID_ANY);
13030 	if (!queue)
13031 		return rte_flow_error_set(error, ENOMEM,
13032 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13033 					  NULL,
13034 					  "cannot allocate resource memory");
13035 	memcpy(queue, action_conf->queue, queue_size);
13036 	MLX5_ASSERT(shared_rss->ind_tbl);
13037 	rte_spinlock_lock(&shared_rss->action_rss_sl);
13038 	queue_old = shared_rss->ind_tbl->queues;
13039 	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
13040 					queue, action_conf->queue_num, true);
13041 	if (ret) {
13042 		mlx5_free(queue);
13043 		ret = rte_flow_error_set(error, rte_errno,
13044 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13045 					  "cannot update indirection table");
13046 	} else {
13047 		mlx5_free(queue_old);
13048 		shared_rss->origin.queue = queue;
13049 		shared_rss->origin.queue_num = action_conf->queue_num;
13050 	}
13051 	rte_spinlock_unlock(&shared_rss->action_rss_sl);
13052 	return ret;
13053 }
13054 
13055 /**
13056  * Updates a shared action configuration in place, lock free
13057  * (the mutex should be acquired by the caller).
13058  *
13059  * @param[in] dev
13060  *   Pointer to the Ethernet device structure.
13061  * @param[in] action
13062  *   The shared action object to be updated.
13063  * @param[in] action_conf
13064  *   Action specification used to modify *action*.
13065  *   *action_conf* should be of a type matching the type of *action*;
13066  *   otherwise it is considered invalid.
13067  * @param[out] error
13068  *   Perform verbose error reporting if not NULL. Initialized in case of
13069  *   error only.
13070  *
13071  * @return
13072  *   0 on success, otherwise negative errno value.
13073  */
13074 static int
13075 flow_dv_action_update(struct rte_eth_dev *dev,
13076 			struct rte_flow_shared_action *action,
13077 			const void *action_conf,
13078 			struct rte_flow_error *err)
13079 {
13080 	uint32_t act_idx = (uint32_t)(uintptr_t)action;
13081 	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
13082 	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
13083 
13084 	switch (type) {
13085 	case MLX5_SHARED_ACTION_TYPE_RSS:
13086 		return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13087 	default:
13088 		return rte_flow_error_set(err, ENOTSUP,
13089 					  RTE_FLOW_ERROR_TYPE_ACTION,
13090 					  NULL,
13091 					  "action type update not supported");
13092 	}
13093 }
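
/*
 * Usage sketch (illustrative only, not part of the driver): updating the
 * queue set of an existing shared RSS action through the generic rte_flow
 * API; as noted above, only the RSS queues can be modified. The handle and
 * the port id (0) are assumptions carried over from the creation example.
 *
 *	uint16_t new_queues[] = { 4, 5, 6, 7 };
 *	struct rte_flow_action_rss rss = {
 *		.queue = new_queues,
 *		.queue_num = RTE_DIM(new_queues),
 *	};
 *	const struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	struct rte_flow_error error;
 *	int ret = rte_flow_shared_action_update(0, handle, &update, &error);
 */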
13094 
13095 static int
13096 flow_dv_action_query(struct rte_eth_dev *dev,
13097 		     const struct rte_flow_shared_action *action, void *data,
13098 		     struct rte_flow_error *error)
13099 {
13100 	struct mlx5_age_param *age_param;
13101 	struct rte_flow_query_age *resp;
13102 	uint32_t act_idx = (uint32_t)(uintptr_t)action;
13103 	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
13104 	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
13105 
13106 	switch (type) {
13107 	case MLX5_SHARED_ACTION_TYPE_AGE:
13108 		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
13109 		resp = data;
13110 		resp->aged = __atomic_load_n(&age_param->state,
13111 					      __ATOMIC_RELAXED) == AGE_TMOUT ?
13112 									  1 : 0;
13113 		resp->sec_since_last_hit_valid = !resp->aged;
13114 		if (resp->sec_since_last_hit_valid)
13115 			resp->sec_since_last_hit = __atomic_load_n
13116 			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13117 		return 0;
13118 	default:
13119 		return rte_flow_error_set(error, ENOTSUP,
13120 					  RTE_FLOW_ERROR_TYPE_ACTION,
13121 					  NULL,
13122 					  "action type query not supported");
13123 	}
13124 }
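
/*
 * Usage sketch (illustrative only, not part of the driver): querying a
 * shared AGE action through the generic rte_flow API. The handle and the
 * port id (0) are assumptions for the example.
 *
 *	struct rte_flow_query_age age_data;
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_shared_action_query(0, handle, &age_data, &error) == 0 &&
 *	    age_data.aged)
 *		printf("shared AGE action timed out\n");
 */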
13125 
13126 /**
13127  * Query a DV flow rule for its statistics via DevX.
13128  *
13129  * @param[in] dev
13130  *   Pointer to Ethernet device.
13131  * @param[in] flow
13132  *   Pointer to the sub flow.
13133  * @param[out] data
13134  *   Data retrieved by the query.
13135  * @param[out] error
13136  *   Perform verbose error reporting if not NULL.
13137  *
13138  * @return
13139  *   0 on success, a negative errno value otherwise and rte_errno is set.
13140  */
13141 static int
13142 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
13143 		    void *data, struct rte_flow_error *error)
13144 {
13145 	struct mlx5_priv *priv = dev->data->dev_private;
13146 	struct rte_flow_query_count *qc = data;
13147 
13148 	if (!priv->config.devx)
13149 		return rte_flow_error_set(error, ENOTSUP,
13150 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13151 					  NULL,
13152 					  "counters are not supported");
13153 	if (flow->counter) {
13154 		uint64_t pkts, bytes;
13155 		struct mlx5_flow_counter *cnt;
13156 
13157 		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
13158 						 NULL);
13159 		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
13160 					       &bytes);
13161 
13162 		if (err)
13163 			return rte_flow_error_set(error, -err,
13164 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13165 					NULL, "cannot read counters");
13166 		qc->hits_set = 1;
13167 		qc->bytes_set = 1;
13168 		qc->hits = pkts - cnt->hits;
13169 		qc->bytes = bytes - cnt->bytes;
13170 		if (qc->reset) {
13171 			cnt->hits = pkts;
13172 			cnt->bytes = bytes;
13173 		}
13174 		return 0;
13175 	}
13176 	return rte_flow_error_set(error, EINVAL,
13177 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13178 				  NULL,
13179 				  "counters are not available");
13180 }
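
/*
 * Usage sketch (illustrative only, not part of the driver): reading a flow
 * rule counter through the generic rte_flow API. The driver reports the
 * delta since the last reset, as computed above; setting .reset makes the
 * next query start from zero. The flow handle and the port id (0) are
 * assumptions for the example.
 *
 *	struct rte_flow_query_count query = { .reset = 1 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_query(0, flow, &count_action, &query, &error) == 0 &&
 *	    query.hits_set)
 *		printf("hits: %" PRIu64 "\n", query.hits);
 */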
13181 
13182 /**
13183  * Query a flow rule AGE action for aging information.
13184  *
13185  * @param[in] dev
13186  *   Pointer to Ethernet device.
13187  * @param[in] flow
13188  *   Pointer to the sub flow.
13189  * @param[out] data
13190  *   Data retrieved by the query.
13191  * @param[out] error
13192  *   Perform verbose error reporting if not NULL.
13193  *
13194  * @return
13195  *   0 on success, a negative errno value otherwise and rte_errno is set.
13196  */
13197 static int
13198 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
13199 		  void *data, struct rte_flow_error *error)
13200 {
13201 	struct rte_flow_query_age *resp = data;
13202 	struct mlx5_age_param *age_param;
13203 
13204 	if (flow->age) {
13205 		struct mlx5_aso_age_action *act =
13206 				     flow_aso_age_get_by_idx(dev, flow->age);
13207 
13208 		age_param = &act->age_params;
13209 	} else if (flow->counter) {
13210 		age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
13211 
13212 		if (!age_param || !age_param->timeout)
13213 			return rte_flow_error_set
13214 					(error, EINVAL,
13215 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13216 					 NULL, "cannot read age data");
13217 	} else {
13218 		return rte_flow_error_set(error, EINVAL,
13219 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13220 					  NULL, "age data not available");
13221 	}
13222 	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
13223 				     AGE_TMOUT ? 1 : 0;
13224 	resp->sec_since_last_hit_valid = !resp->aged;
13225 	if (resp->sec_since_last_hit_valid)
13226 		resp->sec_since_last_hit = __atomic_load_n
13227 			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13228 	return 0;
13229 }
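
/*
 * Usage sketch (illustrative only, not part of the driver): the same aging
 * data can be read per flow rule with rte_flow_query() and an AGE action.
 * The flow handle and the port id (0) are assumptions for the example.
 *
 *	struct rte_flow_query_age age_data;
 *	const struct rte_flow_action age_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_AGE,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_query(0, flow, &age_action, &age_data, &error) == 0 &&
 *	    age_data.sec_since_last_hit_valid)
 *		printf("idle for %u seconds\n", age_data.sec_since_last_hit);
 */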
13230 
13231 /**
13232  * Query a flow.
13233  *
13234  * @see rte_flow_query()
13235  * @see rte_flow_ops
13236  */
13237 static int
13238 flow_dv_query(struct rte_eth_dev *dev,
13239 	      struct rte_flow *flow,
13240 	      const struct rte_flow_action *actions,
13241 	      void *data,
13242 	      struct rte_flow_error *error)
13243 {
13244 	int ret = -EINVAL;
13245 
13246 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
13247 		switch (actions->type) {
13248 		case RTE_FLOW_ACTION_TYPE_VOID:
13249 			break;
13250 		case RTE_FLOW_ACTION_TYPE_COUNT:
13251 			ret = flow_dv_query_count(dev, flow, data, error);
13252 			break;
13253 		case RTE_FLOW_ACTION_TYPE_AGE:
13254 			ret = flow_dv_query_age(dev, flow, data, error);
13255 			break;
13256 		default:
13257 			return rte_flow_error_set(error, ENOTSUP,
13258 						  RTE_FLOW_ERROR_TYPE_ACTION,
13259 						  actions,
13260 						  "action not supported");
13261 		}
13262 	}
13263 	return ret;
13264 }
13265 
13266 /**
13267  * Destroy the meter table set.
13268  * Lock free (the mutex should be acquired by the caller).
13269  *
13270  * @param[in] dev
13271  *   Pointer to Ethernet device.
13272  * @param[in] tbl
13273  *   Pointer to the meter table set.
13274  *
13275  * @return
13276  *   Always 0.
13277  */
13278 static int
13279 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
13280 			struct mlx5_meter_domains_infos *tbl)
13281 {
13282 	struct mlx5_priv *priv = dev->data->dev_private;
13283 	struct mlx5_meter_domains_infos *mtd =
13284 				(struct mlx5_meter_domains_infos *)tbl;
13285 
13286 	if (!mtd || !priv->config.dv_flow_en)
13287 		return 0;
13288 	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
13289 		claim_zero(mlx5_flow_os_destroy_flow
13290 			   (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
13291 	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
13292 		claim_zero(mlx5_flow_os_destroy_flow
13293 			   (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
13294 	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
13295 		claim_zero(mlx5_flow_os_destroy_flow
13296 			   (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
13297 	if (mtd->egress.color_matcher)
13298 		claim_zero(mlx5_flow_os_destroy_flow_matcher
13299 			   (mtd->egress.color_matcher));
13300 	if (mtd->egress.any_matcher)
13301 		claim_zero(mlx5_flow_os_destroy_flow_matcher
13302 			   (mtd->egress.any_matcher));
13303 	if (mtd->egress.tbl)
13304 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
13305 	if (mtd->egress.sfx_tbl)
13306 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
13307 	if (mtd->ingress.color_matcher)
13308 		claim_zero(mlx5_flow_os_destroy_flow_matcher
13309 			   (mtd->ingress.color_matcher));
13310 	if (mtd->ingress.any_matcher)
13311 		claim_zero(mlx5_flow_os_destroy_flow_matcher
13312 			   (mtd->ingress.any_matcher));
13313 	if (mtd->ingress.tbl)
13314 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
13315 	if (mtd->ingress.sfx_tbl)
13316 		flow_dv_tbl_resource_release(MLX5_SH(dev),
13317 					     mtd->ingress.sfx_tbl);
13318 	if (mtd->transfer.color_matcher)
13319 		claim_zero(mlx5_flow_os_destroy_flow_matcher
13320 			   (mtd->transfer.color_matcher));
13321 	if (mtd->transfer.any_matcher)
13322 		claim_zero(mlx5_flow_os_destroy_flow_matcher
13323 			   (mtd->transfer.any_matcher));
13324 	if (mtd->transfer.tbl)
13325 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
13326 	if (mtd->transfer.sfx_tbl)
13327 		flow_dv_tbl_resource_release(MLX5_SH(dev),
13328 					     mtd->transfer.sfx_tbl);
13329 	if (mtd->drop_actn)
13330 		claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
13331 	mlx5_free(mtd);
13332 	return 0;
13333 }
13334 
13335 /* Number of meter flow actions: count and jump, or count and drop. */
13336 #define METER_ACTIONS 2
13337 
13338 /**
13339  * Create the meter table and suffix table for the specified domain.
13340  *
13341  * @param[in] dev
13342  *   Pointer to Ethernet device.
13343  * @param[in,out] mtb
13344  *   Pointer to DV meter table set.
13345  * @param[in] egress
13346  *   Table attribute indicating an egress domain table.
13347  * @param[in] transfer
13348  *   Table attribute indicating an FDB (transfer) domain table.
13349  * @param[in] color_reg_c_idx
13350  *   Reg C index for color match.
13351  *
13352  * @return
13353  *   0 on success, -1 otherwise and rte_errno is set.
13354  */
13355 static int
13356 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
13357 			   struct mlx5_meter_domains_infos *mtb,
13358 			   uint8_t egress, uint8_t transfer,
13359 			   uint32_t color_reg_c_idx)
13360 {
13361 	struct mlx5_priv *priv = dev->data->dev_private;
13362 	struct mlx5_dev_ctx_shared *sh = priv->sh;
13363 	struct mlx5_flow_dv_match_params mask = {
13364 		.size = sizeof(mask.buf),
13365 	};
13366 	struct mlx5_flow_dv_match_params value = {
13367 		.size = sizeof(value.buf),
13368 	};
13369 	struct mlx5dv_flow_matcher_attr dv_attr = {
13370 		.type = IBV_FLOW_ATTR_NORMAL,
13371 		.priority = 0,
13372 		.match_criteria_enable = 0,
13373 		.match_mask = (void *)&mask,
13374 	};
13375 	void *actions[METER_ACTIONS];
13376 	struct mlx5_meter_domain_info *dtb;
13377 	struct rte_flow_error error;
13378 	int i = 0;
13379 	int ret;
13380 
13381 	if (transfer)
13382 		dtb = &mtb->transfer;
13383 	else if (egress)
13384 		dtb = &mtb->egress;
13385 	else
13386 		dtb = &mtb->ingress;
13387 	/* Create the meter table with METER level. */
13388 	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
13389 					    egress, transfer, false, NULL, 0,
13390 					    0, &error);
13391 	if (!dtb->tbl) {
13392 		DRV_LOG(ERR, "Failed to create meter policer table.");
13393 		return -1;
13394 	}
13395 	/* Create the meter suffix table with SUFFIX level. */
13396 	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
13397 					    MLX5_FLOW_TABLE_LEVEL_SUFFIX,
13398 					    egress, transfer, false, NULL, 0,
13399 					    0, &error);
13400 	if (!dtb->sfx_tbl) {
13401 		DRV_LOG(ERR, "Failed to create meter suffix table.");
13402 		return -1;
13403 	}
13404 	/* Create matchers, Any and Color. */
13405 	dv_attr.priority = 3;
13406 	dv_attr.match_criteria_enable = 0;
13407 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
13408 					       &dtb->any_matcher);
13409 	if (ret) {
13410 		DRV_LOG(ERR, "Failed to create meter"
13411 			     " policer default matcher.");
13412 		goto error_exit;
13413 	}
13414 	dv_attr.priority = 0;
13415 	dv_attr.match_criteria_enable =
13416 				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
13417 	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
13418 			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
13419 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
13420 					       &dtb->color_matcher);
13421 	if (ret) {
13422 		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
13423 		goto error_exit;
13424 	}
13425 	if (mtb->count_actns[RTE_MTR_DROPPED])
13426 		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
13427 	actions[i++] = mtb->drop_actn;
13428 	/* Default rule: lowest priority, match any, actions: drop. */
13429 	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
13430 				       actions,
13431 				       &dtb->policer_rules[RTE_MTR_DROPPED]);
13432 	if (ret) {
13433 		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
13434 		goto error_exit;
13435 	}
13436 	return 0;
13437 error_exit:
13438 	return -1;
13439 }
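
/*
 * Resulting per-domain layout (illustrative summary of the code above):
 *
 *	METER level table (dtb->tbl)
 *	  priority 0: color matcher - matches the color register; one rule
 *	              per color is added later by the policer rules.
 *	  priority 3: any matcher   - default rule: count (if any) + drop.
 *	                  |
 *	                  v  (jump, for colors that are not dropped)
 *	SUFFIX level table (dtb->sfx_tbl)
 */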
13440 
13441 /**
13442  * Create the needed meter and suffix tables.
13443  * Lock free (the mutex should be acquired by the caller).
13444  *
13445  * @param[in] dev
13446  *   Pointer to Ethernet device.
13447  * @param[in] fm
13448  *   Pointer to the flow meter.
13449  *
13450  * @return
13451  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
13452  */
13453 static struct mlx5_meter_domains_infos *
13454 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
13455 		       const struct mlx5_flow_meter *fm)
13456 {
13457 	struct mlx5_priv *priv = dev->data->dev_private;
13458 	struct mlx5_meter_domains_infos *mtb;
13459 	int ret;
13460 	int i;
13461 
13462 	if (!priv->mtr_en) {
13463 		rte_errno = ENOTSUP;
13464 		return NULL;
13465 	}
13466 	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
13467 	if (!mtb) {
13468 		DRV_LOG(ERR, "Failed to allocate memory for meter.");
13469 		return NULL;
13470 	}
13471 	/* Create the meter count actions. */
13472 	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
13473 		struct mlx5_flow_counter *cnt;
13474 		if (!fm->policer_stats.cnt[i])
13475 			continue;
13476 		cnt = flow_dv_counter_get_by_idx(dev,
13477 		      fm->policer_stats.cnt[i], NULL);
13478 		mtb->count_actns[i] = cnt->action;
13479 	}
13480 	/* Create drop action. */
13481 	ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
13482 	if (ret) {
13483 		DRV_LOG(ERR, "Failed to create drop action.");
13484 		goto error_exit;
13485 	}
13486 	/* Egress meter table. */
13487 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
13488 	if (ret) {
13489 		DRV_LOG(ERR, "Failed to prepare egress meter table.");
13490 		goto error_exit;
13491 	}
13492 	/* Ingress meter table. */
13493 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
13494 	if (ret) {
13495 		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
13496 		goto error_exit;
13497 	}
13498 	/* FDB meter table. */
13499 	if (priv->config.dv_esw_en) {
13500 		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
13501 						 priv->mtr_color_reg);
13502 		if (ret) {
13503 			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
13504 			goto error_exit;
13505 		}
13506 	}
13507 	return mtb;
13508 error_exit:
13509 	flow_dv_destroy_mtr_tbl(dev, mtb);
13510 	return NULL;
13511 }
13512 
13513 /**
13514  * Destroy domain policer rule.
13515  *
13516  * @param[in] dt
13517  *   Pointer to domain table.
13518  */
13519 static void
13520 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
13521 {
13522 	int i;
13523 
13524 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
13525 		if (dt->policer_rules[i]) {
13526 			claim_zero(mlx5_flow_os_destroy_flow
13527 				   (dt->policer_rules[i]));
13528 			dt->policer_rules[i] = NULL;
13529 		}
13530 	}
13531 	if (dt->jump_actn) {
13532 		claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
13533 		dt->jump_actn = NULL;
13534 	}
13535 }
13536 
13537 /**
13538  * Destroy policer rules.
13539  *
13540  * @param[in] dev
13541  *   Pointer to Ethernet device.
13542  * @param[in] fm
13543  *   Pointer to flow meter structure.
13544  * @param[in] attr
13545  *   Pointer to flow attributes.
13546  *
13547  * @return
13548  *   Always 0.
13549  */
13550 static int
13551 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
13552 			      const struct mlx5_flow_meter *fm,
13553 			      const struct rte_flow_attr *attr)
13554 {
13555 	struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
13556 
13557 	if (!mtb)
13558 		return 0;
13559 	if (attr->egress)
13560 		flow_dv_destroy_domain_policer_rule(&mtb->egress);
13561 	if (attr->ingress)
13562 		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
13563 	if (attr->transfer)
13564 		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
13565 	return 0;
13566 }
13567 
13568 /**
13569  * Create the meter policer rules for the specified domain.
13570  *
13571  * @param[in] fm
13572  *   Pointer to flow meter structure.
13573  * @param[in] dtb
13574  *   Pointer to the DV meter domain table info.
13575  * @param[in] mtr_reg_c
13576  *   Color match REG_C.
13577  *
13578  * @return
13579  *   0 on success, -1 otherwise.
13580  */
13581 static int
13582 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
13583 				    struct mlx5_meter_domain_info *dtb,
13584 				    uint8_t mtr_reg_c)
13585 {
13586 	struct mlx5_flow_dv_match_params matcher = {
13587 		.size = sizeof(matcher.buf),
13588 	};
13589 	struct mlx5_flow_dv_match_params value = {
13590 		.size = sizeof(value.buf),
13591 	};
13592 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
13593 	void *actions[METER_ACTIONS];
13594 	int i;
13595 	int ret = 0;
13596 
13597 	/* Create jump action. */
13598 	if (!dtb->jump_actn)
13599 		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
13600 				(dtb->sfx_tbl->obj, &dtb->jump_actn);
13601 	if (ret) {
13602 		DRV_LOG(ERR, "Failed to create policer jump action.");
13603 		goto error;
13604 	}
13605 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
13606 		int j = 0;
13607 
13608 		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
13609 				       rte_col_2_mlx5_col(i), UINT8_MAX);
13610 		if (mtb->count_actns[i])
13611 			actions[j++] = mtb->count_actns[i];
13612 		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
13613 			actions[j++] = mtb->drop_actn;
13614 		else
13615 			actions[j++] = dtb->jump_actn;
13616 		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
13617 					       (void *)&value, j, actions,
13618 					       &dtb->policer_rules[i]);
13619 		if (ret) {
13620 			DRV_LOG(ERR, "Failed to create policer rule.");
13621 			goto error;
13622 		}
13623 	}
13624 	return 0;
13625 error:
13626 	rte_errno = errno;
13627 	return -1;
13628 }
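
/*
 * The loop above installs one rule per policer color (illustrative
 * summary): each rule matches the color register written by the meter,
 * optionally counts, then either drops the packet
 * (MTR_POLICER_ACTION_DROP) or jumps to the suffix table where the rest
 * of the original flow continues.
 */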
13629 
13630 /**
13631  * Create policer rules.
13632  *
13633  * @param[in] dev
13634  *   Pointer to Ethernet device.
13635  * @param[in] fm
13636  *   Pointer to flow meter structure.
13637  * @param[in] attr
13638  *   Pointer to flow attributes.
13639  *
13640  * @return
13641  *   0 on success, -1 otherwise.
13642  */
13643 static int
13644 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
13645 			     struct mlx5_flow_meter *fm,
13646 			     const struct rte_flow_attr *attr)
13647 {
13648 	struct mlx5_priv *priv = dev->data->dev_private;
13649 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
13650 	int ret;
13651 
13652 	if (attr->egress) {
13653 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
13654 						priv->mtr_color_reg);
13655 		if (ret) {
13656 			DRV_LOG(ERR, "Failed to create egress policer.");
13657 			goto error;
13658 		}
13659 	}
13660 	if (attr->ingress) {
13661 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
13662 						priv->mtr_color_reg);
13663 		if (ret) {
13664 			DRV_LOG(ERR, "Failed to create ingress policer.");
13665 			goto error;
13666 		}
13667 	}
13668 	if (attr->transfer) {
13669 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
13670 						priv->mtr_color_reg);
13671 		if (ret) {
13672 			DRV_LOG(ERR, "Failed to create transfer policer.");
13673 			goto error;
13674 		}
13675 	}
13676 	return 0;
13677 error:
13678 	flow_dv_destroy_policer_rules(dev, fm, attr);
13679 	return -1;
13680 }
13681 
13682 /**
13683  * Validate batch counter support on the root table.
13684  *
13685  * Create a simple flow with an invalid counter and a drop action on the root
13686  * table to check whether batch counters with offset are supported there.
13687  *
13688  * @param[in] dev
13689  *   Pointer to rte_eth_dev structure.
13690  *
13691  * @return
13692  *   0 on success, a negative errno value otherwise and rte_errno is set.
13693  */
13694 int
13695 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
13696 {
13697 	struct mlx5_priv *priv = dev->data->dev_private;
13698 	struct mlx5_dev_ctx_shared *sh = priv->sh;
13699 	struct mlx5_flow_dv_match_params mask = {
13700 		.size = sizeof(mask.buf),
13701 	};
13702 	struct mlx5_flow_dv_match_params value = {
13703 		.size = sizeof(value.buf),
13704 	};
13705 	struct mlx5dv_flow_matcher_attr dv_attr = {
13706 		.type = IBV_FLOW_ATTR_NORMAL,
13707 		.priority = 0,
13708 		.match_criteria_enable = 0,
13709 		.match_mask = (void *)&mask,
13710 	};
13711 	void *actions[2] = { 0 };
13712 	struct mlx5_flow_tbl_resource *tbl = NULL;
13713 	struct mlx5_devx_obj *dcs = NULL;
13714 	void *matcher = NULL;
13715 	void *flow = NULL;
13716 	int ret = -1;
13717 
13718 	tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
13719 	if (!tbl)
13720 		goto err;
13721 	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
13722 	if (!dcs)
13723 		goto err;
13724 	ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
13725 						    &actions[0]);
13726 	if (ret)
13727 		goto err;
13728 	actions[1] = priv->drop_queue.hrxq->action;
13729 	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
13730 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
13731 					       &matcher);
13732 	if (ret)
13733 		goto err;
13734 	ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
13735 				       actions, &flow);
13736 err:
13737 	/*
13738 	 * If a batch counter with offset is not supported, the driver does not
13739 	 * validate the invalid offset value and flow creation succeeds. In that
13740 	 * case, batch counters are not supported on the root table.
13741 	 *
13742 	 * Otherwise, if flow creation fails, counter offset is supported.
13743 	 */
13744 	if (flow) {
13745 		DRV_LOG(INFO, "Batch counter is not supported in root "
13746 			      "table. Switch to fallback mode.");
13747 		rte_errno = ENOTSUP;
13748 		ret = -rte_errno;
13749 		claim_zero(mlx5_flow_os_destroy_flow(flow));
13750 	} else {
13751 		/* Check matcher to make sure validate fail at flow create. */
13752 		if (!matcher || errno != EINVAL)
13753 			DRV_LOG(ERR, "Unexpected error in counter offset "
13754 				     "support detection");
13755 		ret = 0;
13756 	}
13757 	if (actions[0])
13758 		claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
13759 	if (matcher)
13760 		claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
13761 	if (tbl)
13762 		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13763 	if (dcs)
13764 		claim_zero(mlx5_devx_cmd_destroy(dcs));
13765 	return ret;
13766 }
13767 
13768 /**
13769  * Query a devx counter.
13770  *
13771  * @param[in] dev
13772  *   Pointer to the Ethernet device structure.
13773  * @param[in] counter
13774  *   Index to the flow counter.
13775  * @param[in] clear
13776  *   Set to clear the counter statistics.
13777  * @param[out] pkts
13778  *   The statistics value of packets.
13779  * @param[out] bytes
13780  *   The statistics value of bytes.
13781  *
13782  * @return
13783  *   0 on success, otherwise return -1.
13784  */
13785 static int
13786 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
13787 		      uint64_t *pkts, uint64_t *bytes)
13788 {
13789 	struct mlx5_priv *priv = dev->data->dev_private;
13790 	struct mlx5_flow_counter *cnt;
13791 	uint64_t inn_pkts, inn_bytes;
13792 	int ret;
13793 
13794 	if (!priv->config.devx)
13795 		return -1;
13796 
13797 	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
13798 	if (ret)
13799 		return -1;
13800 	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
13801 	*pkts = inn_pkts - cnt->hits;
13802 	*bytes = inn_bytes - cnt->bytes;
13803 	if (clear) {
13804 		cnt->hits = inn_pkts;
13805 		cnt->bytes = inn_bytes;
13806 	}
13807 	return 0;
13808 }
13809 
13810 /**
13811  * Get aged-out flows.
13812  *
13813  * @param[in] dev
13814  *   Pointer to the Ethernet device structure.
13815  * @param[in] context
13816  *   The address of an array of pointers to the aged-out flows contexts.
13817  * @param[in] nb_contexts
13818  *   The length of context array pointers.
13819  * @param[out] error
13820  *   Perform verbose error reporting if not NULL. Initialized in case of
13821  *   error only.
13822  *
13823  * @return
13824  *   The number of contexts retrieved on success, otherwise a negative
13825  *   errno value. If nb_contexts is 0, return the number of all aged
13826  *   contexts. If nb_contexts is not 0, return the number of aged flows
13827  *   reported in the context array.
13829  */
13830 static int
13831 flow_get_aged_flows(struct rte_eth_dev *dev,
13832 		    void **context,
13833 		    uint32_t nb_contexts,
13834 		    struct rte_flow_error *error)
13835 {
13836 	struct mlx5_priv *priv = dev->data->dev_private;
13837 	struct mlx5_age_info *age_info;
13838 	struct mlx5_age_param *age_param;
13839 	struct mlx5_flow_counter *counter;
13840 	struct mlx5_aso_age_action *act;
13841 	int nb_flows = 0;
13842 
13843 	if (nb_contexts && !context)
13844 		return rte_flow_error_set(error, EINVAL,
13845 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13846 					  NULL, "empty context");
13847 	age_info = GET_PORT_AGE_INFO(priv);
13848 	rte_spinlock_lock(&age_info->aged_sl);
13849 	LIST_FOREACH(act, &age_info->aged_aso, next) {
13850 		nb_flows++;
13851 		if (nb_contexts) {
13852 			context[nb_flows - 1] =
13853 						act->age_params.context;
13854 			if (!(--nb_contexts))
13855 				break;
13856 		}
13857 	}
13858 	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
13859 		nb_flows++;
13860 		if (nb_contexts) {
13861 			age_param = MLX5_CNT_TO_AGE(counter);
13862 			context[nb_flows - 1] = age_param->context;
13863 			if (!(--nb_contexts))
13864 				break;
13865 		}
13866 	}
13867 	rte_spinlock_unlock(&age_info->aged_sl);
13868 	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13869 	return nb_flows;
13870 }
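
/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * usually call rte_flow_get_aged_flows() twice - once with nb_contexts == 0
 * to learn how many flows aged out, then with an array large enough to
 * collect the user contexts. The port id (0) is an assumption.
 *
 *	struct rte_flow_error error;
 *	int nb = rte_flow_get_aged_flows(0, NULL, 0, &error);
 *
 *	if (nb > 0) {
 *		void **contexts = calloc(nb, sizeof(*contexts));
 *
 *		if (contexts != NULL) {
 *			nb = rte_flow_get_aged_flows(0, contexts, nb, &error);
 *			// ... destroy or refresh the aged flows here ...
 *			free(contexts);
 *		}
 *	}
 */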
13871 
13872 /*
13873  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
13874  */
13875 static uint32_t
13876 flow_dv_counter_allocate(struct rte_eth_dev *dev)
13877 {
13878 	return flow_dv_counter_alloc(dev, 0);
13879 }
13880 
13881 /**
13882  * Validate shared action.
13883  * Dispatcher for action type specific validation.
13884  *
13885  * @param[in] dev
13886  *   Pointer to the Ethernet device structure.
13887  * @param[in] conf
13888  *   Shared action configuration.
13889  * @param[in] action
13890  *   The shared action object to validate.
13891  * @param[out] error
13892  *   Perform verbose error reporting if not NULL. Initialized in case of
13893  *   error only.
13894  *
13895  * @return
13896  *   0 on success, otherwise negative errno value.
13897  */
13898 static int
13899 flow_dv_action_validate(struct rte_eth_dev *dev,
13900 			const struct rte_flow_shared_action_conf *conf,
13901 			const struct rte_flow_action *action,
13902 			struct rte_flow_error *err)
13903 {
13904 	struct mlx5_priv *priv = dev->data->dev_private;
13905 
13906 	RTE_SET_USED(conf);
13907 	switch (action->type) {
13908 	case RTE_FLOW_ACTION_TYPE_RSS:
13909 		/*
13910 		 * priv->obj_ops is set according to driver capabilities.
13911 		 * When DevX capabilities are sufficient, it is set to
13912 		 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
13913 		 * ibv_obj_ops doesn't support the ind_table_modify operation,
13914 		 * so in that case the shared RSS action can't be used.
13916 		 */
13917 		if (priv->obj_ops.ind_table_modify == NULL)
13918 			return rte_flow_error_set
13919 					(err, ENOTSUP,
13920 					 RTE_FLOW_ERROR_TYPE_ACTION,
13921 					 NULL,
13922 					 "shared RSS action not supported");
13923 		return mlx5_validate_action_rss(dev, action, err);
13924 	case RTE_FLOW_ACTION_TYPE_AGE:
13925 		if (!priv->sh->aso_age_mng)
13926 			return rte_flow_error_set(err, ENOTSUP,
13927 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13928 						NULL,
13929 					     "shared age action not supported");
13930 		return flow_dv_validate_action_age(0, action, dev, err);
13931 	default:
13932 		return rte_flow_error_set(err, ENOTSUP,
13933 					  RTE_FLOW_ERROR_TYPE_ACTION,
13934 					  NULL,
13935 					  "action type not supported");
13936 	}
13937 }
13938 
13939 static int
13940 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
13941 {
13942 	struct mlx5_priv *priv = dev->data->dev_private;
13943 	int ret = 0;
13944 
13945 	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
13946 		ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
13947 						flags);
13948 		if (ret != 0)
13949 			return ret;
13950 	}
13951 	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
13952 		ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
13953 		if (ret != 0)
13954 			return ret;
13955 	}
13956 	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
13957 		ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
13958 		if (ret != 0)
13959 			return ret;
13960 	}
13961 	return 0;
13962 }
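
/*
 * Usage sketch (illustrative only, not part of the driver): this op backs
 * the PMD-specific rte_pmd_mlx5_sync_flow() entry point declared in
 * rte_pmd_mlx5.h. The port id (0) is an assumption for the example.
 *
 *	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_FDB;
 *	int ret = rte_pmd_mlx5_sync_flow(0, domains);
 */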
13963 
13964 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
13965 	.validate = flow_dv_validate,
13966 	.prepare = flow_dv_prepare,
13967 	.translate = flow_dv_translate,
13968 	.apply = flow_dv_apply,
13969 	.remove = flow_dv_remove,
13970 	.destroy = flow_dv_destroy,
13971 	.query = flow_dv_query,
13972 	.create_mtr_tbls = flow_dv_create_mtr_tbl,
13973 	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
13974 	.create_policer_rules = flow_dv_create_policer_rules,
13975 	.destroy_policer_rules = flow_dv_destroy_policer_rules,
13976 	.counter_alloc = flow_dv_counter_allocate,
13977 	.counter_free = flow_dv_counter_free,
13978 	.counter_query = flow_dv_counter_query,
13979 	.get_aged_flows = flow_get_aged_flows,
13980 	.action_validate = flow_dv_action_validate,
13981 	.action_create = flow_dv_action_create,
13982 	.action_destroy = flow_dv_action_destroy,
13983 	.action_update = flow_dv_action_update,
13984 	.action_query = flow_dv_action_query,
13985 	.sync_domain = flow_dv_sync_domain,
13986 };
13987 
13988 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
13989 
13990